        # For a molecule with a single radical electron, the spin multiplicity is 2
# For higher-order radicals the highest allowed spin multiplicity is assumed
conformer.spinMultiplicity = molecule.getRadicalCount() + 1
# No need to determine rotational and vibrational modes for single atoms
if len(molecule.atoms) < 2:
return (conformer, None, None)
linear = molecule.isLinear()
numRotors = molecule.countInternalRotors()
numVibrations = 3 * len(molecule.atoms) - (5 if linear else 6) - numRotors
# Get characteristic frequency groups and the associated frequencies
groupCount = self.getFrequencyGroups(molecule)
frequencies = []
for entry, count in groupCount.iteritems():
if count != 0 and entry.data is not None: frequencies.extend(entry.data.generateFrequencies(count))
# Check that we have the right number of degrees of freedom specified
if len(frequencies) > numVibrations:
# We have too many vibrational modes
difference = len(frequencies) - numVibrations
# First try to remove hindered rotor modes until the proper number of modes remain
if numRotors > difference:
numRotors -= difference
numVibrations = len(frequencies)
logging.warning('For {0}, more characteristic frequencies were generated than vibrational modes allowed. Removed {1:d} internal rotors to compensate.'.format(molecule.toSMILES(), difference))
# If that won't work, turn off functional groups until the problem is underspecified again
else:
groupsRemoved = 0
freqsRemoved = 0
freqCount = len(frequencies)
while freqCount > numVibrations:
minDegeneracy, minEntry = min([(entry.data.symmetry, entry) for entry in groupCount if groupCount[entry] > 0])
if groupCount[minEntry] > 1:
groupCount[minEntry] -= 1
else:
del groupCount[minEntry]
groupsRemoved += 1
freqsRemoved += minDegeneracy
freqCount -= minDegeneracy
# Log warning
logging.warning('For {0}, more characteristic frequencies were generated than vibrational modes allowed. Removed {1:d} groups ({2:d} frequencies) to compensate.'.format(molecule.toSMILES(), groupsRemoved, freqsRemoved))
# Regenerate characteristic frequencies
frequencies = []
for entry, count in groupCount.iteritems():
if count != 0: frequencies.extend(entry.data.generateFrequencies(count))
# Subtract out contributions to heat capacity from the group frequencies
Tlist = numpy.arange(300.0, 1501.0, 100.0, numpy.float64)
Cv = numpy.array([thermoModel.getHeatCapacity(T) / constants.R for T in Tlist], numpy.float64)
ho = HarmonicOscillator(frequencies=(frequencies,"cm^-1"))
for i in range(Tlist.shape[0]):
Cv[i] -= ho.getHeatCapacity(Tlist[i]) / constants.R
# Subtract out translational modes
Cv -= 1.5
# Subtract out external rotational modes
Cv -= (1.5 if not linear else 1.0)
# Subtract out PV term (Cp -> Cv)
Cv -= 1.0
# Fit remaining frequencies and hindered rotors to the heat capacity data
from statmechfit import fitStatmechToHeatCapacity
modes = fitStatmechToHeatCapacity(Tlist, Cv, numVibrations - len(frequencies), numRotors, molecule)
for mode in modes:
if isinstance(mode, HarmonicOscillator):
uncertainties = [0 for f in frequencies] # probably shouldn't be zero
frequencies.extend(mode.frequencies.value_si)
uncertainties.extend(mode.frequencies.uncertainty)
mode.frequencies.value_si = numpy.array(frequencies, numpy.float)
mode.frequencies.uncertainty = numpy.array(uncertainties, numpy.float)
break
else:
modes.insert(0, HarmonicOscillator(frequencies=(frequencies,"cm^-1")))
conformer.modes = modes
return (conformer, None, None)
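
# Hedged illustration (not part of the original module; the helper name is hypothetical):
# the mode bookkeeping above reduces to 3N - (5 or 6) - n_rotors vibrational modes, and the
# heat capacity passed to the fit has the generated group-frequency contribution, translation
# (1.5 R), external rotation (1.5 R, or 1.0 R if linear) and the Cp -> Cv term (1.0 R)
# subtracted first.
def _example_count_vibrations(n_atoms, linear, n_rotors):
    """Illustrative only: e.g. 8 atoms, nonlinear, 1 internal rotor -> 3*8 - 6 - 1 = 17."""
    return 3 * n_atoms - (5 if linear else 6) - n_rotors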
################################################################################
class StatmechDatabase(object):
"""
A class for working with the RMG statistical mechanics (frequencies) database.
"""
def __init__(self):
self.depository = {}
self.libraries = {}
self.groups = {}
self.libraryOrder = []
self.local_context = {
'HarmonicOscillator': HarmonicOscillator,
'LinearRotor': LinearRotor,
'NonlinearRotor': NonlinearRotor,
'HinderedRotor': HinderedRotor,
'IdealGasTranslation': IdealGasTranslation,
'GroupFrequencies': GroupFrequencies,
}
self.global_context = {}
def __reduce__(self):
"""
A helper function used when pickling a StatmechDatabase object.
"""
d = {
'depository': self.depository,
'libraries': self.libraries,
'groups': self.groups,
'libraryOrder': self.libraryOrder,
}
return (StatmechDatabase, (), d)
def __setstate__(self, d):
"""
A helper function used when unpickling a StatmechDatabase object.
"""
self.depository = d['depository']
self.libraries = d['libraries']
self.groups = d['groups']
self.libraryOrder = d['libraryOrder']
def load(self, path, libraries=None, depository=True):
"""
Load the statmech database from the given `path` on disk, where `path`
        points to the top-level folder of the statmech database.
"""
if depository:
self.loadDepository(os.path.join(path, 'depository'))
else:
self.depository = {}
self.loadLibraries(os.path.join(path, 'libraries'), libraries)
self.loadGroups(os.path.join(path, 'groups'))
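    # Hedged usage note (the path below is hypothetical): a typical call sequence is
    #     database = StatmechDatabase()
    #     database.load('/path/to/RMG-database/input/statmech')
    # which populates self.depository, self.libraries (and libraryOrder), and self.groups in turn.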
def loadDepository(self, path):
"""
        Load the statmech depository from the given `path` on disk, where `path`
        points to the top-level folder of the statmech depository.
"""
self.depository = {}
self.depository['depository'] = StatmechDepository().load(os.path.join(path, 'depository.py'), self.local_context, self.global_context)
def loadLibraries(self, path, libraries=None):
"""
        Load the statmech libraries from the given `path` on disk, where `path`
        points to the top-level folder of the statmech libraries.
"""
self.libraries = {}; self.libraryOrder = []
for (root, dirs, files) in os.walk(os.path.join(path)):
for f in files:
name, ext = os.path.splitext(f)
if ext.lower() == '.py' and (libraries is None or name in libraries):
logging.info('Loading frequencies library from {0} in {1}...'.format(f, root))
library = StatmechLibrary()
library.load(os.path.join(root, f), self.local_context, self.global_context)
library.label = os.path.splitext(f)[0]
self.libraries[library.label] = library
self.libraryOrder.append(library.label)
if libraries is not None:
self.libraryOrder = libraries
def loadGroups(self, path):
"""
        Load the statmech groups from the given `path` on disk, where `path`
        points to the top-level folder of the statmech groups.
"""
logging.info('Loading frequencies group database from {0}...'.format(path))
self.groups = {}
self.groups['groups'] = StatmechGroups().load(os.path.join(path, 'groups.py' ), self.local_context, self.global_context)
def save(self, path):
"""
Save the statmech database to the given `path` on disk, where `path`
points to the top-level folder of the statmech database.
"""
path = os.path.abspath(path)
if not os.path.exists(path): os.mkdir(path)
self.saveDepository(os.path.join(path, 'depository'))
self.saveLibraries(os.path.join(path, 'libraries'))
self.saveGroups(os.path.join(path, 'groups'))
def saveDepository(self, path):
"""
Save the statmech depository to the given `path` on disk, where `path`
points to the top-level folder of the statmech depository.
"""
if not os.path.exists(path): os.mkdir(path)
for name, depository in self.depository.iteritems():
depository.save(os.path.join(path, name + '.py'))
def saveLibraries(self, path):
"""
Save the statmech libraries to the given `path` on disk, where `path`
points to the top-level folder of the statmech libraries.
"""
if not os.path.exists(path): os.mkdir(path)
for library in self.libraries.values():
library.save(os.path.join(path, '{0}.py'.format(library.label)))
def saveGroups(self, path):
"""
Save the statmech groups to the given `path` on disk, where `path`
points to the top-level folder of the statmech groups.
"""
if not os.path.exists(path): os.mkdir(path)
for name, groups in self.groups.iteritems():
groups.save(os.path.join(path, name + '.py'))
def loadOld(self, path):
"""
        Load the old RMG statmech (frequencies) database from the given `path` on disk, where
`path` points to the top-level folder of the old RMG database.
"""
# The old database does not have a depository, so create an empty one
self.depository = {}
self.depository['depository'] = StatmechDepository(label='depository', name='Statmech Depository')
for (root, dirs, files) in os.walk(os.path.join(path, 'frequencies_libraries')):
if os.path.exists(os.path.join(root, 'Dictionary.txt')) and os.path.exists(os.path.join(root, 'Library.txt')):
library = StatmechLibrary(label=os.path.basename(root), name=os.path.basename(root))
library.loadOld(
dictstr = os.path.join(root, 'Dictionary.txt'),
treestr = '',
libstr = os.path.join(root, 'Library.txt'),
numParameters = -1,
numLabels = 1,
pattern = False,
)
library.label = os.path.basename(root)
self.libraries[library.label] = library
self.groups['groups'] = StatmechGroups(label='group', name='Functional Group Values').loadOld(
dictstr = os.path.join(path, 'frequencies_groups', 'Dictionary.txt'),
treestr = os.path.join(path, 'frequencies_groups', 'Tree.txt'),
libstr = os.path.join(path, 'frequencies_groups', 'Library.txt'),
numParameters = -1,
numLabels = 1,
pattern = True,
)
def saveOld(self, path):
"""
        Save the old RMG statmech (frequencies) database to the given `path` on disk, where
`path` points to the top-level folder of the old RMG database.
"""
# Depository not used in old database, so it is not saved
librariesPath = os.path.join(path, 'frequencies_libraries')
for library in self.libraries.values():
if not os.path.exists(librariesPath): os.mkdir(librariesPath)
libraryPath = os.path.join(librariesPath, library.label)
if not os.path.exists(libraryPath): os.mkdir(libraryPath)
library.saveOld(
dictstr = os.path.join(libraryPath, 'Dictionary.txt'),
treestr = '',
libstr = os.path.join(libraryPath, 'Library.txt'),
)
groupsPath = os.path.join(path, 'frequencies_groups')
if not os.path.exists(groupsPath): os.mkdir(groupsPath)
        self.groups['groups'].saveOld(
dictstr = os.path.join(groupsPath, 'Dictionary.txt'),
treestr = os.path.join(groupsPath, 'Tree.txt'),
libstr = os.path.join(groupsPath, 'Library.txt'),
)
def getStatmechData(self, molecule, thermoModel=None):
"""
        Return the statistical mechanics parameters for a given :class:`Molecule`
        object `molecule`. This function first searches the loaded libraries
        in order, returning the first match found, before falling back to
        estimation via characteristic group frequencies.
"""
statmechModel = None
# Check the libraries in order first; return the first successful match
for label in self.libraryOrder:
statmechModel = self.getStatmechDataFromLibrary(molecule, self.libraries[label])
if statmechModel: break
else:
            # Statmech data not found in any loaded libraries, so estimate
statmechModel = self.getStatmechDataFromGroups(molecule, thermoModel)
return statmechModel[0]
def getStatmechDataFromDepository(self, molecule):
"""
Return statmech data for the given :class:`Molecule` object `molecule`
by searching the entries in the depository.
Returns a list of tuples (statmechData, depository, entry).
"""
items = []
for name, depository in self.depository.iteritems():
for label, entry in depository.entries.iteritems():
if molecule.isIsomorphic(entry.item):
items.append((entry.data, self.depository[name], entry))
return items
def getStatmechDataFromLibrary(self, molecule, library):
"""
Return statmech data for the given :class:`Molecule` object `molecule`
by searching the entries in the specified :class:`StatmechLibrary` object
`library`. Returns ``None`` if no data was found.
"""
for label, entry in library.entries.iteritems():
if molecule.isIsomorphic(entry.item):
return (entry.data, library, entry)
return None
def getStatmechDataFromGroups(self, molecule, thermoModel):
"""
Return statmech data for the given :class:`Molecule` object `molecule`
        by estimating using characteristic
uid, ids, context=None):
'''
Confirm the vouchers given in ids and create the journal entries for each of them
'''
if context is None:
context = {}
move_pool = self.pool.get('account.move')
move_line_pool = self.pool.get('account.move.line')
for voucher in self.browse(cr, uid, ids, context=context):
local_context = dict(context, force_company=voucher.journal_id.company_id.id)
if voucher.move_id:
continue
company_currency = self._get_company_currency(cr, uid, voucher.id, context)
current_currency = self._get_current_currency(cr, uid, voucher.id, context)
            # Select the context to use depending on whether this is a multi-currency case or not
context = self._sel_context(cr, uid, voucher.id, context)
# But for the operations made by _convert_amount, we always need to give the date in the context
ctx = context.copy()
ctx.update({'date': voucher.date})
# Create the account move record.
move_id = move_pool.create(cr, uid, self.account_move_get(cr, uid, voucher.id, context=context), context=context)
# Get the name of the account_move just created
name = move_pool.browse(cr, uid, move_id, context=context).name
# Create the first line of the voucher
move_line_id = move_line_pool.create(cr, uid, self.first_move_line_get(cr,uid,voucher.id, move_id, company_currency, current_currency, local_context), local_context)
move_line_brw = move_line_pool.browse(cr, uid, move_line_id, context=context)
line_total = move_line_brw.debit - move_line_brw.credit
rec_list_ids = []
if voucher.type == 'sale':
line_total = line_total - self._convert_amount(cr, uid, voucher.tax_amount, voucher.id, context=ctx)
elif voucher.type == 'purchase':
line_total = line_total + self._convert_amount(cr, uid, voucher.tax_amount, voucher.id, context=ctx)
# Create one move line per voucher line where amount is not 0.0
line_total, rec_list_ids = self.voucher_move_line_create(cr, uid, voucher.id, line_total, move_id, company_currency, current_currency, context)
# Create the writeoff line if needed
ml_writeoff = self.writeoff_move_line_get(cr, uid, voucher.id, line_total, move_id, name, company_currency, current_currency, local_context)
if ml_writeoff:
move_line_pool.create(cr, uid, ml_writeoff, local_context)
# We post the voucher.
self.write(cr, uid, [voucher.id], {
'move_id': move_id,
'state': 'posted',
'number': name,
})
if voucher.journal_id.entry_posted:
move_pool.post(cr, uid, [move_id], context={})
# We automatically reconcile the account move lines.
reconcile = False
for rec_ids in rec_list_ids:
if len(rec_ids) >= 2:
reconcile = move_line_pool.reconcile_partial(cr, uid, rec_ids, writeoff_acc_id=voucher.writeoff_acc_id.id, writeoff_period_id=voucher.period_id.id, writeoff_journal_id=voucher.journal_id.id)
return True
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
default.update({
'state': 'draft',
'number': False,
'move_id': False,
'line_cr_ids': False,
'line_dr_ids': False,
'reference': False
})
if 'date' not in default:
default['date'] = time.strftime('%Y-%m-%d')
return super(account_voucher, self).copy(cr, uid, id, default, context)
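
# Hedged illustration (hypothetical helper, not part of this module): the posting loop above
# tracks ``line_total`` as debit - credit of the first move line and shifts it by the converted
# tax amount (subtracted for sale vouchers, added for purchase vouchers) before the per-line
# moves and the optional write-off line bring the entry back to balance.
def _example_line_total(debit, credit, tax_amount, voucher_type):
    total = debit - credit
    if voucher_type == 'sale':
        total -= tax_amount
    elif voucher_type == 'purchase':
        total += tax_amount
    return total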
class account_voucher_line(osv.osv):
_name = 'account.voucher.line'
_description = 'Voucher Lines'
_order = "move_line_id"
    # If the payment is in the same currency as the invoice, we keep the same amount
    # Otherwise, we convert from the invoice currency to the payment currency
def _compute_balance(self, cr, uid, ids, name, args, context=None):
currency_pool = self.pool.get('res.currency')
rs_data = {}
for line in self.browse(cr, uid, ids, context=context):
ctx = context.copy()
ctx.update({'date': line.voucher_id.date})
voucher_rate = self.pool.get('res.currency').read(cr, uid, line.voucher_id.currency_id.id, ['rate'], context=ctx)['rate']
ctx.update({
'voucher_special_currency': line.voucher_id.payment_rate_currency_id and line.voucher_id.payment_rate_currency_id.id or False,
'voucher_special_currency_rate': line.voucher_id.payment_rate * voucher_rate})
res = {}
company_currency = line.voucher_id.journal_id.company_id.currency_id.id
voucher_currency = line.voucher_id.currency_id and line.voucher_id.currency_id.id or company_currency
move_line = line.move_line_id or False
if not move_line:
res['amount_original'] = 0.0
res['amount_unreconciled'] = 0.0
elif move_line.currency_id and voucher_currency==move_line.currency_id.id:
res['amount_original'] = abs(move_line.amount_currency)
res['amount_unreconciled'] = abs(move_line.amount_residual_currency)
else:
#always use the amount booked in the company currency as the basis of the conversion into the voucher currency
res['amount_original'] = currency_pool.compute(cr, uid, company_currency, voucher_currency, move_line.credit or move_line.debit or 0.0, context=ctx)
res['amount_unreconciled'] = currency_pool.compute(cr, uid, company_currency, voucher_currency, abs(move_line.amount_residual), context=ctx)
rs_data[line.id] = res
return rs_data
def _currency_id(self, cr, uid, ids, name, args, context=None):
'''
This function returns the currency id of a voucher line. It's either the currency of the
associated move line (if any) or the currency of the voucher or the company currency.
'''
res = {}
for line in self.browse(cr, uid, ids, context=context):
move_line = line.move_line_id
if move_line:
res[line.id] = move_line.currency_id and move_line.currency_id.id or move_line.company_id.currency_id.id
else:
res[line.id] = line.voucher_id.currency_id and line.voucher_id.currency_id.id or line.voucher_id.company_id.currency_id.id
return res
_columns = {
'voucher_id':fields.many2one('account.voucher', 'Voucher', required=1, ondelete='cascade'),
'name':fields.char('Description', size=256),
'account_id':fields.many2one('account.account','Account', required=True),
'partner_id':fields.related('voucher_id', 'partner_id', type='many2one', relation='res.partner', string='Partner'),
'untax_amount':fields.float('Untax Amount'),
'amount':fields.float('Amount', digits_compute=dp.get_precision('Account')),
'reconcile': fields.boolean('Full Reconcile'),
'type':fields.selection([('dr','Debit'),('cr','Credit')], 'Dr/Cr'),
'account_analytic_id': fields.many2one('account.analytic.account', 'Analytic Account'),
'move_line_id': fields.many2one('account.move.line', 'Journal Item'),
'date_original': fields.related('move_line_id','date', type='date', relation='account.move.line', string='Date', readonly=1),
'date_due': fields.related('move_line_id','date_maturity', type='date', relation='account.move.line', string='Due Date', readonly=1),
'amount_original': fields.function(_compute_balance, multi='dc', type='float', string='Original Amount', store=True, digits_compute=dp.get_precision('Account')),
'amount_unreconciled': fields.function(_compute_balance, multi='dc', type='float', string='Open Balance', store=True, digits_compute=dp.get_precision('Account')),
'company_id': fields.related('voucher_id','company_id', relation='res.company', type='many2one', string='Company', store=True, readonly=True),
'currency_id': fields.function(_currency_id, string='Currency', type='many2one', relation='res.currency', readonly=True),
}
_defaults = {
'name': '',
}
def onchange_reconcile(self, cr, uid, ids, reconcile, amount, amount_unreconciled, context=None):
vals = {'amount': 0.0}
if reconcile:
vals = { 'amount': amount_unreconciled}
return {'value': vals}
def onchange_amount(self, cr, uid, ids, amount, amount_unreconciled, context=None):
vals = {}
if amount:
vals['reconcile'] = (amount == amount_unreconciled)
return {'value': vals}
def onchange_move_line_id(self, cr, user, ids, move_line_id, context=None):
"""
Returns a dict that contains new values and context
@param move_line_id: latest value from user input for field move_line_id
@param args: other arguments
@param context: context arguments, like lang, time zone
@return: Returns a dict which contains new values, and context
"""
res = {}
move_line_pool = self.pool.get('account.move.line')
if move_line_id:
move_line = move_line_pool.browse(cr, user, move_line_id, context=context)
if move_line.credit:
ttype = 'dr'
else:
ttype = 'cr'
res.update({
'account_id': move_line.account_id.id,
'type': ttype,
'currency_id': move_line.currency_id and move_line.currency_id.id or move_line.company_id.currency_id.id,
})
return {
'value':res,
}
def default_get(self, cr, user, fields_list, context=None):
"""
Returns default values for fields
@param fields_list: list of fields, for which default values are required to be read
@param context: context arguments, like lang, time zone
@return: Returns a dict that contains default values for fields
"""
if context is None:
context = {}
journal_id = context.get('journal_id', False)
partner_id = context.get('partner_id', False)
journal_pool = self.pool.get('account.journal')
partner_pool = self.pool.get('res.partner')
values = super(account_voucher_line, self).default_get(cr, user, fields_list, context=context)
if (not journal_id) or ('account_id' not in fields_list):
return values
journal = journal_pool.browse(cr, user, journal_id, context=context)
account_id = False
ttype = 'cr'
if journal.type in ('sale', 'sale_refund'):
account_id = journal.default_credit_account_id and journal.default_credit_account_id.id or False
ttype = 'cr'
elif journal.type in ('purchase', 'expense', 'purchase_refund'):
account_id = journal.default_debit_account_id and journal.default_debit_account_id.id or False
ttype = 'dr'
elif partner_id:
partner = partner_pool.browse(cr, user, partner_id, context=context)
if context.get('type') == 'payment':
ttype = 'dr'
account_id = partner.property_account_payable.id
elif context.get('type') == 'receipt':
account_id = partner.property_account_receivable.id
values.update({
'account_id':account_id,
'type':ttype
})
return values
account_voucher_line()
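
# Hedged sketch (hypothetical helper, not part of this module): default_get() above picks the
# counterpart account and direction from the journal type when a journal is given, and otherwise
# falls back to the partner's payable/receivable account based on the 'payment'/'receipt' type
# passed in the context.
def _example_default_direction(journal_type):
    if journal_type in ('sale', 'sale_refund'):
        return 'cr'  # credit line against the journal's default credit account
    if journal_type in ('purchase', 'expense', 'purchase_refund'):
        return 'dr'  # debit line against the journal's default debit account
    return 'cr'  # default; a 'payment' context switches this to 'dr'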
class account_bank_statement(osv.osv):
_inherit = 'account.bank.statement'
def button_confirm_bank(self, cr, uid, ids, context=None):
voucher_obj = self.pool.get('account.voucher')
voucher_ids = []
for statement in self.browse(cr, uid, ids, context=context):
voucher_ids += [line.voucher_id.id for line in statement.line_ids if line.voucher_id]
if voucher_ids:
voucher_obj.write(cr, uid, voucher_ids, {'active': True}, context=context)
return super(account_bank_statement, self).button_confirm_bank(cr, uid, ids, context=context)
def button_cancel(self, cr, uid, ids, context=None):
voucher_obj = self.pool.get('account.voucher')
for st in self.browse(cr, uid, ids, context=context):
voucher_ids = []
for line in st.line_ids:
if line.voucher_id:
voucher_ids.append(line.voucher_id.id)
voucher_obj.cancel_voucher(cr, uid, voucher_ids, context)
return super(account_bank_statement, self).button_cancel(cr, uid, ids, context=context)
def create_move_from_st_line(self, cr, uid, st_line_id, company_currency_id, next_number, context=None):
voucher_obj = self.pool.get('account.voucher')
wf_service = netsvc.LocalService("workflow")
move_line_obj = self.pool.get('account.move.line')
bank_st_line_obj = self.pool.get('account.bank.statement.line')
st_line = bank_st_line_obj.browse(cr, uid, st_line_id, context=context)
if st_line.voucher_id:
voucher_obj.write(cr, uid, [st_line.voucher_id.id],
{'number': next_number,
'date': st_line.date,
'period_id': st_line.statement_id.period_id.id},
context=context)
if st_line.voucher_id.state == 'cancel':
voucher_obj.action_cancel_draft(cr, uid, [st_line.voucher_id.id], context=context)
wf_service.trg_validate(uid, 'account.voucher', st_line.voucher_id.id, 'proforma_voucher', cr)
v = voucher_obj.browse(cr, uid, st_line.voucher_id.id, context=context)
bank_st_line_obj.write(cr, uid, [st_line_id], {
'move_ids': [(4, v.move_id.id, False)]
})
return move_line_obj.write(cr, uid, [x.id for x in v.move_ids], {'statement_id': st_line.statement_id.id}, context=context)
return super(account_bank_statement, self).create_move_from_st_line(cr, uid, st_line.id, company_currency_id, next_number, context=context)
def write(self, cr, uid, ids, vals, context=None):
        # Forbid changing the journal if reconciliation vouchers have already been created/generated,
        # because each voucher keeps in memory the journal it was created with.
for bk_st in self.browse(cr, uid, ids, context=context):
if vals.get('journal_id') and bk_st.line_ids:
                if any(x.voucher_id for x in bk_st.line_ids):
raise osv.except_osv(_('Unable to Change Journal!'), _('You can not change the journal as you already reconciled some statement lines!'))
return super(account_bank_statement, self).write(cr, uid, ids, vals, context=context)
account_bank_statement()
class account_bank_statement_line(osv.osv):
_inherit = 'account.bank.statement.line'
def onchange_partner_id(self, cr, uid, ids, partner_id, context=None):
res = super(account_bank_statement_line, self).onchange_partner_id(cr, uid, ids, partner_id, context=context)
if 'value' not in res:
res['value'] = {}
res['value'].update({'voucher_id' : False})
return res
def onchange_amount(self, cr, uid, ids, amount, context=None):
return {'value' : {'voucher_id' : False}}
def _amount_reconciled(self, cursor, user, ids, name, args, context=None):
if not ids:
return {}
res = {}
for line in self.browse(cursor, user, ids, context=context):
if line.voucher_id:
                res[line.id] = line.voucher_id.amount
else:
res[line.id] = 0.0
return res
def _check_amount(self, cr, uid, ids, context=None):
        for obj in self.browse(cr, uid,
# Website/FlaskWebsite/env/Lib/site-packages/matplotlib/tests/test_collections.py
import io
from types import SimpleNamespace
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
import pytest
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.backend_bases import MouseEvent
import matplotlib.collections as mcollections
import matplotlib.colors as mcolors
import matplotlib.transforms as mtransforms
from matplotlib.collections import (Collection, LineCollection,
EventCollection, PolyCollection)
from matplotlib.testing.decorators import check_figures_equal, image_comparison
from matplotlib._api.deprecation import MatplotlibDeprecationWarning
def generate_EventCollection_plot():
"""Generate the initial collection and plot it."""
positions = np.array([0., 1., 2., 3., 5., 8., 13., 21.])
extra_positions = np.array([34., 55., 89.])
orientation = 'horizontal'
lineoffset = 1
linelength = .5
linewidth = 2
color = [1, 0, 0, 1]
linestyle = 'solid'
antialiased = True
coll = EventCollection(positions,
orientation=orientation,
lineoffset=lineoffset,
linelength=linelength,
linewidth=linewidth,
color=color,
linestyle=linestyle,
antialiased=antialiased
)
fig, ax = plt.subplots()
ax.add_collection(coll)
ax.set_title('EventCollection: default')
props = {'positions': positions,
'extra_positions': extra_positions,
'orientation': orientation,
'lineoffset': lineoffset,
'linelength': linelength,
'linewidth': linewidth,
'color': color,
'linestyle': linestyle,
'antialiased': antialiased
}
ax.set_xlim(-1, 22)
ax.set_ylim(0, 2)
return ax, coll, props
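
# Minimal standalone sketch (not one of the original test cases): the same EventCollection
# construction as the helper above, reduced to the arguments the assertions below rely on.
def _example_event_collection():
    fig, ax = plt.subplots()
    coll = EventCollection(np.array([0., 1., 2., 3.]),
                           orientation='horizontal',
                           lineoffset=1,
                           linelength=.5)
    ax.add_collection(coll)
    ax.set_xlim(-1, 4)
    ax.set_ylim(0, 2)
    return ax, coll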
@image_comparison(['EventCollection_plot__default'])
def test__EventCollection__get_props():
_, coll, props = generate_EventCollection_plot()
# check that the default segments have the correct coordinates
check_segments(coll,
props['positions'],
props['linelength'],
props['lineoffset'],
props['orientation'])
# check that the default positions match the input positions
np.testing.assert_array_equal(props['positions'], coll.get_positions())
# check that the default orientation matches the input orientation
assert props['orientation'] == coll.get_orientation()
    # check that the collection is horizontal by default
assert coll.is_horizontal()
# check that the default linelength matches the input linelength
assert props['linelength'] == coll.get_linelength()
# check that the default lineoffset matches the input lineoffset
assert props['lineoffset'] == coll.get_lineoffset()
# check that the default linestyle matches the input linestyle
assert coll.get_linestyle() == [(0, None)]
# check that the default color matches the input color
for color in [coll.get_color(), *coll.get_colors()]:
np.testing.assert_array_equal(color, props['color'])
@image_comparison(['EventCollection_plot__set_positions'])
def test__EventCollection__set_positions():
splt, coll, props = generate_EventCollection_plot()
new_positions = np.hstack([props['positions'], props['extra_positions']])
coll.set_positions(new_positions)
np.testing.assert_array_equal(new_positions, coll.get_positions())
check_segments(coll, new_positions,
props['linelength'],
props['lineoffset'],
props['orientation'])
splt.set_title('EventCollection: set_positions')
splt.set_xlim(-1, 90)
@image_comparison(['EventCollection_plot__add_positions'])
def test__EventCollection__add_positions():
splt, coll, props = generate_EventCollection_plot()
new_positions = np.hstack([props['positions'],
props['extra_positions'][0]])
coll.switch_orientation() # Test adding in the vertical orientation, too.
coll.add_positions(props['extra_positions'][0])
coll.switch_orientation()
np.testing.assert_array_equal(new_positions, coll.get_positions())
check_segments(coll,
new_positions,
props['linelength'],
props['lineoffset'],
props['orientation'])
splt.set_title('EventCollection: add_positions')
splt.set_xlim(-1, 35)
@image_comparison(['EventCollection_plot__append_positions'])
def test__EventCollection__append_positions():
splt, coll, props = generate_EventCollection_plot()
new_positions = np.hstack([props['positions'],
props['extra_positions'][2]])
coll.append_positions(props['extra_positions'][2])
np.testing.assert_array_equal(new_positions, coll.get_positions())
check_segments(coll,
new_positions,
props['linelength'],
props['lineoffset'],
props['orientation'])
splt.set_title('EventCollection: append_positions')
splt.set_xlim(-1, 90)
@image_comparison(['EventCollection_plot__extend_positions'])
def test__EventCollection__extend_positions():
splt, coll, props = generate_EventCollection_plot()
new_positions = np.hstack([props['positions'],
props['extra_positions'][1:]])
coll.extend_positions(props['extra_positions'][1:])
np.testing.assert_array_equal(new_positions, coll.get_positions())
check_segments(coll,
new_positions,
props['linelength'],
props['lineoffset'],
props['orientation'])
splt.set_title('EventCollection: extend_positions')
splt.set_xlim(-1, 90)
@image_comparison(['EventCollection_plot__switch_orientation'])
def test__EventCollection__switch_orientation():
splt, coll, props = generate_EventCollection_plot()
new_orientation = 'vertical'
coll.switch_orientation()
assert new_orientation == coll.get_orientation()
assert not coll.is_horizontal()
new_positions = coll.get_positions()
check_segments(coll,
new_positions,
props['linelength'],
props['lineoffset'], new_orientation)
splt.set_title('EventCollection: switch_orientation')
splt.set_ylim(-1, 22)
splt.set_xlim(0, 2)
@image_comparison(['EventCollection_plot__switch_orientation__2x'])
def test__EventCollection__switch_orientation_2x():
"""
Check that calling switch_orientation twice sets the orientation back to
the default.
"""
splt, coll, props = generate_EventCollection_plot()
coll.switch_orientation()
coll.switch_orientation()
new_positions = coll.get_positions()
assert props['orientation'] == coll.get_orientation()
assert coll.is_horizontal()
np.testing.assert_array_equal(props['positions'], new_positions)
check_segments(coll,
new_positions,
props['linelength'],
props['lineoffset'],
props['orientation'])
splt.set_title('EventCollection: switch_orientation 2x')
@image_comparison(['EventCollection_plot__set_orientation'])
def test__EventCollection__set_orientation():
splt, coll, props = generate_EventCollection_plot()
new_orientation = 'vertical'
coll.set_orientation(new_orientation)
assert new_orientation == coll.get_orientation()
assert not coll.is_horizontal()
check_segments(coll,
props['positions'],
props['linelength'],
props['lineoffset'],
new_orientation)
splt.set_title('EventCollection: set_orientation')
splt.set_ylim(-1, 22)
splt.set_xlim(0, 2)
@image_comparison(['EventCollection_plot__set_linelength'])
def test__EventCollection__set_linelength():
splt, coll, props = generate_EventCollection_plot()
new_linelength = 15
coll.set_linelength(new_linelength)
assert new_linelength == coll.get_linelength()
check_segments(coll,
props['positions'],
new_linelength,
props['lineoffset'],
props['orientation'])
splt.set_title('EventCollection: set_linelength')
splt.set_ylim(-20, 20)
@image_comparison(['EventCollection_plot__set_lineoffset'])
def test__EventCollection__set_lineoffset():
splt, coll, props = generate_EventCollection_plot()
new_lineoffset = -5.
coll.set_lineoffset(new_lineoffset)
assert new_lineoffset == coll.get_lineoffset()
check_segments(coll,
props['positions'],
props['linelength'],
new_lineoffset,
props['orientation'])
splt.set_title('EventCollection: set_lineoffset')
splt.set_ylim(-6, -4)
@image_comparison([
'EventCollection_plot__set_linestyle',
'EventCollection_plot__set_linestyle',
'EventCollection_plot__set_linewidth',
])
def test__EventCollection__set_prop():
for prop, value, expected in [
('linestyle', 'dashed', [(0, (6.0, 6.0))]),
('linestyle', (0, (6., 6.)), [(0, (6.0, 6.0))]),
('linewidth', 5, 5),
]:
splt, coll, _ = generate_EventCollection_plot()
coll.set(**{prop: value})
assert plt.getp(coll, prop) == expected
splt.set_title(f'EventCollection: set_{prop}')
@image_comparison(['EventCollection_plot__set_color'])
def test__EventCollection__set_color():
splt, coll, _ = generate_EventCollection_plot()
new_color = np.array([0, 1, 1, 1])
coll.set_color(new_color)
for color in [coll.get_color(), *coll.get_colors()]:
np.testing.assert_array_equal(color, new_color)
splt.set_title('EventCollection: set_color')
def check_segments(coll, positions, linelength, lineoffset, orientation):
"""
Test helper checking that all values in the segment are correct, given a
particular set of inputs.
"""
segments = coll.get_segments()
    if (orientation is None
            or orientation.lower() in ('horizontal', 'none')):
        # if horizontal, the position is in the y-axis
pos1 = 1
pos2 = 0
elif orientation.lower() == 'vertical':
        # if vertical, the position is in the x-axis
pos1 = 0
pos2 = 1
else:
raise ValueError("orientation must be 'horizontal' or 'vertical'")
# test to make sure each segment is correct
for i, segment in enumerate(segments):
assert segment[0, pos1] == lineoffset + linelength / 2
assert segment[1, pos1] == lineoffset - linelength / 2
assert segment[0, pos2] == positions[i]
assert segment[1, pos2] == positions[i]
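
# Worked example of the geometry verified above (values assumed for illustration): with
# lineoffset=1, linelength=0.5 and horizontal orientation, an event at position 3 yields the
# segment [(3, 1.25), (3, 0.75)], i.e. the offset plus and minus half the line length.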
def test_null_collection_datalim():
col = mcollections.PathCollection([])
col_data_lim = col.get_datalim(mtransforms.IdentityTransform())
assert_array_equal(col_data_lim.get_points(),
mtransforms.Bbox.null().get_points())
def test_add_collection():
# Test if data limits are unchanged by adding an empty collection.
# GitHub issue #1490, pull #1497.
plt.figure()
ax = plt.axes()
coll = ax.scatter([0, 1], [0, 1])
ax.add_collection(coll)
bounds = ax.dataLim.bounds
coll = ax.scatter([], [])
assert ax.dataLim.bounds == bounds
@mpl.style.context('mpl20')
@check_figures_equal(extensions=['png'])
def test_collection_log_datalim(fig_test, fig_ref):
# Data limits should respect the minimum x/y when using log scale.
x_vals = [4.38462e-6, 5.54929e-6, 7.02332e-6, 8.88889e-6, 1.12500e-5,
1.42383e-5, 1.80203e-5, 2.28070e-5, 2.88651e-5, 3.65324e-5,
4.62363e-5, 5.85178e-5, 7.40616e-5, 9.37342e-5, 1.18632e-4]
y_vals = [0.0, 0.1, 0.182, 0.332, 0.604, 1.1, 2.0, 3.64, 6.64, 12.1, 22.0,
39.6, 71.3]
x, y = np.meshgrid(x_vals, y_vals)
x = x.flatten()
y = y.flatten()
ax_test = fig_test.subplots()
ax_test.set_xscale('log')
ax_test.set_yscale('log')
ax_test.margins = 0
ax_test.scatter(x, y)
ax_ref = fig_ref.subplots()
ax_ref.set_xscale('log')
ax_ref.set_yscale('log')
ax_ref.plot(x, y, marker="o", ls="")
def test_quiver_limits():
ax = plt.axes()
x, y = np.arange(8), np.arange(10)
u = v = np.linspace(0, 10, 80).reshape(10, 8)
q = plt.quiver(x, y, u, v)
assert q.get_datalim(ax.transData).bounds == (0., 0., 7., 9.)
plt.figure()
ax = plt.axes()
x = np.linspace(-5, 10, 20)
y = np.linspace(-2, 4, 10)
y, x = np.meshgrid(y, x)
trans = mtransforms.Affine2D().translate(25, 32) + ax.transData
plt.quiver(x, y, np.sin(x), np.cos(y), transform=trans)
assert ax.dataLim.bounds == (20.0, 30.0, 15.0, 6.0)
def test_barb_limits():
ax = plt.axes()
x = np.linspace(-5, 10, 20)
y = np.linspace(-2, 4, 10)
y, x = np.meshgrid(y, x)
trans = mtransforms.Affine2D().translate(25, 32) + ax.transData
plt.barbs(x, y, np.sin(x), np.cos(y), transform=trans)
# The calculated bounds are approximately the bounds of the original data,
# this is because the entire path is taken into account when updating the
# datalim.
assert_array_almost_equal(ax.dataLim.bounds, (20, 30, 15, 6),
decimal=1)
@image_comparison(['EllipseCollection_test_image.png'], remove_text=True)
def test_EllipseCollection():
# Test basic functionality
fig, ax = plt.subplots()
x = np.arange(4)
y = np.arange(3)
X, Y = np.meshgrid(x, y)
XY = np.vstack((X.ravel(), Y.ravel())).T
ww = X / x[-1]
hh = Y / y[-1]
aa = np.ones_like(ww) * 20 # first axis is 20 degrees CCW from x axis
ec = mcollections.EllipseCollection(ww, hh, aa,
units='x',
offsets=XY,
transOffset=ax.transData,
facecolors='none')
ax.add_collection(ec)
ax.autoscale_view()
@image_comparison(['polycollection_close.png'], remove_text=True)
def test_polycollection_close():
from mpl_toolkits.mplot3d import Axes3D
vertsQuad = [
[[0., 0.], [0., 1.], [1., 1.], [1., 0.]],
[[0., 1.], [2., 3.], [2., 2.], [1., 1.]],
[[2., 2.], [2., 3.], [4., 1.], [3., 1.]],
[[3., 0.], [3., 1.], [4., 1.], [4., 0.]]]
fig = plt.figure()
ax = fig.add_axes(Axes3D(fig, auto_add_to_figure=False))
colors = ['r', 'g', 'b', 'y', 'k']
zpos = list(range(5))
poly = mcollections.PolyCollection(
vertsQuad * len(zpos), linewidth=0.25)
poly.set_alpha(0.7)
# need to have a z-value for *each* polygon = element!
zs = []
cs = []
for z, c in zip(zpos, colors):
zs.extend([z] * len(vertsQuad))
cs.extend([c] * len(vertsQuad))
poly.set_color(cs)
ax.add_collection3d(poly, zs=zs, zdir='y')
# axis limit settings:
ax.set_xlim3d(0, 4)
ax.set_zlim3d(0, 3)
ax.set_ylim3d(0, 4)
@image_comparison(['regularpolycollection_rotate.png'], remove_text=True)
def test_regularpolycollection_rotate():
xx, yy = np.mgrid[:10, :10]
xy_points = np.transpose([xx.flatten(), yy.flatten()])
rotations = np.linspace(0, 2*np.pi, len(xy_points))
fig, ax = plt.subplots()
for xy, alpha in zip(xy_points, rotations):
col = mcollections.RegularPolyCollection(
4, sizes=(100,), rotation=alpha,
offsets=[xy], transOffset=ax.transData)
ax.add_collection(col, autolim=True)
ax.autoscale_view()
@image_comparison(['regularpolycollection_scale.png'], remove_text=True)
def test_regularpolycollection_scale():
# See issue #3860
class SquareCollection(mcollections.RegularPolyCollection):
def __init__(self, **kwargs):
super().__init__(4, rotation=np.pi/4., **kwargs)
def get_transform(self):
"""Return transform scaling circle areas to data space."""
ax = self.axes
pts2pixels = 72.0 / ax.figure.dpi
scale_x = pts2pixels * ax.bbox.width / ax.viewLim.width
scale_y = pts2pixels * ax.bbox.height / ax.viewLim.height
return mtransforms.Affine2D().scale(scale_x, scale_y)
fig, ax = plt.subplots()
xy = [(0, 0)]
# Unit square has a half-diagonal of `1/sqrt(2)`, so `pi * r**2` equals...
circle_areas = [np.pi / 2]
squares = SquareCollection(sizes=circle_areas, offsets=xy,
transOffset=ax.transData)
ax.add_collection(squares, autolim=True)
ax.axis([-1, 1, -1, 1])
def test_picking():
fig, ax = plt.subplots()
col = ax.scatter([0], [0], [1000], picker=True)
fig.savefig(io.BytesIO(), dpi=fig.dpi)
mouse_event = SimpleNamespace(x=325, y=240)
found, indices = col.contains(mouse_event)
assert found
assert_array_equal(indices['ind'], [0])
def test_linestyle_single_dashes():
plt.scatter([0, 1, 2], [0, 1, 2], linestyle=(0., [2., 2.]))
plt.draw()
@image_comparison(['size_in_xy.png'], remove_text=True)
def test_size_in_xy():
fig, ax = plt.subplots()
# Copyright 2014 Juniper Networks. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: <NAME>, <NAME>, <NAME>, <NAME>
import requests
try:
from neutron.api.v2.attributes import ATTR_NOT_SPECIFIED
except Exception:
from neutron_lib.constants import ATTR_NOT_SPECIFIED
try:
from neutron.common.exceptions import ServiceUnavailable
except ImportError:
from neutron_lib.exceptions import ServiceUnavailable
try:
from neutron.common.exceptions import BadRequest
except ImportError:
from neutron_lib.exceptions import BadRequest
try:
from oslo.config import cfg
except ImportError:
from oslo_config import cfg
try:
from neutron.openstack.common import jsonutils as json
except ImportError:
from oslo_serialization import jsonutils as json
try:
from neutron.openstack.common import log as logging
except ImportError:
from oslo_log import log as logging
from simplejson import JSONDecodeError
from eventlet.greenthread import getcurrent
import neutron_plugin_contrail.plugins.opencontrail.contrail_plugin_base as plugin_base
from vnc_api import utils as vncutils
_DEFAULT_KS_CERT_BUNDLE = "/tmp/keystonecertbundle.pem"
_DEFAULT_API_CERT_BUNDLE = "/tmp/apiservercertbundle.pem"
_DEFAULT_SERVER_CONNECT = "http"
_DEFAULT_SECURE_SERVER_CONNECT = "https"
LOG = logging.getLogger(__name__)
class InvalidContrailExtensionError(ServiceUnavailable):
    message = "Invalid Contrail Extension: %(ext_name)s %(ext_class)s"
class NeutronPluginContrailCoreV2(plugin_base.NeutronPluginContrailCoreBase):
PLUGIN_URL_PREFIX = '/neutron'
def _build_auth_details(self):
# keystone
self._authn_token = None
if cfg.CONF.auth_strategy == 'keystone':
kcfg = cfg.CONF.keystone_authtoken
# Keystone SSL Support
self._ksinsecure = kcfg.insecure
kscertfile = kcfg.certfile
kskeyfile = kcfg.keyfile
kscafile = kcfg.cafile
self._use_ks_certs = False
if (cfg.CONF.keystone_authtoken.auth_protocol ==
_DEFAULT_SECURE_SERVER_CONNECT and kscafile):
certs = [kscafile]
if kscertfile and kskeyfile:
certs = [kscertfile, kskeyfile, kscafile]
self._kscertbundle = vncutils.getCertKeyCaBundle(
_DEFAULT_KS_CERT_BUNDLE, certs)
self._use_ks_certs = True
auth_uri = kcfg.auth_uri or ''
try:
auth_type = kcfg.auth_type
except cfg.NoSuchOptError:
auth_type = None
auth_version = kcfg.auth_version
self.ks_sess = None
if ('v2.0' in auth_uri.split('/') or
auth_version == 'v2.0' or
not auth_type):
body = '{"auth":{"passwordCredentials":{'
body += ' "username": "%s",' % (kcfg.admin_user)
body += ' "password": "%s"},' % (kcfg.admin_password)
body += ' "tenantName":"%s"}}' % (kcfg.admin_tenant_name)
self._authn_body = body
self._authn_token = cfg.CONF.keystone_authtoken.admin_token
try:
auth_token_url = cfg.CONF.APISERVER.auth_token_url
except cfg.NoSuchOptError:
auth_token_url = None
if auth_token_url:
self._keystone_url = auth_token_url
else:
self._keystone_url = "%s://%s:%s%s" % (
cfg.CONF.keystone_authtoken.auth_protocol,
cfg.CONF.keystone_authtoken.auth_host,
cfg.CONF.keystone_authtoken.auth_port,
"/v2.0/tokens")
else:
from keystoneauth1 import session
from keystoneauth1 import loading as ks_loading
self.auth_plugin = ks_loading.load_auth_from_conf_options(
cfg.CONF, 'keystone_authtoken')
if self._ksinsecure:
self.ks_sess = session.Session(auth=self.auth_plugin, verify=False)
elif not self._ksinsecure and self._use_ks_certs:
self.ks_sess = session.Session(auth=self.auth_plugin,
verify=self._kscertbundle)
else:
self.ks_sess = session.Session(auth=self.auth_plugin)
# API Server SSL support
self._apiusessl = cfg.CONF.APISERVER.use_ssl
self._apiinsecure = cfg.CONF.APISERVER.insecure
apicertfile = cfg.CONF.APISERVER.certfile
apikeyfile = cfg.CONF.APISERVER.keyfile
apicafile = cfg.CONF.APISERVER.cafile
if self._apiusessl:
self._apiserverconnect = _DEFAULT_SECURE_SERVER_CONNECT
else:
self._apiserverconnect = _DEFAULT_SERVER_CONNECT
self._use_api_certs = False
if self._apiusessl and apicafile:
certs = [apicafile]
if apicertfile and apikeyfile:
certs = [apicertfile, apikeyfile, apicafile]
self._apicertbundle = vncutils.getCertKeyCaBundle(
_DEFAULT_API_CERT_BUNDLE, certs)
self._use_api_certs = True
def get_token(self):
authn_token = None
if self.ks_sess:
authn_token = self.ks_sess.get_token()
else:
kwargs = {
'timeout': (cfg.CONF.APISERVER.connection_timeout,
cfg.CONF.APISERVER.timeout),
'data': self._authn_body,
'headers': {'Content-type': 'application/json'}
}
if self._ksinsecure:
kwargs['verify'] = False
elif not self._ksinsecure and self._use_ks_certs:
kwargs['verify'] = self._kscertbundle
response = requests.post(self._keystone_url, **kwargs)
if (response.status_code == requests.codes.ok):
authn_content = json.loads(response.text)
authn_token = authn_content['access']['token']['id']
return authn_token
def _request_api_server(self, url, data=None, headers=None, retry=True):
# Attempt to post to Api-Server
kwargs = {
'timeout': (cfg.CONF.APISERVER.connection_timeout,
cfg.CONF.APISERVER.timeout),
'data': data,
'headers': headers
}
if self._apiinsecure:
kwargs['verify'] = False
elif not self._apiinsecure and self._use_api_certs:
kwargs['verify'] = self._apicertbundle
response = requests.post(url, **kwargs)
if (response.status_code == requests.codes.unauthorized) and retry:
# Get token from keystone and save it for next request
authn_token = self.get_token()
if authn_token:
# plan is to re-issue original request with new token
auth_headers = headers or {}
self._authn_token = authn_token
auth_headers['X-AUTH-TOKEN'] = self._authn_token
response = self._request_api_server(url, data, auth_headers, retry=False)
else:
raise RuntimeError('Authentication Failure')
return response
def _request_api_server_authn(self, url, data=None, headers=None):
# forward user token to API server for RBAC
# token saved earlier in the pipeline
try:
auth_token = getcurrent().contrail_vars.token
except AttributeError:
auth_token = None
authn_headers = headers or {}
if auth_token or self._authn_token:
authn_headers['X-AUTH-TOKEN'] = auth_token or self._authn_token
response = self._request_api_server(url, data, headers=authn_headers)
return response
def _relay_request(self, url_path, data=None):
"""Send received request to api server."""
exc = None
api_server_count = self.api_servers.len()
api_server_list = self.api_servers.api_servers[:]
for idx in range(api_server_count):
api_server_ip = self.api_servers.get(api_server_list)
url = "%s://%s:%s%s" % (self._apiserverconnect,
api_server_ip,
cfg.CONF.APISERVER.api_server_port,
url_path)
LOG.debug("Relay request to VNC API URL %s", url)
try:
return self._request_api_server_authn(
url,
data=data,
headers={'Content-type': 'application/json'},
)
except Exception as e:
exc = e
api_server_list.remove(api_server_ip)
LOG.warning("Failed to relay request to VNC API URL %s" % url)
msg = ("All VNC API server(s) (%s) are down" %
', '.join(cfg.CONF.APISERVER.api_server_ip.split()))
LOG.critical(msg)
raise exc
def _request_backend(self, context, data_dict, obj_name, action):
context_dict = self._encode_context(context, action, obj_name)
data = json.dumps({'context': context_dict, 'data': data_dict})
url_path = "%s/%s" % (self.PLUGIN_URL_PREFIX, obj_name)
response = self._relay_request(url_path, data=data)
try:
return response.status_code, response.json()
except JSONDecodeError:
return response.status_code, {'message': response.content}
def _encode_context(self, context, operation, apitype):
cdict = {'user_id': getattr(context, 'user_id', ''),
'is_admin': getattr(context, 'is_admin', False),
'operation': operation,
'type': apitype,
'tenant_id': getattr(context, 'tenant_id', None),
'request_id': getattr(context, 'request_id', None)}
if context.roles:
cdict['roles'] = context.roles
if context.tenant:
cdict['tenant'] = context.tenant
return cdict
def _encode_resource(self, resource_id=None, resource=None, fields=None,
filters=None):
        # Newer OpenStack releases replace the 'tenant' term with 'project', and the
        # tools that call the OpenStack APIs made the same move, using 'project_id'
        # instead of 'tenant_id' to query resources for a project
if (filters is not None and 'project_id' in filters and
'tenant_id' not in filters):
filters['tenant_id'] = filters['project_id']
if (filters is not None and 'project_id' in filters and
'tenant_id' in filters):
if (filters['project_id'] == filters['tenant_id']):
filters.pop('tenant_id')
resource_dict = {}
if resource_id:
resource_dict['id'] = resource_id
if resource:
resource_dict['resource'] = resource
resource_dict['filters'] = filters
resource_dict['fields'] = fields
return resource_dict
def _prune(self, resource_dict, fields):
if fields:
return dict(((key, item) for key, item in resource_dict.items()
if key in fields))
return resource_dict
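
    # Illustration with assumed values: _prune({'id': 1, 'name': 'net0', 'status': 'ACTIVE'},
    # ['id', 'name']) returns {'id': 1, 'name': 'net0'}, while an empty or None `fields`
    # returns the resource dict unchanged.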
def _transform_response(self, status_code, info=None, obj_name=None,
fields=None):
if status_code == requests.codes.ok:
if not isinstance(info, list):
return self._prune(info, fields)
else:
return [self._prune(items, fields) for items in info]
elif status_code == requests.codes.forbidden:
info['exception'] = 'NotAuthorized'
plugin_base._raise_contrail_error(info, obj_name)
def _create_resource(self, res_type, context, res_data):
"""Create a resource in API server.
        This method encodes the Neutron model and sends it to the
        Contrail API server.
"""
keys_to_del = []
for key, value in res_data[res_type].items():
if value == ATTR_NOT_SPECIFIED:
keys_to_del.append(key)
for key in keys_to_del:
del res_data[res_type][key]
res_dict = self._encode_resource(resource=res_data[res_type])
status_code, res_info = self._request_backend(context, res_dict,
res_type, 'CREATE')
res_dicts = self._transform_response(status_code, info=res_info,
obj_name=res_type)
LOG.debug("create_%(res_type)s(): %(res_dicts)s",
{'res_type': res_type, 'res_dicts': res_dicts})
return res_dicts
def _get_resource(self, res_type, context, id, fields):
"""Get a resource from API server.
This method gets a resource from the contrail api server
"""
res_dict = self._encode_resource(resource_id=id, fields=fields)
status_code, res_info = self._request_backend(context, res_dict,
res_type, 'READ')
res_dicts = self._transform_response(status_code, info=res_info,
fields=fields, obj_name=res_type)
LOG.debug("get_%(res_type)s(): %(res_dicts)s",
{'res_type': res_type, 'res_dicts': res_dicts})
return res_dicts
def _update_resource(self, res_type, context, id, res_data):
"""Update a resource in API server.
This method updates a resource in the contrail api server
"""
res_dict = self._encode_resource(resource_id=id,
resource=res_data[res_type])
status_code, res_info = self._request_backend(context, res_dict,
res_type, 'UPDATE')
res_dicts = self._transform_response(status_code, info=res_info,
obj_name=res_type)
LOG.debug("update_%(res_type)s(): %(res_dicts)s",
{'res_type': res_type, 'res_dicts': res_dicts})
return res_dicts
def _delete_resource(self, res_type, context, id):
"""Delete a resource in API server
This method deletes a resource in the contrail api server
"""
res_dict = self._encode_resource(resource_id=id)
LOG.debug("delete_%(res_type)s(): %(id)s",
{'res_type': res_type, 'id': id})
status_code, res_info = self._request_backend(context, res_dict,
res_type, 'DELETE')
if status_code != requests.codes.ok:
plugin_base._raise_contrail_error(info=res_info,
obj_name=res_type)
def _list_resource(self, res_type, context, filters, fields):
res_dict = self._encode_resource(filters=filters, fields=fields)
status_code, res_info = self._request_backend(context, res_dict,
res_type, 'READALL')
res_dicts = self._transform_response(status_code, info=res_info,
fields=fields, obj_name=res_type)
LOG.debug(
"get_%(res_type)s(): filters: %(filters)r data: %(res_dicts)r",
{'res_type': res_type, 'filters': filters,
'res_dicts': res_dicts})
return res_dicts
def _count_resource(self, res_type, context, filters):
res_dict = self._encode_resource(filters=filters)
status_code, res_count = self._request_backend(context, res_dict,
res_type, 'READCOUNT')
LOG.debug("get_%(res_type)s_count(): %(res_count)r",
{'res_type': res_type, 'res_count': res_count})
return res_count
def add_router_interface(self, context, router_id, interface_info):
"""Add interface to a router."""
if not interface_info:
msg = "Either subnet_id or port_id must be specified"
raise BadRequest(resource='router', msg=msg)
if 'port_id' in interface_info:
if 'subnet_id' in interface_info:
msg = "Cannot specify both subnet-id and port-id"
raise BadRequest(resource='router', msg=msg)
res_dict = self._encode_resource(resource_id=router_id,
resource=interface_info)
status_code, res_info = self._request_backend(context, res_dict,
'router', 'ADDINTERFACE')
if status_code != requests.codes.ok:
plugin_base._raise_contrail_error(info=res_info,
obj_name='add_router_interface')
return res_info
def remove_router_interface(self, context, router_id, interface_info):
"""Delete interface from a router."""
if not interface_info:
msg = "Either subnet_id or | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['MfaDuoArgs', 'MfaDuo']
@pulumi.input_type
class MfaDuoArgs:
def __init__(__self__, *,
api_hostname: pulumi.Input[str],
integration_key: pulumi.Input[str],
mount_accessor: pulumi.Input[str],
secret_key: pulumi.Input[str],
name: Optional[pulumi.Input[str]] = None,
push_info: Optional[pulumi.Input[str]] = None,
username_format: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a MfaDuo resource.
:param pulumi.Input[str] api_hostname: `(string: <required>)` - API hostname for Duo.
:param pulumi.Input[str] integration_key: `(string: <required>)` - Integration key for Duo.
:param pulumi.Input[str] mount_accessor: `(string: <required>)` - The mount to tie this method to for use in automatic mappings. The mapping will use the Name field of Aliases associated with this mount as the username in the mapping.
:param pulumi.Input[str] secret_key: `(string: <required>)` - Secret key for Duo.
:param pulumi.Input[str] name: `(string: <required>)` – Name of the MFA method.
:param pulumi.Input[str] push_info: `(string)` - Push information for Duo.
:param pulumi.Input[str] username_format: `(string)` - A format string for mapping Identity names to MFA method names. Values to substitute should be placed in `{{}}`. For example, `"{{alias.<EMAIL>"`. If blank, the Alias's Name field will be used as-is. Currently-supported mappings:
- alias.name: The name returned by the mount configured via the `mount_accessor` parameter
- entity.name: The name configured for the Entity
- alias.metadata.`<key>`: The value of the Alias's metadata parameter
- entity.metadata.`<key>`: The value of the Entity's metadata parameter
"""
pulumi.set(__self__, "api_hostname", api_hostname)
pulumi.set(__self__, "integration_key", integration_key)
pulumi.set(__self__, "mount_accessor", mount_accessor)
pulumi.set(__self__, "secret_key", secret_key)
if name is not None:
pulumi.set(__self__, "name", name)
if push_info is not None:
pulumi.set(__self__, "push_info", push_info)
if username_format is not None:
pulumi.set(__self__, "username_format", username_format)
@property
@pulumi.getter(name="apiHostname")
def api_hostname(self) -> pulumi.Input[str]:
"""
`(string: <required>)` - API hostname for Duo.
"""
return pulumi.get(self, "api_hostname")
@api_hostname.setter
def api_hostname(self, value: pulumi.Input[str]):
pulumi.set(self, "api_hostname", value)
@property
@pulumi.getter(name="integrationKey")
def integration_key(self) -> pulumi.Input[str]:
"""
`(string: <required>)` - Integration key for Duo.
"""
return pulumi.get(self, "integration_key")
@integration_key.setter
def integration_key(self, value: pulumi.Input[str]):
pulumi.set(self, "integration_key", value)
@property
@pulumi.getter(name="mountAccessor")
def mount_accessor(self) -> pulumi.Input[str]:
"""
`(string: <required>)` - The mount to tie this method to for use in automatic mappings. The mapping will use the Name field of Aliases associated with this mount as the username in the mapping.
"""
return pulumi.get(self, "mount_accessor")
@mount_accessor.setter
def mount_accessor(self, value: pulumi.Input[str]):
pulumi.set(self, "mount_accessor", value)
@property
@pulumi.getter(name="secretKey")
def secret_key(self) -> pulumi.Input[str]:
"""
`(string: <required>)` - Secret key for Duo.
"""
return pulumi.get(self, "secret_key")
@secret_key.setter
def secret_key(self, value: pulumi.Input[str]):
pulumi.set(self, "secret_key", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
`(string: <required>)` – Name of the MFA method.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="pushInfo")
def push_info(self) -> Optional[pulumi.Input[str]]:
"""
`(string)` - Push information for Duo.
"""
return pulumi.get(self, "push_info")
@push_info.setter
def push_info(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "push_info", value)
@property
@pulumi.getter(name="usernameFormat")
def username_format(self) -> Optional[pulumi.Input[str]]:
"""
`(string)` - A format string for mapping Identity names to MFA method names. Values to substitute should be placed in `{{}}`. For example, `"{{alias.name}}@<EMAIL>"`. If blank, the Alias's Name field will be used as-is. Currently-supported mappings:
- alias.name: The name returned by the mount configured via the `mount_accessor` parameter
- entity.name: The name configured for the Entity
- alias.metadata.`<key>`: The value of the Alias's metadata parameter
- entity.metadata.`<key>`: The value of the Entity's metadata parameter
"""
return pulumi.get(self, "username_format")
@username_format.setter
def username_format(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "username_format", value)
@pulumi.input_type
class _MfaDuoState:
def __init__(__self__, *,
api_hostname: Optional[pulumi.Input[str]] = None,
integration_key: Optional[pulumi.Input[str]] = None,
mount_accessor: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
push_info: Optional[pulumi.Input[str]] = None,
secret_key: Optional[pulumi.Input[str]] = None,
username_format: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering MfaDuo resources.
:param pulumi.Input[str] api_hostname: `(string: <required>)` - API hostname for Duo.
:param pulumi.Input[str] integration_key: `(string: <required>)` - Integration key for Duo.
:param pulumi.Input[str] mount_accessor: `(string: <required>)` - The mount to tie this method to for use in automatic mappings. The mapping will use the Name field of Aliases associated with this mount as the username in the mapping.
:param pulumi.Input[str] name: `(string: <required>)` – Name of the MFA method.
:param pulumi.Input[str] push_info: `(string)` - Push information for Duo.
:param pulumi.Input[str] secret_key: `(string: <required>)` - Secret key for Duo.
:param pulumi.Input[str] username_format: `(string)` - A format string for mapping Identity names to MFA method names. Values to substitute should be placed in `{{}}`. For example, `"{{alias.name}}@<EMAIL>"`. If blank, the Alias's Name field will be used as-is. Currently-supported mappings:
- alias.name: The name returned by the mount configured via the `mount_accessor` parameter
- entity.name: The name configured for the Entity
- alias.metadata.`<key>`: The value of the Alias's metadata parameter
- entity.metadata.`<key>`: The value of the Entity's metadata parameter
"""
if api_hostname is not None:
pulumi.set(__self__, "api_hostname", api_hostname)
if integration_key is not None:
pulumi.set(__self__, "integration_key", integration_key)
if mount_accessor is not None:
pulumi.set(__self__, "mount_accessor", mount_accessor)
if name is not None:
pulumi.set(__self__, "name", name)
if push_info is not None:
pulumi.set(__self__, "push_info", push_info)
if secret_key is not None:
pulumi.set(__self__, "secret_key", secret_key)
if username_format is not None:
pulumi.set(__self__, "username_format", username_format)
@property
@pulumi.getter(name="apiHostname")
def api_hostname(self) -> Optional[pulumi.Input[str]]:
"""
`(string: <required>)` - API hostname for Duo.
"""
return pulumi.get(self, "api_hostname")
@api_hostname.setter
def api_hostname(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_hostname", value)
@property
@pulumi.getter(name="integrationKey")
def integration_key(self) -> Optional[pulumi.Input[str]]:
"""
`(string: <required>)` - Integration key for Duo.
"""
return pulumi.get(self, "integration_key")
@integration_key.setter
def integration_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "integration_key", value)
@property
@pulumi.getter(name="mountAccessor")
def mount_accessor(self) -> Optional[pulumi.Input[str]]:
"""
`(string: <required>)` - The mount to tie this method to for use in automatic mappings. The mapping will use the Name field of Aliases associated with this mount as the username in the mapping.
"""
return pulumi.get(self, "mount_accessor")
@mount_accessor.setter
def mount_accessor(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "mount_accessor", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
`(string: <required>)` – Name of the MFA method.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="pushInfo")
def push_info(self) -> Optional[pulumi.Input[str]]:
"""
`(string)` - Push information for Duo.
"""
return pulumi.get(self, "push_info")
@push_info.setter
def push_info(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "push_info", value)
@property
@pulumi.getter(name="secretKey")
def secret_key(self) -> Optional[pulumi.Input[str]]:
"""
`(string: <required>)` - Secret key for Duo.
"""
return pulumi.get(self, "secret_key")
@secret_key.setter
def secret_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "secret_key", value)
@property
@pulumi.getter(name="usernameFormat")
def username_format(self) -> Optional[pulumi.Input[str]]:
"""
`(string)` - A format string for mapping Identity names to MFA method names. Values to substitute should be placed in `{{}}`. For example, `"{{alias.name}}@<EMAIL>"`. If blank, the Alias's Name field will be used as-is. Currently-supported mappings:
- alias.name: The name returned by the mount configured via the `mount_accessor` parameter
- entity.name: The name configured for the Entity
- alias.metadata.`<key>`: The value of the Alias's metadata parameter
- entity.metadata.`<key>`: The value of the Entity's metadata parameter
"""
return pulumi.get(self, "username_format")
@username_format.setter
def username_format(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "username_format", value)
class MfaDuo(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
api_hostname: Optional[pulumi.Input[str]] = None,
integration_key: Optional[pulumi.Input[str]] = None,
mount_accessor: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
push_info: Optional[pulumi.Input[str]] = None,
secret_key: Optional[pulumi.Input[str]] = None,
username_format: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a resource to manage [Duo MFA](https://www.vaultproject.io/docs/enterprise/mfa/mfa-duo.html).
**Note** this feature is available only with Vault Enterprise.
## Example Usage
```python
import pulumi
import pulumi_vault as vault
userpass = vault.AuthBackend("userpass",
type="userpass",
path="userpass")
my_duo = vault.MfaDuo("myDuo",
mount_accessor=userpass.accessor,
secret_key="<KEY>",
integration_key="BIACEUEAXI20BNWTEYXT",
api_hostname="api-2b5c39f5.duosecurity.com")
```
## Import
Mounts can be imported using the `path`, e.g.
```sh
$ pulumi import vault:index/mfaDuo:MfaDuo my_duo my_duo
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] api_hostname: `(string: <required>)` - API hostname for Duo.
:param pulumi.Input[str] integration_key: `(string: <required>)` - Integration key for Duo.
:param pulumi.Input[str] mount_accessor: `(string: <required>)` - The mount to tie this method to for use in automatic mappings. The mapping will use the Name field of Aliases associated with this mount as the username in the mapping.
:param pulumi.Input[str] name: `(string: <required>)` – Name of the MFA method.
# committing the clone into the snapshot
# should affect only the snapshot, not the real backend
clone_view.commit()
self.assertIsNone(raw_view.storages.try_get(self.storagekey2))
self.assertEqual(self.storageitem2, snapshot_view.storages.try_get(self.storagekey2))
# finally commit to real db
snapshot_view.commit()
self.assertEqual(self.storageitem2, raw_view.storages.try_get(self.storagekey2))
def test_snapshot_clone_delete(self):
raw_view = self.db.get_rawview()
raw_view.storages.put(self.storagekey1, self.storageitem1)
snapshot_view = self.db.get_snapshotview()
# perform a get to fill the cache
snapshot_view.storages.get(self.storagekey1)
clone_view = snapshot_view.clone()
# now test deleting an item
clone_view.storages.delete(self.storagekey1)
# test it's gone in the clone, but nowhere else
self.assertIsNone(clone_view.storages.try_get(self.storagekey1))
self.assertIsNotNone(snapshot_view.storages.try_get(self.storagekey1))
self.assertIsNotNone(raw_view.storages.try_get(self.storagekey1))
# commit the clone into the snapshot
clone_view.commit()
# and validate it is also gone in the snapshot but not the real db
self.assertIsNone(snapshot_view.storages.try_get(self.storagekey1))
self.assertIsNotNone(raw_view.storages.try_get(self.storagekey1))
# finally persist to real db
snapshot_view.commit()
self.assertIsNone(raw_view.storages.try_get(self.storagekey1))
def test_snapshot_clone_update(self):
raw_view = self.db.get_rawview()
raw_view.storages.put(self.storagekey1, self.storageitem1)
snapshot_view = self.db.get_snapshotview()
# perform a get to fill the cache
snapshot_view.storages.get(self.storagekey1)
clone_view = snapshot_view.clone()
value_from_clone = clone_view.storages.get(self.storagekey1) # type: storage.StorageItem
value_from_clone.value = b'\x55\x55'
# validate the snapshot and real backend are not affected
value_from_snapshot = snapshot_view.storages.get(self.storagekey1)
value_from_real_db = raw_view.storages.get(self.storagekey1)
self.assertNotEqual(b'\x55\x55', value_from_snapshot.value)
self.assertNotEqual(b'\x55\x55', value_from_real_db.value)
# commit clone
clone_view.commit()
# now snapshot should be updated, but real db not
value_from_snapshot = snapshot_view.storages.get(self.storagekey1)
value_from_real_db = raw_view.storages.get(self.storagekey1)
self.assertEqual(b'\x55\x55', value_from_snapshot.value)
self.assertNotEqual(b'\x55\x55', value_from_real_db.value)
# finally persist to real db
snapshot_view.commit()
value_from_real_db = raw_view.storages.get(self.storagekey1)
self.assertEqual(b'\x55\x55', value_from_real_db.value)
def test_all(self):
raw_view = self.db.get_rawview()
raw_view.storages.put(self.storagekey1, self.storageitem1)
raw_view.storages.put(self.storagekey2, self.storageitem2)
raw_view.storages.put(self.storagekey3, self.storageitem3)
snapshot_view = self.db.get_snapshotview()
# get a key to fill the cache so we can test sorting and readonly behaviour
# key3 should come after key 2
snapshot_view.storages.get(self.storagekey3, read_only=True)
all_pairs = dict(snapshot_view.storages.all(self.storage2_id))
self.assertEqual(2, len(all_pairs))
self.assertNotIn(self.storagekey1, all_pairs)
self.assertEqual(self.storagekey2, list(all_pairs.keys())[0])
self.assertEqual(self.storagekey3, list(all_pairs.keys())[1])
self.assertNotIn(self.storageitem1, all_pairs.values())
self.assertEqual(self.storageitem2, list(all_pairs.values())[0])
self.assertEqual(self.storageitem3, list(all_pairs.values())[1])
# test results are readonly by modifying the results and requesting it again from the snapshot.
storage_item2 = list(all_pairs.values())[0]
storage_item3 = list(all_pairs.values())[1]
storage_item2.value = b'\x55\x55'
storage_item3.value = b'\x55\x55'
item2_from_snap = snapshot_view.storages.get(self.storagekey2, read_only=True)
item3_from_snap = snapshot_view.storages.get(self.storagekey3, read_only=True)
self.assertNotEqual(b'\x55\x55', item2_from_snap.value)
self.assertNotEqual(b'\x55\x55', item3_from_snap.value)
clone_view = snapshot_view.clone()
clone_view.storages.put(self.storagekey4, self.storageitem4)
all_pairs = dict(clone_view.storages.all(self.storage2_id))
self.assertEqual(3, len(all_pairs))
self.assertEqual(2, len(list(snapshot_view.storages.all(self.storage2_id))))
self.assertNotIn(self.storagekey1, all_pairs)
self.assertEqual(self.storagekey2, list(all_pairs.keys())[0])
self.assertEqual(self.storagekey3, list(all_pairs.keys())[1])
self.assertEqual(self.storagekey4, list(all_pairs.keys())[2])
self.assertNotIn(self.storageitem1, all_pairs.values())
self.assertEqual(self.storageitem2, list(all_pairs.values())[0])
self.assertEqual(self.storageitem3, list(all_pairs.values())[1])
self.assertEqual(self.storageitem4, list(all_pairs.values())[2])
def test_find(self):
raw_view = self.db.get_rawview()
raw_view.storages.put(self.storagekey1, self.storageitem1)
raw_view.storages.put(self.storagekey2, self.storageitem2)
raw_view.storages.put(self.storagekey3, self.storageitem3)
raw_view.storages.put(self.storagekey4, self.storageitem4)
snapshot_view = self.db.get_snapshotview()
# get a key to fill the cache so we can test sorting and readonly behaviour
# key3 should come after key 2
snapshot_view.storages.get(self.storagekey3, read_only=True)
# key2,3 and 4 are of the same smart contract
# only key2 and key3 start with \x03
find_prefix_storage2 = storage.create_find_prefix(self.storage2_id, b'\x03')
all_pairs = list(snapshot_view.storages.find(find_prefix_storage2))
keys = list(map(lambda i: i[0], all_pairs))
items = list(map(lambda i: i[1], all_pairs))
self.assertEqual(2, len(all_pairs))
self.assertNotIn(self.storagekey1, keys)
self.assertEqual(self.storagekey2, keys[0])
self.assertEqual(self.storagekey3, keys[1])
self.assertNotIn(self.storagekey4, keys)
self.assertNotIn(self.storageitem1, items)
self.assertEqual(self.storageitem2, items[0])
self.assertEqual(self.storageitem3, items[1])
self.assertNotIn(self.storageitem4, items)
# test for read only results
items[0].value = b'\x55\x55'
item_from_snap = snapshot_view.storages.get(self.storagekey3, read_only=True)
self.assertNotEqual(b'\x55\x55', item_from_snap.value)
keys[0].key = b'\x77\x77'
# create a storage key that should match the above modification
modified_key = storage.StorageKey(self.storage2_id, b'\x77\x77')
# and we should not find it
self.assertIsNone(snapshot_view.storages.try_get(modified_key, read_only=True))
# test find in clone
clone_key = storage.StorageKey(self.storage2_id, b'\x03\x88')
clone_item = storage.StorageItem(b'\x99\x99')
clone_view = snapshot_view.clone()
clone_view.storages.put(clone_key, clone_item)
all_pairs = list(clone_view.storages.find(find_prefix_storage2))
self.assertEqual(3, len(all_pairs))
def test_find_extra(self):
"""
This test used to fail on Neo 2.x and was reported here: https://github.com/neo-project/neo/issues/946
They changed the logic to be deterministic, so we simulate the same behaviour.
public class Test : DataCache<StorageKey, StorageItem>
{
static StorageItem si = new StorageItem { IsConstant = false, Value = new byte[] { 0x1 } };
static StorageKey key1 = new StorageKey { ScriptHash = UInt160.Zero, Key = new byte[] { 1 } };
static StorageKey key2 = new StorageKey { ScriptHash = UInt160.Zero, Key = new byte[] { 2 } };
static StorageKey key3 = new StorageKey { ScriptHash = UInt160.Zero, Key = new byte[] { 3 } };
static Dictionary<StorageKey, StorageItem> innerDictionary = new Dictionary<StorageKey, StorageItem>() {
{ key1, si },
{ key2, si },
{ key3, si }
};
public override void DeleteInternal(StorageKey key)
{
throw new NotImplementedException();
}
protected override void AddInternal(StorageKey key, StorageItem value)
{
throw new NotImplementedException();
}
protected override IEnumerable<KeyValuePair<StorageKey, StorageItem>> FindInternal(byte[] key_prefix)
{
foreach (var pair in innerDictionary)
{
yield return new KeyValuePair<StorageKey, StorageItem>(pair.Key, pair.Value);
}
}
protected override StorageItem GetInternal(StorageKey key)
{
return innerDictionary[key];
}
protected override StorageItem TryGetInternal(StorageKey key)
{
return innerDictionary[key];
}
protected override void UpdateInternal(StorageKey key, StorageItem value)
{
throw new NotImplementedException();
}
}
public static void Main(string[] args)
{
DataCache<StorageKey, StorageItem> test = new Test();
StorageKey key1 = new StorageKey { ScriptHash = UInt160.Zero, Key = new byte[] { 1 } };
test.TryGet(key1);
foreach (KeyValuePair<StorageKey, StorageItem> pair in test.Find())
{
Console.WriteLine($"{BitConverter.ToString(pair.Key.Key)}");
}
}
"""
key1 = storage.StorageKey(0, b'key1')
key2 = storage.StorageKey(0, b'key2')
key3 = storage.StorageKey(0, b'key3')
value1 = storage.StorageItem(b'value1')
snapshot_view = self.db.get_snapshotview()
snapshot_view.storages.put(key1, value1)
snapshot_view.storages.put(key2, value1)
snapshot_view.storages.put(key3, value1)
results = list(snapshot_view.storages.all())
self.assertEqual(key1, results[0][0])
self.assertEqual(key2, results[1][0])
self.assertEqual(key3, results[2][0])
# NEO-cli sorts keys based on the serialized TKey value, we make 1 special case where we change the contract
key1 = storage.StorageKey(1, key=b'key1')
snapshot_view = self.db.get_snapshotview()
snapshot_view.storages.put(key1, value1)
snapshot_view.storages.put(key2, value1)
snapshot_view.storages.put(key3, value1)
results = list(snapshot_view.storages.all())
self.assertEqual(key2, results[0][0])
self.assertEqual(key3, results[1][0])
self.assertEqual(key1, results[2][0])
def test_issue_1672(self):
# test if we are affected by https://github.com/neo-project/neo/issues/1672
self.storagekey1 = storage.StorageKey(self.storage1_id, b'\x00\x01')
self.storagekey2 = storage.StorageKey(self.storage1_id, b'\x00\x02')
self.storagekey3 = storage.StorageKey(self.storage1_id, b'\x00\x03')
self.storagekey4 = storage.StorageKey(self.storage1_id, b'\x00\x04')
self.storageitem1 = storage.StorageItem(b'\x01\x01')
self.storageitem2 = storage.StorageItem(b'\x02\x02')
self.storageitem3 = storage.StorageItem(b'\x03\x03')
self.storageitem4 = storage.StorageItem(b'\x04\x04')
# prepare
snapshot = self.db.get_snapshotview()
snapshot.storages.put(self.storagekey1, self.storageitem1)
raw = self.db.get_rawview()
raw.storages.put(self.storagekey2, self.storageitem2)
raw.storages.put(self.storagekey3, self.storageitem3)
raw.storages.put(self.storagekey4, self.storageitem4)
# test
find_prefix = storage.create_find_prefix(self.storage1_id, b'\x00')
iter = snapshot.storages.find(find_prefix)
kv_pair = next(iter)
self.assertEqual(self.storagekey1, kv_pair[0])
kv_pair = snapshot.storages.get(self.storagekey3)
kv_pair = next(iter)
self.assertEqual(self.storagekey2, kv_pair[0])
self.assertEqual(self.storageitem2, kv_pair[1])
kv_pair = next(iter)
self.assertEqual(self.storagekey3, kv_pair[0])
self.assertEqual(self.storageitem3, kv_pair[1])
kv_pair = next(iter)
self.assertEqual(self.storagekey4, kv_pair[0])
self.assertEqual(self.storageitem4, kv_pair[1])
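# A hedged illustration (not part of the original module): backend-specific test modules
# are expected to subclass the abstract suites and only provide `db_factory`. The
# `MemoryDB` name below is a hypothetical stand-in for the project's real in-memory
# backend implementation.
#
#   class MemoryTransactionStorageTest(AbstractTransactionStorageTest):
#       def db_factory(self):
#           return MemoryDB()  # hypothetical backend; swap in the real implementation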
class AbstractTransactionStorageTest(abc.ABC, unittest.TestCase):
"""
A helper class to easily test backend specific code
"""
@abc.abstractmethod
def db_factory(self):
""" Implement to return an instance of your DB """
def setUp(self) -> None:
self.db = self.db_factory()
cosigner = payloads.Signer(account=types.UInt160.from_string("d7678dd97c000be3f33e9362e673101bac4ca654"),
scope=payloads.WitnessScope.GLOBAL)
witness = payloads.Witness(invocation_script=b'', verification_script=b'\x55')
self.tx1 = payloads.Transaction(version=0,
nonce=123,
system_fee=456,
network_fee=789,
valid_until_block=1,
attributes=[],
signers=[cosigner],
script=b'\x01\x02',
witnesses=[witness])
self.tx1_hash = self.tx1.hash()
# by changing the script we change the TX hash
self.tx2 = deepcopy(self.tx1)
self.tx2.script = b'\x03\x04'
self.tx2_hash = self.tx2.hash()
def test_raw(self):
raw_view = self.db.get_rawview()
# we should not find anything in an empty db
target_tx_hash = types.UInt256.zero()
with self.assertRaises(KeyError) as context:
raw_view.transactions.get(target_tx_hash)
self.assertIsNone(raw_view.transactions.try_get(target_tx_hash))
# fill the db
raw_view.transactions.put(self.tx1)
# and test it is immediately added
tx_from_db = raw_view.transactions.try_get(self.tx1_hash)
self.assertIsNotNone(tx_from_db)
self.assertEqual(self.tx1, tx_from_db)
# test getting all transactions
raw_view.transactions.put(self.tx2)
txs = list(raw_view.transactions.all())
self.assertEqual(2, len(txs))
self.assertIn(self.tx1, txs)
self.assertIn(self.tx2, txs)
# finally try removing the tx
raw_view.transactions.delete(self.tx1_hash)
self.assertIsNone(raw_view.transactions.try_get(self.tx1_hash))
def test_snapshot_basic_add_delete_get(self):
# test basic add, delete, get and separation
raw_view = self.db.get_rawview()
snapshot_view = self.db.get_snapshotview()
# we should not find anything in an empty db
target_tx_hash = types.UInt256.zero()
with self.assertRaises(KeyError) as context:
snapshot_view.transactions.get(target_tx_hash)
self.assertIsNone(snapshot_view.transactions.try_get(target_tx_hash))
# add item
snapshot_view.transactions.put(self.tx1)
# real backend should not be affected until a commit is called
self.assertIsNone(raw_view.transactions.try_get(self.tx1_hash))
# persist to backend
snapshot_view.commit()
tx_from_db = raw_view.transactions.try_get(self.tx1_hash)
# and validate
self.assertIsNotNone(tx_from_db)
self.assertEqual(self.tx1, tx_from_db)
# finally, try deleting
# get a clean view with no cache
snapshot_view = self.db.get_snapshotview()
snapshot_view.transactions.delete(self.tx1_hash)
# real backend should still have it, snapshot not
self.assertIsNotNone(raw_view.transactions.try_get(self.tx1_hash))
self.assertIsNone(snapshot_view.transactions.try_get(self.tx1_hash))
# persist and validate real backend also doesn't have it anymore
snapshot_view.commit()
self.assertIsNone(raw_view.transactions.try_get(self.tx1_hash))
def test_snapshot_add_duplicates(self):
snapshot_view = self.db.get_snapshotview()
# test double adding while already in cache
snapshot_view.transactions.put(self.tx1)
with self.assertRaises(ValueError):
snapshot_view.transactions.put(self.tx1)
# test double adding when not in cache, but in real backend
snapshot_view.commit()
# get a clean one with an empty cache
snapshot_view = self.db.get_snapshotview()
with self.assertRaises(ValueError):
snapshot_view.transactions.put(self.tx1)
def test_snapshot_add_while_cache_marked_deleted(self):
# an item can exist in the real backend, and be marked in cache to be deleted
# it should then be possible to delete it from cache without exceptions
# fill real backend
raw_view = self.db.get_rawview()
raw_view.transactions.put(self.tx1)
# ensure item is marked as deleted in cache
snapshot_view = self.db.get_snapshotview()
snapshot_view.transactions.delete(self.tx1_hash)
# now test by adding the item again
success = False
with suppress(ValueError):
snapshot_view.transactions.put(self.tx1)
success = True
self.assertTrue(success)
def test_snapshot_delete_various(self):
snapshot_view = self.db.get_snapshotview()
# delete non existing item with empty cache should throw no errors
ok = False
with suppress(Exception):
snapshot_view.transactions.delete(self.tx1_hash)
ok = True
self.assertTrue(ok)
# delete an item that was only added to the cache
# a Python implementation of the tree iterator used by libc++
class stdmap_iterator:
def tree_min(self, x):
logger = lldb.formatters.Logger.Logger()
steps = 0
if x.is_null:
return None
while (not x.left.is_null):
x = x.left
steps += 1
if steps > self.max_count:
logger >> "Returning None - we overflowed"
return None
return x
def tree_max(self, x):
logger = lldb.formatters.Logger.Logger()
if x.is_null:
return None
while (not x.right.is_null):
x = x.right
return x
def tree_is_left_child(self, x):
logger = lldb.formatters.Logger.Logger()
if x.is_null:
return None
return True if x.value == x.parent.left.value else False
def increment_node(self, node):
logger = lldb.formatters.Logger.Logger()
if node.is_null:
return None
if not node.right.is_null:
return self.tree_min(node.right)
steps = 0
while (not self.tree_is_left_child(node)):
steps += 1
if steps > self.max_count:
logger >> "Returning None - we overflowed"
return None
node = node.parent
return node.parent
def __init__(self, node, max_count=0):
logger = lldb.formatters.Logger.Logger()
# we convert the SBValue to an internal node object on entry
self.node = stdmap_iterator_node(node)
self.max_count = max_count
def value(self):
logger = lldb.formatters.Logger.Logger()
return self.node.sbvalue # and return the SBValue back on exit
def next(self):
logger = lldb.formatters.Logger.Logger()
node = self.increment_node(self.node)
if node is not None and node.sbvalue.IsValid() and not(node.is_null):
self.node = node
return self.value()
else:
return None
def advance(self, N):
logger = lldb.formatters.Logger.Logger()
if N < 0:
return None
if N == 0:
return self.value()
if N == 1:
return self.next()
while N > 0:
if self.next() is None:
return None
N = N - 1
return self.value()
class stdmap_SynthProvider:
def __init__(self, valobj, dict):
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
self.pointer_size = self.valobj.GetProcess().GetAddressByteSize()
self.count = None
def update(self):
logger = lldb.formatters.Logger.Logger()
self.count = None
try:
# we will set this to True if we find out that discovering a node in the map takes more steps than the overall size of the RB tree
# if this gets set to True, then we will merrily return None for
# any child from that moment on
self.garbage = False
self.tree = self.valobj.GetChildMemberWithName('__tree_')
self.root_node = self.tree.GetChildMemberWithName('__begin_node_')
# this data is either lazily-calculated, or cannot be inferred at this moment
# we still need to mark it as None, meaning "please set me ASAP"
self.data_type = None
self.data_size = None
self.skip_size = None
except:
pass
def num_children(self):
global _map_capping_size
logger = lldb.formatters.Logger.Logger()
if self.count is None:
self.count = self.num_children_impl()
if self.count > _map_capping_size:
self.count = _map_capping_size
return self.count
def num_children_impl(self):
logger = lldb.formatters.Logger.Logger()
try:
return self.valobj.GetChildMemberWithName('__tree_').GetChildMemberWithName(
'__pair3_').GetChildMemberWithName('__first_').GetValueAsUnsigned()
except:
return 0
def has_children(self):
return True
def get_data_type(self):
logger = lldb.formatters.Logger.Logger()
if self.data_type is None or self.data_size is None:
if self.num_children() == 0:
return False
deref = self.root_node.Dereference()
if not(deref.IsValid()):
return False
value = deref.GetChildMemberWithName('__value_')
if not(value.IsValid()):
return False
self.data_type = value.GetType()
self.data_size = self.data_type.GetByteSize()
self.skip_size = None
return True
else:
return True
def get_value_offset(self, node):
logger = lldb.formatters.Logger.Logger()
if self.skip_size is None:
node_type = node.GetType()
fields_count = node_type.GetNumberOfFields()
for i in range(fields_count):
field = node_type.GetFieldAtIndex(i)
if field.GetName() == '__value_':
self.skip_size = field.GetOffsetInBytes()
break
return (self.skip_size is not None)
def get_child_index(self, name):
logger = lldb.formatters.Logger.Logger()
try:
return int(name.lstrip('[').rstrip(']'))
except:
return -1
def get_child_at_index(self, index):
logger = lldb.formatters.Logger.Logger()
logger >> "Retrieving child " + str(index)
if index < 0:
return None
if index >= self.num_children():
return None
if self.garbage:
logger >> "Returning None since this tree is garbage"
return None
try:
iterator = stdmap_iterator(
self.root_node, max_count=self.num_children())
# the debug info for libc++ std::map is such that __begin_node_ has a very nice and useful type
# out of which we can grab the information we need - every other node has a less informative
# type which omits all value information and only contains housekeeping information for the RB tree
# hence, we need to know if we are at a node != 0, so that we can
# still get at the data
need_to_skip = (index > 0)
current = iterator.advance(index)
if current is None:
logger >> "Tree is garbage - returning None"
self.garbage = True
return None
if self.get_data_type():
if not(need_to_skip):
current = current.Dereference()
obj = current.GetChildMemberWithName('__value_')
obj_data = obj.GetData()
# make sure we have a valid offset for the next items
self.get_value_offset(current)
# we do not return __value_ because then we would end up with a child named
# __value_ instead of [0]
return self.valobj.CreateValueFromData(
'[' + str(index) + ']', obj_data, self.data_type)
else:
# FIXME we need to have accessed item 0 before accessing
# any other item!
if self.skip_size is None:
logger >> "You asked for item > 0 before asking for item == 0, I will fetch 0 now then retry"
if self.get_child_at_index(0):
return self.get_child_at_index(index)
else:
logger >> "item == 0 could not be found. sorry, nothing can be done here."
return None
return current.CreateChildAtOffset(
'[' + str(index) + ']', self.skip_size, self.data_type)
else:
logger >> "Unable to infer data-type - returning None (should mark tree as garbage here?)"
return None
except Exception as err:
logger >> "Hit an exception: " + str(err)
return None
# Just an example: the actual summary is produced by a summary string:
# size=${svar%#}
def stdmap_SummaryProvider(valobj, dict):
prov = stdmap_SynthProvider(valobj, None)
return 'size=' + str(prov.num_children())
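# A hedged usage note (not part of the original formatter): providers like these are
# typically hooked up with LLDB's `type synthetic add` / `type summary add` commands
# after importing the module. The module name `libcxx` and the type regex below are
# assumptions; adjust them to the actual file/module name and libc++ inline namespace.
#
#   (lldb) command script import /path/to/libcxx.py
#   (lldb) type synthetic add -x "^std::__1::map<" --python-class libcxx.stdmap_SynthProvider
#   (lldb) type summary add -x "^std::__1::map<" --python-function libcxx.stdmap_SummaryProvider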
class stddeque_SynthProvider:
def __init__(self, valobj, d):
logger = lldb.formatters.Logger.Logger()
logger.write("init")
self.valobj = valobj
self.pointer_size = self.valobj.GetProcess().GetAddressByteSize()
self.count = None
try:
self.find_block_size()
except:
self.block_size = -1
self.element_size = -1
logger.write(
"block_size=%d, element_size=%d" %
(self.block_size, self.element_size))
def find_block_size(self):
# in order to use the deque we must have the block size, or else
# it's impossible to know what memory addresses are valid
self.element_type = self.valobj.GetType().GetTemplateArgumentType(0)
self.element_size = self.element_type.GetByteSize()
# The code says this, but there must be a better way:
# template <class _Tp, class _Allocator>
# class __deque_base {
# static const difference_type __block_size = sizeof(value_type) < 256 ? 4096 / sizeof(value_type) : 16;
# }
if self.element_size < 256:
self.block_size = 4096 // self.element_size  # integer division keeps block_size an int on Python 3
else:
self.block_size = 16
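# For illustration (derived from the rule above): a deque of 4-byte ints gets a
# block_size of 4096 // 4 = 1024 elements per row, while any element type of
# 256 bytes or more always gets 16 elements per row.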
def num_children(self):
global _deque_capping_size
logger = lldb.formatters.Logger.Logger()
if self.count is None:
return 0
return min(self.count, _deque_capping_size)
def has_children(self):
return True
def get_child_index(self, name):
logger = lldb.formatters.Logger.Logger()
try:
return int(name.lstrip('[').rstrip(']'))
except:
return -1
def get_child_at_index(self, index):
logger = lldb.formatters.Logger.Logger()
logger.write("Fetching child " + str(index))
if index < 0 or self.count is None:
return None
if index >= self.num_children():
return None
try:
i, j = divmod(self.start + index, self.block_size)
return self.first.CreateValueFromExpression(
'[' + str(index) + ']', '*(*(%s + %d) + %d)' %
(self.first.get_expr_path(), i, j))
except:
return None
def update(self):
logger = lldb.formatters.Logger.Logger()
try:
# A deque is effectively a two-dim array, with fixed width.
# 'map' contains pointers to the rows of this array. The
# full memory area allocated by the deque is delimited
# by 'first' and 'end_cap'. However, only a subset of this
# memory contains valid data since a deque may have some slack
# at the front and back in order to have O(1) insertion at
# both ends. The rows in active use are delimited by
# 'begin' and 'end'.
#
# To find the elements that are actually constructed, the 'start'
# variable tells which element in this NxM array is the 0th
# one, and the 'size' element gives the number of elements
# in the deque.
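# Worked example (illustrative, consistent with get_child_at_index above): with
# block_size == 1024 and start == 100, logical element [i] lives at
# row (100 + i) // 1024 of 'map' and column (100 + i) % 1024 within that row.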
count = self.valobj.GetChildMemberWithName(
'__size_').GetChildMemberWithName('__first_').GetValueAsUnsigned(0)
# give up now if we can't access memory reliably
if self.block_size < 0:
logger.write("block_size < 0")
return
map_ = self.valobj.GetChildMemberWithName('__map_')
start = self.valobj.GetChildMemberWithName(
'__start_').GetValueAsUnsigned(0)
first = map_.GetChildMemberWithName('__first_')
map_first = first.GetValueAsUnsigned(0)
map_begin = map_.GetChildMemberWithName(
'__begin_').GetValueAsUnsigned(0)
map_end = map_.GetChildMemberWithName(
'__end_').GetValueAsUnsigned(0)
map_endcap = map_.GetChildMemberWithName(
'__end_cap_').GetChildMemberWithName('__first_').GetValueAsUnsigned(0)
# check consistency
if not map_first <= map_begin <= map_end <= map_endcap:
logger.write("map pointers are not monotonic")
return
total_rows, junk = divmod(
map_endcap - map_first, self.pointer_size)
if junk:
logger.write("endcap-first doesnt align correctly")
return
active_rows, junk = divmod(map_end - map_begin, self.pointer_size)
if junk:
logger.write("end-begin doesnt align correctly")
return
start_row, junk = divmod(map_begin - map_first, self.pointer_size)
if junk:
logger.write("begin-first doesnt align correctly")
return
if not start_row * \
self.block_size <= start < (start_row + 1) * self.block_size:
logger.write("0th element must be in the 'begin' row")
return
end_row = | |
<filename>test/unit/events.py
"""Collection of events for unit tests."""
import pytest
import uuid
@pytest.fixture()
def standard_valid_input():
new_uuid = str(uuid.uuid4())
request_id = "request_id_" + new_uuid
return {
"request_id": request_id,
"metric_data": [
{
"metric_name": "theMetricname",
"dimensions": [
{
"name": "thename1",
"value": "thevalue1"
},
{
"name": "thename2",
"value": "thevalue2"
}
],
"timestamp": 1528236844480,
"statistic_values": {
"sample_count": 12.17,
"sum": 12.17,
"minimum": 12.17,
"maximum": 12.17
},
"unit": "Seconds",
"storage_resolution": 12
}
]
}
@pytest.fixture()
def basic_valid_input():
new_uuid = str(uuid.uuid4())
request_id = "request_id_" + new_uuid
return {
"request_id": request_id,
"metric_data": [
{
"metric_name": "theMetricName",
"value":123,
}
]
}
@pytest.fixture()
def multiple_metrics_input():
new_uuid = str(uuid.uuid4())
request_id = "request_id_" + new_uuid
return {
"request_id": request_id,
"metric_data": [
{
"metric_name": "theMetricname",
"dimensions": [
{
"name": "thename1",
"value": "thevalue1"
},
{
"name": "thename2",
"value": "thevalue2"
}
],
"timestamp": 1528236844480,
"statistic_values": {
"sample_count": 12.17,
"sum": 12.17,
"minimum": 12.17,
"maximum": 12.17
},
"unit": "Seconds",
"storage_resolution": 12
},
{
"metric_name": "theMetricname2",
"dimensions": [
{
"name": "thename1",
"value": "thevalue1"
},
{
"name": "thename2",
"value": "thevalue2"
}
],
"timestamp": 1528236844480,
"value":123,
"unit": "Seconds",
"storage_resolution": 12
}
]
}
@pytest.fixture()
def value_and_statistic_values_both_included():
return {
"request_id": "an id",
"metric_data": [
{
"metric_name": "theMetricname",
"dimensions": [
{
"name": "thename1",
"value": "thevalue1"
},
{
"name": "thename2",
"value": "thevalue2"
}
],
"timestamp": 1528236844480,
"value": 123,
"statistic_values": {
"sample_count": 12.17,
"sum": 12.17,
"minimum": 12.17,
"maximum": 12.17
},
"unit": "Seconds",
"storage_resolution": 12
}
]
}
@pytest.fixture()
def missing_value_and_statistic_values():
return {
"request_id": "the_id",
"metric_data": [
{
"metric_name": "the_metric_name",
"dimensions": [
{
"name": "the_name_1",
"value": "the_value_1"
},
{
"name": "the_name_2",
"value": "the_value_2"
}
],
"timestamp": 1528236844480,
"unit": "Seconds",
"storage_resolution": 12
}
]
}
@pytest.fixture()
def missing_request_id():
return {
"metric_data": [
{
"metric_name": "the_metric_name",
"dimensions": [
{
"name": "the_name_1",
"value": "the_value_1"
},
{
"name": "the_name_2",
"value": "the_value_2"
}
],
"timestamp": 1528236844480,
"value":123,
"unit": "Seconds",
"storage_resolution": 12
}
]
}
@pytest.fixture()
def missing_metric_name():
return {
"request_id": "the_id",
"metric_data": [
{
"dimensions": [
{
"name": "the_name_1",
"value": "the_value_1"
},
{
"name": "the_name_2",
"value": "the_value_2"
}
],
"timestamp": 1528236844480,
"value":123,
"unit": "Seconds",
"storage_resolution": 12
}
]
}
@pytest.fixture()
def statistic_values_missing_sum():
return {
"request_id": "the_id",
"metric_data": [
{
"metric_name": "the_metric_name",
"dimensions": [
{
"name": "the_name_1",
"value": "the_value_1"
},
{
"name": "the_name_2",
"value": "the_value_2"
}
],
"timestamp": 1528236844480,
"statistic_values": {
"sample_count": 12.17,
"minimum": 12.17,
"maximum": 12.17
},
"unit": "Seconds",
"storage_resolution": 12
}
]
}
@pytest.fixture()
def unit_type_not_available():
return {
"request_id": "the_id",
"metric_data": [
{
"metric_name": "the_metric_name",
"dimensions": [
{
"name": "the_name_1",
"value": "the_value_1"
},
{
"name": "the_name_2",
"value": "the_value_2"
}
],
"timestamp": 1528236844480,
"statistic_values": {
"sample_count": 12.17,
"sum": 12.17,
"minimum": 12.17,
"maximum": 12.17
},
"unit": "minutes",
"storage_resolution": 12
}
]
}
@pytest.fixture()
def storage_resolution_type_invalid():
return {
"request_id": "the_id",
"metric_data": [
{
"metric_name": "the_metric_name",
"dimensions": [
{
"name": "the_name_1",
"value": "the_value_1"
},
{
"name": "the_name_2",
"value": "the_value_2"
}
],
"timestamp": 1528236844480,
"statistic_values": {
"sample_count": 12.17,
"sum": 12.17,
"minimum": 12.17,
"maximum": 12.17
},
"unit": "Seconds",
"storage_resolution": "12"
}
]
}
@pytest.fixture()
def dimension_type_invalid():
return {
"request_id": "an id",
"metric_data": [
{
"metric_name": "theMetricname",
"dimensions": (
{
"name": "thename1",
"value": "thevalue1"
},
{
"name": "thename2",
"value": "thevalue2"
}
),
"timestamp": 1528236844480,
"statistic_values": {
"sample_count": 12.17,
"sum": 12.17,
"minimum": 12.17,
"maximum": 12.17
},
"unit": "Seconds",
"storage_resolution": 12
}
]
}
@pytest.fixture()
def dimension_item_type_invalid():
return {
"request_id": "an id",
"metric_data": [
{
"metric_name": "theMetricname",
"dimensions": [
{
"name": "thename1",
"value": "thevalue1"
},
"a string"
],
"timestamp": 1528236844480,
"statistic_values": {
"sample_count": 12.17,
"sum": 12.17,
"minimum": 12.17,
"maximum": 12.17
},
"unit": "Seconds",
"storage_resolution": 12
}
]
}
@pytest.fixture()
def dimension_item_wrong_property():
return {
"request_id": "an id",
"metric_data": [
{
"metric_name": "theMetricname",
"dimensions": [
{
"name": "thename1",
"value": "thevalue1"
},
{
"name": "thename2",
"Value": "thevalue2"
}
],
"timestamp": 1528236844480,
"statistic_values": {
"sample_count": 12.17,
"sum": 12.17,
"minimum": 12.17,
"maximum": 12.17
},
"unit": "Seconds",
"storage_resolution": 12
}
]
}
@pytest.fixture()
def empty_request_id():
return {
"request_id": "",
"metric_data": [
{
"metric_name": "theMetricname",
"dimensions": [
{
"name": "thename1",
"value": "thevalue1"
},
{
"name": "thename2",
"value": "thevalue2"
}
],
"timestamp": 1528236844480,
"statistic_values": {
"sample_count": 12.17,
"sum": 12.17,
"minimum": 12.17,
"maximum": 12.17
},
"unit": "Seconds",
"storage_resolution": 12
}
]
}
@pytest.fixture()
def empty_dimension_name():
return {
"request_id": "an id",
"metric_data": [
{
"metric_name": "theMetricname",
"dimensions": [
{
"name": "thename1",
"value": "thevalue1"
},
{
"name": "",
"value": "thevalue2"
}
],
"timestamp": 1528236844480,
"statistic_values": {
"sample_count": 12.17,
"sum": 12.17,
"minimum": 12.17,
"maximum": 12.17
},
"unit": "Seconds",
"storage_resolution": 12
}
]
}
@pytest.fixture()
def empty_metric_name():
return {
"request_id": "an id",
"metric_data": [
{
"metric_name": "",
"dimensions": [
{
"name": "thename1",
"value": "thevalue1"
},
{
"name": "thename2",
"value": "thevalue2"
}
],
"timestamp": 1528236844480,
"statistic_values": {
"sample_count": 12.17,
"sum": 12.17,
"minimum": 12.17,
"maximum": 12.17
},
"unit": "Seconds",
"storage_resolution": 12
}
]
}
@pytest.fixture()
def simple_metric_log_event_format():
return {
"metric_name": "theMetricName",
"value": 123,
}
@pytest.fixture()
def simple_metric_put_data_format():
return {
"MetricName":"theMetricName",
"Value": 123,
}
@pytest.fixture()
def complex_metric_log_event_format():
return {
"metric_name": "theMetricname",
"dimensions": [
{
"name": "thename1",
"value": "thevalue1"
},
{
"name": "thename2",
"value": "thevalue2"
}
],
"timestamp": 1528236844480,
"statistic_values": {
"sample_count": 12.17,
"sum": 12.17,
"minimum": 12.17,
"maximum": 12.17
},
"unit": "Seconds",
"storage_resolution": 12
}
@pytest.fixture()
def complex_metric_put_data_format():
return {
"MetricName": "theMetricname",
"Dimensions": [
{
"Name": "thename1",
"Value": "thevalue1"
},
{
"Name": "thename2",
"Value": "thevalue2"
}
],
"Timestamp": 1528236844480,
"StatisticValues": {
"SampleCount": 12.17,
"Sum": 12.17,
"Minimum": 12.17,
"Maximum": 12.17
},
"Unit": "Seconds",
"StorageResolution": 12
}
@pytest.fixture()
def sample_batch():
return [
{
'MetricName': 'theMetricName',
'Value': 17
},
{
'MetricName': 'theMetricName',
'Value': 18
},
{
'MetricName': 'theMetricName',
'Value': 19
},
{
'MetricName': 'theMetricName',
'Value': 20
},
{
'MetricName': 'theMetricName',
'Value': 21
},
{
'MetricName': 'theMetricName',
'Value': 22
}
]
@pytest.fixture()
def unformated_metrics_expected():
return [
{
'metric_name': 'theMetricName',
'value': 17
},
{
'metric_name': 'theMetricName',
'value': 18
},
{
'metric_name': 'theMetricName',
'value': 19
},
{
'metric_name': 'theMetricName',
'value': 20
},
{
'metric_name': 'theMetricName',
'value': 21
},
{
'metric_name': 'theMetricName',
'value': 22
}
]
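# Illustrative only (not part of the original fixture collection): in the real suite a
# consumer test would live in a test_*.py module, but the shape check below shows how
# these two fixtures are meant to line up one-to-one.
def test_sample_batch_mirrors_unformated_metrics_expected(sample_batch, unformated_metrics_expected):
    # every CloudWatch-style entry should correspond to one lower-cased raw metric
    assert len(sample_batch) == len(unformated_metrics_expected)
    for put_item, raw_item in zip(sample_batch, unformated_metrics_expected):
        assert put_item['MetricName'] == raw_item['metric_name']
        assert put_item['Value'] == raw_item['value']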
@pytest.fixture()
def log_events_normal():
return [
{
u'ingestionTime': 1530295428233,
u'timestamp': 1530295428222,
u'message': u"{'metric_data': [{'metric_name': 'theMetricName2', 'value': 17}], 'request_id': 'request_id_61cb9208-b388-466c-87fd-79a5ea89c922'}",
u'eventId': u'34126728423255130347406918563580169267920727811577872384',
u'logStreamName': u'metricPublisherAppNamespace_request_id_61cb9208-b388-466c-87fd-79a5ea89c922'
},
{
u'ingestionTime': 1530295428421,
u'timestamp': 1530295428416,
u'message': u"{'metric_data': [{'metric_name': 'theMetricName2', 'value': 18}], 'request_id': 'request_id_3618d626-b668-4976-abca-6bb544a6b478'}",
u'eventId': u'34126728427581474915921859453265385545568477186932670464',
u'logStreamName': u'metricPublisherAppNamespace_request_id_3618d626-b668-4976-abca-6bb544a6b478'
},
{
u'ingestionTime': 1530295428601,
u'timestamp': 1530295428595,
u'message': u"{'metric_data': [{'metric_name': 'theMetricName2', 'value': 19}], 'request_id': 'request_id_4232fdf3-e66b-4cd4-9db1-96e9be48a4ee'}",
u'eventId': u'34126728431573308306458840995817882099436477345871167488',
u'logStreamName': u'metricPublisherAppNamespace_request_id_4232fdf3-e66b-4cd4-9db1-96e9be48a4ee'
},
{
u'ingestionTime': 1530295428776,
u'timestamp': 1530295428767,
u'message': u"{'metric_data': [{'metric_name': 'theMetricName2', 'value': 20}], 'request_id': 'request_id_d7b593f0-b154-49b4-91f0-864c13740640'}",
u'eventId': u'34126728435409036480606108176373631531088849482571448320',
u'logStreamName': u'metricPublisherAppNamespace_request_id_d7b593f0-b154-49b4-91f0-864c13740640'
},
{
u'ingestionTime': 1530295428950,
u'timestamp': 1530295428945,
u'message': u"{'metric_data': [{'metric_name': 'theMetricName2', 'value': 21}], 'request_id': 'request_id_402ef2a1-eaf3-418f-ac9c-3a4ec4fffa39'}",
u'eventId': u'34126728439378569125944559095777385025694673565001449472',
u'logStreamName': u'metricPublisherAppNamespace_request_id_402ef2a1-eaf3-418f-ac9c-3a4ec4fffa39'
},
{
u'ingestionTime': 1530295429134,
u'timestamp': 1530295429127,
u'message': u"{'metric_data': [{'metric_name': 'theMetricName2', 'value': 22}], 'request_id': 'request_id_1beda470-2c17-44c9-b2bd-cefed4fe40d8'}",
u'eventId': u'34126728443437304752077132507759222312180838774063235072',
u'logStreamName': u'metricPublisherAppNamespace_request_id_1beda470-2c17-44c9-b2bd-cefed4fe40d8'
}
]
@pytest.fixture()
def batched_metrics_normal():
return [
{
'metric_name': 'theMetricName2',
'value': 17
},
{
'metric_name': 'theMetricName2',
'value': 18
},
{
'metric_name': 'theMetricName2',
'value': 19
},
{
'metric_name': 'theMetricName2',
'value': 20
},
{
'metric_name': 'theMetricName2',
'value': 21
},
{
'metric_name': 'theMetricName2',
'value': 22
}
]
@pytest.fixture()
def many_log_events():
return [
{
u'ingestionTime': 1530897511706,
u'timestamp': 1530897511755,
u'message': u"{'metric_data': [{'timestamp': 1530897511448, 'metric_name': 'theMetricName', 'value': 17}], 'request_id': 'request_id_bbf2b361-4cf8-4d13-87de-568bad11b521'}",
u'eventId': u'34140155334712794451397393822139436354307158608090300416',
u'logStreamName': u'metricPublisherAppNamespace_request_id_bbf2b361-4cf8-4d13-87de-568bad11b521'
},
{
u'ingestionTime': 1530897511904,
u'timestamp': 1530897511957,
u'message': u"{'metric_data': [{'timestamp': 1530897511866, 'metric_name': 'theMetricName', 'value': 18}], 'request_id': 'request_id_a8892402-2850-4f0c-b996-e2f961bde951'}",
u'eventId': u'34140155339217544981500579696969049208210848175282388992',
u'logStreamName': u'metricPublisherAppNamespace_request_id_a8892402-2850-4f0c-b996-e2f961bde951'
},
{
u'ingestionTime': 1530897512088,
u'timestamp': 1530897512142,
u'message': u"{'metric_data': [{'timestamp': 1530897512045, 'metric_name': 'theMetricName', 'value': 19}], 'request_id': 'request_id_4f4cb112-f862-40bc-bbd2-ce3f08950aa2'}",
u'eventId': u'34140155343343182843228744978375816959492556136213512192',
u'logStreamName': u'metricPublisherAppNamespace_request_id_4f4cb112-f862-40bc-bbd2-ce3f08950aa2'
},
{
u'ingestionTime': 1530897512257,
u'timestamp': 1530897512311,
u'message': u"{'metric_data': [{'timestamp': 1530897512224, 'metric_name': 'theMetricName', 'value': 20}], 'request_id': 'request_id_3161d1b2-1656-49cc-adc2-d66ad677be84'}",
u'eventId': u'34140155347112008781780420289499265245750404335519203328',
u'logStreamName': u'metricPublisherAppNamespace_request_id_3161d1b2-1656-49cc-adc2-d66ad677be84'
},
{
u'ingestionTime': 1530897512433,
u'timestamp': 1530897512487,
u'message': u"{'metric_data': [{'timestamp': 1530897512396, 'metric_name': 'theMetricName', 'value': 21}], 'request_id': 'request_id_a2414cd1-989c-43d4-b740-276d143e1134'}",
u'eventId': u'34140155351036939936721809962622521924228387618665332736',
u'logStreamName': u'metricPublisherAppNamespace_request_id_a2414cd1-989c-43d4-b740-276d143e1134'
},
{
u'ingestionTime': 1530897512617,
u'timestamp': 1530897512670,
u'message': u"{'metric_data': [{'timestamp': 1530897512576, 'metric_name': 'theMetricName', 'value': 22}], 'request_id': 'request_id_d2ea0faa-ddba-4acf-b44f-ad550b308f61'}",
u'eventId': u'34140155355117976308052913997746049710635287394391556096',
u'logStreamName': u'metricPublisherAppNamespace_request_id_d2ea0faa-ddba-4acf-b44f-ad550b308f61'
},
{
u'ingestionTime': 1530897512800,
u'timestamp': 1530897512853,
u'message': u"{'metric_data': [{'timestamp': 1530897512759, 'metric_name': 'theMetricName', 'value': 23}], 'request_id': 'request_id_1eebb564-2adc-4761-b5b8-c525f97f6605'}",
u'eventId': u'34140155359199012679384018032868374480409545087618056192',
u'logStreamName': u'metricPublisherAppNamespace_request_id_1eebb564-2adc-4761-b5b8-c525f97f6605'
},
{
u'ingestionTime': 1530897512989,
u'timestamp': 1530897513043,
u'message': u"{'metric_data': [{'timestamp': 1530897512945, 'metric_name': 'theMetricName', 'value': 24}], 'request_id': 'request_id_58211db8-3905-4a7f-948f-de1a6eb82525'}",
u'eventId': u'34140155363436154267104836429988455163783523306798317568',
u'logStreamName': u'metricPublisherAppNamespace_request_id_58211db8-3905-4a7f-948f-de1a6eb82525'
},
{
u'ingestionTime': 1530897513235,
u'timestamp': 1530897513223,
u'message': u"{'metric_data': [{'timestamp': 1530897513135, 'metric_name': 'theMetricName', 'value': 25}], 'request_id': 'request_id_9a2d80fb-b9fa-45f6-bffb-b8164805d28b'}",
u'eventId': u'34140155367450288402840348595762125836865631491286695936',
u'logStreamName': u'metricPublisherAppNamespace_request_id_9a2d80fb-b9fa-45f6-bffb-b8164805d28b'
},
{
u'ingestionTime': 1530897513429,
u'timestamp': 1530897513481,
u'message': u"{'metric_data': [{'timestamp': 1530897513390, 'metric_name': 'theMetricName', 'value': 26}], 'request_id': 'request_id_203cfc60-da33-4126-a7c5-c4ead39ebb0f'}",
u'eventId': u'34140155373203880664061249366513296413228639787555749888',
u'logStreamName': u'metricPublisherAppNamespace_request_id_203cfc60-da33-4126-a7c5-c4ead39ebb0f'
},
{
u'ingestionTime': 1530897513620,
u'timestamp': 1530897513672,
u'message': u"{'metric_data': [{'timestamp': 1530897513578, 'metric_name': 'theMetricName', 'value': 27}], 'request_id': 'request_id_92957208-0862-4017-8cd4-0ba6693fa9ae'}",
u'eventId': u'34140155377463322996980598386777568039913421197688176640',
u'logStreamName': u'metricPublisherAppNamespace_request_id_92957208-0862-4017-8cd4-0ba6693fa9ae'
},
{
u'ingestionTime': 1530897513799,
u'timestamp': 1530897513851,
u'message': u"{'metric_data': [{'timestamp': 1530897513760, 'metric_name': 'theMetricName', 'value': 28}], 'request_id': 'request_id_0bbf4dd5-7f17-4e88-9a39-422ee113ba21'}",
u'eventId': u'34140155381455156387517579929328644727692714340910759936',
u'logStreamName': u'metricPublisherAppNamespace_request_id_0bbf4dd5-7f17-4e88-9a39-422ee113ba21'
},
| |
<filename>python/venv/lib/python2.7/site-packages/openstackclient/common/utils.py
# Copyright 2012-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Common client utilities"""
import getpass
import logging
import os
import six
import time
from oslo_utils import importutils
from openstackclient.common import exceptions
def log_method(log, level=logging.DEBUG):
"""Logs a method and its arguments when entered."""
def decorator(func):
func_name = func.__name__
@six.wraps(func)
def wrapper(self, *args, **kwargs):
if log.isEnabledFor(level):
pretty_args = []
if args:
pretty_args.extend(str(a) for a in args)
if kwargs:
pretty_args.extend(
"%s=%s" % (k, v) for k, v in six.iteritems(kwargs))
log.log(level, "%s(%s)", func_name, ", ".join(pretty_args))
return func(self, *args, **kwargs)
return wrapper
return decorator
def find_resource(manager, name_or_id, **kwargs):
"""Helper for the _find_* methods.
:param manager: A client manager class
:param name_or_id: The resource we are trying to find
:param kwargs: To be used in calling .find()
:rtype: The found resource
This method will attempt to find a resource in a variety of ways.
Primarily .get() methods will be called with `name_or_id` as an integer
value, and tried again as a string value.
If both fail, then a .find() is attempted, which is essentially calling
a .list() function with a 'name' query parameter that is set to
`name_or_id`.
Lastly, if any kwargs are passed in, they will be treated as additional
query parameters. This is particularly handy in the case of finding
resources in a domain.
"""
# Try to get entity as integer id
try:
if isinstance(name_or_id, int) or name_or_id.isdigit():
return manager.get(int(name_or_id), **kwargs)
# FIXME(dtroyer): The exception to catch here is dependent on which
# client library the manager passed in belongs to.
# Eventually this should be pulled from a common set
# of client exceptions.
except Exception as ex:
if type(ex).__name__ == 'NotFound':
pass
else:
raise
# Try directly using the passed value
try:
return manager.get(name_or_id, **kwargs)
except Exception:
pass
if len(kwargs) == 0:
kwargs = {}
# Prepare the kwargs for calling find
if 'NAME_ATTR' in manager.resource_class.__dict__:
# novaclient does this for oddball resources
kwargs[manager.resource_class.NAME_ATTR] = name_or_id
else:
kwargs['name'] = name_or_id
# finally try to find entity by name
try:
return manager.find(**kwargs)
# FIXME(dtroyer): The exception to catch here is dependent on which
# client library the manager passed in belongs to.
# Eventually this should be pulled from a common set
# of client exceptions.
except Exception as ex:
if type(ex).__name__ == 'NotFound':
msg = "No %s with a name or ID of '%s' exists." % \
(manager.resource_class.__name__.lower(), name_or_id)
raise exceptions.CommandError(msg)
if type(ex).__name__ == 'NoUniqueMatch':
msg = "More than one %s exists with the name '%s'." % \
(manager.resource_class.__name__.lower(), name_or_id)
raise exceptions.CommandError(msg)
else:
raise
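# Illustrative calls (the client/manager names are assumptions, not from this module):
#   server = find_resource(compute_client.servers, 'web-1')
#   project = find_resource(identity_client.projects, 'demo', domain_id=domain.id)
# Both integer/UUID ids and names resolve through the fallbacks documented above.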
def format_dict(data):
"""Return a formatted string of key value pairs
:param data: a dict
:param format: optional formatting hints
:rtype: a string formatted to key='value'
"""
output = ""
for s in sorted(data):
output = output + s + "='" + six.text_type(data[s]) + "', "
return output[:-2]
def format_list(data):
"""Return a formatted strings
:param data: a list of strings
:rtype: a string formatted to a,b,c
"""
return ', '.join(sorted(data))
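# Examples (follow directly from the implementations above):
#   format_dict({'size': 10, 'flavor': 'm1.small'})  ->  "flavor='m1.small', size='10'"
#   format_list(['c', 'a', 'b'])                     ->  "a, b, c"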
def get_field(item, field):
try:
if isinstance(item, dict):
return item[field]
else:
return getattr(item, field)
except Exception:
msg = "Resource doesn't have field %s" % field
raise exceptions.CommandError(msg)
def get_item_properties(item, fields, mixed_case_fields=[], formatters={}):
"""Return a tuple containing the item properties.
:param item: a single item resource (e.g. Server, Project, etc)
:param fields: tuple of strings with the desired field names
:param mixed_case_fields: tuple of field names to preserve case
:param formatters: dictionary mapping field names to callables
to format the values
"""
row = []
for field in fields:
if field in mixed_case_fields:
field_name = field.replace(' ', '_')
else:
field_name = field.lower().replace(' ', '_')
data = getattr(item, field_name, '')
if field in formatters:
row.append(formatters[field](data))
else:
row.append(data)
return tuple(row)
def get_dict_properties(item, fields, mixed_case_fields=[], formatters={}):
"""Return a tuple containing the item properties.
:param item: a single dict resource
:param fields: tuple of strings with the desired field names
:param mixed_case_fields: tuple of field names to preserve case
:param formatters: dictionary mapping field names to callables
to format the values
"""
row = []
for field in fields:
if field in mixed_case_fields:
field_name = field.replace(' ', '_')
else:
field_name = field.lower().replace(' ', '_')
data = item[field_name] if field_name in item else ''
if field in formatters:
row.append(formatters[field](data))
else:
row.append(data)
return tuple(row)
def sort_items(items, sort_str):
"""Sort items based on sort keys and sort directions given by sort_str.
:param items: a list or generator object of items
:param sort_str: a string defining the sort rules, the format is
'<key1>:[direction1],<key2>:[direction2]...', direction can be 'asc'
for ascending or 'desc' for descending, if direction is not given,
it's ascending by default
:return: sorted items
"""
if not sort_str:
return items
# items may be a generator object, transform it to a list
items = list(items)
sort_keys = sort_str.strip().split(',')
for sort_key in reversed(sort_keys):
reverse = False
if ':' in sort_key:
sort_key, direction = sort_key.split(':', 1)
if not sort_key:
msg = "empty string is not a valid sort key"
raise exceptions.CommandError(msg)
if direction not in ['asc', 'desc']:
if not direction:
direction = "empty string"
msg = ("%s is not a valid sort direction for sort key %s, "
"use asc or desc instead" % (direction, sort_key))
raise exceptions.CommandError(msg)
if direction == 'desc':
reverse = True
items.sort(key=lambda item: get_field(item, sort_key),
reverse=reverse)
return items
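# Example (derived from the docstring above): given items with 'name' and 'size' fields,
#   sort_items(servers, 'name:asc,size:desc')
# sorts primarily by name ascending, breaking ties by size descending.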
def string_to_bool(arg):
return arg.strip().lower() in ('t', 'true', 'yes', '1')
def env(*vars, **kwargs):
"""Search for the first defined of possibly many env vars
Returns the first environment variable defined in vars, or
returns the default defined in kwargs.
"""
for v in vars:
value = os.environ.get(v, None)
if value:
return value
return kwargs.get('default', '')
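# Example: pick the first defined of several environment variables, with a fallback:
#   auth_url = env('OS_AUTH_URL', 'OS_URL', default='http://localhost:5000/v3')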
def get_client_class(api_name, version, version_map):
"""Returns the client class for the requested API version
:param api_name: the name of the API, e.g. 'compute', 'image', etc
:param version: the requested API version
:param version_map: a dict of client classes keyed by version
:rtype: a client class for the requested API version
"""
try:
client_path = version_map[str(version)]
except (KeyError, ValueError):
msg = "Invalid %s client version '%s'. must be one of: %s" % (
(api_name, version, ', '.join(version_map.keys())))
raise exceptions.UnsupportedVersion(msg)
return importutils.import_class(client_path)
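# Example (editor's illustration): a hypothetical version map for a 'compute' API;
# the client class path is a placeholder, not a statement about any real library.
#
#   version_map = {'2': 'novaclient.v2.client.Client'}
#   client_cls = get_client_class('compute', 2, version_map)
#
# An unknown version raises exceptions.UnsupportedVersion listing the known keys.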
def wait_for_status(status_f,
res_id,
status_field='status',
success_status=['active'],
sleep_time=5,
callback=None):
"""Wait for status change on a resource during a long-running operation
:param status_f: a status function that takes a single id argument
:param res_id: the resource id to watch
:param success_status: a list of status strings for successful completion
:param status_field: the status attribute in the returned resource object
:param sleep_time: wait this long (seconds)
:param callback: called per sleep cycle, useful to display progress
:rtype: True on success
"""
while True:
res = status_f(res_id)
status = getattr(res, status_field, '').lower()
if status in success_status:
retval = True
break
elif status == 'error':
retval = False
break
if callback:
progress = getattr(res, 'progress', None) or 0
callback(progress)
time.sleep(sleep_time)
return retval
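# Example (editor's illustration): waiting for a hypothetical server to become
# active, printing progress on each poll. 'manager.get' is assumed to return an
# object exposing 'status' and 'progress' attributes.
#
#   ok = wait_for_status(manager.get, server_id,
#                        success_status=['active'],
#                        sleep_time=2,
#                        callback=lambda p: print('progress: %s%%' % p))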
def wait_for_delete(manager,
res_id,
status_field='status',
sleep_time=5,
timeout=300,
callback=None):
"""Wait for resource deletion
:param res_id: the resource id to watch
:param status_field: the status attribute in the returned resource object,
this is used to check for error states while the resource is being
deleted
:param sleep_time: wait this long between checks (seconds)
:param timeout: check until this long (seconds)
:param callback: called per sleep cycle, useful to display progress; this
function is passed a progress value during each iteration of the wait
loop
:rtype: True on success, False if the resource has gone to error state or
the timeout has been reached
"""
total_time = 0
while total_time < timeout:
try:
# might not be a bad idea to re-use find_resource here if it was
# a bit more friendly | |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from .Borg import Borg
import MySQLdb
from sqlalchemy.exc import OperationalError, ProgrammingError, DataError, IntegrityError
from . import RequirementFactory
from .Environment import Environment
from .ARM import *
from MySQLdb._exceptions import DatabaseError, IntegrityError
from . import Attacker
from . import Asset
from . import Threat
from . import Vulnerability
from . import Persona
from . import MisuseCase
from . import Task
from . import Risk
from . import Response
from . import ClassAssociation
from .ObjectSummary import ObjectSummary
from .AttackerParameters import AttackerParameters
from .PersonaParameters import PersonaParameters
from .GoalParameters import GoalParameters
from .ObstacleParameters import ObstacleParameters
from .AssetParameters import AssetParameters
from .TemplateAssetParameters import TemplateAssetParameters
from .TemplateGoalParameters import TemplateGoalParameters
from .TemplateRequirementParameters import TemplateRequirementParameters
from .SecurityPatternParameters import SecurityPatternParameters
from .ThreatParameters import ThreatParameters
from .VulnerabilityParameters import VulnerabilityParameters
from .RiskParameters import RiskParameters
from .ResponseParameters import ResponseParameters
from .RoleParameters import RoleParameters
from . import ObjectFactory
from .TaskParameters import TaskParameters
from .MisuseCaseParameters import MisuseCaseParameters
from .DomainPropertyParameters import DomainPropertyParameters
from . import Trace
from cairis.core.armid import *
from .DotTraceParameters import DotTraceParameters
from .EnvironmentParameters import EnvironmentParameters
from .Target import Target
from .AttackerEnvironmentProperties import AttackerEnvironmentProperties
from .AssetEnvironmentProperties import AssetEnvironmentProperties
from .ThreatEnvironmentProperties import ThreatEnvironmentProperties
from .VulnerabilityEnvironmentProperties import VulnerabilityEnvironmentProperties
from .AcceptEnvironmentProperties import AcceptEnvironmentProperties
from .TransferEnvironmentProperties import TransferEnvironmentProperties
from .MitigateEnvironmentProperties import MitigateEnvironmentProperties
from .CountermeasureEnvironmentProperties import CountermeasureEnvironmentProperties
from .CountermeasureParameters import CountermeasureParameters
from .PersonaEnvironmentProperties import PersonaEnvironmentProperties
from .TaskEnvironmentProperties import TaskEnvironmentProperties
from .MisuseCaseEnvironmentProperties import MisuseCaseEnvironmentProperties
from .RoleEnvironmentProperties import RoleEnvironmentProperties
from .ClassAssociationParameters import ClassAssociationParameters
from .GoalAssociationParameters import GoalAssociationParameters
from .DependencyParameters import DependencyParameters
from .GoalEnvironmentProperties import GoalEnvironmentProperties
from .ObstacleEnvironmentProperties import ObstacleEnvironmentProperties
from .ValueTypeParameters import ValueTypeParameters
from .ExternalDocumentParameters import ExternalDocumentParameters
from .InternalDocumentParameters import InternalDocumentParameters
from .CodeParameters import CodeParameters
from .MemoParameters import MemoParameters
from .DocumentReferenceParameters import DocumentReferenceParameters
from .ConceptReferenceParameters import ConceptReferenceParameters
from .PersonaCharacteristicParameters import PersonaCharacteristicParameters
from .TaskCharacteristicParameters import TaskCharacteristicParameters
from .UseCaseParameters import UseCaseParameters
from .UseCase import UseCase
from .UseCaseEnvironmentProperties import UseCaseEnvironmentProperties
from .Step import Step
from .Steps import Steps
from .ReferenceSynopsis import ReferenceSynopsis
from .ReferenceContribution import ReferenceContribution
from .ConceptMapAssociationParameters import ConceptMapAssociationParameters
from .ComponentViewParameters import ComponentViewParameters
from .ComponentParameters import ComponentParameters
from .ConnectorParameters import ConnectorParameters
from .WeaknessTarget import WeaknessTarget
from .ImpliedProcess import ImpliedProcess
from .ImpliedProcessParameters import ImpliedProcessParameters
from .Location import Location
from .Locations import Locations
from .LocationsParameters import LocationsParameters
from .DataFlow import DataFlow
from .DataFlowParameters import DataFlowParameters
from .TrustBoundary import TrustBoundary
from .ValidationResult import ValidationResult
from .GoalContribution import GoalContribution
from .TaskContribution import TaskContribution
from cairis.tools.PseudoClasses import RiskRating
import string
import os
from numpy import *
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
import sys
from base64 import b64encode
from .dba import canonicalDbUser, canonicalDbName, createDatabaseSchema, createDefaults, createDatabaseAndPrivileges, databases
__author__ = '<NAME>, <NAME>, <NAME>'
class MySQLDatabaseProxy:
def __init__(self, host=None, port=None, user=None, passwd=None, db=None):
b = Borg()
if (user is None or passwd is None or db is None):
user = b.dbUser
passwd = b.dbPasswd
db = b.dbName
host = b.dbHost
port = b.dbPort
try:
dbEngine = create_engine('mysql+mysqldb://'+user+':'+passwd+'@'+host+':'+str(port)+'/'+db+'?charset=utf8mb4')
self.conn = scoped_session(sessionmaker(bind=dbEngine))
self.conn.execute("set session max_sp_recursion_depth = 255")
except OperationalError as e:
exceptionText = 'MySQL error connecting to the CAIRIS database ' + db + ' on host ' + host + ' at port ' + str(port) + ' with user ' + user + ' (message:' + format(e) + ')'
raise DatabaseProxyException(exceptionText)
except DatabaseError as e:
id, msg = e.args
exceptionText = 'MySQL error connecting to the CAIRIS database ' + db + ' on host ' + host + ' at port ' + str(port) + ' with user ' + user + ' (id:' + str(id) + ',message:' + msg + ')'
raise DatabaseProxyException(exceptionText)
self.theDimIdLookup, self.theDimNameLookup = self.buildDimensionLookup()
def reconnect(self,closeConn = True,session_id = None):
b = Borg()
try:
if (closeConn) and self.conn.connection().connection.open:
self.conn.close()
if b.runmode == 'web':
ses_settings = b.get_settings(session_id)
dbUser = ses_settings['dbUser']
dbPasswd = ses_settings['dbPasswd']
dbHost = ses_settings['dbHost']
dbPort = ses_settings['dbPort']
dbName = ses_settings['dbName']
dbEngine = create_engine('mysql+mysqldb://' + dbUser+':' + dbPasswd+'@' + dbHost+':' + str(dbPort)+'/' + dbName + '?charset=utf8mb4')
self.conn = scoped_session(sessionmaker(bind=dbEngine))
elif b.runmode == 'desktop':
dbEngine = create_engine('mysql+mysqldb://' + b.dbUser+':' + b.dbPasswd+'@' + b.dbHost+':' + str(b.dbPort)+'/' + b.dbName + '?charset=utf8mb4')
self.conn = scoped_session(sessionmaker(bind=dbEngine))
else:
raise RuntimeError('Run mode not recognized')
self.conn.execute("set session max_sp_recursion_depth = 255")
except OperationalError as e:
exceptionText = 'MySQL error re-connecting to the CAIRIS database: ' + format(e)
raise DatabaseProxyException(exceptionText)
except ProgrammingError as e:
exceptionText = 'MySQL error re-connecting to the CAIRIS database: ' + format(e)
raise DatabaseProxyException(exceptionText)
except IntegrityError as e:
exceptionText = 'MySQL error re-connecting to the CAIRIS database: ' + format(e)
raise DatabaseProxyException(exceptionText)
except DatabaseError as e:
exceptionText = 'MySQL error re-connecting to the CAIRIS database: ' + format(e)
raise DatabaseProxyException(exceptionText)
self.theDimIdLookup, self.theDimNameLookup = self.buildDimensionLookup()
def buildDimensionLookup(self):
dimRows = self.responseList('call traceDimensions()',{},'MySQL error building trace dimension lookup tables')
idLookup = {}
nameLookup = {}
for dimId,dimName in dimRows:
idLookup[dimId] = dimName
nameLookup[dimName] = dimId
return (idLookup, nameLookup)
def close(self):
self.conn.remove()
def getRequirements(self,constraintId = '',isAsset = 1):
reqRows = self.responseList('call getRequirements(:id,:isAs)',{'id':constraintId,'isAs':isAsset},'MySQL error getting requirements')
reqDict = {}
for reqLabel, reqId, reqName, reqDesc, priority, rationale, fitCriterion, originator, reqVersion, reqType, reqDomain in reqRows:
r = RequirementFactory.build(reqId,reqLabel,reqName,reqDesc,priority,rationale,fitCriterion,originator,reqType,reqDomain,reqVersion)
reqDict[reqDesc] = r
return reqDict
def getRequirement(self,reqId):
reqRows = self.responseList('call getRequirement(:id)',{'id':reqId},'MySQL error getting requirement')
reqDict = {}
for reqLabel, reqId, reqName, reqDesc, priority, rationale, fitCriterion, originator, reqVersion, reqType, reqDomain in reqRows:
return RequirementFactory.build(reqId,reqLabel,reqName,reqDesc,priority,rationale,fitCriterion,originator,reqType,reqDomain,reqVersion)
def getOrderedRequirements(self,constraintId = '',isAsset = True):
reqRows = self.responseList('call getRequirements(:id,:isAs)',{'id':constraintId,'isAs':isAsset},'MySQL error getting requirements')
reqList = []
for reqLabel, reqId, reqName, reqDesc, priority, rationale, fitCriterion, originator, reqVersion, reqType, reqDomain in reqRows:
r = RequirementFactory.build(reqId,reqLabel,reqName,reqDesc,priority,rationale,fitCriterion,originator,reqType,reqDomain,reqVersion)
reqList.append(r)
return reqList
def newId(self):
return self.responseList('call newId()',{},'MySQL error getting new identifier')[0]
def commitDatabase(self,session):
try:
session.commit()
session.close()
except OperationalError as e:
exceptionText = 'Commit error (message:' + format(e) + ')'
raise DatabaseProxyException(exceptionText)
except DatabaseError as e:
id, msg = e.args
exceptionText = 'Commit error (id:' + str(id) + ',message:' + msg + ')'
raise DatabaseProxyException(exceptionText)
def updateDatabase(self,callTxt,argDict,errorTxt,session = None,doCommit = True):
try:
if (session == None):
session = self.conn()
session.execute(callTxt,argDict)
if (doCommit):
session.commit()
session.close()
return None
else:
return session
except OperationalError as e:
exceptionText = 'Update error (message:' + format(e) + ')'
raise DatabaseProxyException(exceptionText)
except IntegrityError as e:
exceptionText = 'Update error (message:' + format(e) + ')'
raise DatabaseProxyException(exceptionText)
except DatabaseError as e:
id, msg = e.args
exceptionText = 'Update error (id:' + str(id) + ',message:' + msg + ')'
raise DatabaseProxyException(exceptionText)
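# Example (editor's illustration): how updateDatabase and responseList are
# typically combined, assuming the Borg singleton already holds valid connection
# settings. The stored procedure names and parameters are hypothetical.
#
#   proxy = MySQLDatabaseProxy()
#   proxy.updateDatabase('call addTag(:objt,:tag)',
#                        {'objt': 'asset', 'tag': 'confidential'},
#                        'MySQL error adding tag')
#   tagRows = proxy.responseList('call getTags(:objt)', {'objt': 'asset'},
#                                'MySQL error getting tags')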
def addRequirement(self,req,assetName,isAsset = True):
req.validate()
self.updateDatabase('call addRequirement(:lbl,:id,:vers,:name,:desc,:rationale,:origin,:fCrit,:priority,:type,:asName,:isAs)',{'lbl':req.label(),'id':req.id(),'vers':req.version(),'name':req.name(),'desc':req.description(),'rationale':req.rationale(),'origin':req.originator(),'fCrit':req.fitCriterion(),'priority':req.priority(),'type':req.type(),'asName':assetName,'isAs':isAsset},'MySQL error adding new requirement ' + str(req.id()))
def updateRequirement(self,req):
req.validate()
self.updateDatabase('call updateRequirement(:lbl,:id,:vers,:name,:desc,:rationale,:origin,:fCrit,:priority,:type)',{'lbl':req.label(),'id':req.id(),'vers':req.version(),'name':req.name(),'desc':req.description(),'rationale':req.rationale(),'origin':req.originator(),'fCrit':req.fitCriterion(),'priority':req.priority(),'type':req.type()},'MySQL error updating requirement ' + str(req.id()))
def addValueTensions(self,envId,tensions):
for vtKey in tensions:
spValue = vtKey[0]
prValue = vtKey[1]
vt = tensions[vtKey]
vtValue = vt[0]
vtRationale = vt[1]
self.addValueTension(envId,spValue,prValue,vtValue,vtRationale)
def addValueTension(self,envId,spId,prId,tId,tRationale):
self.updateDatabase('call addValueTension(:env,:sp,:pr,:tId,:rationale)',{'env':envId,'sp':spId,'pr':prId,'tId':tId,'rationale':tRationale},'MySQL error adding value tension for environment id ' + str(envId))
def addEnvironment(self,parameters):
parameters.validate()
environmentId = self.newId()
environmentName = parameters.name()
environmentShortCode = parameters.shortCode()
environmentDescription = parameters.description()
session = self.updateDatabase('call addEnvironment(:id,:name,:sc,:desc)',{'id':environmentId,'name':environmentName,'sc':environmentShortCode,'desc':environmentDescription},'MySQL error adding environment',None,False)
if (len(parameters.environments()) > 0):
for c in parameters.environments():
self.updateDatabase('call addCompositeEnvironment(:id,:c)',{'id':environmentId,'c':c},'MySQL error adding composite environment',session,False)
self.addCompositeEnvironmentProperties(environmentId,parameters.duplicateProperty(),parameters.overridingEnvironment(),session)
self.commitDatabase(session)
assetValues = parameters.assetValues()
if (assetValues != None):
for v in assetValues: self.updateValueType(v)
self.addValueTensions(environmentId,parameters.tensions())
self.commitDatabase(session)
def addCompositeEnvironmentProperties(self,environmentId,duplicateProperty,overridingEnvironment,session = None):
self.updateDatabase('call addCompositeEnvironmentProperties(:id,:dp,:oe)',{'id':environmentId,'dp':duplicateProperty,'oe':overridingEnvironment},'MySQL error adding duplicate properties for environment id ' + str(environmentId),session,False)
def riskEnvironments(self,threatName,vulName):
return self.responseList('call riskEnvironments(:thr,:vul)',{'thr':threatName,'vul':vulName},'MySQL error getting environments associated with threat ' + threatName + ' and vulnerability ' + vulName)
def riskEnvironmentsByRisk(self,riskName):
return self.responseList('call riskEnvironmentsByRisk(:risk)',{'risk':riskName},'MySQL error getting environments associated with risk ' + riskName)
def updateEnvironment(self,parameters):
parameters.validate()
environmentId = parameters.id()
environmentName = parameters.name()
environmentShortCode = parameters.shortCode()
environmentDescription = parameters.description()
session = self.updateDatabase('call deleteEnvironmentComponents(:id)',{'id':parameters.id()},'MySQL error deleting environment components',None,False)
self.updateDatabase('call updateEnvironment(:id,:name,:shortCode,:desc)',{'id':environmentId,'name':environmentName,'shortCode':environmentShortCode,'desc':environmentDescription},'MySQL error updating environment',session,False)
if (len(parameters.environments()) > 0):
for c in parameters.environments(): self.updateDatabase('call addCompositeEnvironment(:id,:c)',{'id':environmentId,'c':c},'MySQL error adding composite environment',session,False)
if (len(parameters.duplicateProperty()) > 0):
self.addCompositeEnvironmentProperties(environmentId,parameters.duplicateProperty(),parameters.overridingEnvironment())
self.commitDatabase(session)
self.addValueTensions(environmentId,parameters.tensions())
self.commitDatabase(session)
def deleteRequirement(self,r):
self.deleteObject(r,'requirement')
def responseList(self,callTxt,argDict,errorTxt,session = None):
try:
persistSession = True
if (session == None):
session = self.conn()
persistSession = False
rs = session.execute(callTxt,argDict)
responseList = []
if (rs.rowcount > 0):
for row in rs.fetchall():
if (len(row) > 1):
responseList.append(tuple(list(row)))
else:
responseList.append(list(row)[0])
rs.close()
if (persistSession == False):
session.close()
return responseList
except OperationalError | |
# optimus/profiler/profiler.py
import configparser
import json
import logging
import os
from collections import defaultdict
import dateutil
import humanize
import jinja2
import pika
import pyspark.sql.functions as F
from pyspark.sql.types import ArrayType, LongType
from optimus.functions import filter_row_by_data_type as fbdt, plot_hist, plot_freq
from optimus.helpers.decorators import time_it
from optimus.helpers.functions import parse_columns, print_html
from optimus.helpers.raiseit import RaiseIt
from optimus.profiler.functions import fill_missing_var_types, fill_missing_col_types, \
write_json, write_html
class Profiler:
def __init__(self, output_path=None, queue_url=None, queue_exchange=None, queue_routing_key=None):
"""
:param output_path:
:param queue_url:
:param queue_exchange:
:param queue_routing_key:
"""
config = configparser.ConfigParser()
# If no output path is defined, try to load it from the config.ini file
if output_path is None:
try:
# try to load the config file
config.read("config.ini")
output_path = config["PROFILER"]["Output"]
except (IOError, KeyError):
logging.info("Config.ini not found")
output_path = "data.json"
pass
self.html = None
self.json = None
self.path = output_path
self.queue_url = queue_url
self.queue_exchange = queue_exchange
self.queue_routing_key = queue_routing_key
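# Example (editor's illustration): a minimal config.ini that the constructor above
# would pick up when no output_path is given; the path value is just an example.
#
#   [PROFILER]
#   Output = /tmp/profiler_output.json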
@staticmethod
@time_it
def dataset_info(df):
"""
Return info about cols, row counts, total missing and disk size
:param df: Dataframe to be processed
:return:
"""
columns = parse_columns(df, df.columns)
cols_count = len(df.columns)
rows_count = df.count()
missing_count = round(sum(df.cols.count_na(columns).values()), 2)
return (
{'cols_count': cols_count,
'rows_count': rows_count,
'missing_count': str(round(missing_count / rows_count, 2)) + "%",
'size': humanize.naturalsize(df.size())}
)
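# Example (editor's illustration): the kind of summary dict dataset_info() returns
# for a hypothetical 1000-row, 5-column dataframe (all values made up).
#
#   {'cols_count': 5, 'rows_count': 1000,
#    'missing_count': '0.02%', 'size': '1.2 MB'}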
# TODO: This should check only the StringType columns. The data types of the other columns can be taken from the schema.
@staticmethod
@time_it
def count_data_types(df, columns, infer=False):
"""
Count the number of int, float, string, date and booleans and output the count in json format
:param df: Dataframe to be processed
:param columns: Columns to be processed
:param infer: if True, infer the data type held inside string columns
:return: json
"""
@time_it
def _count_data_types(col_name):
"""
Function to determine whether the values of a column are float, int or string.
:param col_name:
:return:
"""
logging.info("Processing column '" + col_name + "'...")
# If String, process the data to try to infer which data type is inside. This is a kind of optimization:
# we do not need to analyze the data if the column data type is integer, boolean, etc.
temp = col_name + "_type"
col_data_type = df.cols.dtypes(col_name)
count_by_data_type = {}
count_empty_strings = 0
if infer is True and col_data_type == "string":
types = (df
.h_repartition(col_name=col_name)
.withColumn(temp, fbdt(col_name, get_type=True))
.groupBy(temp).count()
.to_json())
for row in types:
count_by_data_type[row[temp]] = row["count"]
count_empty_strings = df.where(F.col(col_name) == '').count()
else:
nulls = df.cols.count_na(col_name)
count_by_data_type[col_data_type] = int(df.count()) - nulls
count_by_data_type["null"] = nulls
count_by_data_type = fill_missing_var_types(count_by_data_type)
# Subtract white spaces from the total string count
data_types_count = {"string": count_by_data_type['string'],
"bool": count_by_data_type['bool'],
"int": count_by_data_type['int'],
"float": count_by_data_type['float'],
"double": count_by_data_type['double'],
"date": count_by_data_type['date'],
"array": count_by_data_type['array']
}
null_missed_count = {"null": count_by_data_type['null'],
"missing": count_empty_strings,
}
# Get the greatest count by column data type
greatest_data_type_count = max(data_types_count, key=data_types_count.get)
if greatest_data_type_count is "string":
cat = "categorical"
elif greatest_data_type_count is "int" or greatest_data_type_count is "float" or greatest_data_type_count is "double":
cat = "numeric"
elif greatest_data_type_count is "date":
cat = "date"
elif greatest_data_type_count is "bool":
cat = "bool"
elif greatest_data_type_count is "array":
cat = "array"
else:
cat = "null"
col = {}
col['dtype'] = greatest_data_type_count
col['type'] = cat
col['details'] = {**data_types_count, **null_missed_count}
return col
columns = parse_columns(df, columns)
# Info from all the columns
type_details = {c: _count_data_types(c) for c in columns}
results = {}
count_types = {}
# Count the categorical, numerical, boolean and date columns
for v in type_details.values():
name = v["type"]
if name in count_types:
count_types[name] += 1
else:
count_types[name] = 1
count_types = fill_missing_col_types(count_types)
results["count_types"] = count_types
results["columns"] = type_details
return results
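# Example (editor's illustration): rough shape of the dict returned by
# count_data_types() for a hypothetical dataframe with a single 'age' column
# (counts are made up).
#
#   {'count_types': {'numeric': 1, 'categorical': 0, ...},
#    'columns': {'age': {'dtype': 'int',
#                        'type': 'numeric',
#                        'details': {'int': 998, 'null': 2, 'missing': 0, ...}}}}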
@time_it
def run(self, df, columns, buckets=40, infer=False, relative_error=1):
"""
Return dataframe statistical information in HTML Format
:param df: Dataframe to be analyzed
:param columns: Columns to be analyzed
:param buckets: Number of buckets calculated to print the histogram
:param infer: if True, infer the data type held inside string columns
:param relative_error: Relative Error for quantile discretizer calculation
:return:
"""
columns = parse_columns(df, columns)
output = Profiler.to_json(df, columns, buckets, infer, relative_error)
# Load jinja
path = os.path.dirname(os.path.abspath(__file__))
template_loader = jinja2.FileSystemLoader(searchpath=path + "//templates")
template_env = jinja2.Environment(loader=template_loader, autoescape=True)
# Render template
# Create the profiler info header
html = ""
general_template = template_env.get_template("general_info.html")
html = html + general_template.render(data=output)
template = template_env.get_template("one_column.html")
# Create every column stats
for col_name in columns:
hist_pic = None
col = output["columns"][col_name]
if "hist" in col:
if col["column_dtype"] == "date":
hist_year = plot_hist({col_name: col["hist"]["years"]}, "base64", "years")
hist_month = plot_hist({col_name: col["hist"]["months"]}, "base64", "months")
hist_weekday = plot_hist({col_name: col["hist"]["weekdays"]}, "base64", "weekdays")
hist_hour = plot_hist({col_name: col["hist"]["hours"]}, "base64", "hours")
hist_minute = plot_hist({col_name: col["hist"]["minutes"]}, "base64", "minutes")
hist_pic = {"hist_years": hist_year, "hist_months": hist_month, "hist_weekdays": hist_weekday,
"hist_hours": hist_hour, "hist_minutes": hist_minute}
else:
hist = plot_hist({col_name: col["hist"]}, output="base64")
hist_pic = {"hist_pic": hist}
if "frequency" in col:
freq_pic = plot_freq({col_name: col["frequency"]}, output="base64")
else:
freq_pic = None
html = html + template.render(data=col, freq_pic=freq_pic, **hist_pic)
html = html + df.table_html(10)
# Display HTML
print_html(html)
# send to queue
if self.queue_url is not None:
self.to_queue(output)
# JSON
# Save in case we want to output to a json file
self.json = output
# Save file in json format
write_json(output, self.path)
# Save in case we want to output to a html file
self.html = html
def to_file(self, path=None, output=None):
"""
Save profiler data to a file in the specified format (html, json)
:param output: html or json
:param path: filename in which the data will be saved
:return:
"""
if path is None:
RaiseIt.value_error(path, ["Invalid file path"])
# We need to append some extra HTML tags to display it correctly in the browser.
if output == "html":
if self.html is None:
assert self.html is not None, "Please run the profiler first"
header = '''<!doctype html>
<html class="no-js" lang="">
<head>
<meta charset="utf-8">
<meta http-equiv="x-ua-compatible" content="ie=edge">
<title></title>
<meta name="description" content="">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<link rel="manifest" href="site.webmanifest">
<link rel="apple-touch-icon" href="icon.png">
<!-- Place favicon.ico in the root directory -->
<link rel="stylesheet" href="css/normalize.css">
<link rel="stylesheet" href="css/main.css">
</head>
<body>'''
footer = '''</body></html>'''
write_html(header + self.html + footer, path)
elif output == "json":
if self.json is None:
assert self.json is not None, "Please run the profiler first"
write_json(self.json, path)
else:
print("sdf")
RaiseIt.type_error(output, ["html", "json"])
def to_queue(self, message):
"""
Send the profiler information to a queue. By default it uses a public encrypted queue.
:return:
"""
# Access the CLOUDAMQP_URL environment variable and parse it (falling back to the configured queue_url)
url = os.environ.get('CLOUDAMQP_URL', self.queue_url)
params = pika.URLParameters(url)
connection = pika.BlockingConnection(params)
channel = connection.channel() # start a channel
channel.queue_declare(queue='optimus') # Declare a queue
channel.basic_publish(exchange=self.queue_exchange,
routing_key=self.queue_routing_key,
body=json.dumps(message))
channel.close()
@staticmethod
def to_json(df, columns, buckets=40, infer=False, relative_error=1):
"""
Return the profiling data in json format
:param df: Dataframe to be processed
:param columns: column to calculate the histogram
:param buckets: buckets on the histogram
:return: json file
"""
# Get the stats for all the columns
output = Profiler.columns(df, columns, buckets, infer, relative_error)
# Add the data summary to the output
output["summary"] = Profiler.dataset_info(df)
# Get a data sample and transform it to friendly json format
data = []
for l in df.sample_n(10).to_json():
data.append([v for k, v in l.items()])
output["sample"] = {"columns": df.columns, "data": data}
return output
@staticmethod
def columns(df, columns, buckets=40, infer=False, relative_error=1):
"""
Return statistical information about a specific column in json format
:param df: Dataframe to be processed
:param columns: Columns that you want to profile
:param buckets: Create buckets divided by range. Each bin is equal.
:param relative_error: relative error when the percentile is calculated. 0 is more exact but slower; 1 allows more error but is faster
:return: json object with the column statistics
"""
columns = parse_columns(df, columns)
# Get just a sample to infer the column data type
# sample_size_number = sample_size(rows_count, 95.0, 2.0)
# fraction = sample_size_number / rows_count
# sample = df.sample(False, fraction, seed=1)
# Initialize Objects
columns_info = {}
columns_info['columns'] = {}
rows_count = df.count()
columns_info['rows_count'] = humanize.intword(rows_count)
count_dtypes = Profiler.count_data_types(df, columns, infer)
columns_info["count_types"] = count_dtypes["count_types"]
columns_info['size'] = humanize.naturalsize(df.size())
# Cast columns to the data type infer by count_data_types()
df = Profiler.cast_columns(df, columns, count_dtypes).cache()
# Calculate stats
stats = Profiler.general_stats(df, columns)
for col_name in columns:
col_info = {}
logging.info("------------------------------")
logging.info("Processing column '" + col_name + "'...")
columns_info['columns'][col_name] = {}
col_info["stats"] = stats[col_name]
col_info.update(Profiler.frequency(df, col_name, buckets))
col_info.update(Profiler.stats_by_column(col_name, stats, count_dtypes, rows_count))
col_info['column_dtype'] = count_dtypes["columns"][col_name]['dtype']
col_info["dtypes_stats"] = count_dtypes["columns"][col_name]['details']
column_type = count_dtypes["columns"][col_name]['type']
if column_type == "numeric":
col_info["stats"].update(Profiler.extra_numeric_stats(df, col_name, stats, relative_error))
col_info["hist"] = df.cols.hist(col_name, | |
{
'key': key,
'props': props,
'provider': provider,
}
@property
def key(self) -> str:
"""The missing context key.
stability
:stability: experimental
"""
return self._values.get('key')
@property
def props(self) -> typing.Mapping[str,typing.Any]:
"""A set of provider-specific options.
stability
:stability: experimental
"""
return self._values.get('props')
@property
def provider(self) -> str:
"""The provider from which we expect this context key to be obtained.
stability
:stability: experimental
"""
return self._values.get('provider')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'MissingContext(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.data_type(jsii_type="@aws-cdk/cx-api.RuntimeInfo", jsii_struct_bases=[], name_mapping={'libraries': 'libraries'})
class RuntimeInfo():
def __init__(self, *, libraries: typing.Mapping[str,str]):
"""Information about the application's runtime components.
:param libraries: The list of libraries loaded in the application, associated with their versions.
stability
:stability: experimental
"""
self._values = {
'libraries': libraries,
}
@property
def libraries(self) -> typing.Mapping[str,str]:
"""The list of libraries loaded in the application, associated with their versions.
stability
:stability: experimental
"""
return self._values.get('libraries')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'RuntimeInfo(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.data_type(jsii_type="@aws-cdk/cx-api.SSMParameterContextQuery", jsii_struct_bases=[], name_mapping={'account': 'account', 'parameter_name': 'parameterName', 'region': 'region'})
class SSMParameterContextQuery():
def __init__(self, *, account: typing.Optional[str]=None, parameter_name: typing.Optional[str]=None, region: typing.Optional[str]=None):
"""Query to hosted zone context provider.
:param account: Query account.
:param parameter_name: Parameter name to query.
:param region: Query region.
stability
:stability: experimental
"""
self._values = {
}
if account is not None: self._values["account"] = account
if parameter_name is not None: self._values["parameter_name"] = parameter_name
if region is not None: self._values["region"] = region
@property
def account(self) -> typing.Optional[str]:
"""Query account.
stability
:stability: experimental
"""
return self._values.get('account')
@property
def parameter_name(self) -> typing.Optional[str]:
"""Parameter name to query.
stability
:stability: experimental
"""
return self._values.get('parameter_name')
@property
def region(self) -> typing.Optional[str]:
"""Query region.
stability
:stability: experimental
"""
return self._values.get('region')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'SSMParameterContextQuery(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.data_type(jsii_type="@aws-cdk/cx-api.SynthesisMessage", jsii_struct_bases=[], name_mapping={'entry': 'entry', 'id': 'id', 'level': 'level'})
class SynthesisMessage():
def __init__(self, *, entry: "MetadataEntry", id: str, level: "SynthesisMessageLevel"):
"""
:param entry:
:param id:
:param level:
stability
:stability: experimental
"""
self._values = {
'entry': entry,
'id': id,
'level': level,
}
@property
def entry(self) -> "MetadataEntry":
"""
stability
:stability: experimental
"""
return self._values.get('entry')
@property
def id(self) -> str:
"""
stability
:stability: experimental
"""
return self._values.get('id')
@property
def level(self) -> "SynthesisMessageLevel":
"""
stability
:stability: experimental
"""
return self._values.get('level')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'SynthesisMessage(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
@jsii.enum(jsii_type="@aws-cdk/cx-api.SynthesisMessageLevel")
class SynthesisMessageLevel(enum.Enum):
"""
stability
:stability: experimental
"""
INFO = "INFO"
"""
stability
:stability: experimental
"""
WARNING = "WARNING"
"""
stability
:stability: experimental
"""
ERROR = "ERROR"
"""
stability
:stability: experimental
"""
class TreeCloudArtifact(CloudArtifact, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/cx-api.TreeCloudArtifact"):
"""
stability
:stability: experimental
"""
def __init__(self, assembly: "CloudAssembly", name: str, *, type: "ArtifactType", dependencies: typing.Optional[typing.List[str]]=None, environment: typing.Optional[str]=None, metadata: typing.Optional[typing.Mapping[str,typing.List["MetadataEntry"]]]=None, properties: typing.Optional[typing.Mapping[str,typing.Any]]=None) -> None:
"""
:param assembly: -
:param name: -
:param artifact: -
:param type: The type of artifact.
:param dependencies: IDs of artifacts that must be deployed before this artifact.
:param environment: The environment into which this artifact is deployed.
:param metadata: Associated metadata.
:param properties: The set of properties for this artifact (depends on type).
stability
:stability: experimental
"""
artifact = ArtifactManifest(type=type, dependencies=dependencies, environment=environment, metadata=metadata, properties=properties)
jsii.create(TreeCloudArtifact, self, [assembly, name, artifact])
@property
@jsii.member(jsii_name="file")
def file(self) -> str:
"""
stability
:stability: experimental
"""
return jsii.get(self, "file")
@jsii.data_type(jsii_type="@aws-cdk/cx-api.VpcContextQuery", jsii_struct_bases=[], name_mapping={'filter': 'filter', 'account': 'account', 'region': 'region'})
class VpcContextQuery():
def __init__(self, *, filter: typing.Mapping[str,str], account: typing.Optional[str]=None, region: typing.Optional[str]=None):
"""Query input for looking up a VPC.
:param filter: Filters to apply to the VPC. Filter parameters are the same as passed to DescribeVpcs.
:param account: Query account.
:param region: Query region.
stability
:stability: experimental
"""
self._values = {
'filter': filter,
}
if account is not None: self._values["account"] = account
if region is not None: self._values["region"] = region
@property
def filter(self) -> typing.Mapping[str,str]:
"""Filters to apply to the VPC.
Filter parameters are the same as passed to DescribeVpcs.
see
:see: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html
stability
:stability: experimental
"""
return self._values.get('filter')
@property
def account(self) -> typing.Optional[str]:
"""Query account.
stability
:stability: experimental
"""
return self._values.get('account')
@property
def region(self) -> typing.Optional[str]:
"""Query region.
stability
:stability: experimental
"""
return self._values.get('region')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
return 'VpcContextQuery(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items())
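# Example (editor's illustration): constructing a VpcContextQuery for a
# hypothetical account and region; the filter keys follow the DescribeVpcs
# filter names.
#
#   query = VpcContextQuery(filter={'tag:Name': 'my-vpc'},
#                           account='123456789012',
#                           region='eu-west-1')
#   query.filter   # -> {'tag:Name': 'my-vpc'}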
@jsii.data_type(jsii_type="@aws-cdk/cx-api.VpcContextResponse", jsii_struct_bases=[], name_mapping={'availability_zones': 'availabilityZones', 'vpc_id': 'vpcId', 'isolated_subnet_ids': 'isolatedSubnetIds', 'isolated_subnet_names': 'isolatedSubnetNames', 'isolated_subnet_route_table_ids': 'isolatedSubnetRouteTableIds', 'private_subnet_ids': 'privateSubnetIds', 'private_subnet_names': 'privateSubnetNames', 'private_subnet_route_table_ids': 'privateSubnetRouteTableIds', 'public_subnet_ids': 'publicSubnetIds', 'public_subnet_names': 'publicSubnetNames', 'public_subnet_route_table_ids': 'publicSubnetRouteTableIds', 'vpn_gateway_id': 'vpnGatewayId'})
class VpcContextResponse():
def __init__(self, *, availability_zones: typing.List[str], vpc_id: str, isolated_subnet_ids: typing.Optional[typing.List[str]]=None, isolated_subnet_names: typing.Optional[typing.List[str]]=None, isolated_subnet_route_table_ids: typing.Optional[typing.List[str]]=None, private_subnet_ids: typing.Optional[typing.List[str]]=None, private_subnet_names: typing.Optional[typing.List[str]]=None, private_subnet_route_table_ids: typing.Optional[typing.List[str]]=None, public_subnet_ids: typing.Optional[typing.List[str]]=None, public_subnet_names: typing.Optional[typing.List[str]]=None, public_subnet_route_table_ids: typing.Optional[typing.List[str]]=None, vpn_gateway_id: typing.Optional[str]=None):
"""Properties of a discovered VPC.
:param availability_zones: AZs.
:param vpc_id: VPC id.
:param isolated_subnet_ids: IDs of all isolated subnets. Element count: #(availabilityZones) · #(isolatedGroups)
:param isolated_subnet_names: Name of isolated subnet groups. Element count: #(isolatedGroups)
:param isolated_subnet_route_table_ids: Route Table IDs of isolated subnet groups. Element count: #(availabilityZones) · #(isolatedGroups)
:param private_subnet_ids: IDs of all private subnets. Element count: #(availabilityZones) · #(privateGroups)
:param private_subnet_names: Name of private subnet groups. Element count: #(privateGroups)
:param private_subnet_route_table_ids: Route Table IDs of private subnet groups. Element count: #(availabilityZones) · #(privateGroups)
:param public_subnet_ids: IDs of all public subnets. Element count: #(availabilityZones) · #(publicGroups)
:param public_subnet_names: Name of public subnet groups. Element count: #(publicGroups)
:param public_subnet_route_table_ids: Route Table IDs of public subnet groups. Element count: #(availabilityZones) · #(publicGroups)
:param vpn_gateway_id: The VPN gateway ID.
stability
:stability: experimental
"""
self._values = {
'availability_zones': availability_zones,
'vpc_id': vpc_id,
}
if isolated_subnet_ids is not None: self._values["isolated_subnet_ids"] = isolated_subnet_ids
if isolated_subnet_names is not None: self._values["isolated_subnet_names"] = isolated_subnet_names
if isolated_subnet_route_table_ids is not None: self._values["isolated_subnet_route_table_ids"] = isolated_subnet_route_table_ids
if private_subnet_ids is not None: self._values["private_subnet_ids"] = private_subnet_ids
if private_subnet_names is not None: self._values["private_subnet_names"] = private_subnet_names
if private_subnet_route_table_ids is not None: self._values["private_subnet_route_table_ids"] = private_subnet_route_table_ids
if public_subnet_ids is not None: self._values["public_subnet_ids"] = public_subnet_ids
if public_subnet_names is not None: self._values["public_subnet_names"] = public_subnet_names
if public_subnet_route_table_ids is not None: self._values["public_subnet_route_table_ids"] = public_subnet_route_table_ids
if vpn_gateway_id is not None: self._values["vpn_gateway_id"] = vpn_gateway_id
@property
def availability_zones(self) -> typing.List[str]:
"""AZs.
stability
:stability: experimental
"""
return self._values.get('availability_zones')
@property
def vpc_id(self) -> str:
"""VPC id.
stability
:stability: experimental
"""
return self._values.get('vpc_id')
@property
def isolated_subnet_ids(self) -> typing.Optional[typing.List[str]]:
"""IDs of all isolated subnets.
Element count: #(availabilityZones) · #(isolatedGroups)
stability
:stability: experimental
"""
return self._values.get('isolated_subnet_ids')
@property
def isolated_subnet_names(self) -> typing.Optional[typing.List[str]]:
"""Name of isolated subnet groups.
Element count: #(isolatedGroups)
stability
:stability: experimental
"""
return self._values.get('isolated_subnet_names')
@property
def isolated_subnet_route_table_ids(self) -> typing.Optional[typing.List[str]]:
"""Route Table IDs of isolated subnet groups.
Element count: #(availabilityZones) · #(isolatedGroups)
stability
:stability: experimental
"""
return self._values.get('isolated_subnet_route_table_ids')
@property
def private_subnet_ids(self) -> typing.Optional[typing.List[str]]:
"""IDs of all private subnets.
Element count: #(availabilityZones) · #(privateGroups)
stability
:stability: experimental
"""
return self._values.get('private_subnet_ids')
@property
def private_subnet_names(self) -> typing.Optional[typing.List[str]]:
"""Name of private subnet groups.
Element count: #(privateGroups)
stability
:stability: experimental
"""
return self._values.get('private_subnet_names')
@property
def private_subnet_route_table_ids(self) -> typing.Optional[typing.List[str]]:
"""Route Table IDs of private subnet groups.
Element count: #(availabilityZones) · #(privateGroups)
stability
:stability: experimental
"""
return self._values.get('private_subnet_route_table_ids')
@property
def public_subnet_ids(self) -> typing.Optional[typing.List[str]]:
"""IDs of all public subnets.
Element count: #(availabilityZones) · #(publicGroups)
stability
:stability: experimental
"""
return self._values.get('public_subnet_ids')
@property
def public_subnet_names(self) -> typing.Optional[typing.List[str]]:
"""Name of public subnet groups.
Element count: #(publicGroups)
stability
:stability: experimental
"""
return self._values.get('public_subnet_names')
@property
def public_subnet_route_table_ids(self) -> typing.Optional[typing.List[str]]:
"""Route Table IDs of public subnet groups.
Element count: #(availabilityZones) · #(publicGroups)
stability
:stability: experimental
"""
return self._values.get('public_subnet_route_table_ids')
@property
def vpn_gateway_id(self) -> typing.Optional[str]:
"""The VPN gateway ID.
stability
:stability: experimental
"""
| |
%s: no spades contigs', geneName)
return None
if len(contigList) == 0:
logger.warning('gene %s: empty contig list', geneName)
return None
logger.debug('gene %s: %d spades contigs', geneName, len(contigList))
geneProtein = self.translateGene(result.representativePaftolTargetDict[geneName].seqRecord)
Bio.SeqIO.write([geneProtein], self.makeWorkdirPath('%s-protein.fasta' % geneName), 'fasta')
aminoAcidSet = set(Bio.Alphabet.IUPAC.protein.letters.lower())
# allow stop translation
aminoAcidSet.add('*')
setDiff = set(str(geneProtein.seq).lower()) - aminoAcidSet
if len(setDiff) > 0:
logger.warning('gene %s: invalid amino acids %s' % (geneName, ', '.join(setDiff)))
return None
contigFname = self.makeGeneContigsFname(geneName)
Bio.SeqIO.write(contigList, contigFname, 'fasta')
exonerateRunner = paftol.tools.ExonerateRunner()
exonerateResultList = exonerateRunner.parse(geneProtein, contigFname, 'protein2genome', bestn=len(contigList))
logger.debug('gene %s: %d contigs, %d exonerate results', geneName, len(contigList), len(exonerateResultList))
if len(exonerateResultList) == 0:
logger.warning('gene %s: no exonerate results from %d contigs', geneName, len(contigList))
exonerateResultList.sort(cmpExonerateResultByQueryAlignmentStart)
# reverse complementing extraneous as that is done by exonerate itself
# for exonerateResult in exonerateResultList:
# logger.debug('gene %s, contig %s: targetStrand = %s', geneName, exonerateResult.targetId, exonerateResult.targetStrand)
# logger.debug('gene %s, contig %s, raw: %d -> %d, %s', geneName, exonerateResult.targetId, exonerateResult.targetCdsStart, exonerateResult.targetCdsEnd, str(exonerateResult.targetAlignmentSeq.seq))
# if exonerateResult.targetStrand == '-':
# exonerateResult.reverseComplementTarget()
# logger.debug('gene %s, contig %s, can: %d -> %d, %s', geneName, exonerateResult.targetId, exonerateResult.targetCdsStart, exonerateResult.targetCdsEnd, str(exonerateResult.targetAlignmentSeq.seq))
# logger.warning('provisional filtering and supercontig construction, handling of overlapping contigs not finalised')
filteredExonerateResultList = self.filterExonerateResultList(geneName, exonerateResultList, strictOverlapFiltering)
logger.debug('gene %s: %d exonerate results after filtering', geneName, len(filteredExonerateResultList))
Bio.SeqIO.write([e.targetCdsSeq for e in filteredExonerateResultList], os.path.join(self.makeGeneDirPath(geneName), '%s-fecds.fasta' % geneName), 'fasta')
if len(filteredExonerateResultList) == 0:
logger.warning('gene %s: no exonerate results left after filtering', geneName)
return None
supercontig = Bio.SeqRecord.SeqRecord(Bio.Seq.Seq(''.join([str(e.targetCdsSeq.seq) for e in filteredExonerateResultList])), id='%s_supercontig' % geneName)
logger.debug('gene %s: supercontig length %d', geneName, len(supercontig))
if len(supercontig) == 0:
logger.warning('gene %s: empty supercontig', geneName)
return None
supercontigFname = os.path.join(self.makeGeneDirPath(geneName), '%s-supercontig.fasta' % geneName)
Bio.SeqIO.write([supercontig], supercontigFname, 'fasta')
Bio.SeqIO.write([geneProtein], os.path.join(self.makeGeneDirPath(geneName), '%s-supercontigref.fasta' % geneName), 'fasta')
# FIXME: use exonerate to align "supercontig" to reference and
# retrieve coding sequence of exonerate result with highest
# score. In case of tied highest score, select result with
# shortest CDS, as this is indicative of highest
# "concentration" of matches and fewest gaps.
supercontigErList = exonerateRunner.parse(geneProtein, supercontigFname, 'protein2genome', bestn=1)
logger.debug('gene %s: %d supercontig exonerate results', geneName, len(supercontigErList))
splicedSupercontigEr = None
if len(supercontigErList) == 0:
logger.warning('gene %s: no exonerate results from supercontig', geneName)
return None
if len(supercontigErList) > 1:
splicedSupercontigEr = supercontigErList[0]
minLength = len(splicedSupercontigEr.targetCdsSeq)
for supercontigEr in supercontigErList:
if len(supercontigEr.targetCdsSeq) < minLength:
splicedSupercontigEr = supercontigEr
minLength = len(splicedSupercontigEr.targetCdsSeq)
contigStats = ', '.join(['raw=%d, cdsLen=%d' % (e.rawScore, len(e.targetCdsSeq)) for e in supercontigErList])
logger.warning('gene %s: received %d supercontig exonerate results despite bestn=1 (%s), selected raw=%d, cdsLen=%d', geneName, len(supercontigErList), contigStats, splicedSupercontigEr.rawScore, len(splicedSupercontigEr.targetCdsSeq))
else:
splicedSupercontigEr = supercontigErList[0]
# not filtering for percent identity to gene again, as that is already done
if result.reverseFastq is not None:
readsSpec = '%s, %s' % (result.forwardFastq, result.reverseFastq)
else:
readsSpec = result.forwardFastq
splicedSupercontig = Bio.SeqRecord.SeqRecord(Bio.Seq.Seq(str(splicedSupercontigEr.targetCdsSeq.seq)), id=geneName, description='reconstructed CDS computed by paftol.HybpiperAnalyser, targets: %s, reads: %s' % (result.paftolTargetSet.fastaHandleStr, readsSpec))
logger.debug('gene %s: splicedSupercontig length %d', geneName, len(splicedSupercontig))
splicedSupercontigFname = os.path.join(self.makeGeneDirPath(geneName), '%s-splicedsupercontig.fasta' % geneName)
Bio.SeqIO.write([splicedSupercontig], splicedSupercontigFname, 'fasta')
return splicedSupercontig
class HybpiperBwaAnalyser(HybpiperAnalyser):
"""L{HybpiperAnalyser} subclass that implements an analysis process
close to the HybPiper pipeline.
Some parameters to SPAdes can be controlled via instance variables as
documented below. Defaults of these parameters correspond to the
defaults provided by SPAdes, respectively (at the time of developing
this).
@ivar bwaRunner: BWA runner, providing instance variables for configuring BWA
@type bwaRunner: C{paftol.tools.BwaRunner}
"""
def __init__(self, workdirTgz=None, workDirname='pafpipertmp', bwaRunner=None, spadesRunner=None):
super(HybpiperBwaAnalyser, self).__init__(workdirTgz, workDirname, spadesRunner)
if bwaRunner is None:
self.bwaRunner = paftol.tools.BwaRunner()
else:
self.bwaRunner = bwaRunner
def setup(self, result):
logger.debug('setting up')
self.setupTmpdir()
# FIXME: is writing the targets fasta file really part of setup?
result.paftolTargetSet.writeFasta(self.makeTargetsFname(True))
def mapReadsBwa(self, result):
"""Map reads to gene sequences (from multiple organisms possibly).
"""
logger.debug('mapping reads to gene sequences')
referenceFname = self.makeTargetsFname(True)
self.bwaRunner.indexReference(referenceFname)
forwardReadsFname = os.path.join(os.getcwd(), result.forwardFastq)
if result.reverseFastq is None:
reverseReadsFname = None
else:
reverseReadsFname = os.path.join(os.getcwd(), result.reverseFastq)
result.paftolTargetSet.numOfftargetReads = 0
self.bwaRunner.processBwa(result.paftolTargetSet, referenceFname, forwardReadsFname, reverseReadsFname)
# ideas for hybrid / consensus sequence for (multiple) re-mapping
# reference CDS: atgtac------catacagaagagacgtga
# reconstructed CDS: cactcatttcat---gga
# "consensus" atgCACTCAATTCAT GGAgagacgtga
# principle: where a reconstructed symbol is available, use that in preference.
# * gap in reference: use symbols from reconstructed (must be non-gap if pairwise alignment)
# * gap in reconstructed: skip symbols from reference
# * ends / portions with no alignment to reconstructed: fill in from reference
# Problem: avoid non-homologous alignment portions (e.g. around borders of reconstructed)?
def analyse(self, targetsSourcePath, forwardFastq, reverseFastq, allowInvalidBases, strictOverlapFiltering, maxNumReadsPerGene):
logger.debug('starting')
paftolTargetSet = PaftolTargetSet()
paftolTargetSet.readFasta(targetsSourcePath)
# FIXME: put allowInvalidBases in result for subsequent reference?
paftolTargetSet.sanityCheck(allowInvalidBases)
result = HybpiperResult(paftolTargetSet, forwardFastq, reverseFastq)
try:
self.setup(result)
logger.debug('setup done')
self.mapReadsBwa(result)
logger.debug('BWA mapping done')
self.distribute(result, maxNumReadsPerGene)
logger.debug('read distribution done')
self.setRepresentativeGenes(result)
self.writeRepresentativeGenes(result)
logger.debug('representative genes selected')
result.reconstructedCdsDict = {}
for geneName in result.paftolTargetSet.paftolGeneDict:
result.reconstructedCdsDict[geneName] = self.reconstructCds(result, geneName, strictOverlapFiltering)
logger.debug('CDS reconstruction done')
logger.debug('finished')
return result
finally:
self.makeTgz()
logger.debug('tgz file made')
self.cleanup()
logger.debug('cleanup done')
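# Example (editor's illustration): a minimal sketch of driving the BWA-based
# analyser; file names and parameter values are hypothetical.
#
#   analyser = HybpiperBwaAnalyser(workdirTgz='sample01-workdir.tgz')
#   result = analyser.analyse('targets.fasta',
#                             'sample01_R1.fastq', 'sample01_R2.fastq',
#                             allowInvalidBases=False,
#                             strictOverlapFiltering=True,
#                             maxNumReadsPerGene=2000)
#   # result.reconstructedCdsDict maps gene names to reconstructed CDS records
#   # (or None where reconstruction failed).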
class HybpiperTblastnAnalyser(HybpiperAnalyser):
"""L{HybseqAnalyser} subclass that implements an analysis process
close to the HybPiper pipeline.
The C{tblastnRunner} and C{spadesRunner} should be considered "owned" by
the analyser, i.e. they may be modified by the analyser (e.g. to set parameters
as required), so they should not be modified or otherwise be used by clients.
Some parameters to SPAdes can be controlled via instance variables as
documented below. Defaults of these parameters correspond to the
defaults provided by SPAdes, respectively (at the time of developing
this).
@ivar tblastnRunner: tblastn runner, providing instance variables for configuring tblastn
@type tblastnRunner: C{paftol.tools.TblastnRunner}
@ivar spadesRunner: SPAdes runner, providing instance variables for configuring SPAdes
@type spadesRunner: C{paftol.tools.SpadesRunner}
"""
def __init__(self, workdirTgz=None, workDirname='pafpipertmp', tblastnRunner=None, spadesRunner=None):
super(HybpiperTblastnAnalyser, self).__init__(workdirTgz, workDirname, spadesRunner)
if tblastnRunner is None:
self.tblastnRunner = paftol.tools.TblastnRunner()
else:
self.tblastnRunner = tblastnRunner
def setup(self, result):
logger.debug('setting up')
self.setupTmpdir()
result.paftolTargetSet.writeFasta(self.makeTargetsFname(True))
forwardFastaPath = self.makeWorkdirPath(self.forwardFasta)
paftol.tools.fastqToFasta(result.forwardFastq, forwardFastaPath)
self.tblastnRunner.indexDatabase(forwardFastaPath)
if result.reverseFastq is not None:
reverseFastaPath = self.makeWorkdirPath(self.reverseFasta)
paftol.tools.fastqToFasta(result.reverseFastq, reverseFastaPath)
self.tblastnRunner.indexDatabase(reverseFastaPath)
def mapReadsTblastn(self, result):
"""Map gene sequences to reads (from multiple organisms possibly).
"""
logger.debug('mapping gene sequences to reads')
referenceFname = self.makeTargetsFname(True) ## check this holds
targetProteinList = [self.translateGene(geneSr) for geneSr in result.paftolTargetSet.getSeqRecordList()]
# FIXME: check these parameters, consider numAlignments?
self.tblastnRunner.maxTargetSeqs = 10000000
self.tblastnRunner.maxHsps = 1
result.paftolTargetSet.numOfftargetReads = None
self.tblastnRunner.processTblastn(result.paftolTargetSet, self.makeWorkdirPath(self.forwardFasta), targetProteinList)
# FIXME: should be not None (!!!)
if result.reverseFastq is not None:
self.tblastnRunner.processTblastn(result.paftolTargetSet, self.makeWorkdirPath(self.reverseFasta), targetProteinList)
# ideas for hybrid / consensus sequence for (multiple) re-mapping
# reference CDS: atgtac------catacagaagagacgtga
# reconstructed CDS: cactcatttcat---gga
# "consensus" atgCACTCAATTCAT GGAgagacgtga
# principle: where a reconstructed symbol is available, use that in preference.
# * gap in reference: use symbols from reconstructed (must be non-gap if pairwise alignment)
# * gap in reconstructed: skip symbols from reference
# * ends / portions with no alignment to reconstructed: fill in from reference
# Problem: avoid non-homologous alignment portions (e.g. around borders of reconstructed)?
def analyse(self, targetsSourcePath, forwardFastq, reverseFastq, allowInvalidBases, strictOverlapFiltering, maxNumReadsPerGene):
logger.debug('starting')
paftolTargetSet = PaftolTargetSet()
paftolTargetSet.readFasta(targetsSourcePath)
# FIXME: put allowInvalidBases in result for subsequent reference?
paftolTargetSet.sanityCheck(allowInvalidBases)
result = HybpiperResult(paftolTargetSet, forwardFastq, reverseFastq)
try:
self.setup(result)
logger.debug('setup done')
self.mapReadsTblastn(result)
logger.debug('tblastn mapping done')
self.distribute(result, maxNumReadsPerGene)
logger.debug('read distribution done')
self.setRepresentativeGenes(result)
self.writeRepresentativeGenes(result)
logger.debug('representative genes selected')
result.reconstructedCdsDict = {}
for geneName in result.paftolTargetSet.paftolGeneDict:
result.reconstructedCdsDict[geneName] = self.reconstructCds(result, geneName, strictOverlapFiltering)
logger.debug('CDS reconstruction done')
logger.debug('finished')
return result
finally:
self.makeTgz()
logger.debug('tgz file made')
self.cleanup()
logger.debug('cleanup done')
class OverlapAnalyser(HybseqAnalyser):
def __init__(self, workdirTgz=None, workDirname='pafpipertmp', tblastnRunner=None, spadesRunner=None):
super(OverlapAnalyser, self).__init__(workdirTgz, workDirname)
if tblastnRunner is None:
self.tblastnRunner = paftol.tools.TblastnRunner()
else:
self.tblastnRunner = tblastnRunner
self.windowSizeReference = None
self.relIdentityThresholdReference = None
self.windowSizeReadOverlap = None
self.relIdentityThresholdReadOverlap = None
# hard-coded alignment runner while API is incomplete...
self.alignmentRunner = tools.SemiglobalAlignmentRunner()
def setup(self, result):
logger.debug('setting up')
self.setupTmpdir()
result.paftolTargetSet.writeFasta(self.makeTargetsFname(True))
forwardFastaPath = self.makeWorkdirPath(self.forwardFasta)
paftol.tools.fastqToFasta(result.forwardFastq, forwardFastaPath)
self.tblastnRunner.indexDatabase(forwardFastaPath)
if result.reverseFastq is not None:
reverseFastaPath = self.makeWorkdirPath(self.reverseFasta)
paftol.tools.fastqToFasta(result.reverseFastq, reverseFastaPath)
self.tblastnRunner.indexDatabase(reverseFastaPath)
def mapReadsTblastn(self, result):
"""Map gene sequences to reads (from multiple organisms possibly).
"""
logger.debug('mapping gene sequences to reads')
referenceFname = self.makeTargetsFname(True) ## check this holds
targetProteinList = [self.translateGene(geneSr) for geneSr in result.paftolTargetSet.getSeqRecordList()]
# FIXME: check these parameters, consider numAlignments?
self.tblastnRunner.maxTargetSeqs = 10000000
self.tblastnRunner.maxHsps = 1
result.paftolTargetSet.numOfftargetReads = None
self.tblastnRunner.processTblastn(result.paftolTargetSet, self.makeWorkdirPath(self.forwardFasta), targetProteinList)
# FIXME: should be not None (!!!)
if result.reverseFastq is not None:
self.tblastnRunner.processTblastn(result.paftolTargetSet, self.makeWorkdirPath(self.reverseFasta), targetProteinList)
def assembleGeneSerialOverlap(self, result, geneName):
# logger.debug('tracking: starting with gene %s' % geneName)
overlapCsvFname = self.makeWorkdirPath('overlap-%s.csv' % geneName)
positionedReadDirname = self.makeWorkdirPath('posread-%s' % geneName)
positionedReadFname = self.makeWorkdirPath('posread-%s.fasta' % geneName)
os.mkdir(positionedReadDirname)
readSrFwdList = copy.deepcopy(result.paftolTargetSet.paftolGeneDict[geneName].makeMappedReadsUniqueList(includeForward=True, includeReverse=False))
readSrRevList = copy.deepcopy(result.paftolTargetSet.paftolGeneDict[geneName].makeMappedReadsUniqueList(includeForward=False, includeReverse=True))
readSrList = []
for readSr in readSrFwdList:
readSr.id = '%s-fwd' % readSr.id
readSrList.append(readSr)
# coding: utf-8
"""
OANDA v20 REST API
The full OANDA v20 REST API Specification. This specification defines how to interact with v20 Accounts, Trades, Orders, Pricing and more. To authenticate use the string 'Bearer ' followed by the token which can be obtained at https://www.oanda.com/demo-account/tpa/personal_token # noqa: E501
OpenAPI spec version: 3.0.23
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CalculatedAccountState(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'unrealized_pl': 'str',
'nav': 'str',
'margin_used': 'str',
'margin_available': 'str',
'position_value': 'str',
'margin_closeout_unrealized_pl': 'str',
'margin_closeout_nav': 'str',
'margin_closeout_margin_used': 'str',
'margin_closeout_percent': 'str',
'margin_closeout_position_value': 'str',
'withdrawal_limit': 'str',
'margin_call_margin_used': 'str',
'margin_call_percent': 'str'
}
attribute_map = {
'unrealized_pl': 'unrealizedPL',
'nav': 'NAV',
'margin_used': 'marginUsed',
'margin_available': 'marginAvailable',
'position_value': 'positionValue',
'margin_closeout_unrealized_pl': 'marginCloseoutUnrealizedPL',
'margin_closeout_nav': 'marginCloseoutNAV',
'margin_closeout_margin_used': 'marginCloseoutMarginUsed',
'margin_closeout_percent': 'marginCloseoutPercent',
'margin_closeout_position_value': 'marginCloseoutPositionValue',
'withdrawal_limit': 'withdrawalLimit',
'margin_call_margin_used': 'marginCallMarginUsed',
'margin_call_percent': 'marginCallPercent'
}
def __init__(self, unrealized_pl=None, nav=None, margin_used=None, margin_available=None, position_value=None, margin_closeout_unrealized_pl=None, margin_closeout_nav=None, margin_closeout_margin_used=None, margin_closeout_percent=None, margin_closeout_position_value=None, withdrawal_limit=None, margin_call_margin_used=None, margin_call_percent=None): # noqa: E501
"""CalculatedAccountState - a model defined in Swagger""" # noqa: E501
self._unrealized_pl = None
self._nav = None
self._margin_used = None
self._margin_available = None
self._position_value = None
self._margin_closeout_unrealized_pl = None
self._margin_closeout_nav = None
self._margin_closeout_margin_used = None
self._margin_closeout_percent = None
self._margin_closeout_position_value = None
self._withdrawal_limit = None
self._margin_call_margin_used = None
self._margin_call_percent = None
self.discriminator = None
if unrealized_pl is not None:
self.unrealized_pl = unrealized_pl
if nav is not None:
self.nav = nav
if margin_used is not None:
self.margin_used = margin_used
if margin_available is not None:
self.margin_available = margin_available
if position_value is not None:
self.position_value = position_value
if margin_closeout_unrealized_pl is not None:
self.margin_closeout_unrealized_pl = margin_closeout_unrealized_pl
if margin_closeout_nav is not None:
self.margin_closeout_nav = margin_closeout_nav
if margin_closeout_margin_used is not None:
self.margin_closeout_margin_used = margin_closeout_margin_used
if margin_closeout_percent is not None:
self.margin_closeout_percent = margin_closeout_percent
if margin_closeout_position_value is not None:
self.margin_closeout_position_value = margin_closeout_position_value
if withdrawal_limit is not None:
self.withdrawal_limit = withdrawal_limit
if margin_call_margin_used is not None:
self.margin_call_margin_used = margin_call_margin_used
if margin_call_percent is not None:
self.margin_call_percent = margin_call_percent
@property
def unrealized_pl(self):
"""Gets the unrealized_pl of this CalculatedAccountState. # noqa: E501
The total unrealized profit/loss for all Trades currently open in the Account. # noqa: E501
:return: The unrealized_pl of this CalculatedAccountState. # noqa: E501
:rtype: str
"""
return self._unrealized_pl
@unrealized_pl.setter
def unrealized_pl(self, unrealized_pl):
"""Sets the unrealized_pl of this CalculatedAccountState.
The total unrealized profit/loss for all Trades currently open in the Account. # noqa: E501
:param unrealized_pl: The unrealized_pl of this CalculatedAccountState. # noqa: E501
:type: str
"""
self._unrealized_pl = unrealized_pl
@property
def nav(self):
"""Gets the nav of this CalculatedAccountState. # noqa: E501
The net asset value of the Account. Equal to Account balance + unrealizedPL. # noqa: E501
:return: The nav of this CalculatedAccountState. # noqa: E501
:rtype: str
"""
return self._nav
@nav.setter
def nav(self, nav):
"""Sets the nav of this CalculatedAccountState.
The net asset value of the Account. Equal to Account balance + unrealizedPL. # noqa: E501
:param nav: The nav of this CalculatedAccountState. # noqa: E501
:type: str
"""
self._nav = nav
@property
def margin_used(self):
"""Gets the margin_used of this CalculatedAccountState. # noqa: E501
Margin currently used for the Account. # noqa: E501
:return: The margin_used of this CalculatedAccountState. # noqa: E501
:rtype: str
"""
return self._margin_used
@margin_used.setter
def margin_used(self, margin_used):
"""Sets the margin_used of this CalculatedAccountState.
Margin currently used for the Account. # noqa: E501
:param margin_used: The margin_used of this CalculatedAccountState. # noqa: E501
:type: str
"""
self._margin_used = margin_used
@property
def margin_available(self):
"""Gets the margin_available of this CalculatedAccountState. # noqa: E501
Margin available for Account currency. # noqa: E501
:return: The margin_available of this CalculatedAccountState. # noqa: E501
:rtype: str
"""
return self._margin_available
@margin_available.setter
def margin_available(self, margin_available):
"""Sets the margin_available of this CalculatedAccountState.
Margin available for Account currency. # noqa: E501
:param margin_available: The margin_available of this CalculatedAccountState. # noqa: E501
:type: str
"""
self._margin_available = margin_available
@property
def position_value(self):
"""Gets the position_value of this CalculatedAccountState. # noqa: E501
The value of the Account's open positions represented in the Account's home currency. # noqa: E501
:return: The position_value of this CalculatedAccountState. # noqa: E501
:rtype: str
"""
return self._position_value
@position_value.setter
def position_value(self, position_value):
"""Sets the position_value of this CalculatedAccountState.
The value of the Account's open positions represented in the Account's home currency. # noqa: E501
:param position_value: The position_value of this CalculatedAccountState. # noqa: E501
:type: str
"""
self._position_value = position_value
@property
def margin_closeout_unrealized_pl(self):
"""Gets the margin_closeout_unrealized_pl of this CalculatedAccountState. # noqa: E501
The Account's margin closeout unrealized PL. # noqa: E501
:return: The margin_closeout_unrealized_pl of this CalculatedAccountState. # noqa: E501
:rtype: str
"""
return self._margin_closeout_unrealized_pl
@margin_closeout_unrealized_pl.setter
def margin_closeout_unrealized_pl(self, margin_closeout_unrealized_pl):
"""Sets the margin_closeout_unrealized_pl of this CalculatedAccountState.
The Account's margin closeout unrealized PL. # noqa: E501
:param margin_closeout_unrealized_pl: The margin_closeout_unrealized_pl of this CalculatedAccountState. # noqa: E501
:type: str
"""
self._margin_closeout_unrealized_pl = margin_closeout_unrealized_pl
@property
def margin_closeout_nav(self):
"""Gets the margin_closeout_nav of this CalculatedAccountState. # noqa: E501
The Account's margin closeout NAV. # noqa: E501
:return: The margin_closeout_nav of this CalculatedAccountState. # noqa: E501
:rtype: str
"""
return self._margin_closeout_nav
@margin_closeout_nav.setter
def margin_closeout_nav(self, margin_closeout_nav):
"""Sets the margin_closeout_nav of this CalculatedAccountState.
The Account's margin closeout NAV. # noqa: E501
:param margin_closeout_nav: The margin_closeout_nav of this CalculatedAccountState. # noqa: E501
:type: str
"""
self._margin_closeout_nav = margin_closeout_nav
@property
def margin_closeout_margin_used(self):
"""Gets the margin_closeout_margin_used of this CalculatedAccountState. # noqa: E501
The Account's margin closeout margin used. # noqa: E501
:return: The margin_closeout_margin_used of this CalculatedAccountState. # noqa: E501
:rtype: str
"""
return self._margin_closeout_margin_used
@margin_closeout_margin_used.setter
def margin_closeout_margin_used(self, margin_closeout_margin_used):
"""Sets the margin_closeout_margin_used of this CalculatedAccountState.
The Account's margin closeout margin used. # noqa: E501
:param margin_closeout_margin_used: The margin_closeout_margin_used of this CalculatedAccountState. # noqa: E501
:type: str
"""
self._margin_closeout_margin_used = margin_closeout_margin_used
@property
def margin_closeout_percent(self):
"""Gets the margin_closeout_percent of this CalculatedAccountState. # noqa: E501
The Account's margin closeout percentage. When this value is 1.0 or above the Account is in a margin closeout situation. # noqa: E501
:return: The margin_closeout_percent of this CalculatedAccountState. # noqa: E501
:rtype: str
"""
return self._margin_closeout_percent
@margin_closeout_percent.setter
def margin_closeout_percent(self, margin_closeout_percent):
"""Sets the margin_closeout_percent of this CalculatedAccountState.
The Account's margin closeout percentage. When this value is 1.0 or above the Account is in a margin closeout situation. # noqa: E501
:param margin_closeout_percent: The margin_closeout_percent of this CalculatedAccountState. # noqa: E501
:type: str
"""
self._margin_closeout_percent = margin_closeout_percent
@property
def margin_closeout_position_value(self):
"""Gets the margin_closeout_position_value of this CalculatedAccountState. # noqa: E501
The value of the Account's open positions as used for margin closeout calculations represented in the Account's home currency. # noqa: E501
:return: The margin_closeout_position_value of this CalculatedAccountState. # noqa: E501
:rtype: str
"""
return self._margin_closeout_position_value
@margin_closeout_position_value.setter
def margin_closeout_position_value(self, margin_closeout_position_value):
"""Sets the margin_closeout_position_value of this CalculatedAccountState.
The value of the Account's open positions as used for margin closeout calculations represented in the Account's home currency. # noqa: E501
:param margin_closeout_position_value: The margin_closeout_position_value of this CalculatedAccountState. # noqa: E501
:type: str
"""
self._margin_closeout_position_value = margin_closeout_position_value
@property
def withdrawal_limit(self):
"""Gets the withdrawal_limit of this CalculatedAccountState. # noqa: E501
The current WithdrawalLimit for the account which will be zero or a positive value indicating how much can be withdrawn from the account. # noqa: E501
:return: The withdrawal_limit of this CalculatedAccountState. # noqa: E501
:rtype: str
"""
return self._withdrawal_limit
@withdrawal_limit.setter
def withdrawal_limit(self, withdrawal_limit):
"""Sets the withdrawal_limit of this CalculatedAccountState.
The current WithdrawalLimit for the account which will be zero or a positive value indicating how much can be withdrawn from the account. # noqa: E501
:param withdrawal_limit: The withdrawal_limit of this CalculatedAccountState. # noqa: E501
:type: str
"""
self._withdrawal_limit = withdrawal_limit
@property
def margin_call_margin_used(self):
"""Gets the margin_call_margin_used of this CalculatedAccountState. # noqa: E501
The Account's margin call margin used. # noqa: E501
:return: The margin_call_margin_used of this CalculatedAccountState. # noqa: E501
:rtype: str
"""
return self._margin_call_margin_used
@margin_call_margin_used.setter
    def margin_call_margin_used(self, margin_call_margin_used):
        """Sets the margin_call_margin_used of this CalculatedAccountState.
        The Account's margin call margin used. # noqa: E501
        :param margin_call_margin_used: The margin_call_margin_used of this CalculatedAccountState. # noqa: E501
        :type: str
        """
        self._margin_call_margin_used = margin_call_margin_used
import unittest
import random
import io
import pyrtl
import six
from pyrtl import inputoutput
from pyrtl import analysis
from pyrtl.rtllib import testingutils as utils
full_adder_blif = """\
# Generated by Yosys 0.3.0+ (git sha1 7e758d5, clang 3.4-1ubuntu3 -fPIC -Os)
.model full_adder
.inputs x y cin
.outputs sum cout
.names $false
.names $true
1
.names y $not$FA.v:12$3_Y
0 1
.names x $not$FA.v:11$1_Y
0 1
.names cin $not$FA.v:15$6_Y
0 1
.names ind3 ind4 sum
1- 1
-1 1
.names $not$FA.v:15$6_Y ind2 ind3
11 1
.names x $not$FA.v:12$3_Y ind1
11 1
.names ind2 $not$FA.v:16$8_Y
0 1
.names cin $not$FA.v:16$8_Y ind4
11 1
.names x y $and$FA.v:19$11_Y
11 1
.names ind0 ind1 ind2
1- 1
-1 1
.names cin ind2 $and$FA.v:19$12_Y
11 1
.names $and$FA.v:19$11_Y $and$FA.v:19$12_Y cout
1- 1
-1 1
.names $not$FA.v:11$1_Y y ind0
11 1
.end
"""
state_machine_blif = """\
# Generated by Yosys 0.5+ 420 (git sha1 1d62f87, clang 7.0.2 -fPIC -Os)
.model statem
.inputs clk in reset
.outputs out[0] out[1] out[2] out[3]
.names $false
.names $true
1
.names $undef
.names in state[2] $abc$129$n11_1
11 1
.names $abc$129$n11_1 state[3] $auto$fsm_map.cc:238:map_fsm$30[0]
1- 1
-1 1
.names state[2] $abc$129$n13
0 1
.names state[0] $abc$129$n14_1
0 1
.names state[2] state[1] $abc$129$n15
00 1
.names $abc$129$n15 $abc$129$n14_1 $abc$129$n13 out[0]
-00 1
0-0 1
.names state[1] $abc$129$n17
0 1
.names $abc$129$n15 $abc$129$n14_1 $abc$129$n17 out[1]
-00 1
0-0 1
.names $abc$129$n15 $abc$129$n14_1 out[2]
11 1
.names in $abc$129$n13 $auto$fsm_map.cc:118:implement_pattern_cache$38
00 1
# .subckt $_DFF_PP1_ C=clk D=$auto$fsm_map.cc:238:map_fsm$30[0] Q=state[0] R=reset
# .subckt $_DFF_PP0_ C=clk D=$auto$fsm_map.cc:118:implement_pattern_cache$38 Q=state[1] R=reset
# .subckt $_DFF_PP0_ C=clk D=state[0] Q=state[2] R=reset
# .subckt $_DFF_PP0_ C=clk D=state[1] Q=state[3] R=reset
.names $false out[3]
1 1
.end
"""
# Manually set the .latch init values from 2 to arbitrary non-1 numbers, for testing.
# Should result in the same logic, but allows for testing the parser.
counter4bit_blif = """\
# Generated by Yosys 0.9 (git sha1 UNKNOWN, clang 11.0.0 -fPIC -Os)
.model counter
.inputs clk rst en
.outputs count[0] count[1] count[2] count[3]
.names $false
.names $true
1
.names $undef
.names count[0] $add$counter.v:10$2_Y[0] en $procmux$3_Y[0]
1-0 1
-11 1
.names count[1] $add$counter.v:10$2_Y[1] en $procmux$3_Y[1]
1-0 1
-11 1
.names count[2] $add$counter.v:10$2_Y[2] en $procmux$3_Y[2]
1-0 1
-11 1
.names count[3] $add$counter.v:10$2_Y[3] en $procmux$3_Y[3]
1-0 1
-11 1
.names $procmux$3_Y[0] $false rst $0\count[3:0][0]
1-0 1
-11 1
.names $procmux$3_Y[1] $false rst $0\count[3:0][1]
1-0 1
-11 1
.names $procmux$3_Y[2] $false rst $0\count[3:0][2]
1-0 1
-11 1
.names $procmux$3_Y[3] $false rst $0\count[3:0][3]
1-0 1
-11 1
.latch $0\count[3:0][0] count[0] re clk 2
.latch $0\count[3:0][1] count[1] re clk 0
.latch $0\count[3:0][2] count[2] re clk 3
.latch $0\count[3:0][3] count[3] re clk
.names count[1] count[0] $techmap$add$counter.v:10$2.$auto$alumacc.cc:474:replace_alu$53.lcu.g[1]
11 1
.names count[2] $techmap$add$counter.v:10$2.$auto$alumacc.cc:474:replace_alu$53.lcu.g[1] $techmap$add$counter.v:10$2.$auto$alumacc.cc:474:replace_alu$53.lcu.g[2]
11 1
.names count[1] count[0] $add$counter.v:10$2_Y[1]
10 1
01 1
.names count[2] $techmap$add$counter.v:10$2.$auto$alumacc.cc:474:replace_alu$53.lcu.g[1] $add$counter.v:10$2_Y[2]
10 1
01 1
.names count[3] $techmap$add$counter.v:10$2.$auto$alumacc.cc:474:replace_alu$53.lcu.g[2] $add$counter.v:10$2_Y[3]
10 1
01 1
.names count[0] $true $add$counter.v:10$2_Y[0]
10 1
01 1
.names count[0] $techmap$add$counter.v:10$2.$auto$alumacc.cc:474:replace_alu$53.lcu.g[0]
1 1
.end
""" # noqa
counter4bit_blif_bad_latch_inits = """\
# Generated by Yosys 0.9 (git sha1 UNKNOWN, clang 11.0.0 -fPIC -Os)
.model counter
.inputs clk rst en
.outputs count[0] count[1] count[2] count[3]
.names $false
.names $true
1
.names $undef
.names count[0] $add$counter.v:10$2_Y[0] en $procmux$3_Y[0]
1-0 1
-11 1
.names count[1] $add$counter.v:10$2_Y[1] en $procmux$3_Y[1]
1-0 1
-11 1
.names count[2] $add$counter.v:10$2_Y[2] en $procmux$3_Y[2]
1-0 1
-11 1
.names count[3] $add$counter.v:10$2_Y[3] en $procmux$3_Y[3]
1-0 1
-11 1
.names $procmux$3_Y[0] $false rst $0\count[3:0][0]
1-0 1
-11 1
.names $procmux$3_Y[1] $false rst $0\count[3:0][1]
1-0 1
-11 1
.names $procmux$3_Y[2] $false rst $0\count[3:0][2]
1-0 1
-11 1
.names $procmux$3_Y[3] $false rst $0\count[3:0][3]
1-0 1
-11 1
.latch $0\count[3:0][0] count[0] re clk 1
.latch $0\count[3:0][1] count[1] re clk 1
.latch $0\count[3:0][2] count[2] re clk 0
.latch $0\count[3:0][3] count[3] re clk 2
.names count[1] count[0] $techmap$add$counter.v:10$2.$auto$alumacc.cc:474:replace_alu$53.lcu.g[1]
11 1
.names count[2] $techmap$add$counter.v:10$2.$auto$alumacc.cc:474:replace_alu$53.lcu.g[1] $techmap$add$counter.v:10$2.$auto$alumacc.cc:474:replace_alu$53.lcu.g[2]
11 1
.names count[1] count[0] $add$counter.v:10$2_Y[1]
10 1
01 1
.names count[2] $techmap$add$counter.v:10$2.$auto$alumacc.cc:474:replace_alu$53.lcu.g[1] $add$counter.v:10$2_Y[2]
10 1
01 1
.names count[3] $techmap$add$counter.v:10$2.$auto$alumacc.cc:474:replace_alu$53.lcu.g[2] $add$counter.v:10$2_Y[3]
10 1
01 1
.names count[0] $true $add$counter.v:10$2_Y[0]
10 1
01 1
.names count[0] $techmap$add$counter.v:10$2.$auto$alumacc.cc:474:replace_alu$53.lcu.g[0]
1 1
.end
""" # noqa
blif_with_output_as_arg = """
# Generated by Yosys 0.9+2406 (git sha1 aee43936, clang 11.0.3 -fPIC -Os)
.model Top
.inputs clk in[0] in[1]
.outputs out
.names $false
.names $true
1
.names $undef
.names out $techmap$add$test.v:9$6.$auto$alumacc.cc:485:replace_alu$60.X[0]
0 1
.names in[0] $not$test.v:6$1_Y[0]
0 1
.names r[0] $not$test.v:6$3_Y[0]
0 1
.latch $techmap$add$test.v:9$6.$auto$alumacc.cc:485:replace_alu$60.X[0] r[0] re clk 2
.names $not$test.v:6$1_Y[0] $not$test.v:6$3_Y[0] out
10 1
01 1
.names $true $not$test.v:6$3_Y[1]
1 1
.names $false $techmap$add$test.v:9$6.$auto$alumacc.cc:485:replace_alu$60.X[1]
1 1
.names $false $techmap$add$test.v:9$6.$auto$alumacc.cc:485:replace_alu$60.X[2]
1 1
.end
""" # noqa
simple_unmerged_io_blif = """
# Generated by Yosys 0.9+2406 (git sha1 aee43936, clang 11.0.3 -fPIC -Os)
.model top
.inputs a[0] a[1] a[2] a[3]
.outputs b[0] b[1]
.names $false
.names $true
1
.names $undef
.names a[0] b[0]
1 1
.names a[2] b[1]
1 1
.end
""" # noqa
four_bit_adder_multi_module = """
# Generated by Yosys 0.9+2406 (git sha1 aee43936, clang 11.0.3 -fPIC -Os)
.model four_bit_adder
.inputs a[0] a[1] a[2] a[3] b[0] b[1] b[2] b[3] cin
.outputs s[0] s[1] s[2] s[3] cout
.names $false
.names $true
1
.names $undef
.subckt full_adder a=a[0] b=b[0] cin=cin cout=cout0 s=s[0]
.subckt full_adder a=a[1] b=b[1] cin=cout0 cout=cout1 s=s[1]
.subckt full_adder a=a[2] b=b[2] cin=cout1 cout=cout2 s=s[2]
.subckt full_adder a=a[3] b=b[3] cin=cout2 cout=cout s=s[3]
.end
.model full_adder
.inputs a b cin
.outputs s cout
.names $false
.names $true
1
.names $undef
.names a b $techmap$add$four_bit_adder_subsub.v:34$1.$auto$alumacc.cc:485:replace_alu$46.X[0]
10 1
01 1
.names a b $techmap$add$four_bit_adder_subsub.v:34$1.$auto$alumacc.cc:485:replace_alu$46.CO[0]
11 1
.names $techmap$add$four_bit_adder_subsub.v:34$1.$auto$alumacc.cc:485:replace_alu$46.CO[0] $techmap$add$four_bit_adder_subsub.v:34$2.$auto$alumacc.cc:485:replace_alu$49.CO[0] x
10 1
01 1
.names cin $techmap$add$four_bit_adder_subsub.v:34$1.$auto$alumacc.cc:485:replace_alu$46.X[0] s
10 1
01 1
.names cin $techmap$add$four_bit_adder_subsub.v:34$1.$auto$alumacc.cc:485:replace_alu$46.X[0] $techmap$add$four_bit_adder_subsub.v:34$2.$auto$alumacc.cc:485:replace_alu$49.CO[0]
11 1
.subckt pass_through x=x y=y
.subckt pass_through x=y y=cout
.names $false $techmap$add$four_bit_adder_subsub.v:34$1.$auto$alumacc.cc:485:replace_alu$46.CO[1]
1 1
.names $false $techmap$add$four_bit_adder_subsub.v:34$1.$auto$alumacc.cc:485:replace_alu$46.X[1]
1 1
.end
.model pass_through
.inputs x
.outputs y
.names $false
.names $true
1
.names $undef
.names x y
1 1
.end
""" # noqa
clock_passing_blif = """
# Generated by Yosys 0.9+2406 (git sha1 aee43936, clang 11.0.3 -fPIC -Os)
.model top
.inputs clk a[0] a[1] a[2] a[3] b[0] b[1] b[2] b[3]
.outputs c[0] c[1] c[2] c[3]
.names $false
.names $true
1
.names $undef
.names b[0] w2[0] c[0]
11 1
.names b[1] w2[1] c[1]
11 1
.names b[2] w2[2] c[2]
11 1
.names b[3] w2[3] c[3]
11 1
.subckt passthrough a[0]=a[0] a[1]=a[1] a[2]=a[2] a[3]=a[3] c[0]=w1[0] c[1]=w1[1] c[2]=w1[2] c[3]=w1[3] pclk=clk
.subckt passthrough a[0]=w1[0] a[1]=w1[1] a[2]=w1[2] a[3]=w1[3] c[0]=w2[0] c[1]=w2[1] c[2]=w2[2] c[3]=w2[3] pclk=clk
.end
.model my_4bit_dff
.inputs mclk din[0] din[1] din[2] din[3] en
.outputs q[0] q[1] q[2] q[3]
.names $false
.names $true
1
.names $undef
.names q[0] din[0] en $0\q[3:0][0]
1-0 1
-11 1
.names q[1] din[1] en $0\q[3:0][1]
1-0 1
-11 1
.names q[2] din[2] en $0\q[3:0][2]
1-0 1
-11 1
.names q[3] din[3] en $0\q[3:0][3]
1-0 1
-11 1
.latch $0\q[3:0][0] q[0] re mclk 2
.latch $0\q[3:0][1] q[1] re mclk 2
.latch $0\q[3:0][2] q[2] re mclk 2
.latch $0\q[3:0][3] q[3] re mclk 2
.end
.model passthrough
.inputs a[0] a[1] a[2] a[3] pclk
.outputs c[0] c[1] c[2] c[3]
.names $false
.names $true
1
.names $undef
.subckt my_4bit_dff din[0]=a[0] din[1]=a[1] din[2]=a[2] din[3]=a[3] en=$true mclk=pclk q[0]=c[0] q[1]=c[1] q[2]=c[2] q[3]=c[3]
.end
""" # noqa
class TestInputFromBlif(unittest.TestCase):
def setUp(self):
pyrtl.reset_working_block()
def test_combo_blif_input_has_correct_io_interface(self):
pyrtl.input_from_blif(full_adder_blif)
x, y, cin, sumw, cout, bad = [
pyrtl.working_block().get_wirevector_by_name(s)
for s in ['x', 'y', 'cin', 'sum', 'cout', 'bad']
]
self.assertIsNotNone(x)
self.assertIsNotNone(y)
self.assertIsNotNone(cin)
self.assertIsNotNone(sumw)
self.assertIsNotNone(cout)
self.assertIsNone(bad)
self.assertEquals(len(x), 1)
self.assertEquals(len(y), 1)
self.assertEquals(len(cin), 1)
self.assertEquals(len(sumw), 1)
self.assertEquals(len(cout), 1)
io_input = pyrtl.working_block().wirevector_subset(pyrtl.Input)
self.assertIn(x, io_input)
self.assertIn(y, io_input)
self.assertIn(cin, io_input)
io_output = pyrtl.working_block().wirevector_subset(pyrtl.Output)
self.assertIn(sumw, io_output)
self.assertIn(cout, io_output)
def test_sequential_blif_input_has_correct_io_interface(self):
pyrtl.input_from_blif(state_machine_blif)
inw, reset, out = [
pyrtl.working_block().get_wirevector_by_name(s)
for s in ['in', 'reset', 'out']
]
self.assertIsNotNone(inw)
self.assertIsNotNone(reset)
self.assertIsNotNone(out)
self.assertEquals(len(inw), 1)
self.assertEquals(len(reset), 1)
self.assertEquals(len(out), 4)
io_input = pyrtl.working_block().wirevector_subset(pyrtl.Input)
self.assertIn(inw, io_input)
self.assertIn(reset, io_input)
io_output = pyrtl.working_block().wirevector_subset(pyrtl.Output)
self.assertIn(out, io_output)
def test_sequential_blif_input_has_correct_io_interface_counter(self):
pyrtl.input_from_blif(counter4bit_blif)
rst, en, count = [
pyrtl.working_block().get_wirevector_by_name(s)
for s in ['rst', 'en', 'count']
]
self.assertIsNotNone(rst)
self.assertIsNotNone(en)
self.assertIsNotNone(count)
self.assertEquals(len(rst), 1)
self.assertEquals(len(en), 1)
self.assertEquals(len(count), 4)
io_input = pyrtl.working_block().wirevector_subset(pyrtl.Input)
self.assertIn(rst, io_input)
self.assertIn(en, io_input)
io_output = pyrtl.working_block().wirevector_subset(pyrtl.Output)
self.assertIn(count, io_output)
def test_correct_interface_with_unmerged_io(self):
pyrtl.input_from_blif(simple_unmerged_io_blif, merge_io_vectors=False)
a0, a1, a2, a3, b0, b1 = [
pyrtl.working_block().get_wirevector_by_name(s)
for s in ['a[0]', 'a[1]', 'a[2]', 'a[3]', 'b[0]', 'b[1]']
]
self.assertEquals(len(a0), 1)
self.assertEquals(len(a1), 1)
self.assertEquals(len(a2), 1)
self.assertEquals(len(a3), 1)
self.assertEquals(len(b0), 1)
self.assertEquals(len(b1), 1)
self.assertEquals({a0, a1, a2, a3}, pyrtl.working_block().wirevector_subset(pyrtl.Input))
self.assertEquals({b0, b1}, pyrtl.working_block().wirevector_subset(pyrtl.Output))
def test_blif_input_simulates_correctly_with_merged_outputs(self):
        # The 'counter4bit_blif' string contains a model of a standard 4-bit synchronous-reset
# counter with enable. In particular, the model has 4 1-bit outputs named "count[0]",
# "count[1]", "count[2]", and "count[3]". The internal PyRTL representation will by
# default convert these related 1-bit wires into a single 4-bit wire called "count".
# This test simulates the design and, among other things, ensures that this output
# wire conversion occurred correctly.
pyrtl.input_from_blif(counter4bit_blif)
io_vectors = pyrtl.working_block().wirevector_subset((pyrtl.Input, pyrtl.Output))
sim_trace = pyrtl.SimulationTrace(wires_to_track=io_vectors)
sim = pyrtl.Simulation(sim_trace)
inputs = {
'rst': [1] + [0] * 20,
'en': [1] + [1] * 20,
}
expected = {
'count': [0] + list(range(0, 16)) + list(range(0, 4))
}
sim.step_multiple(inputs, expected)
correct_output = (" --- Values in base 10 ---\n"
"count 0 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 0 1 2 3\n"
"en 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n"
"rst 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n")
output = six.StringIO()
sim_trace.print_trace(output)
self.assertEqual(output.getvalue(), correct_output)
def test_blif_input_simulates_correctly_with_unmerged_outputs(self):
pyrtl.input_from_blif(counter4bit_blif, merge_io_vectors=False)
count0, count1, count2, count3 = [
pyrtl.working_block().get_wirevector_by_name(s)
for s in ['count[0]', 'count[1]', 'count[2]', 'count[3]']
]
self.assertEquals(len(count0), 1)
self.assertEquals(len(count1), 1)
self.assertEquals(len(count2), 1)
self.assertEquals(len(count3), 1)
io_vectors = pyrtl.working_block().wirevector_subset((pyrtl.Input, pyrtl.Output))
sim_trace = pyrtl.SimulationTrace(wires_to_track=io_vectors)
sim = pyrtl.Simulation(sim_trace)
inputs = {
'rst': [1] + [0] * 20,
'en': [1] + [1] * 20,
}
expected_merged = [0] + list(range(0, 16)) + list(range(0, 4))
expected = {
'count[0]': [n & 0b0001 for n in expected_merged],
'count[1]': [(n & 0b0010) >> 1 for n in expected_merged],
'count[2]': [(n & 0b0100) >> 2 for n in expected_merged],
'count[3]': [(n & 0b1000) >> 3 for n in expected_merged],
}
sim.step_multiple(inputs, expected)
correct_output = (" --- Values | |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
==================
prospect.utilities
==================
Utility functions for prospect.
"""
import os, glob
from pkg_resources import resource_string, resource_listdir
import numpy as np
import astropy.io.fits
from astropy.table import Table, vstack
import scipy.ndimage.filters
_desiutil_imported = True
try:
from desiutil.log import get_logger
except ImportError:
_desiutil_imported = False
_desispec_imported = True
try:
import desispec.spectra
import desispec.frame
from desispec.io.util import healpix_subdirectory
except ImportError:
_desispec_imported = False
_desitarget_imported = True
try:
from desitarget.targetmask import desi_mask, bgs_mask, mws_mask, scnd_mask
from desitarget.cmx.cmx_targetmask import cmx_mask
from desitarget.sv1.sv1_targetmask import desi_mask as sv1_desi_mask
from desitarget.sv1.sv1_targetmask import bgs_mask as sv1_bgs_mask
from desitarget.sv1.sv1_targetmask import mws_mask as sv1_mws_mask
from desitarget.sv1.sv1_targetmask import scnd_mask as sv1_scnd_mask
from desitarget.sv2.sv2_targetmask import desi_mask as sv2_desi_mask
from desitarget.sv2.sv2_targetmask import bgs_mask as sv2_bgs_mask
from desitarget.sv2.sv2_targetmask import mws_mask as sv2_mws_mask
from desitarget.sv2.sv2_targetmask import scnd_mask as sv2_scnd_mask
from desitarget.sv3.sv3_targetmask import desi_mask as sv3_desi_mask
from desitarget.sv3.sv3_targetmask import bgs_mask as sv3_bgs_mask
from desitarget.sv3.sv3_targetmask import mws_mask as sv3_mws_mask
from desitarget.sv3.sv3_targetmask import scnd_mask as sv3_scnd_mask
supported_desitarget_masks = {
'DESI_TARGET': desi_mask,
'BGS_TARGET': bgs_mask,
'MWS_TARGET': mws_mask,
'SECONDARY_TARGET': scnd_mask,
'CMX_TARGET': cmx_mask,
'SV1_DESI_TARGET': sv1_desi_mask,
'SV1_BGS_TARGET': sv1_bgs_mask,
'SV1_MWS_TARGET': sv1_mws_mask,
'SV1_SCND_TARGET': sv1_scnd_mask,
'SV2_DESI_TARGET': sv2_desi_mask,
'SV2_BGS_TARGET': sv2_bgs_mask,
'SV2_MWS_TARGET': sv2_mws_mask,
'SV2_SCND_TARGET': sv2_scnd_mask,
'SV3_DESI_TARGET': sv3_desi_mask,
'SV3_BGS_TARGET': sv3_bgs_mask,
'SV3_MWS_TARGET': sv3_mws_mask,
'SV3_SCND_TARGET': sv3_scnd_mask,
}
except ImportError:
_desitarget_imported = False
supported_desitarget_masks = dict()
_redrock_imported = True
try:
import redrock.results
except ImportError:
_redrock_imported = False
vi_flags = [
# Definition of VI flags
# shortlabels for "issue" flags must be a unique single-letter identifier
{"label" : "4", "type" : "quality", "description" : "Confident classification: two or more secure features."},
{"label" : "3", "type" : "quality", "description" : "Probable classification: at least one secure spectral feature + continuum or many weak spectral features."},
{"label" : "2", "type" : "quality", "description" : "Possible classification: one strong spectral feature but unsure what it is."},
{"label" : "1", "type" : "quality", "description" : "Unlikely classification: clear signal but features are unidentified."},
{"label" : "0", "type" : "quality", "description" : "Nothing there, no signal."},
{"label" : "Bad redshift fit", "shortlabel" : "R", "type" : "issue", "description" : "Mis-estimation of redshift by the pipeline fitter"},
{"label" : "Bad spectype fit", "shortlabel" : "C", "type" : "issue", "description" : "Mis-identification of spectral type from the best-fit pipeline solution; e.g., star vs QSO..."},
{"label" : "Bad spectrum", "shortlabel" : "S", "type" : "issue", "description" : "Bad spectrum; e.g. strong cosmic/skyline subtraction residuals."}
]
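# Illustrative sketch (hypothetical helper, not part of the prospect API): split the
# vi_flags definitions above into the quality scale and the single-letter issue codes.
def split_vi_flags():
    quality_labels = [flag['label'] for flag in vi_flags if flag['type'] == 'quality']
    issue_codes = {flag['shortlabel']: flag['label'] for flag in vi_flags if flag['type'] == 'issue'}
    return quality_labels, issue_codes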
vi_file_fields = [
# Contents of VI files: [
# field name (in VI file header),
# associated variable in viewer_cds.cds_metadata,
# dtype in VI file
# default value ]
# Ordered list
["TARGETID", "TARGETID", "i8", -1],
["EXPID", "EXPID", "i4", -1],
["NIGHT", "NIGHT", "i4", -1],
["TILEID", "TILEID", "i4", -1],
["Spec_version", "spec_version", "U16", "-1"],
["Redrock_version", "redrock_version", "U16", "-1"],
["Template_version", "template_version", "U16", "-1"],
["Redrock_spectype", "SPECTYPE", "U10", ""],
["Redrock_z", "Z", "U6", "-1"],
["Redrock_deltachi2", "DELTACHI2", "U10", "-1"],
["VI_scanner", "VI_scanner", "U10", " "],
["VI_quality", "VI_quality_flag", "U2", "-1"],
["VI_issue", "VI_issue_flag", "U3", ""],
["VI_z", "VI_z", "U6", ""],
["VI_spectype", "VI_spectype", "U10", ""],
["VI_comment", "VI_comment", "U100", ""]
]
vi_spectypes =[
# List of spectral types to fill in VI categories
    # in principle, these should somehow match the redrock spectypes...
"STAR",
"GALAXY",
"QSO"
]
vi_std_comments = [
# Standardized VI comments
"Broad absorption line quasar (BAL)",
"Damped Lyman-alpha system (DLA)",
"Two objects in spectrum",
"Blazar"
]
_resource_cache = {'templates': None, 'js': None}
def get_resources(filetype):
"""Find all HTML template or JavaScript files in the package.
Caches the results for quick access.
Parameters
----------
filetype : {'templates', 'js'}
The type of file resource needed.
Returns
-------
:class:`dict`
A dictionary mapping filename to the contents of the file.
Raises
------
ValueError
If `filetype` is unknown.
"""
global _resource_cache
if filetype not in _resource_cache:
raise ValueError("Unknown filetype '{0}' for get_resources()!".format(filetype))
if _resource_cache[filetype] is None:
_resource_cache[filetype] = dict()
for f in resource_listdir('prospect', filetype):
if not f.startswith("."):
_resource_cache[filetype][f] = resource_string('prospect', filetype + '/' + f).decode('utf-8')
return _resource_cache[filetype]
def match_catalog_to_spectra(zcat_in, spectra, return_index=False):
""" Creates a subcatalog, matching a set of DESI spectra
Parameters
----------
zcat_in : :class:`~astropy.table.Table`, with TARGETID keys
spectra : :class:`~desispec.spectra.Spectra`
return_index : :class:`bool`, optional
If ``True``, returns the list of indices in zcat_in which match spectra
Returns
-------
:class:`~astropy.table.Table`
A subtable of zcat_in, with rows matching input spectra's TARGETIDs
If return_index is ``True``, returns (subtable, list of indices)
Raises
------
RuntimeError
If a unique row in zcat_in is not found matching each of spectra's TARGETIDs
"""
if zcat_in is None : return None
zcat_out = Table(dtype=zcat_in.dtype)
index_list = list()
for i_spec in range(spectra.num_spectra()) :
ww, = np.where((zcat_in['TARGETID'] == spectra.fibermap['TARGETID'][i_spec]))
if len(ww)<1 :
raise RuntimeError("No entry in zcat_in for TARGETID "+str(spectra.fibermap['TARGETID'][i_spec]))
elif len(ww)>1 :
raise RuntimeError("Several entries in zcat_in for TARGETID "+str(spectra.fibermap['TARGETID'][i_spec]))
zcat_out.add_row(zcat_in[ww[0]])
index_list.append(ww[0])
if return_index:
return (zcat_out, index_list)
else:
return zcat_out
def match_rrdetails_to_spectra(redrockfile, spectra, Nfit=None):
""" Creates a Table from a detailed Redrock output fit, matching a list of DESI spectra.
Parameters
----------
redrockfile : :class:`str`, filename for the detailed Redrock output file (.h5 file)
spectra : :class:`~desispec.spectra.Spectra`
Nfit : :class:`int`, optional
Number of best-fits to store in output Table. By default, store all fits available in the detailed Redrock file
Returns
-------
:class:`~astropy.table.Table`
Table with the following columns: TARGETID, CHI2, DELTACHI2, COEFF, Z, ZERR, ZWARN, SPECTYPE, SUBTYPE.
The rows are matched to spectra's TARGETIDs
Raises
------
RuntimeError
If a set of Nfit rows in redrockfile is not found matching each of spectra's TARGETIDs
"""
dummy, rr_table = redrock.results.read_zscan(redrockfile)
rr_targets = rr_table['targetid']
if Nfit is None:
ww, = np.where( (rr_targets == rr_targets[0]) )
Nfit = len(ww)
matched_redrock_cat = Table(
dtype=[('TARGETID', '<i8'), ('CHI2', '<f8', (Nfit,)),
('DELTACHI2', '<f8', (Nfit,)), ('COEFF', '<f8', (Nfit,10,)),
('Z', '<f8', (Nfit,)), ('ZERR', '<f8', (Nfit,)),
('ZWARN', '<i8', (Nfit,)), ('SPECTYPE', '<U6', (Nfit,)), ('SUBTYPE', '<U2', (Nfit,))])
for i_spec in range(spectra.num_spectra()):
ww, = np.where((rr_targets == spectra.fibermap['TARGETID'][i_spec]))
if len(ww)<Nfit :
raise RuntimeError("Redrock table cannot match spectra with "+str(Nfit)+" best fits")
ind = np.argsort(rr_table[ww]['chi2'])[0:Nfit] # Sort fit results by chi2 (independently of spectype)
sub_table = rr_table[ww][ind]
the_entry = [ spectra.fibermap['TARGETID'][i_spec] ]
for redrock_key in ['chi2', 'deltachi2', 'coeff', 'z', 'zerr', 'zwarn', 'spectype', 'subtype']:
the_entry.append(sub_table[redrock_key])
matched_redrock_cat.add_row(the_entry)
return matched_redrock_cat
def create_zcat_from_redrock_cat(redrock_cat, fit_num=0):
""" Extract a catalog with unique redshift fits from a redrock catalog containing several fit results per TARGETID
Parameters
----------
redrock_cat : :class:`~astropy.table.Table`
Catalog with rows as defined in `match_rrdetails_to_spectra()`
fit_num : :class:`int`, optional
The (fit_num)th fit in redrock_cat is extracted (default: 0 ie. redrock's best fit)
Returns
-------
:class:`~astropy.table.Table`
Table with the following columns: TARGETID, CHI2, COEFF, Z, ZERR, ZWARN, SPECTYPE, SUBTYPE, DELTACHI2.
"""
rr_cat_num_best_fits = redrock_cat['Z'].shape[1]
if (fit_num >= rr_cat_num_best_fits):
raise ValueError("fit_num too large wrt redrock_cat")
zcat_dtype=[('TARGETID', '<i8'), ('CHI2', '<f8'), ('COEFF', '<f8', (10,)),
('Z', '<f8'), ('ZERR', '<f8'), ('ZWARN', '<i8'),
('SPECTYPE', '<U6'), ('SUBTYPE', '<U2'), ('DELTACHI2', '<f8')]
zcat_out = Table( data=np.zeros(len(redrock_cat), dtype=zcat_dtype) )
zcat_out['TARGETID'] = redrock_cat['TARGETID']
for key in ['CHI2', 'DELTACHI2', 'COEFF', 'SPECTYPE', 'SUBTYPE', 'Z', 'ZERR', 'ZWARN']:
zcat_out[key] = redrock_cat[key][:,fit_num]
return zcat_out
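# Hypothetical usage sketch (synthetic data, not part of the prospect API): build a
# tiny redrock-style catalog with two targets and three fits each, then extract the
# second-best fit per target with create_zcat_from_redrock_cat.
def _example_zcat_from_redrock_cat():
    nfit = 3
    demo_dtype = [('TARGETID', '<i8'), ('CHI2', '<f8', (nfit,)),
                  ('DELTACHI2', '<f8', (nfit,)), ('COEFF', '<f8', (nfit, 10)),
                  ('Z', '<f8', (nfit,)), ('ZERR', '<f8', (nfit,)),
                  ('ZWARN', '<i8', (nfit,)), ('SPECTYPE', '<U6', (nfit,)),
                  ('SUBTYPE', '<U2', (nfit,))]
    demo_cat = Table(data=np.zeros(2, dtype=demo_dtype))
    demo_cat['TARGETID'] = [101, 102]
    demo_cat['Z'] = [[0.5, 1.2, 2.0], [0.1, 0.3, 0.9]]
    demo_cat['SPECTYPE'] = [['GALAXY', 'QSO', 'STAR'], ['STAR', 'GALAXY', 'QSO']]
    # fit_num=1 selects the second-best fit stored for each TARGETID
    return create_zcat_from_redrock_cat(demo_cat, fit_num=1)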
def get_subset_label(subset, dirtree_type):
if dirtree_type=='cumulative':
label = 'thru'+subset
elif dirtree_type=='perexp':
label = 'exp'+subset
elif dirtree_type=='pernight':
label = subset
elif dirtree_type=='exposures':
label = subset
elif dirtree_type=='healpix':
label = subset
else:
raise ValueError("Unrecognized value for dirtree_type.")
return label
def create_subsetdb(datadir, dirtree_type=None, spectra_type='coadd', tiles=None, nights=None, expids=None,
survey_program=None, petals=None, pixels=None, with_zcat=True):
"""Create a 'mini-db' of DESI spectra files, in a given directory tree.
Supports tile-based and exposure-based directory trees for daily, andes, ... to everest.
    This routine does not open any files; it just checks that they exist.
Parameters
----------
datadir : :class:`string`
        Root directory of the spectra file tree to scan.
dirtree_type : :class:`string`
The directory tree and file names must match the types listed in the notes below.
spectra_type : :class:`string`, optional
[c/s]frames are only supported when dirtree_type='exposures'
petals : :class:`list`, optional
Filter a set of petal numbers.
tiles : :class:`list`, optional
Filter a list of tiles.
nights : :class:`list`, optional
Filter a list of nights (only if dirtree_type='pernight' or 'exposures').
expids : :class:`list`, optional
Filter a list of exposures (only if dirtree_type='perexp' or 'exposures').
survey_program : :class:`list`, optional
Filter a [survey, program], only if dirtree_type='healpix'.
pixels : :class:`list`, optional
Filter a list of Healpix pixels (only if dirtree_type='healpix').
with_zcat : :class:`bool`, optional
If True, filter spectra for which a 'redrock' (or 'zbest') fits file exists at the same location.
Returns
-------
:class:`dict`
Content of the | |
if len(self.selected) <= 0:
dlg=lib.MessageBoxOK("No data to plot. Open files first.",
"",style=wx.OK|wx.ICON_EXCLAMATION)
return
curfmodat=self.GetCurrentFMOData()
nfrg=curfmodat.nfrg
if nfrg <=1:
dlg=lib.MessageBoxOK("No plot data, since the number of fragment=1.",
"",style=wx.OK|wx.ICON_EXCLAMATION)
return
if not curfmodat.pieda:
dlg=lib.MessageBoxOK("No plot data, probably non-PIEDA job.",
"",style=wx.OK|wx.ICON_EXCLAMATION)
return
self.pltpie=False; self.pltctchg=False; self.pltmulchg=False
self.pltespot=False; self.pltden=False; self.pltorb=False
#
prop=[0,0,0,0,0,0] # flags: [pie,ctc,mul,esp,den,orb]
if self.ckbpie.IsEnabled() and self.ckbpie.GetValue(): prop[0]=1
if self.ckbctc.IsEnabled() and self.ckbctc.GetValue(): prop[1]=1
if self.ckbmul.IsEnabled() and self.ckbmul.GetValue(): prop[2]=1
#
if self.ckbesp.IsEnabled() and self.ckbesp.GetValue(): prop[3]=1
if self.ckbden.IsEnabled() and self.ckbden.GetValue(): prop[4]=1
if self.ckborb.IsEnabled() and self.ckborb.GetValue(): prop[5]=1
# draw graph
nprp=len(prop)-1
for i in range(nprp,-1,-1):
if prop[i]:
self.pltprp=i
name=self.graphnam[self.pltprp]
if self.ctrlflag.GetCtrlFlag(name):
self.graph[name].SetFocus(); continue
pos=(-1,-1); size=(660,360); oned=True; child=False
self.graph[name]= \
graph.fuGraph(self,-1,pos,size,oned,self.pltprp,child)
self.ctrlflag.SetCtrlFlag(name,True)
self.graph[name].Show()
#
self.SetGraphData(self.pltprp)
self.graph[self.graphnam[self.pltprp]].DrawGraph(True)
if self.ctrlflag.GetCtrlFlag('pycrustwin'):
self.RunMethod('fuplot.PrintFragmentName()')
def MouseLeftClick(self,pos):
if not self.ctrlflag.GetCtrlFlag('molviewwin'): return
if self.onedmode:
i=self.graph.GetXValue(pos)
if i < 0:
mess='Clicked at outside of plot region.'
self.molview.Message(mess,0,'black')
return
i=int(i); i=self.order[i]
if i >= 0 and i <= len(self.pltdat):
if self.ctrlflag.GetCtrlFlag('molviewwin'):
self.molview.SetSelectAll(False)
mess=self.MakeFragValueMess(i)
frgnam=self.frgnam[i]
self.molview.SelectFragNam(frgnam,True)
#mess="Fragment="+frgnam+', plot data=['
#for i in range(len(self.pltdat)):
#mess=mess+'['
#for j in range(1,len(self.pltdat[i])):
# mess=mess+'%7.2f' % self.pltdat[i][j]
#mess=mess+']'
self.molview.Message(mess,0,'black')
def GetCurrentFMOData(self):
# return curfmodat, the fmodat instance of selected data
if not self.IsDerivedData(self.selected):
curfmodat=self.fmodatadic[self.selected]
else:
drvdat=self.drvdatadic[self.selected]
fmodatlst,cmpsign=self.ResolveDerivedData(drvdat)
curfmodat=self.fmodatadic[fmodatlst[0]]
return curfmodat
def ListFMODataName(self):
for name in self.fmodatadic:
print self.fmodatadic[name].name
def MakePIEDAPlotData(self):
# make pieda for plot.
# if molint=True, subtract component energy from those of complex
        tokcal=627.50 # Hartree to kcal/mol, for one-body energy conversion.
nlayer=1
onebody=[]
molint=False
if self.IsDerivedData(self.selected): molint=True
curfmodat=self.GetCurrentFMOData()
pieda=curfmodat.frgpieda
onebody=curfmodat.onebody
nfrg=curfmodat.nfrg
if not molint: return pieda
#
pieda=copy.deepcopy(pieda)
onebody=copy.deepcopy(onebody)
drvdat=self.drvdatadic[self.selected]
fmodat,cmpsign=self.ResolveDerivedData(drvdat)
nlen=len(pieda[0])
nf=0
for i in range(1,len(fmodat)):
datnam=fmodat[i]
tmppieda=self.fmodatadic[datnam].frgpieda
tmpone=self.fmodatadic[datnam].onebody
tmpnfrg=self.fmodatadic[datnam].nfrg
for j in range(len(tmpone)):
onebody[j+nf][1] += cmpsign[i]*tmpone[j][1]
for j in range(len(tmppieda)):
if tmpnfrg == 1:
nf += 1; break
for k in range(len(tmppieda[j])):
i0=j+nf;
j0=k+nf
for l in range(1,len(tmppieda[j][k])):
pieda[i0][j0][l] += cmpsign[i]*tmppieda[j][k][l]
nf += tmpnfrg
for i in range(len(pieda)):
obe=tokcal*onebody[i][1]
for j in range(len(pieda[i])):
if i == j: pieda[i][j].append(obe)
else: pieda[i][j].append(0.0)
return pieda
def MakeCTChargePlotData(self):
#ctcharge=[]
molint=False
if self.IsDerivedData(self.selected): molint=True
curfmodat=self.GetCurrentFMOData()
ctcharge=curfmodat.ctcharge
if not molint: return ctcharge
ctcharge=copy.deepcopy(ctcharge)
drvdat=self.drvdatadic[self.selected]
fmodat,cmpsign=self.ResolveDerivedData(drvdat)
nf=0
for i in range(1,len(fmodat)):
datnam=fmodat[i]
tmpchg=self.fmodatadic[datnam].ctcharge
tmpnfrg=self.fmodatadic[datnam].nfrg
for j in range(len(tmpchg)):
if tmpnfrg == 1:
nf += 1; break
for k in range(len(tmpchg[j])):
i0=j+nf
j0=k+nf
val=tmpchg[j][k][1]
ctcharge[i0][j0][1] += cmpsign[i]*val
nf += tmpnfrg
return ctcharge
def MakeMullikenPlotData(self):
curfmodat=self.GetCurrentFMOData()
mulcharge=curfmodat.mulliken
molint=False
if self.IsDerivedData(self.selected): molint=True
if not molint: return mulcharge
mulcharge=copy.deepcopy(mulcharge)
drvdat=self.drvdatadic[self.selected]
fmodat,cmpsign=self.ResolveDerivedData(drvdat)
nfrg=self.fmodatadic[fmodat[0]].nfrg
nbody=len(mulcharge[0][0])
nf=0
for i in range(1,len(fmodat)):
datnam=fmodat[i]
tmpmulchg=self.fmodatadic[datnam].mulliken
tmpnfrg=self.fmodatadic[datnam].nfrg
if tmpnfrg == 1: # GMS mulliken
for k in range(len(tmpmulchg)):
i0=nf; j0=k
for l in range(1,nbody):
mulcharge[i0][j0][l] += cmpsign[i]*tmpmulchg[k][1]
else:
for j in range(len(tmpmulchg)):
for k in range(len(tmpmulchg[j])):
i0=j+nf; j0=k+nf
for l in range(1,len(tmpmulchg[j][k])):
mulcharge[i0][j0][l] += cmpsign[i]*tmpmulchg[j][k][l]
nf += tmpnfrg
return mulcharge
def OnRemoveData(self,event):
selected=self.lbdat.GetStringSelection()
if self.datadic.has_key(selected): del self.datadic[selected]
self.SetDataList()
if selected == self.tcsel.GetValue(): self.tcsel.SetValue('')
"""
for i in range(len(self.datalist)):
if self.datalist[i] == self.selected:
del self.datalist[i]; break
self.lbdat.Set(self.datalist)
self.selected=''
self.tcrmk.Clear()
self.OnPropClear(0)
"""
def OnPropClear(self,event):
if self.ckbpie.GetValue(): self.ckbpie.SetValue(False)
if self.ckbctc.GetValue(): self.ckbctc.SetValue(False)
if self.ckbmul.GetValue(): self.ckbmul.SetValue(False)
if self.ckbesp.GetValue(): self.ckbesp.SetValue(False)
if self.ckbden.GetValue(): self.ckbden.SetValue(False)
if self.ckborb.GetValue(): self.ckborb.SetValue(False)
def XXOnSelectData(self,event):
self.selected=self.lbdat.GetStringSelection()
if self.selected == '': return
#
self.WriteRemark()
self.pltpie=1
self.SetPropChoice()
self.graph={}
def WriteRemark(self):
if not self.tcrmk: return
eol='\n'
self.tcrmk.Clear()
if self.selected != "":
self.tcrmk.WriteText('data ... '+self.selected+eol)
# derived data
if self.IsDerivedData(self.selected):
txt=''
for cmpo in self.drvdatadic[self.selected]:
txt=txt+' '+cmpo
self.tcrmk.WriteText('comp ...'+txt+eol)
drvnam=self.drvdatadic[self.selected]
cmpdat,cmpsign=self.ResolveDerivedData(drvnam)
#
for cmpnam in cmpdat:
id,name=self.GetIDAndName(cmpnam)
filout=self.fmodatadic[cmpnam].outfile
filinp=self.fmodatadic[cmpnam].inpfile
filpdb=self.fmodatadic[cmpnam].pdbfile
self.tcrmk.WriteText(id+': outfil ...'+filout+eol)
self.tcrmk.WriteText(id+': inpfil ...'+filinp+eol)
self.tcrmk.WriteText(id+': pdbfil ...'+filpdb+eol)
# original fmo data
if self.IsFMOProperty(self.selected):
txt=self.fmodatadic[self.selected].outfile
self.tcrmk.WriteText('outfile ...'+txt+eol)
txt=self.fmodatadic[self.selected].inpfile
self.tcrmk.WriteText('inpfile ...'+txt+eol)
txt=self.fmodatadic[self.selected].pdbfile
self.tcrmk.WriteText('pdbfile ...'+txt+eol)
txt=str(self.fmodatadic[self.selected].nfrg)
self.tcrmk.WriteText('nfrg ...'+txt+eol)
txt=str(self.fmodatadic[self.selected].natm)
self.tcrmk.WriteText('natm ...'+txt+eol)
txt=str(self.fmodatadic[self.selected].nbas)
self.tcrmk.WriteText('nbas ...'+txt+eol)
txt=str(self.fmodatadic[self.selected].tchg)
self.tcrmk.WriteText('tchg ...'+txt+eol)
self.tcrmk.ShowPosition(0)
def ResolveDerivedData(self,drvnam):
#
fmodat=[]; cmpsign=[]
for cmpnam in drvnam:
id,name=self.GetIDAndName(cmpnam)
idsgn=1
if cmpnam[0:1] == '-': idsgn=-1
datnam=self.GetFMOPropName(self.fmodatadic,id)
#
if self.fmodatadic.has_key(datnam):
fmodat.append(datnam)
cmpsign.append(idsgn)
else:
idv,namev=self.GetIDAndName(cmpnam)
drvnamv=self.GetFMOPropName(self.drvdatadic,idv)
if drvnamv == '': continue
cmpv=self.drvdatadic[drvnamv]
#
for cmpnamv in cmpv:
idd,named=self.GetIDAndName(cmpnamv)
iddsgn=1
if cmpnamv[0:1] == '-': iddsgn=-1
datnamd=self.GetFMOPropName(self.fmodatadic,idd)
if self.fmodatadic.has_key(datnamd):
fmodat.append(datnamd)
cmpsign.append(idsgn*iddsgn)
else:
fmodat=[]; cmpsign=[]
dlg=lib.MessageBoxOK("Failed to find components. "+cmpnam,"")
return fmodat,cmpsign
def GetFMOProp(self,dataname):
fmodat=None
if self.fmodatadic.has_key(dataname): fmodat=self.fmodatadic[dataname]
return fmodat
def GetFMOPropName(self,fmodatadic,id):
dataname=''
lst=fmodatadic.keys()
for name in lst:
ns=name.find(':')
iddat=name[:ns]
if iddat == id:
dataname=name; break
return dataname
def OnOpenDerivedPanel(self,event):
#
if self.opendrvpan:
self.drvpan.Destroy()
#
self.opendrvpan=True
#[posx,posy]=self.GetPosition(); [wsize,hsize]=self.GetSize()
#self.drvpanpos=[posx+wsize-100,posy+hsize-40]
self.drvpan=subwin.DeriveDataInput_Frm(self,-1,self.drvpanpos)
self.drvpan.Show()
def AddDerivedDataDic(self,drvnam,drvcmp):
if drvnam == '': return
drvnam.strip()
dup=self.IsDuplicateName(1,drvnam)
if dup: return
find=self.CheckDeriveComp(drvcmp)
if not find: return
#
dataname=self.MakeDataName(drvnam)
self.drvdatadic[dataname]=drvcmp
#
self.SetDataListInSelLB()
self.lbdat.SetStringSelection(dataname)
self.OnSelectData(0)
def IsDerivedData(self,dataname):
ret=False
if self.drvdatadic.has_key(dataname): ret=True
return ret
def IsFMOProperty(self,dataname):
ret=False
if self.fmodatadic.has_key(dataname): ret=True
return ret
def CheckDeriveComp(self,drvcmp):
find=False
for cmpo in drvcmp:
find=self.IsItemInDataDic(cmpo,self.fmodatadic)
#
if not find:
find=self.IsItemInDataDic(cmpo,self.drvdatadic)
if not find:
dlg=lib.MessageBoxOK("No component data. "+cmpo,"")
return find
def IsItemInDataDic(self,item,datadic):
ret=False
idc,namec=self.GetIDAndName(item)
lst=datadic.keys()
for datnam in lst:
id,name=self.GetIDAndName(datnam)
if idc == id:
ret=True; break
return ret
def GetIDAndName(self,dataname):
ns=dataname.find(':')
if ns < 0:
id=dataname; name=''
else:
id=dataname[:ns]; name=dataname[ns+1:]
if id[0:1] == '+' or id[0:1] == '-':
id=id[1:]
return id,name
def MakeDataName(self,name):
self.idmax += 1
dataname=str(self.idmax)+':'+name
return dataname
def IsDuplicateName(self,dset,name):
# dset=0 for self.self.fmodatadic, =1: for self.drvdatadic
dup=False
if len(self.drvdatadic) <= 0: return
#
if dset == 1: lst=self.drvdatadic.keys()
else: lst=self.fmodatadic.keys()
#
for dataname in lst:
id,nam=self.GetIDAndName(dataname)
if nam == name:
dlg=lib.MessageBoxOK("Duplicate name="+name+". Neglected.","")
dup=True; break
return dup
def GetOpenFiles(self,curdir,files):
# get current directory and file names from an instance of OpenMultipleFile_Frm class
self.curdir=curdir
# write current directory on inifile
funame=self.inifile
funame=os.path.join(self.exedir,funame)
lib.WriteDirectoryOnFile(curdir,funame)
self.fmodatadic=self.MakeFMOPropertyDic(curdir,files)
self.datalist=self.MakeDataList()
# show names in select data panel
self.lbdat.Set(self.datalist)
if len(self.datalist) > 0:
# select the first name
self.lbdat.SetSelection(0)
self.OnSelectData(0)
def SetDataListInSelLB(self):
self.datalist=self.MakeDataList()
self.lbdat.Set(self.datalist)
def MakeDataList(self):
datalist=self.fmodatadic.keys()
if len(self.drvdatadic) > 0:
drvlist=self.drvdatadic.keys()
datalist=datalist+drvlist
datalist.sort()
#
return datalist
def MakeFMOPropertyDic(self,curdir,files):
fmodatadic={}
#
for filout in files:
err,name,ext=self.GetNameAndExt(filout)
if err: return
if ext == '.out' or ext == '.log':
dup=self.IsDuplicateName(0,name)
#
if dup: continue
dataname=self.MakeDataName(name)
# search for inp file
filinp=''; filpdb=''
for fil in files:
err,name1,ext1=self.GetNameAndExt(fil)
#
if err: continue
if ext1 == '.inp' and name1 == name:
filinp=fil
if (ext1 == '.ent' or ext1 == '.pdb') and name1 == name:
filpdb=fil
#
filout=os.path.join(curdir,filout)
filinp=os.path.join(curdir,filinp)
filpdb=os.path.join(curdir,filpdb)
fmodatadic[dataname]=FMOProperty(dataname,filout,filinp,filpdb)
return fmodatadic
def GetNameAndExt(self,filename):
err=True
#name=''; ext=''
ext=os.path.splitext(filename)[1]
name=os.path.splitext(filename)[0]
if len(ext) <= 0:
            dlg=lib.MessageBoxYesNo('Wrong file name, missing extension. '+filename+". Quit?","")
if wx.YES:
return err,name,ext
err=False
return err,name,ext
def MenuItems(self):
# Menu items
menubar=wx.MenuBar()
submenu=wx.Menu()
# Open
subsubmenu=wx.Menu()
#subsubmenu.Append(-1,"GAMESS output","GAMESS output file")
#subsubmenu.Append(-1,"Cube file","Cube file")
#submenu.AppendSubMenu(subsubmenu,'Open')
submenu.Append(-1,'Open GAMESS output','Open GAMESS output files (multiple files can be opened.)')
submenu.Append(-1,'Open CUBE file','Open cube file')
submenu.AppendSeparator()
# Save FMO info
subsubmenu=wx.Menu()
subsubmenu.Append(-1,"Fragment size","fragment size")
subsubmenu.Append(-1,"Timings","Timings")
subsubmenu.AppendSeparator()
subsubmenu.Append(-1,'FMO statics','FMO statics')
subsubmenu.Append(-1,"PIE/PIEDA","PIEDA table")
subsubmenu.Append(-1,"CT charges","CT charge table")
subsubmenu.Append(-1,"Mulliken charges","Mulliken charges")
subsubmenu.Append(-1,"MO energies","MO energies")
subsubmenu.AppendSeparator()
subsubmenu.Append(-1,"Charge coupling","Charge coupling")
subsubmenu.AppendSeparator()
subsubmenu.Append(-1,"All info","All info")
submenu.AppendSubMenu(subsubmenu,'Save FMO info')
# Save images
subsubmenu=wx.Menu()
subsubmenu.Append(-1,"Fragment size plot","fragment size plot")
subsubmenu.Append(-1,"Timings plot","Timings plot")
subsubmenu.AppendSeparator()
subsubmenu.Append(-1,"PIE/PIEDA graph","PIEDA graph")
subsubmenu.Append(-1,"CT charge graph","CT charge grapf")
subsubmenu.Append(-1,"Mulliken charge graph","Mulliken charge graph")
subsubmenu.Append(-1,"MO energie plot","MO energie plot")
subsubmenu.AppendSeparator()
subsubmenu.Append(-1,"Charge coupling plot","Charge coupling plot")
subsubmenu.AppendSeparator()
subsubmenu.Append(-1,"All graphs","All graphs")
submenu.AppendSubMenu(subsubmenu,'Save Images')
# Edit 'Copy Clipboard','Clear Clipboard'
# Quit
submenu.AppendSeparator()
submenu.Append(-1,'Quit','Close the window')
menubar.Append(submenu,'File')
"""
mfil= ("File", (
("Open","Open GAMESS-FMO input/output file",False),
("","",False),
#("*Save bitmap","Save bitmap on file",False),
#("*Print bitmap","Unfinished",False),
("Quit","Close plot panel",False)))
mplt= ("Plot", (
("PIE/PIEDA","Plot PIEDA in 1D",False),
("CT charge","CT charge",False),
("Mulliken charge", "Mulliken charge",False)))
#("Density","density",False),
#("*ES potential","Electrostatic potential",False),
#("*Molecular orbital","Molecular orbital",False),
#("","",False),
#("*Monomer scf convergence","",False),
#("*Monomer SCC convergence","",False),
#("*Dimer SCF convergence","",False),
#("*Optmization convergence","",False)))
mrep= ("Report", (
("PIE/PIEDA ","Plot PIEDA in 1D/2D",False),
("CT charge ","PIEDA ct charge",False),
("Mulliken charge ","Mulliken charge",False),
("*ES potential ","Electrostatic potential",False),
("*Molecular orbital ","Molecular orbital",False),
("","",False),
("Open setting panel","",False)))
mwin= ("Window", (
#("Mol viewer","molecule viewer",False),
#("","",False),
("PyCrust","python IDE",False),
("","",False),
("MatPlotLib","MatPlotLib",False)))
mhlp= ("Help", (
("About","licence remark",False),
("Version","Version",False)))
#menud=[mfil,mplt,mrep,mwin]
menud=[mfil,mplt,mwin,mhlp]
return menud
"""
return menubar
def OpenFiles(self,case):
if case == "Open GAMESS output":
wcard='Output file(*.out;*.log)|*.out;*.log|All(*.*)|*.*'
elif case == 'Open CUBE file':
wcard='Cube file(*.mep;*.den;*.cub)|*.mep;*.den;*.cub|All(*.*)|*.*'
files=lib.GetFileNames(None,wcard,'r',True,'')
#
self.MakeDataDic(files)
self.SetDataList()
def MakeDataDic(self,files):
| |
spec_scaling[item] * self.spectrum[item][0][:, 1]
if err_scaling[item] is None:
# Variance without error inflation
data_var = self.spectrum[item][0][:, 2] ** 2
else:
# Variance with error inflation (see Piette & Madhusudhan 2020)
data_var = (
self.spectrum[item][0][:, 2] ** 2
+ (err_scaling[item] * model_flux) ** 2
)
if self.spectrum[item][2] is not None:
# The inverted covariance matrix is available
if err_scaling[item] is None:
# Use the inverted covariance matrix directly
data_cov_inv = self.spectrum[item][2]
else:
# Ratio of the inflated and original uncertainties
sigma_ratio = np.sqrt(data_var) / self.spectrum[item][0][:, 2]
sigma_j, sigma_i = np.meshgrid(sigma_ratio, sigma_ratio)
# Calculate the inverted matrix of the inflated covariances
data_cov_inv = np.linalg.inv(
self.spectrum[item][1] * sigma_i * sigma_j
)
if disk_param:
model_tmp = self.diskspec[i].spectrum_interp([disk_param["teff"]])[0, :]
model_tmp *= (disk_param["radius"] * constants.R_JUP) ** 2 / (
distance * constants.PARSEC
) ** 2
model_flux += model_tmp
if "lognorm_ext" in dust_param:
for j, cross_item in enumerate(self.cross_sections[item]):
cross_tmp = cross_item(
dust_param["lognorm_sigma"],
10.0 ** dust_param["lognorm_radius"],
)[0]
model_flux[j] *= np.exp(-cross_tmp * n_grains)
elif "powerlaw_ext" in dust_param:
for j, cross_item in enumerate(self.cross_sections[item]):
# For loop over all wavelengths of a spectrum
cross_tmp = cross_item(
dust_param["powerlaw_exp"], 10.0 ** dust_param["powerlaw_max"]
)[0]
model_flux[j] *= np.exp(-cross_tmp * n_grains)
elif "ism_ext" in dust_param:
ism_reddening = dust_param.get("ism_red", 3.1)
ext_filt = dust_util.ism_extinction(
dust_param["ism_ext"], ism_reddening, self.spectrum[item][0][:, 0]
)
model_flux *= 10.0 ** (-0.4 * ext_filt)
if self.spectrum[item][2] is not None:
# Use the inverted covariance matrix
ln_like += (
-0.5
* weight
* np.dot(
data_flux - model_flux,
np.dot(data_cov_inv, data_flux - model_flux),
)
)
ln_like += -0.5 * weight * np.nansum(np.log(2.0 * np.pi * data_var))
else:
if item in self.fit_corr:
# Covariance model (Wang et al. 2020)
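# The matrix built below is a squared-exponential kernel in wavelength:
# off-diagonal covariances scale with corr_amp**2 and decay over the
# correlation length corr_len, while the (1 - corr_amp**2) diagonal term
# keeps the uncorrelated part of the original variances.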
wavel = self.spectrum[item][0][:, 0] # (um)
wavel_j, wavel_i = np.meshgrid(wavel, wavel)
error = np.sqrt(data_var) # (W m-2 um-1)
error_j, error_i = np.meshgrid(error, error)
cov_matrix = (
corr_amp[item] ** 2
* error_i
* error_j
* np.exp(
-((wavel_i - wavel_j) ** 2) / (2.0 * corr_len[item] ** 2)
)
+ (1.0 - corr_amp[item] ** 2)
* np.eye(wavel.shape[0])
* error_i ** 2
)
dot_tmp = np.dot(
data_flux - model_flux,
np.dot(np.linalg.inv(cov_matrix), data_flux - model_flux),
)
ln_like += -0.5 * weight * dot_tmp
ln_like += -0.5 * np.nansum(np.log(2.0 * np.pi * data_var))
else:
# Calculate the chi-square without a covariance matrix
chi_sq = -0.5 * weight * (data_flux - model_flux) ** 2 / data_var
chi_sq += -0.5 * weight * np.log(2.0 * np.pi * data_var)
ln_like += np.nansum(chi_sq)
return ln_like
@typechecked
def run_multinest(
self,
tag: str,
n_live_points: int = 1000,
output: str = "multinest/",
prior: Optional[Dict[str, Tuple[float, float]]] = None,
) -> None:
"""
Function to run the ``PyMultiNest`` wrapper of the
``MultiNest`` sampler. While ``PyMultiNest`` can be
installed with ``pip`` from the PyPI repository,
``MultiNest`` has to be built manually. See the
``PyMultiNest`` documentation for details:
http://johannesbuchner.github.io/PyMultiNest/install.html.
Note that the library path of ``MultiNest`` should be set
to the environmental variable ``LD_LIBRARY_PATH`` on a
Linux machine and ``DYLD_LIBRARY_PATH`` on a Mac.
Alternatively, the variable can be set before importing
the ``species`` package, for example:
.. code-block:: python
>>> import os
>>> os.environ['DYLD_LIBRARY_PATH'] = '/path/to/MultiNest/lib'
>>> import species
Parameters
----------
tag : str
Database tag where the samples will be stored.
n_live_points : int
Number of live points.
output : str
Path that is used for the output files from MultiNest.
prior : dict(str, tuple(float, float)), None
Dictionary with Gaussian priors for one or multiple
parameters. The prior can be set for any of the
atmosphere or calibration parameters, for example
``prior={'teff': (1200., 100.)}``. Additionally, a
prior can be set for the mass, for example
``prior={'mass': (13., 3.)}`` for an expected mass
of 13 Mjup with an uncertainty of 3 Mjup. The
parameter is not used if set to ``None``.
Returns
-------
NoneType
None
"""
print("Running nested sampling with MultiNest...")
# Get the MPI rank of the process
try:
from mpi4py import MPI
mpi_rank = MPI.COMM_WORLD.Get_rank()
except ModuleNotFoundError:
mpi_rank = 0
# Create the output folder if required
if mpi_rank == 0 and not os.path.exists(output):
os.mkdir(output)
# Add distance to dictionary with Gaussian priors
if prior is None:
prior = {}
prior['distance'] = self.distance
@typechecked
def lnprior_multinest(cube, n_dim: int, n_param: int) -> None:
"""
Function to transform the unit cube into the parameter cube. It is not clear how to
pass additional arguments to the function, therefore it is placed here and not merged
with :func:`~species.analysis.fit_model.FitModel.run_mcmc`.
Parameters
----------
cube : pymultinest.run.LP_c_double
Unit cube.
n_dim : int
Number of dimensions.
n_param : int
Number of parameters.
Returns
-------
NoneType
None
"""
for item in self.cube_index:
if item == "distance":
# Gaussian prior for the distance
cube[self.cube_index[item]] = stats.norm.ppf(
cube[self.cube_index[item]],
loc=self.distance[0],
scale=self.distance[1]
)
else:
# Uniform priors for all parameters
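# For bounds (a, b) a unit-cube value u maps to a + (b - a) * u,
# e.g. u = 0.5 lands on the midpoint of the prior range.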
cube[self.cube_index[item]] = (
self.bounds[item][0]
+ (self.bounds[item][1] - self.bounds[item][0])
* cube[self.cube_index[item]]
)
@typechecked
def lnlike_multinest(params, n_dim: int, n_param: int) -> np.float64:
"""
Function to return the log-likelihood for the sampled parameter cube.
Parameters
----------
params : pymultinest.run.LP_c_double
Cube with physical parameters.
n_dim : int
Number of dimensions. This parameter is mandatory but not used by the function.
n_param : int
Number of parameters. This parameter is mandatory but not used by the function.
Returns
-------
float
Log-likelihood.
"""
return self.lnlike_func(params, prior=prior)
pymultinest.run(
lnlike_multinest,
lnprior_multinest,
len(self.modelpar),
outputfiles_basename=output,
resume=False,
n_live_points=n_live_points,
)
# Create the Analyzer object
analyzer = pymultinest.analyse.Analyzer(
len(self.modelpar), outputfiles_basename=output
)
# Get a dictionary with the ln(Z) and its errors, the
# individual modes and their parameters quantiles of
# the parameter posteriors
sampling_stats = analyzer.get_stats()
# Nested sampling global log-evidence
ln_z = sampling_stats["nested sampling global log-evidence"]
ln_z_error = sampling_stats["nested sampling global log-evidence error"]
print(f"Nested sampling global log-evidence: {ln_z:.2f} +/- {ln_z_error:.2f}")
# Nested importance sampling global log-evidence
ln_z = sampling_stats["nested importance sampling global log-evidence"]
ln_z_error = sampling_stats["nested importance sampling global log-evidence error"]
print(
f"Nested importance sampling global log-evidence: {ln_z:.2f} +/- {ln_z_error:.2f}"
)
# Get the best-fit (highest likelihood) point
print("Sample with the highest likelihood:")
best_params = analyzer.get_best_fit()
max_lnlike = best_params["log_likelihood"]
print(f" - Log-likelihood = {max_lnlike:.2f}")
for i, item in enumerate(best_params["parameters"]):
print(f" - {self.modelpar[i]} = {item:.2f}")
# Get the posterior samples
samples = analyzer.get_equal_weighted_posterior()
spec_labels = []
for item in self.spectrum:
if f"scaling_{item}" in self.bounds:
spec_labels.append(f"scaling_{item}")
ln_prob = samples[:, -1]
samples = samples[:, :-1]
# Adding the fixed parameters to the samples
for key, value in self.fix_param.items():
self.modelpar.append(key)
app_param = np.full(samples.shape[0], value)
app_param = app_param[..., np.newaxis]
samples = np.append(samples, app_param, axis=1)
# Get the MPI rank of the process
try:
from mpi4py import MPI
mpi_rank = MPI.COMM_WORLD.Get_rank()
except ModuleNotFoundError:
mpi_rank = 0
# Add samples to the database
if mpi_rank == 0:
# Writing the samples to the database is only possible when using a single process
species_db = database.Database()
species_db.add_samples(
sampler="multinest",
samples=samples,
ln_prob=ln_prob,
ln_evidence=(ln_z, ln_z_error),
mean_accept=None,
spectrum=("model", self.model),
tag=tag,
modelpar=self.modelpar,
distance=self.distance[0],
spec_labels=spec_labels,
)
@typechecked
def run_ultranest(
self,
tag: str,
min_num_live_points=400,
output: str = "ultranest/",
prior: Optional[Dict[str, Tuple[float, float]]] = None,
) -> None:
"""
Function to run ``UltraNest`` for constructing the posterior
probability distributions on model parameters and computing
the marginal likelihood (i.e. "evidence").
Parameters
----------
tag : str
Database tag where the samples will be stored.
min_num_live_points : int
Minimum number of live points. The default of 400 is a reasonable number (see
https://johannesbuchner.github.io/UltraNest/issues.html). In principle, choosing a very
low number allows nested sampling to make very few iterations and go to the peak
quickly. However, the space will be poorly sampled, giving a large region and thus low
efficiency, and potentially not seeing interesting modes. Therefore, a value above 100
is typically useful.
output : str
Path that is used for the output files from ``UltraNest``.
prior : dict(str, tuple(float, float)), None
Dictionary with Gaussian priors for one or multiple parameters. The prior can be set
for any of the atmosphere or calibration parameters, e.g.
``prior={'teff': (1200., 100.)}``. Additionally, a prior can be set for the mass, e.g.
``prior={'mass': (13., 3.)}`` for an expected mass of 13 Mjup with an uncertainty of
3 Mjup. The parameter is not used if set to ``None``.
Bid: " +str("%.12f" % currVTHOBTCBid) +" | VTHO Ask: " +str("%.12f" % currVTHOBTCAsk))
#WAVES-BTC
if(response["market"] == "WAVES-BTC" and "bestBid" in response):
currWAVESBTCBid = float(response["bestBid"])
#print("B WAVES Bid: " +str("%.8f" % currWAVESBTCBid) +" | WAVES Ask: " +str("%.8f" % currWAVESBTCAsk))
if(response["market"] == "WAVES-BTC" and "bestAsk" in response):
currWAVESBTCAsk = float(response["bestAsk"])
#print("B WAVES Bid: " +str("%.8f" % currWAVESBTCBid) +" | WAVES Ask: " +str("%.8f" % currWAVESBTCAsk))
#WTC-BTC
if(response["market"] == "WTC-BTC" and "bestBid" in response):
currWTCBTCBid = float(response["bestBid"])
#print("B WTC Bid: " +str("%.9f" % currWTCBTCBid) +" | WTC Ask: " +str("%.9f" % currWTCBTCAsk))
if(response["market"] == "WTC-BTC" and "bestAsk" in response):
currWTCBTCAsk = float(response["bestAsk"])
#print("B WTC Bid: " +str("%.9f" % currWTCBTCBid) +" | WTC Ask: " +str("%.9f" % currWTCBTCAsk))
#XEM-BTC
if(response["market"] == "XEM-BTC" and "bestBid" in response):
currXEMBTCBid = float(response["bestBid"])
#print("B XEM Bid: " +str("%.10f" % currXEMBTCBid) +" | XEM Ask: " +str("%.10f" % currXEMBTCAsk))
if(response["market"] == "XEM-BTC" and "bestAsk" in response):
currXEMBTCAsk = float(response["bestAsk"])
#print("B XEM Bid: " +str("%.10f" % currXEMBTCBid) +" | XEM Ask: " +str("%.10f" % currXEMBTCAsk))
#XLM-BTC
if(response["market"] == "XLM-BTC" and "bestBid" in response):
currXLMBTCBid = float(response["bestBid"])
#print("B XLM Bid: " +str("%.10f" % currXLMBTCBid) +" | XLM Ask: " +str("%.10f" % currXLMBTCAsk))
if(response["market"] == "XLM-BTC" and "bestAsk" in response):
currXLMBTCAsk = float(response["bestAsk"])
#print("B XLM Bid: " +str("%.10f" % currXLMBTCBid) +" | XLM Ask: " +str("%.10f" % currXLMBTCAsk))
#XRP-BTC
if(response["market"] == "XRP-BTC" and "bestBid" in response):
currXRPBTCBid = float(response["bestBid"])
#print("B XRP Bid: " +str("%.9f" % currXRPBTCBid) +" | XRP Ask: " +str("%.9f" % currXRPBTCAsk))
if(response["market"] == "XRP-BTC" and "bestAsk" in response):
currXRPBTCAsk = float(response["bestAsk"])
#print("B XRP Bid: " +str("%.9f" % currXRPBTCBid) +" | XRP Ask: " +str("%.9f" % currXRPBTCAsk))
#XTZ-BTC
if(response["market"] == "XTZ-BTC" and "bestBid" in response):
currXTZBTCBid = float(response["bestBid"])
#print("B XTZ Bid: " +str("%.8f" % currXTZBTCBid) +" | XTZ Ask: " +str("%.8f" % currXTZBTCAsk))
if(response["market"] == "XTZ-BTC" and "bestAsk" in response):
currXTZBTCAsk = float(response["bestAsk"])
#print("B XTZ Bid: " +str("%.8f" % currXTZBTCBid) +" | XTZ Ask: " +str("%.8f" % currXTZBTCAsk))
#XVG-BTC
if(response["market"] == "XVG-BTC" and "bestBid" in response):
currXVGBTCBid = float(response["bestBid"])
#print("B XVG Bid: " +str("%.11f" % currXVGBTCBid) +" | XVG Ask: " +str("%.11f" % currXVGBTCAsk))
if(response["market"] == "XVG-BTC" and "bestAsk" in response):
currXVGBTCAsk = float(response["bestAsk"])
#print("B XVG Bid: " +str("%.11f" % currXVGBTCBid) +" | XVG Ask: " +str("%.11f" % currXVGBTCAsk))
#ZIL-BTC
if(response["market"] == "ZIL-BTC" and "bestBid" in response):
currZILBTCBid = float(response["bestBid"])
#print("B ZIL Bid: " +str("%.11f" % currZILBTCBid) +" | ZIL Ask: " +str("%.11f" % currZILBTCAsk))
if(response["market"] == "ZIL-BTC" and "bestAsk" in response):
currZILBTCAsk = float(response["bestAsk"])
#print("B ZIL Bid: " +str("%.11f" % currZILBTCBid) +" | ZIL Ask: " +str("%.11f" % currZILBTCAsk))
#ZRX-BTC
if(response["market"] == "ZRX-BTC" and "bestBid" in response):
currZRXBTCBid = float(response["bestBid"])
#print("B ZRX Bid: " +str("%.9f" % currZRXBTCBid) +" | ZRX Ask: " +str("%.9f" % currZRXBTCAsk))
if(response["market"] == "ZRX-BTC" and "bestAsk" in response):
currZRXBTCAsk = float(response["bestAsk"])
#print("B ZRX Bid: " +str("%.9f" % currZRXBTCBid) +" | ZRX Ask: " +str("%.9f" % currZRXBTCAsk))
#Calculate arbitrage profits (triangular)
min_profit = 0.10 #minimum profit in % required before an opportunity is printed. NOTE: fees are not included in min_profit, so real profit = min_profit - 3*taker_fee
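#NOTE: minimal sketch (hypothetical helpers, not part of the original script) of the
#profit formula that every arbN_profitA/arbN_profitB block below repeats per market.
#Direction A: buy BTC with EUR, buy COIN with BTC, sell COIN for EUR.
#Direction B: buy COIN with EUR, sell COIN for BTC, sell BTC for EUR.
def tri_profit_via_btc(coin_eur_bid, coin_btc_ask, btc_eur_ask):
    cost = coin_btc_ask * btc_eur_ask #EUR cost per COIN when routing through BTC
    return safe_division((coin_eur_bid - cost) * 100.0, cost)
def tri_profit_to_btc(coin_btc_bid, btc_eur_bid, coin_eur_ask):
    proceeds = coin_btc_bid * btc_eur_bid #EUR received per COIN when routing through BTC
    return safe_division((proceeds - coin_eur_ask) * 100.0, coin_eur_ask)
#With a taker fee per leg the realised profit is roughly profit - 3*taker_fee.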
#ETH
arb1_profitA = safe_division((currETHEURBid - (currETHBTCAsk*currBTCEURAsk))*100.0,currETHBTCAsk*currBTCEURAsk)
if arb1_profitA >= min_profit:
print(strftime("[%H:%M] ") +"Buy BTC " +str(currBTCEURAsk) +"€ -> Buy ETH " +str(currETHBTCAsk) +" -> Sell ETH for " +str(currETHEURBid) +"€ = " +str("%.3f" % arb1_profitA))
arb1_profitB = safe_division((currETHBTCBid*currBTCEURBid - currETHEURAsk)*100.0,currETHEURAsk)
if arb1_profitB >= min_profit:
print(strftime("[%H:%M] ") +"Buy ETH " +str(currETHEURAsk) +"€ -> Sell ETH " +str(currETHBTCBid) +" -> Sell BTC for " +str(currBTCEURBid) +"€ = " +str("%.3f" % arb1_profitB))
#XRP
arb2_profitA = safe_division((currXRPEURBid - (currXRPBTCAsk*currBTCEURAsk))*100.0,currXRPBTCAsk*currBTCEURAsk)
if arb2_profitA >= min_profit:
print(strftime("[%H:%M] ") +"Buy BTC " +str(currBTCEURAsk) +"€ -> Buy XRP " +str(currXRPBTCAsk) +" -> Sell XRP for " +str(currXRPEURBid) +"€ = " +str("%.3f" % arb2_profitA))
arb2_profitB = safe_division((currXRPBTCBid*currBTCEURBid - currXRPEURAsk)*100.0,currXRPEURAsk)
if arb2_profitB >= min_profit:
print(strftime("[%H:%M] ") +"Buy XRP " +str(currXRPEURAsk) +"€ -> Sell XRP " +str(currXRPBTCBid) +" -> Sell BTC for " +str(currBTCEURBid) +"€ = " +str("%.3f" % arb2_profitB))
#ADA
arb3_profitA = safe_division((currADAEURBid - (currADABTCAsk*currBTCEURAsk))*100.0,currADABTCAsk*currBTCEURAsk)
if arb3_profitA >= min_profit:
print(strftime("[%H:%M] ") +"Buy BTC " +str(currBTCEURAsk) +"€ -> Buy ADA " +str(currADABTCAsk) +" -> Sell ADA for " +str(currADAEURBid) +"€ = " +str("%.3f" % arb3_profitA))
arb3_profitB = safe_division((currADABTCBid*currBTCEURBid - currADAEURAsk)*100.0,currADAEURAsk)
if arb3_profitB >= min_profit:
print(strftime("[%H:%M] ") +"Buy ADA " +str(currADAEURAsk) +"€ -> Sell ADA " +str(currADABTCBid) +" -> Sell BTC for " +str(currBTCEURBid) +"€ = " +str("%.3f" % arb3_profitB))
#AE
arb4_profitA = safe_division((currAEEURBid - (currAEBTCAsk*currBTCEURAsk))*100.0,currAEBTCAsk*currBTCEURAsk)
if arb4_profitA >= min_profit:
print(strftime("[%H:%M] ") +"Buy BTC " +str(currBTCEURAsk) +"€ -> Buy AE " +str(currAEBTCAsk) +" -> Sell AE for " +str(currAEEURBid) +"€ = " +str("%.3f" % arb4_profitA))
arb4_profitB = safe_division((currAEBTCBid*currBTCEURBid - currAEEURAsk)*100.0,currAEEURAsk)
if arb4_profitB >= min_profit:
print(strftime("[%H:%M] ") +"Buy AE " +str(currAEEURAsk) +"€ -> Sell AE " +str(currAEBTCBid) +" -> Sell BTC for " +str(currBTCEURBid) +"€ = " +str("%.3f" % arb4_profitB))
#AION
arb5_profitA = safe_division((currAIONEURBid - (currAIONBTCAsk*currBTCEURAsk))*100.0,currAIONBTCAsk*currBTCEURAsk)
if arb5_profitA >= min_profit:
print(strftime("[%H:%M] ") +"Buy BTC " +str(currBTCEURAsk) +"€ -> Buy AION " +str(currAIONBTCAsk) +" -> Sell AION for " +str(currAIONEURBid) +"€ = " +str("%.3f" % arb5_profitA))
arb5_profitB = safe_division((currAIONBTCBid*currBTCEURBid - currAIONEURAsk)*100.0,currAIONEURAsk)
if arb5_profitB >= min_profit:
print(strftime("[%H:%M] ") +"Buy AION " +str(currAIONEURAsk) +"€ -> Sell AION " +str(currAIONBTCBid) +" -> Sell BTC for " +str(currBTCEURBid) +"€ = " +str("%.3f" % arb5_profitB))
#ANT
arb6_profitA = safe_division((currANTEURBid - (currANTBTCAsk*currBTCEURAsk))*100.0,currANTBTCAsk*currBTCEURAsk)
if arb6_profitA >= min_profit:
print(strftime("[%H:%M] ") +"Buy BTC " +str(currBTCEURAsk) +"€ -> Buy ANT " +str(currANTBTCAsk) +" -> Sell ANT for " +str(currANTEURBid) +"€ = " +str("%.3f" % arb6_profitA))
arb6_profitB = safe_division((currANTBTCBid*currBTCEURBid - currANTEURAsk)*100.0,currANTEURAsk)
if arb6_profitB >= min_profit:
print(strftime("[%H:%M] ") +"Buy ANT " +str(currANTEURAsk) +"€ -> Sell ANT " +str(currANTBTCBid) +" -> Sell BTC for " +str(currBTCEURBid) +"€ = " +str("%.3f" % arb6_profitB))
#ARK
arb7_profitA = safe_division((currARKEURBid - (currARKBTCAsk*currBTCEURAsk))*100.0,currARKBTCAsk*currBTCEURAsk)
if arb7_profitA >= min_profit:
print(strftime("[%H:%M] ") +"Buy BTC " +str(currBTCEURAsk) +"€ -> Buy ARK " +str(currARKBTCAsk) +" -> Sell ARK for " +str(currARKEURBid) +"€ = " +str("%.3f" % arb7_profitA))
arb7_profitB = safe_division((currARKBTCBid*currBTCEURBid - currARKEURAsk)*100.0,currARKEURAsk)
if arb7_profitB >= min_profit:
print(strftime("[%H:%M] ") +"Buy ARK " +str(currARKEURAsk) +"€ -> Sell ARK " +str(currARKBTCBid) +" -> Sell BTC for " +str(currBTCEURBid) +"€ = " +str("%.3f" % arb7_profitB))
#BAT
arb8_profitA = safe_division((currBATEURBid - (currBATBTCAsk*currBTCEURAsk))*100.0,currBATBTCAsk*currBTCEURAsk)
if arb8_profitA >= min_profit:
print(strftime("[%H:%M] ") +"Buy BTC " +str(currBTCEURAsk) +"€ -> Buy BAT " +str(currBATBTCAsk) +" -> Sell BAT for " +str(currBATEURBid) +"€ = " +str("%.3f" % arb8_profitA))
arb8_profitB = safe_division((currBATBTCBid*currBTCEURBid - currBATEURAsk)*100.0,currBATEURAsk)
if arb8_profitB >= min_profit:
print(strftime("[%H:%M] ") +"Buy BAT " +str(currBATEURAsk) +"€ -> Sell BAT " +str(currBATBTCBid) +" -> Sell BTC for " +str(currBTCEURBid) +"€ = " +str("%.3f" % arb8_profitB))
#BCH
arb9_profitA = safe_division((currBCHEURBid - (currBCHBTCAsk*currBTCEURAsk))*100.0,currBCHBTCAsk*currBTCEURAsk)
if arb9_profitA >= min_profit:
print(strftime("[%H:%M] ") +"Buy BTC " +str(currBTCEURAsk) +"€ -> Buy BCH " +str(currBCHBTCAsk) +" -> Sell BCH for " +str(currBCHEURBid) +"€ = " +str("%.3f" % arb9_profitA))
arb9_profitB = safe_division((currBCHBTCBid*currBTCEURBid - currBCHEURAsk)*100.0,currBCHEURAsk)
if arb9_profitB >= min_profit:
print(strftime("[%H:%M] ") +"Buy BCH " +str(currBCHEURAsk) +"€ -> Sell BCH " +str(currBCHBTCBid) +" -> Sell BTC for " +str(currBTCEURBid) +"€ = " +str("%.3f" % arb9_profitB))
#BSV
arb10_profitA = safe_division((currBSVEURBid - (currBSVBTCAsk*currBTCEURAsk))*100.0,currBSVBTCAsk*currBTCEURAsk)
if arb10_profitA >= min_profit:
print(strftime("[%H:%M] ") +"Buy BTC " +str(currBTCEURAsk) +"€ -> Buy BSV " +str(currBSVBTCAsk) +" -> Sell BSV for " +str(currBSVEURBid) +"€ = " +str("%.3f" % arb10_profitA))
arb10_profitB = safe_division((currBSVBTCBid*currBTCEURBid - currBSVEURAsk)*100.0,currBSVEURAsk)
if arb10_profitB >= min_profit:
print(strftime("[%H:%M] ") +"Buy BSV " +str(currBSVEURAsk) +"€ -> Sell BSV " +str(currBSVBTCBid) +" -> Sell BTC for " +str(currBTCEURBid) +"€ = " +str("%.3f" % arb10_profitB))
#CMT
arb11_profitA = safe_division((currCMTEURBid - (currCMTBTCAsk*currBTCEURAsk))*100.0,currCMTBTCAsk*currBTCEURAsk)
if arb11_profitA >= min_profit:
print(strftime("[%H:%M] ") +"Buy BTC " +str(currBTCEURAsk) +"€ -> Buy CMT " +str(currCMTBTCAsk) +" -> Sell CMT for " +str(currCMTEURBid) +"€ = " +str("%.3f" % arb11_profitA))
arb11_profitB = safe_division((currCMTBTCBid*currBTCEURBid - currCMTEURAsk)*100.0,currCMTEURAsk)
if arb11_profitB >= min_profit:
print(strftime("[%H:%M] ") +"Buy CMT " +str(currCMTEURAsk) +"€ -> Sell CMT " +str(currCMTBTCBid) +" -> Sell BTC for " +str(currBTCEURBid) +"€ = " +str("%.3f" % arb11_profitB))
#DCR
arb12_profitA = safe_division((currDCREURBid - (currDCRBTCAsk*currBTCEURAsk))*100.0,currDCRBTCAsk*currBTCEURAsk)
if arb12_profitA >= min_profit:
print(strftime("[%H:%M] ") +"Buy BTC " +str(currBTCEURAsk) +"€ -> Buy DCR " | |
str) -> bool:
return Counter(s) == Counter(t)
"""
# - Group Anagrams -
# https://leetcode.com/problems/group-anagrams/
Given an array of strings strs, group the anagrams together. You can return
the answer in any order.
An Anagram is a word or phrase formed by rearranging the letters of a different
word or phrase, typically using all the original letters exactly once.
Example 1:
Input: strs = ["eat","tea","tan","ate","nat","bat"]
Output: [["bat"],["nat","tan"],["ate","eat","tea"]]
Example 2:
Input: strs = [""]
Output: [[""]]
Example 3:
Input: strs = ["a"]
Output: [["a"]]
"""
def groupAnagrams(self, strs: List[str]) -> List[List[str]]:
d: Dict[str, List[str]] = {}
for word in strs:
d.setdefault("".join(sorted(word)), []).append(word)
return list(d.values())
"""
# - Valid Parentheses -
# https://leetcode.com/problems/valid-parentheses/
Given a string s containing just the characters '(', ')', '{', '}',
'[' and ']', determine if the input string is valid.
An input string is valid if:
Open brackets must be closed by the same type of brackets.
Open brackets must be closed in the correct order.
Example 1:
Input: s = "()"
Output: true
Example 2:
Input: s = "()[]{}"
Output: true
Example 3:
Input: s = "(]"
Output: false
"""
def isValid(self, s: str) -> bool:
match, stack = {"(": ")", "[": "]", "{": "}"}, []
for x in s:
if x in match:
stack.append(x)
elif not stack or match[stack.pop()] != x:
return False
return not stack
"""
# - Valid Palindrome -
# https://leetcode.com/problems/valid-palindrome/
A phrase is a palindrome if, after converting all uppercase letters into
lowercase letters and removing all non-alphanumeric characters, it reads the
same forward and backward. Alphanumeric characters include letters and
numbers.
Given a string s, return true if it is a palindrome, or false otherwise.
Example 1:
Input: s = "A man, a plan, a canal: Panama"
Output: true
Explanation: "amanaplanacanalpanama" is a palindrome.
Example 2:
Input: s = "race a car"
Output: false
Explanation: "raceacar" is not a palindrome.
Example 3:
Input: s = " "
Output: true
Explanation: s is an empty string "" after removing non-alphanumeric
characters.
Since an empty string reads the same forward and backward, it is a palindrome.
"""
def isPalindrome(self, s: str) -> bool:
s = "".join(c for c in s.lower() if c.isalnum())
return s == s[::-1]
def isPalindrome_(self, s: str) -> bool:
t = re.sub(r"[\W_]+", "", s).upper()
return t == t[::-1]
"""
# - Longest Palindromic Substring -
# https://leetcode.com/problems/longest-palindromic-substring/
Given a string s, return the longest palindromic substring in s.
Example 1:
Input: s = "babad"
Output: "bab"
Explanation: "aba" is also a valid answer.
Example 2:
Input: s = "cbbd"
Output: "bb"
"""
def longestPalindrome(self, s: str) -> str:
def helper(i: int, j: int) -> str:
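# Expand outward from the center while the ends match; calling with
# (k, k) covers odd-length centers and (k, k + 1) even-length ones.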
while i >= 0 and j < len(s) and s[i] == s[j]:
i, j = i - 1, j + 1
return s[i + 1 : j]
ans = ""
for k in range(len(s)):
ans = max(helper(k, k), helper(k, k + 1), ans, key=len)
return ans
"""
# - Palindromic Substrings -
# https://leetcode.com/problems/palindromic-substrings/
Given a string s, return the number of palindromic substrings in it.
A string is a palindrome when it reads the same backward as forward.
A substring is a contiguous sequence of characters within the string.
Example 1:
Input: s = "abc"
Output: 3
Explanation: Three palindromic strings: "a", "b", "c".
Example 2:
Input: s = "aaa"
Output: 6
Explanation: Six palindromic strings: "a", "a", "a", "aa", "aa", "aaa".
"""
def countSubstrings(self, s: str) -> int:
n: Final = len(s)
dp = [[0] * n for _ in range(n)]
ans = 0
for i in reversed(range(n)):
for j in range(i, n):
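# s[i..j] is a palindrome when its ends match and either its length is
# at most 2 or the inner substring s[i+1..j-1] is itself a palindrome.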
dp[i][j] = s[i] == s[j] and ((j - i + 1) < 3 or dp[i + 1][j - 1])
ans += dp[i][j]
return ans
"""
# - Encode and Decode Strings (Leetcode Premium) -
# https://leetcode.com/problems/encode-and-decode-strings/
Design an algorithm to encode a list of strings to a string. The encoded string is
then sent over the network and is decoded back to the original list of strings.
Machine 1 (sender) has the function:
string encode(vector<string> strs) {
// ... your code
return encoded_string;
}
Machine 2 (receiver) has the function:
vector<string> decode(string s) {
//... your code
return strs;
}
So Machine 1 does:
string encoded_string = encode(strs);
and Machine 2 does:
vector<string> strs2 = decode(encoded_string);
strs2 in Machine 2 should be the same as strs in Machine 1.
Implement the encode and decode methods.
You are not allowed to solve the problem using any serialize methods (such as eval).
Example 1:
Input: dummy_input = ["Hello","World"]
Output: ["Hello","World"]
Explanation:
Machine 1:
Codec encoder = new Codec();
String msg = encoder.encode(strs);
Machine 1 ---msg---> Machine 2
Machine 2:
Codec decoder = new Codec();
String[] strs = decoder.decode(msg);
Example 2:
Input: dummy_input = [""]
Output: [""]
NOTE strs[i] contains any possible characters out of 256 valid ASCII characters.
"""
class Codec:
# simple solution using non-ASCII delimiter characters
def encode(self, strs: List[str]) -> str:
# O(n) time, O(1) space
if len(strs) == 0:
return chr(258)
return chr(257).join(str_ for str_ in strs)
def decode(self, s: str) -> List[str]:
# O(n) time, O(n) space
if s == chr(258):
return []
return s.split(chr(257))
class Codec_:
# chunked transfer encoding
# e.g. "4|abcd3|efg"
def encode(self, strs: List[str]) -> str:
# O(n) time, O(1) space
return "".join(f"{len(ss)}|{ss}" for ss in strs)
def decode(self, s: str) -> List[str]:
# O(n) time, O(n) space
ans = []
i = 0
while i < len(s):
ii = s.find("|", i)
i = ii + int(s[i:ii]) + 1
ans.append(s[ii + 1 : i])
return ans
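# Hypothetical round trip (not part of the original solutions) showing the
# length-prefixed format used above:
# Codec_().encode(["abcd", "efg"]) -> "4|abcd3|efg"
# Codec_().decode("4|abcd3|efg") -> ["abcd", "efg"]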
"""
# - Edit Distance -
# https://leetcode.com/problems/edit-distance/
Given two strings word1 and word2, return the minimum number of operations
required to convert word1 to word2.
You have the following three operations permitted on a word:
Insert a character
Delete a character
Replace a character
Example 1:
Input: word1 = "horse", word2 = "ros"
Output: 3
Explanation:
horse -> rorse (replace 'h' with 'r')
rorse -> rose (remove 'r')
rose -> ros (remove 'e')
Example 2:
Input: word1 = "intention", word2 = "execution"
Output: 5
Explanation:
intention -> inention (remove 't')
inention -> enention (replace 'i' with 'e')
enention -> exention (replace 'n' with 'x')
exention -> exection (replace 'n' with 'c')
exection -> execution (insert 'u')
"""
# NOTE levenshtein distance, not in original 75 list
def minDistance(self, word1: str, word2: str) -> int:
m, n = len(word1), len(word2)
@lru_cache(maxsize=None)
def fn(i, j):
"""Return edit distance between word1[i:] and word2[j:]"""
if i == m or j == n:
return m + n - i - j
if word1[i] == word2[j]:
return fn(i + 1, j + 1)
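# Otherwise one edit is spent: skip word1[i] (delete), skip word2[j]
# (insert), or skip both (replace), and take the cheapest option.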
return 1 + min(fn(i + 1, j), fn(i, j + 1), fn(i + 1, j + 1))
return fn(0, 0)
class TreeNode:
def __init__(
self,
val: int = 0,
left: Optional[TreeNode] = None,
right: Optional[TreeNode] = None,
):
self.val = val
self.left = left
self.right = right
TrieNodeType = Dict[str, "TrieNodeType"] # type:ignore
TrieNode: Callable[[], TrieNodeType] = lambda: defaultdict(TrieNode)
class TrieNode_:
def __init__(self):
self.word = False
self.children = {}
class Tree:
r"""
# - Maximum Depth of Binary Tree -
# https://leetcode.com/problems/maximum-depth-of-binary-tree/
Given the root of a binary tree, return its maximum depth.
A binary tree's maximum depth is the number of nodes along the longest
path from the root node down to the farthest leaf node.
Example 1:
(3)
/ \
(9) (20)
/ \
(15) (7)
Input: root = [3,9,20,null,null,15,7]
Output: 3
Example 2:
Input: root = [1,null,2]
Output: 2
"""
def maxDepth(self, root: Optional[TreeNode]) -> int:
def fn(node):
if not node:
return 0
return 1 + max(fn(node.left), fn(node.right))
return fn(root)
r"""
# - Same Tree -
# https://leetcode.com/problems/same-tree/
Given the roots of two binary trees p and q, write a function to check if
they are the same or not.
Two binary | |
import tempfile
import typing
import asyncio
from urllib.parse import SplitResult, parse_qsl, urlencode, urlsplit
from rpcpy.types import Environ, Scope
from rpcpy.utils import cached_property
__all__ = [
"FormData",
"MutableHeaders",
"Headers",
"UploadFile",
"URL",
]
class URL:
def __init__(
self,
url: str = "",
*,
scope: Scope = None,
environ: Environ = None,
**components: typing.Any,
) -> None:
if scope is not None or environ is not None:
assert not (url or components)
if scope is not None:
scheme = scope.get("scheme", "http")
server = scope.get("server", None)
path = scope.get("root_path", "") + scope["path"]
query_string = scope.get("query_string", b"")
host_header = None
for key, value in scope["headers"]:
if key == b"host":
host_header = value.decode("latin-1")
break
elif environ is not None:
scheme = environ["wsgi.url_scheme"]
server = (environ["SERVER_NAME"], environ["SERVER_PORT"])
path = environ.get("SCRIPT_NAME", "") + environ.get("PATH_INFO", "")
query_string = environ.get("QUERY_STRING", "").encode("ascii")
host_header = environ.get("HTTP_HOST", None)
if host_header is not None:
url = f"{scheme}://{host_header}{path}"
elif server is None:
url = path
else:
host, port = server
default_port = {"http": 80, "https": 443, "ws": 80, "wss": 443}[scheme]
if port == default_port:
url = f"{scheme}://{host}{path}"
else:
url = f"{scheme}://{host}:{port}{path}"
if query_string:
url += "?" + query_string.decode()
elif components:
assert not url, 'Cannot set both "url" and "**components".'
url = URL("").replace(**components).components.geturl()
self._url = url
@cached_property
def components(self) -> SplitResult:
return urlsplit(self._url)
@property
def scheme(self) -> str:
return self.components.scheme
@property
def netloc(self) -> str:
return self.components.netloc
@property
def path(self) -> str:
return self.components.path
@property
def query(self) -> str:
return self.components.query
@property
def fragment(self) -> str:
return self.components.fragment
@property
def username(self) -> typing.Union[None, str]:
return self.components.username
@property
def password(self) -> typing.Union[None, str]:
return self.components.password
@property
def hostname(self) -> typing.Union[None, str]:
return self.components.hostname
@property
def port(self) -> typing.Optional[int]:
return self.components.port
@property
def is_secure(self) -> bool:
return self.scheme in ("https", "wss")
def replace(self, **kwargs: typing.Any) -> "URL":
if (
"username" in kwargs
or "password" in kwargs
or "hostname" in kwargs
or "port" in kwargs
):
hostname = kwargs.pop("hostname", self.hostname)
port = kwargs.pop("port", self.port)
username = kwargs.pop("username", self.username)
password = kwargs.pop("password", self.password)
netloc = hostname
if port is not None:
netloc += f":{port}"
if username is not None:
userpass = username
if password is not None:
userpass += f":{password}"
netloc = f"{userpass}@{netloc}"
kwargs["netloc"] = netloc
components = self.components._replace(**kwargs)
return self.__class__(components.geturl())
def include_query_params(self, **kwargs: typing.Any) -> "URL":
params = MultiDict(parse_qsl(self.query, keep_blank_values=True))
params.update({str(key): str(value) for key, value in kwargs.items()})
query = urlencode(params.multi_items())
return self.replace(query=query)
def replace_query_params(self, **kwargs: typing.Any) -> "URL":
query = urlencode([(str(key), str(value)) for key, value in kwargs.items()])
return self.replace(query=query)
def remove_query_params(
self, keys: typing.Union[str, typing.Sequence[str]]
) -> "URL":
if isinstance(keys, str):
keys = [keys]
params = MultiDict(parse_qsl(self.query, keep_blank_values=True))
for key in keys:
params.pop(key, None)
query = urlencode(params.multi_items())
return self.replace(query=query)
def __eq__(self, other: typing.Any) -> bool:
return str(self) == str(other)
def __str__(self) -> str:
return self._url
def __repr__(self) -> str:
url = str(self)
if self.password:
url = str(self.replace(password="********"))
return f"{self.__class__.__name__}({repr(url)})"
class ImmutableMultiDict(typing.Mapping):
def __init__(
self,
*args: typing.Union[
"ImmutableMultiDict",
typing.Mapping,
typing.List[typing.Tuple[typing.Any, typing.Any]],
],
**kwargs: typing.Any,
) -> None:
assert len(args) < 2, "Too many arguments."
if args:
value = args[0]
else:
value = []
if kwargs:
value = (
ImmutableMultiDict(value).multi_items()
+ ImmutableMultiDict(kwargs).multi_items()
)
if not value:
_items = [] # type: typing.List[typing.Tuple[typing.Any, typing.Any]]
elif hasattr(value, "multi_items"):
value = typing.cast(ImmutableMultiDict, value)
_items = list(value.multi_items())
elif hasattr(value, "items"):
value = typing.cast(typing.Mapping, value)
_items = list(value.items())
else:
value = typing.cast(
typing.List[typing.Tuple[typing.Any, typing.Any]], value
)
_items = list(value)
self._dict = {k: v for k, v in _items}
self._list = _items
def getlist(self, key: typing.Any) -> typing.List[str]:
return [item_value for item_key, item_value in self._list if item_key == key]
def keys(self) -> typing.KeysView:
return self._dict.keys()
def values(self) -> typing.ValuesView:
return self._dict.values()
def items(self) -> typing.ItemsView:
return self._dict.items()
def multi_items(self) -> typing.List[typing.Tuple[str, str]]:
return list(self._list)
def get(self, key: typing.Any, default: typing.Any = None) -> typing.Any:
if key in self._dict:
return self._dict[key]
return default
def __getitem__(self, key: typing.Any) -> str:
return self._dict[key]
def __contains__(self, key: typing.Any) -> bool:
return key in self._dict
def __iter__(self) -> typing.Iterator[typing.Any]:
return iter(self.keys())
def __len__(self) -> int:
return len(self._dict)
def __eq__(self, other: typing.Any) -> bool:
if not isinstance(other, self.__class__):
return False
return sorted(self._list) == sorted(other._list)
def __repr__(self) -> str:
class_name = self.__class__.__name__
items = self.multi_items()
return f"{class_name}({items!r})"
class MultiDict(ImmutableMultiDict):
def __setitem__(self, key: typing.Any, value: typing.Any) -> None:
self.setlist(key, [value])
def __delitem__(self, key: typing.Any) -> None:
self._list = [(k, v) for k, v in self._list if k != key]
del self._dict[key]
def pop(self, key: typing.Any, default: typing.Any = None) -> typing.Any:
self._list = [(k, v) for k, v in self._list if k != key]
return self._dict.pop(key, default)
def popitem(self) -> typing.Tuple:
key, value = self._dict.popitem()
self._list = [(k, v) for k, v in self._list if k != key]
return key, value
def poplist(self, key: typing.Any) -> typing.List:
values = [v for k, v in self._list if k == key]
self.pop(key)
return values
def clear(self) -> None:
self._dict.clear()
self._list.clear()
def setdefault(self, key: typing.Any, default: typing.Any = None) -> typing.Any:
if key not in self:
self._dict[key] = default
self._list.append((key, default))
return self[key]
def setlist(self, key: typing.Any, values: typing.List) -> None:
if not values:
self.pop(key, None)
else:
existing_items = [(k, v) for (k, v) in self._list if k != key]
self._list = existing_items + [(key, value) for value in values]
self._dict[key] = values[-1]
def append(self, key: typing.Any, value: typing.Any) -> None:
self._list.append((key, value))
self._dict[key] = value
def update(
self,
*args: typing.Union[
"MultiDict",
typing.Mapping,
typing.List[typing.Tuple[typing.Any, typing.Any]],
],
**kwargs: typing.Any,
) -> None:
value = MultiDict(*args, **kwargs)
existing_items = [(k, v) for (k, v) in self._list if k not in value.keys()]
self._list = existing_items + value.multi_items()
self._dict.update(value)
class UploadFile:
"""
An uploaded file included as part of the request data.
"""
spool_max_size = 1024 * 1024
def __init__(self, filename: str, content_type: str = "") -> None:
self.filename = filename
self.content_type = content_type
self.file = tempfile.SpooledTemporaryFile(max_size=self.spool_max_size)
@property
def in_memory(self) -> bool:
rolled_to_disk = getattr(self.file, "_rolled", True)
return not rolled_to_disk
def write(self, data: bytes) -> None:
self.file.write(data)
async def awrite(self, data: bytes) -> None:
if self.in_memory:
self.write(data) # type: ignore
else:
await asyncio.get_event_loop().run_in_executor(None, self.write, data)
def read(self, size: int = -1) -> bytes:
return self.file.read(size)
async def aread(self, size: int = -1) -> bytes:
if self.in_memory:
return self.read(size)
return await asyncio.get_event_loop().run_in_executor(None, self.read, size)
def seek(self, offset: int) -> None:
self.file.seek(offset)
async def aseek(self, offset: int) -> None:
if self.in_memory:
self.seek(offset)
else:
await asyncio.get_event_loop().run_in_executor(None, self.seek, offset)
def close(self) -> None:
self.file.close()
async def aclose(self) -> None:
if self.in_memory:
self.close()
else:
await asyncio.get_event_loop().run_in_executor(None, self.close)
class FormData(ImmutableMultiDict):
"""
An immutable multidict, containing both file uploads and text input.
"""
def __init__(
self,
*args: typing.Union[
"FormData",
typing.Mapping[str, typing.Union[str, UploadFile]],
typing.List[typing.Tuple[str, typing.Union[str, UploadFile]]],
],
**kwargs: typing.Union[str, UploadFile],
) -> None:
super().__init__(*args, **kwargs)
def close(self) -> None:
for key, value in self.multi_items():
if isinstance(value, UploadFile):
value.close()
async def aclose(self) -> None:
for key, value in self.multi_items():
if isinstance(value, UploadFile):
await value.aclose()
class Headers(typing.Mapping[str, str]):
"""
An immutable, case-insensitive multidict.
"""
def __init__(
self,
headers: typing.Mapping[str, str] = None,
raw: typing.List[typing.Tuple[bytes, bytes]] = None,
scope: Scope = None,
environ: Environ = None,
) -> None:
self._list = [] # type: typing.List[typing.Tuple[bytes, bytes]]
if headers is not None:
assert raw is None, 'Cannot set both "headers" and "raw".'
assert scope is None, 'Cannot set both "headers" and "scope".'
self._list = [
(key.lower().encode("latin-1"), value.encode("latin-1"))
for key, value in headers.items()
]
elif raw is not None:
assert scope is None, 'Cannot set both "raw" and "scope".'
self._list = raw
elif scope is not None:
self._list = scope["headers"]
elif environ is not None:
self._list = [
(
key.lower().replace("_", "-").encode("latin-1"),
value.encode("latin-1"),
)
for key, value in environ.items()
if key.startswith("HTTP_") or key in ("CONTENT_TYPE", "CONTENT_LENGTH")
]
@property
def raw(self) -> typing.List[typing.Tuple[bytes, bytes]]:
return list(self._list)
def keys(self) -> typing.List[str]: # type: ignore
return [key.decode("latin-1") for key, value in self._list]
def values(self) -> typing.List[str]: # type: ignore
return [value.decode("latin-1") for key, value in self._list]
def items(self) -> typing.List[typing.Tuple[str, str]]: # type: ignore
return [
(key.decode("latin-1"), value.decode("latin-1"))
for key, value in self._list
]
def get(self, key: str, default: typing.Any = None) | |
# -*- coding: utf-8 -*-
__author__ = 'ffuentes'
import graphene
import norduniclient as nc
from apps.noclook.forms import *
from apps.noclook.models import SwitchType as SwitchTypeModel
import apps.noclook.vakt.utils as sriutils
from apps.noclook.schema.types import *
from apps.noclook.views.edit import _nh_safe_get, _handle_location
from .common import get_unique_relation_processor
from graphene import Field
from binascii import Error as BinasciiError
logger = logging.getLogger(__name__)
## generic relation_processors
location_relation_processor = get_unique_relation_processor(
'Located_in',
helpers.set_location,
False,
)
provider_relation_processor = get_unique_relation_processor(
'Provides',
helpers.set_provider
)
responsible_relation_processor = get_unique_relation_processor(
'Takes_responsibility',
helpers.set_takes_responsibility
)
supports_relation_processor = get_unique_relation_processor(
'Supports',
helpers.set_supports
)
owner_relation_processor = get_unique_relation_processor(
'Owns',
helpers.set_owner
)
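# Each processor pairs a relationship type with the helper that creates it;
# the mutation factories below invoke them as processor(request, form, node, field_name).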
## Organizations
class NICustomersMutationFactory(NIMutationFactory):
class NIMetaClass:
create_form = NewCustomerForm
update_form = EditCustomerForm
request_path = '/'
graphql_type = Customer
unique_node = True
class Meta:
abstract = False
class NIEndUsersMutationFactory(NIMutationFactory):
class NIMetaClass:
create_form = NewEndUserForm
update_form = EditEndUserForm
request_path = '/'
graphql_type = EndUser
unique_node = True
class Meta:
abstract = False
class NIProvidersMutationFactory(NIMutationFactory):
class NIMetaClass:
create_form = NewProviderForm
update_form = EditProviderForm
request_path = '/'
graphql_type = Provider
unique_node = True
class Meta:
abstract = False
class NISiteOwnersMutationFactory(NIMutationFactory):
class NIMetaClass:
create_form = NewSiteOwnerForm
update_form = EditSiteOwnerForm
request_path = '/'
graphql_type = SiteOwner
unique_node = True
class Meta:
abstract = False
## Cables and Equipment
class NIPortMutationFactory(NIMutationFactory):
class NIMetaClass:
create_form = NewPortForm
update_form = EditPortForm
request_path = '/'
graphql_type = Port
create_exclude = ('relationship_parent', )
update_exclude = ('relationship_parent', )
class Meta:
abstract = False
class NICableMutationFactory(NIMutationFactory):
class NIMetaClass:
form = EditCableForm
request_path = '/'
graphql_type = Cable
relations_processors = {
'relationship_provider': provider_relation_processor,
}
create_exclude = ('relationship_end_a', 'relationship_end_b')
update_exclude = ('relationship_end_a', 'relationship_end_b')
class Meta:
abstract = False
def process_switch_type(request, form, nodehandler, relation_name):
if relation_name in form.cleaned_data and form.cleaned_data[relation_name]:
switch_type = SwitchTypeModel.objects.get(pk=form.cleaned_data[relation_name])
helpers.dict_update_node(
request.user, nodehandler.handle_id, {"model":switch_type.name})
if switch_type.ports:
for port in switch_type.ports.split(","):
helpers.create_port(nodehandler, port.strip(), request.user)
class NISwitchMutationFactory(NIMutationFactory):
class NIMetaClass:
create_form = NewSwitchHostForm
update_form = EditSwitchForm
graphql_type = Switch
unique_node = True
relations_processors = {
'relationship_provider': provider_relation_processor,
'switch_type': process_switch_type,
'responsible_group': responsible_relation_processor,
'support_group': supports_relation_processor,
'relationship_location': location_relation_processor,
'relationship_owner': owner_relation_processor,
}
class Meta:
abstract = False
update_exclude = ('relationship_ports', 'relationship_depends_on')
class NIUnitMutationFactory(NIMutationFactory):
class NIMetaClass:
form = UnitForm
request_path = '/'
graphql_type = Unit
class Meta:
abstract = False
property_update = ('name', 'description', 'wlan')
class NIRouterMutationFactory(NIMutationFactory):
class NIMetaClass:
form = EditRouterForm
request_path = '/'
graphql_type = Router
relations_processors = {
'relationship_location': location_relation_processor,
}
update_exclude = ('relationship_ports', )
class Meta:
abstract = False
class NIFirewallMutationFactory(NIMutationFactory):
class NIMetaClass:
form = EditFirewallNewForm
graphql_type = Firewall
unique_node = True
relations_processors = {
'relationship_provider': provider_relation_processor,
'switch_type': process_switch_type,
'responsible_group': responsible_relation_processor,
'support_group': supports_relation_processor,
'relationship_owner': owner_relation_processor,
'relationship_location': location_relation_processor,
}
class Meta:
abstract = False
class NIExternalEquipmentMutationFactory(NIMutationFactory):
class NIMetaClass:
create_form = NewExternalEquipmentForm
update_form = EditExternalEquipmentForm
graphql_type = ExternalEquipment
unique_node = True
relations_processors = {
'relationship_location': location_relation_processor,
'relationship_owner': owner_relation_processor,
}
class Meta:
abstract = False
class CreateHost(CreateNIMutation):
@classmethod
def do_request(cls, request, **kwargs):
form_class = kwargs.get('form_class')
nimetaclass = getattr(cls, 'NIMetaClass')
graphql_type = getattr(nimetaclass, 'graphql_type')
relations_processors = getattr(nimetaclass, 'relations_processors')
nimetatype = getattr(graphql_type, 'NIMetaType')
node_type = getattr(nimetatype, 'ni_type').lower()
has_error = False
context = sriutils.get_network_context()
# check it can write on this context
authorized = sriutils.authorize_create_resource(request.user, context)
if not authorized:
raise GraphQLAuthException()
# Get needed data from node
if request.POST:
# replace relay ids for handle_id in contacts if present
post_data = request.POST.copy()
relay_extra_ids = relations_processors.keys()
for field in relay_extra_ids:
handle_id = post_data.get(field)
# check if it's already converted to int version
try:
handle_id = int(handle_id)
continue
except:
pass
if handle_id:
try:
handle_id = relay.Node.from_global_id(handle_id)[1]
post_data.pop(field)
post_data.update({field: handle_id})
except BinasciiError:
pass # the id is already in handle_id format
form = form_class(post_data)
form.strict_validation = True
if form.is_valid():
data = form.cleaned_data
if data['relationship_owner'] or data['relationship_location']:
meta_type = 'Physical'
else:
meta_type = 'Logical'
try:
nh = helpers.form_to_generic_node_handle(request, form,
node_type, meta_type, context)
except UniqueNodeError:
has_error = True
return has_error, [ErrorType(field="_", messages=["A {} with that name already exists.".format(node_type)])]
# Generic node update
helpers.form_update_node(request.user, nh.handle_id, form)
nh_reload, host_nh = helpers.get_nh_node(nh.handle_id)
# add default context
NodeHandleContext(nodehandle=nh, context=context).save()
node = nh.get_node()
# Set relations
for relation_name, relation_f in relations_processors.items():
relation_f(request, form, node, relation_name)
return has_error, { graphql_type.__name__.lower(): nh }
else:
# get the errors and return them
has_error = True
errordict = cls.format_error_array(form.errors)
return has_error, errordict
else:
# get the errors and return them
has_error = True
errordict = cls.format_error_array(form.errors)
return has_error, errordict
class NIMetaClass:
django_form = EditSRIHostForm
request_path = '/'
graphql_type = Host
is_create = True
relations_processors = {
'relationship_owner': owner_relation_processor,
'responsible_group': responsible_relation_processor,
'support_group': supports_relation_processor,
'relationship_location': location_relation_processor,
}
class EditHost(CreateNIMutation):
@classmethod
def do_request(cls, request, **kwargs):
form_class = kwargs.get('form_class')
nimetaclass = getattr(cls, 'NIMetaClass')
graphql_type = getattr(nimetaclass, 'graphql_type')
nimetatype = getattr(graphql_type, 'NIMetaType')
node_type = getattr(nimetatype, 'ni_type').lower()
relations_processors = getattr(nimetaclass, 'relations_processors')
id = request.POST.get('id')
has_error = False
# check authorization
handle_id = relay.Node.from_global_id(id)[1]
authorized = sriutils.authorice_write_resource(request.user, handle_id)
if not authorized:
raise GraphQLAuthException()
# Get needed data from node
nh, host = helpers.get_nh_node(handle_id)
relations = host.get_relations()
out_relations = host.outgoing
if request.POST:
# set handle_id into POST data and remove relay id
post_data = request.POST.copy()
post_data.pop('id')
post_data.update({'handle_id': handle_id})
relay_extra_ids = relations_processors.keys()
relay_extra_ids = (
'relationship_user', 'relationship_owner',
'relationship_depends_on', 'relationship_location',
'relationship_location'
)
for field in relay_extra_ids:
handle_id = post_data.get(field)
if handle_id:
# check if it's already converted to int version
try:
handle_id = int(handle_id)
continue
except:
pass
try:
handle_id = relay.Node.from_global_id(handle_id)[1]
post_data.pop(field)
post_data.update({field: handle_id})
except BinasciiError:
pass # the id is already in handle_id format
form = form_class(post_data)
if form.is_valid():
# Generic node update
helpers.form_update_node(request.user, host.handle_id, form)
# Set relations
for relation_name, relation_f in relations_processors.items():
relation_f(request, form, host, relation_name)
# You can not set location and depends on at the same time
if form.cleaned_data['relationship_depends_on']:
depends_on_nh = _nh_safe_get(form.cleaned_data['relationship_depends_on'])
if depends_on_nh:
helpers.set_depends_on(request.user, host, depends_on_nh.handle_id)
elif form.cleaned_data['relationship_location']:
_handle_location(request.user,
host,
form.cleaned_data['relationship_location'])
if form.cleaned_data['services_locked'] and form.cleaned_data['services_checked']:
helpers.remove_rogue_service_marker(request.user, host.handle_id)
return has_error, { graphql_type.__name__.lower(): nh }
else:
# get the errors and return them
has_error = True
errordict = cls.format_error_array(form.errors)
return has_error, errordict
else:
# get the errors and return them
has_error = True
errordict = cls.format_error_array(form.errors)
return has_error, errordict
class NIMetaClass:
django_form = EditSRIHostForm
request_path = '/'
graphql_type = Host
is_create = False
exclude = ('relationship_ports', 'relationship_depends_on', )
relations_processors = {
'relationship_owner': owner_relation_processor,
'relationship_user': get_unique_relation_processor(
'Uses',
helpers.set_user
),
'responsible_group': responsible_relation_processor,
'support_group': supports_relation_processor,
'relationship_location': location_relation_processor,
}
class NIHostMutationFactory(NIMutationFactory):
class NIMetaClass:
create_form = NewSRIHostForm
update_form = EditSRIHostForm
graphql_type = Host
unique_node = True
relations_processors = {
'relationship_owner': owner_relation_processor,
'responsible_group': responsible_relation_processor,
'support_group': supports_relation_processor,
'relationship_location': location_relation_processor,
}
manual_create = CreateHost
manual_update = EditHost
class Meta:
abstract = False
class ConvertHost(relay.ClientIDMutation):
class Input:
id = graphene.ID(required=True)
slug = graphene.String(required=True)
success = graphene.Boolean(required=True)
new_id = graphene.ID()
new_type = graphene.Field(NINodeType)
@classmethod
def mutate_and_get_payload(cls, root, info, **input):
id = input.get("id")
slug = input.get("slug")
success = False
new_id = None
new_type = None
handle_id = relay.Node.from_global_id(id)[1]
allowed_types = allowed_types_converthost # Types that can be added as Hosts by nmap
user = info.context.user
# check write permissions over host node
authorized = sriutils.authorice_write_resource(user, handle_id)
if not authorized:
return ConvertHost(success=False)
if NodeHandle.objects.filter(handle_id=handle_id).exists():
nh = NodeHandle.objects.get(handle_id=handle_id)
if slug in allowed_types and nh.node_type.type == 'Host':
node_type = helpers.slug_to_node_type(slug, create=True)
nh, node = helpers.logical_to_physical(user, handle_id)
node.switch_type(nh.node_type.get_label(), node_type.get_label())
nh.node_type = node_type
nh.save()
node_properties = {
'backup': ''
}
helpers.dict_update_node(
user, node.handle_id, node_properties, node_properties.keys())
new_type = nh.node_type
new_id = relay.Node.to_global_id(str(nh.node_type),
str(nh.handle_id))
success = True
return ConvertHost(success=success, new_id=new_id, new_type=new_type)
class NIOpticalNodeMutationFactory(NIMutationFactory):
class NIMetaClass:
form = OpticalNodeForm
request_path = '/'
graphql_type = OpticalNode
unique_node = True
relations_processors = {
'relationship_location': location_relation_processor,
}
class Meta:
abstract = False
class NIODFMutationFactory(NIMutationFactory):
class NIMetaClass:
create_form = NewOdfForm
update_form = EditOdfForm
graphql_type = ODF
relations_processors = {
'relationship_location': location_relation_processor,
}
class Meta:
abstract = False
## Optical Nodes
class NIOpticalFilterMutationFactory(NIMutationFactory):
class NIMetaClass:
create_form = NewOpticalFilter
update_form = EditOpticalFilterForm
graphql_type = OpticalFilter
relations_processors = {
'relationship_location': location_relation_processor,
}
class Meta:
abstract = False
class NIOpticalLinkMutationFactory(NIMutationFactory):
class NIMetaClass:
create_form = NewOpticalLinkForm
update_form = EditOpticalLinkForm
graphql_type = OpticalLink
unique_node = True
relations_processors = {
'relationship_provider': provider_relation_processor,
}
class Meta:
abstract = False
class NIOpticalMultiplexSectionMutationFactory(NIMutationFactory):
class NIMetaClass:
form = NewOpticalMultiplexSectionForm
graphql_type = OpticalMultiplexSection
unique_node = True
relations_processors = {
'relationship_provider': provider_relation_processor,
}
class Meta:
abstract = False
class NIOpticalPathMutationFactory(NIMutationFactory):
class NIMetaClass:
form = EditOpticalPathForm
graphql_type = OpticalPath
unique_node = True
relations_processors = {
'relationship_provider': provider_relation_processor,
}
create_exclude = ('relationship_depends_on', )
update_exclude = ('relationship_depends_on', )
class Meta:
abstract = False
## Peering
class NIPeeringPartnerMutationFactory(NIMutationFactory):
class NIMetaClass:
form = EditPeeringPartnerForm
request_path = '/'
graphql_type = PeeringPartner
unique_node = True
class Meta:
abstract = False
class NIPeeringGroupMutationFactory(NIMutationFactory):
class NIMetaClass:
form = EditPeeringGroupForm
request_path = '/'
| |
_fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
async def nested_map_argument(
self,
struct_map: _typing.Mapping[str, _typing.Sequence[module.lite_types.SimpleStruct]]
) -> int:
resp = await self._send_request(
"SimpleService",
"nested_map_argument",
module.lite_types._fbthrift_SimpleService_nested_map_argument_args(
struct_map=struct_map,),
module.lite_types._fbthrift_SimpleService_nested_map_argument_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
async def make_sentence(
self,
word_chars: _typing.Sequence[_typing.Sequence[str]]
) -> str:
resp = await self._send_request(
"SimpleService",
"make_sentence",
module.lite_types._fbthrift_SimpleService_make_sentence_args(
word_chars=word_chars,),
module.lite_types._fbthrift_SimpleService_make_sentence_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
async def get_union(
self,
sets: _typing.Sequence[_typing.AbstractSet[int]]
) -> _typing.AbstractSet[int]:
resp = await self._send_request(
"SimpleService",
"get_union",
module.lite_types._fbthrift_SimpleService_get_union_args(
sets=sets,),
module.lite_types._fbthrift_SimpleService_get_union_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
async def get_keys(
self,
string_map: _typing.Sequence[_typing.Mapping[str, str]]
) -> _typing.AbstractSet[str]:
resp = await self._send_request(
"SimpleService",
"get_keys",
module.lite_types._fbthrift_SimpleService_get_keys_args(
string_map=string_map,),
module.lite_types._fbthrift_SimpleService_get_keys_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
async def lookup_double(
self,
key: int
) -> float:
resp = await self._send_request(
"SimpleService",
"lookup_double",
module.lite_types._fbthrift_SimpleService_lookup_double_args(
key=key,),
module.lite_types._fbthrift_SimpleService_lookup_double_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
async def retrieve_binary(
self,
something: bytes
) -> bytes:
resp = await self._send_request(
"SimpleService",
"retrieve_binary",
module.lite_types._fbthrift_SimpleService_retrieve_binary_args(
something=something,),
module.lite_types._fbthrift_SimpleService_retrieve_binary_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
async def contain_binary(
self,
binaries: _typing.Sequence[bytes]
) -> _typing.AbstractSet[bytes]:
resp = await self._send_request(
"SimpleService",
"contain_binary",
module.lite_types._fbthrift_SimpleService_contain_binary_args(
binaries=binaries,),
module.lite_types._fbthrift_SimpleService_contain_binary_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
async def contain_enum(
self,
the_enum: _typing.Sequence[module.lite_types.AnEnum]
) -> _typing.Sequence[module.lite_types.AnEnum]:
resp = await self._send_request(
"SimpleService",
"contain_enum",
module.lite_types._fbthrift_SimpleService_contain_enum_args(
the_enum=the_enum,),
module.lite_types._fbthrift_SimpleService_contain_enum_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
async def get_binary_union_struct(
self,
u: module.lite_types.BinaryUnion
) -> module.lite_types.BinaryUnionStruct:
resp = await self._send_request(
"SimpleService",
"get_binary_union_struct",
module.lite_types._fbthrift_SimpleService_get_binary_union_struct_args(
u=u,),
module.lite_types._fbthrift_SimpleService_get_binary_union_struct_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
class Sync(_fbthrift_py3lite_SyncClient):
def get_five(
self
) -> int:
resp = self._send_request(
"SimpleService",
"get_five",
module.lite_types._fbthrift_SimpleService_get_five_args(),
module.lite_types._fbthrift_SimpleService_get_five_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
def add_five(
self,
num: int
) -> int:
resp = self._send_request(
"SimpleService",
"add_five",
module.lite_types._fbthrift_SimpleService_add_five_args(
num=num,),
module.lite_types._fbthrift_SimpleService_add_five_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
def do_nothing(
self
) -> None:
resp = self._send_request(
"SimpleService",
"do_nothing",
module.lite_types._fbthrift_SimpleService_do_nothing_args(),
module.lite_types._fbthrift_SimpleService_do_nothing_result,
)
def concat(
self,
first: str,
second: str
) -> str:
resp = self._send_request(
"SimpleService",
"concat",
module.lite_types._fbthrift_SimpleService_concat_args(
first=first,
second=second,),
module.lite_types._fbthrift_SimpleService_concat_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
def get_value(
self,
simple_struct: module.lite_types.SimpleStruct
) -> int:
resp = self._send_request(
"SimpleService",
"get_value",
module.lite_types._fbthrift_SimpleService_get_value_args(
simple_struct=simple_struct,),
module.lite_types._fbthrift_SimpleService_get_value_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
def negate(
self,
input: bool
) -> bool:
resp = self._send_request(
"SimpleService",
"negate",
module.lite_types._fbthrift_SimpleService_negate_args(
input=input,),
module.lite_types._fbthrift_SimpleService_negate_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
def tiny(
self,
input: int
) -> int:
resp = self._send_request(
"SimpleService",
"tiny",
module.lite_types._fbthrift_SimpleService_tiny_args(
input=input,),
module.lite_types._fbthrift_SimpleService_tiny_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
def small(
self,
input: int
) -> int:
resp = self._send_request(
"SimpleService",
"small",
module.lite_types._fbthrift_SimpleService_small_args(
input=input,),
module.lite_types._fbthrift_SimpleService_small_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
def big(
self,
input: int
) -> int:
resp = self._send_request(
"SimpleService",
"big",
module.lite_types._fbthrift_SimpleService_big_args(
input=input,),
module.lite_types._fbthrift_SimpleService_big_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
def two(
self,
input: float
) -> float:
resp = self._send_request(
"SimpleService",
"two",
module.lite_types._fbthrift_SimpleService_two_args(
input=input,),
module.lite_types._fbthrift_SimpleService_two_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
def expected_exception(
self
) -> None:
resp = self._send_request(
"SimpleService",
"expected_exception",
module.lite_types._fbthrift_SimpleService_expected_exception_args(),
module.lite_types._fbthrift_SimpleService_expected_exception_result,
)
if resp.se is not None:
raise resp.se
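# Note on the pattern above (added for clarity, not generated code): for a
# void method with a declared exception, the generated wrapper raises the
# populated exception field (here resp.se) and otherwise simply returns None;
# there is no MISSING_RESULT check because a void call has no success value.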
def unexpected_exception(
self
) -> int:
resp = self._send_request(
"SimpleService",
"unexpected_exception",
module.lite_types._fbthrift_SimpleService_unexpected_exception_args(),
module.lite_types._fbthrift_SimpleService_unexpected_exception_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
def sum_i16_list(
self,
numbers: _typing.Sequence[int]
) -> int:
resp = self._send_request(
"SimpleService",
"sum_i16_list",
module.lite_types._fbthrift_SimpleService_sum_i16_list_args(
numbers=numbers,),
module.lite_types._fbthrift_SimpleService_sum_i16_list_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
def sum_i32_list(
self,
numbers: _typing.Sequence[int]
) -> int:
resp = self._send_request(
"SimpleService",
"sum_i32_list",
module.lite_types._fbthrift_SimpleService_sum_i32_list_args(
numbers=numbers,),
module.lite_types._fbthrift_SimpleService_sum_i32_list_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
def sum_i64_list(
self,
numbers: _typing.Sequence[int]
) -> int:
resp = self._send_request(
"SimpleService",
"sum_i64_list",
module.lite_types._fbthrift_SimpleService_sum_i64_list_args(
numbers=numbers,),
module.lite_types._fbthrift_SimpleService_sum_i64_list_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
def concat_many(
self,
words: _typing.Sequence[str]
) -> str:
resp = self._send_request(
"SimpleService",
"concat_many",
module.lite_types._fbthrift_SimpleService_concat_many_args(
words=words,),
module.lite_types._fbthrift_SimpleService_concat_many_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
def count_structs(
self,
items: _typing.Sequence[module.lite_types.SimpleStruct]
) -> int:
resp = self._send_request(
"SimpleService",
"count_structs",
module.lite_types._fbthrift_SimpleService_count_structs_args(
items=items,),
module.lite_types._fbthrift_SimpleService_count_structs_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
def sum_set(
self,
numbers: _typing.AbstractSet[int]
) -> int:
resp = self._send_request(
"SimpleService",
"sum_set",
module.lite_types._fbthrift_SimpleService_sum_set_args(
numbers=numbers,),
module.lite_types._fbthrift_SimpleService_sum_set_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
def contains_word(
self,
words: _typing.AbstractSet[str],
word: str
) -> bool:
resp = self._send_request(
"SimpleService",
"contains_word",
module.lite_types._fbthrift_SimpleService_contains_word_args(
words=words,
word=word,),
module.lite_types._fbthrift_SimpleService_contains_word_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
def get_map_value(
self,
words: _typing.Mapping[str, str],
key: str
) -> str:
resp = self._send_request(
"SimpleService",
"get_map_value",
module.lite_types._fbthrift_SimpleService_get_map_value_args(
words=words,
key=key,),
module.lite_types._fbthrift_SimpleService_get_map_value_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
def map_length(
self,
items: _typing.Mapping[str, module.lite_types.SimpleStruct]
) -> int:
resp = self._send_request(
"SimpleService",
"map_length",
module.lite_types._fbthrift_SimpleService_map_length_args(
items=items,),
module.lite_types._fbthrift_SimpleService_map_length_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
def sum_map_values(
self,
items: _typing.Mapping[str, int]
) -> int:
resp = self._send_request(
"SimpleService",
"sum_map_values",
module.lite_types._fbthrift_SimpleService_sum_map_values_args(
items=items,),
module.lite_types._fbthrift_SimpleService_sum_map_values_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
def complex_sum_i32(
self,
counter: module.lite_types.ComplexStruct
) -> int:
resp = self._send_request(
"SimpleService",
"complex_sum_i32",
module.lite_types._fbthrift_SimpleService_complex_sum_i32_args(
counter=counter,),
module.lite_types._fbthrift_SimpleService_complex_sum_i32_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
def repeat_name(
self,
counter: module.lite_types.ComplexStruct
) -> str:
resp = self._send_request(
"SimpleService",
"repeat_name",
module.lite_types._fbthrift_SimpleService_repeat_name_args(
counter=counter,),
module.lite_types._fbthrift_SimpleService_repeat_name_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
def get_struct(
self
) -> module.lite_types.SimpleStruct:
resp = self._send_request(
"SimpleService",
"get_struct",
module.lite_types._fbthrift_SimpleService_get_struct_args(),
module.lite_types._fbthrift_SimpleService_get_struct_result,
)
# shortcut to success path for non-void returns
if resp.success is not None:
return resp.success
raise _fbthrift_py3lite_exceptions.ApplicationError(
_fbthrift_py3lite_exceptions.ApplicationErrorType.MISSING_RESULT,
"Empty Response",
)
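# Illustrative usage note (not generated code): every client method above
# serializes an args struct, calls _send_request, and either returns
# resp.success or raises ApplicationError(MISSING_RESULT). A hypothetical
# call site might look like:
#
#   with make_client(SimpleService) as client:  # make_client is a placeholder
#       assert client.add_five(5) == 10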
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2010 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A driver for XenServer or Xen Cloud Platform.
**Related Flags**
:xenapi_connection_url: URL for connection to XenServer/Xen Cloud Platform.
:xenapi_connection_username: Username for connection to XenServer/Xen Cloud
Platform (default: root).
:xenapi_connection_password: Password for connection to XenServer/Xen Cloud
Platform.
:target_host: the iSCSI Target Host IP address, i.e. the IP
address for the nova-volume host
:target_port: iSCSI Target Port, 3260 Default
:iqn_prefix: IQN Prefix, e.g. 'iqn.2010-10.org.openstack'
**Variable Naming Scheme**
- suffix "_ref" for opaque references
- suffix "_uuid" for UUIDs
- suffix "_rec" for record objects
"""
import contextlib
import cPickle as pickle
import urlparse
import xmlrpclib
from eventlet import queue
from eventlet import timeout
from oslo.config import cfg
from nova import context
from nova import exception
from nova.openstack.common import log as logging
from nova.virt import driver
from nova.virt.xenapi import host
from nova.virt.xenapi import pool
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import vmops
from nova.virt.xenapi import volumeops
LOG = logging.getLogger(__name__)
xenapi_opts = [
cfg.StrOpt('xenapi_connection_url',
default=None,
help='URL for connection to XenServer/Xen Cloud Platform. '
'Required if compute_driver=xenapi.XenAPIDriver'),
cfg.StrOpt('xenapi_connection_username',
default='root',
help='Username for connection to XenServer/Xen Cloud Platform. '
'Used only if compute_driver=xenapi.XenAPIDriver'),
cfg.StrOpt('xenapi_connection_password',
default=None,
help='Password for connection to XenServer/Xen Cloud Platform. '
'Used only if compute_driver=xenapi.XenAPIDriver',
secret=True),
cfg.IntOpt('xenapi_connection_concurrent',
default=5,
help='Maximum number of concurrent XenAPI connections. '
'Used only if compute_driver=xenapi.XenAPIDriver'),
cfg.FloatOpt('xenapi_vhd_coalesce_poll_interval',
default=5.0,
help='The interval used for polling of coalescing vhds. '
'Used only if compute_driver=xenapi.XenAPIDriver'),
cfg.BoolOpt('xenapi_check_host',
default=True,
help='Ensure compute service is running on host XenAPI '
'connects to.'),
cfg.IntOpt('xenapi_vhd_coalesce_max_attempts',
default=5,
help='Max number of times to poll for VHD to coalesce. '
'Used only if compute_driver=xenapi.XenAPIDriver'),
cfg.StrOpt('xenapi_sr_base_path',
default='/var/run/sr-mount',
help='Base path to the storage repository'),
cfg.StrOpt('target_host',
default=None,
help='iSCSI Target Host'),
cfg.StrOpt('target_port',
default='3260',
help='iSCSI Target Port, 3260 Default'),
cfg.StrOpt('iqn_prefix',
default='iqn.2010-10.org.openstack',
help='IQN Prefix'),
# NOTE(sirp): This is a work-around for a bug in Ubuntu Maverick,
# when we pull support for it, we should remove this
cfg.BoolOpt('xenapi_remap_vbd_dev',
default=False,
help='Used to enable the remapping of VBD dev '
'(Works around an issue in Ubuntu Maverick)'),
cfg.StrOpt('xenapi_remap_vbd_dev_prefix',
default='sd',
help='Specify prefix to remap VBD dev to '
'(ex. /dev/xvdb -> /dev/sdb)'),
cfg.IntOpt('xenapi_login_timeout',
default=10,
help='Timeout in seconds for XenAPI login.'),
]
CONF = cfg.CONF
CONF.register_opts(xenapi_opts)
CONF.import_opt('host', 'nova.netconf')
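# Illustrative deployment snippet (not part of this module): with the options
# above registered, a nova.conf for this driver would typically contain
# something like the following; the URL and password values are placeholders.
#
#   [DEFAULT]
#   compute_driver = xenapi.XenAPIDriver
#   xenapi_connection_url = https://xenserver.example.com
#   xenapi_connection_username = root
#   xenapi_connection_password = <secret>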
class XenAPIDriver(driver.ComputeDriver):
"""A connection to XenServer or Xen Cloud Platform."""
def __init__(self, virtapi, read_only=False):
super(XenAPIDriver, self).__init__(virtapi)
url = CONF.xenapi_connection_url
username = CONF.xenapi_connection_username
password = CONF.xenapi_connection_password
if not url or password is None:
raise Exception(_('Must specify xenapi_connection_url, '
'xenapi_connection_username (optionally), and '
'xenapi_connection_password to use '
'compute_driver=xenapi.XenAPIDriver'))
self._session = XenAPISession(url, username, password, self.virtapi)
self._volumeops = volumeops.VolumeOps(self._session)
self._host_state = None
self._host = host.Host(self._session, self.virtapi)
self._vmops = vmops.VMOps(self._session, self.virtapi)
self._initiator = None
self._hypervisor_hostname = None
self._pool = pool.ResourcePool(self._session, self.virtapi)
@property
def host_state(self):
if not self._host_state:
self._host_state = host.HostState(self._session)
return self._host_state
def init_host(self, host):
if CONF.xenapi_check_host:
vm_utils.ensure_correct_host(self._session)
try:
vm_utils.cleanup_attached_vdis(self._session)
except Exception:
LOG.exception(_('Failure while cleaning up attached VDIs'))
def list_instances(self):
"""List VM instances."""
return self._vmops.list_instances()
def list_instance_uuids(self):
"""Get the list of nova instance uuids for VMs found on the
hypervisor.
"""
return self._vmops.list_instance_uuids()
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
"""Create VM instance."""
self._vmops.spawn(context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info)
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM."""
# TODO(Vek): Need to pass context in for access to auth_token
self._vmops.confirm_migration(migration, instance, network_info)
def finish_revert_migration(self, instance, network_info,
block_device_info=None):
"""Finish reverting a resize, powering back on the instance."""
# NOTE(vish): Xen currently does not use network info.
self._vmops.finish_revert_migration(instance, block_device_info)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance=False,
block_device_info=None):
"""Completes a resize, turning on the migrated instance."""
self._vmops.finish_migration(context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info)
def snapshot(self, context, instance, image_id, update_task_state):
"""Create snapshot from a running VM instance."""
self._vmops.snapshot(context, instance, image_id, update_task_state)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
"""Reboot VM instance."""
self._vmops.reboot(instance, reboot_type,
bad_volumes_callback=bad_volumes_callback)
def set_admin_password(self, instance, new_pass):
"""Set the root/admin password on the VM instance."""
self._vmops.set_admin_password(instance, new_pass)
def inject_file(self, instance, b64_path, b64_contents):
"""Create a file on the VM instance. The file path and contents
should be base64-encoded.
"""
self._vmops.inject_file(instance, b64_path, b64_contents)
def change_instance_metadata(self, context, instance, diff):
"""Apply a diff to the instance metadata."""
self._vmops.change_instance_metadata(instance, diff)
def destroy(self, instance, network_info, block_device_info=None,
destroy_disks=True):
"""Destroy VM instance."""
self._vmops.destroy(instance, network_info, block_device_info,
destroy_disks)
def pause(self, instance):
"""Pause VM instance."""
self._vmops.pause(instance)
def unpause(self, instance):
"""Unpause paused VM instance."""
self._vmops.unpause(instance)
def migrate_disk_and_power_off(self, context, instance, dest,
instance_type, network_info,
block_device_info=None):
"""Transfers the VHD of a running instance to another host, then shuts
off the instance and copies over the COW disk."""
# NOTE(vish): Xen currently does not use network info.
rv = self._vmops.migrate_disk_and_power_off(context, instance,
dest, instance_type)
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
name_label = self._vmops._get_orig_vm_name_label(instance)
for vol in block_device_mapping:
connection_info = vol['connection_info']
mount_device = vol['mount_device'].rpartition("/")[2]
self._volumeops.detach_volume(connection_info,
name_label, mount_device)
return rv
def suspend(self, instance):
"""suspend the specified instance."""
self._vmops.suspend(instance)
def resume(self, instance, network_info, block_device_info=None):
"""resume the specified instance."""
self._vmops.resume(instance)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Rescue the specified instance."""
self._vmops.rescue(context, instance, network_info, image_meta,
rescue_password)
def unrescue(self, instance, network_info):
"""Unrescue the specified instance."""
self._vmops.unrescue(instance)
def power_off(self, instance):
"""Power off the specified instance."""
self._vmops.power_off(instance)
def power_on(self, instance):
"""Power on the specified instance."""
self._vmops.power_on(instance)
def soft_delete(self, instance):
"""Soft delete the specified instance."""
self._vmops.soft_delete(instance)
def restore(self, instance):
"""Restore the specified instance."""
self._vmops.restore(instance)
def poll_rebooting_instances(self, timeout, instances):
"""Poll for rebooting instances."""
self._vmops.poll_rebooting_instances(timeout, instances)
def reset_network(self, instance):
"""reset networking for specified instance."""
self._vmops.reset_network(instance)
def inject_network_info(self, instance, network_info):
"""inject network info for specified instance."""
self._vmops.inject_network_info(instance, network_info)
def plug_vifs(self, instance_ref, network_info):
"""Plug VIFs into networks."""
self._vmops.plug_vifs(instance_ref, network_info)
def unplug_vifs(self, instance_ref, network_info):
"""Unplug VIFs from networks."""
self._vmops.unplug_vifs(instance_ref, network_info)
def get_info(self, instance):
"""Return data about VM instance."""
return self._vmops.get_info(instance)
def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
return self._vmops.get_diagnostics(instance)
def get_all_bw_counters(self, instances):
"""Return bandwidth usage counters for each interface on each
running VM"""
# we only care about VMs that correspond to a nova-managed
# instance:
imap = dict([(inst['name'], inst['uuid']) for inst in instances])
bwcounters = []
# get a dictionary of instance names. values are dictionaries
# of mac addresses with values that are the bw counters:
# e.g. {'instance-001': {'12:34:56:78:90:12': {'bw_in': 0, ...}}}
all_counters = self._vmops.get_all_bw_counters()
for instance_name, counters in all_counters.iteritems():
if instance_name in imap:
# yes these are stats for a nova-managed vm
# correlate the stats with the nova instance uuid:
for vif_counter in counters.values():
vif_counter['uuid'] = imap[instance_name]
bwcounters.append(vif_counter)
return bwcounters
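# Illustrative data shapes for get_all_bw_counters above (values are made up):
#
#   all_counters = {'instance-001': {'12:34:56:78:90:12': {'bw_in': 0, 'bw_out': 0}}}
#   instances    = [{'name': 'instance-001', 'uuid': 'abc-123'}]
#   result       = [{'bw_in': 0, 'bw_out': 0, 'uuid': 'abc-123'}]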
def get_console_output(self, instance):
"""Return snapshot of console."""
return self._vmops.get_console_output(instance)
def get_vnc_console(self, instance):
"""Return link to instance's VNC console."""
return self._vmops.get_vnc_console(instance)
def get_volume_connector(self, instance):
"""Return volume connector information."""
if not self._initiator or not self._hypervisor_hostname:
stats = self.get_host_stats(refresh=True)
try:
self._initiator = stats['host_other-config']['iscsi_iqn']
self._hypervisor_hostname = stats['host_hostname']
except (TypeError, KeyError) as err:
LOG.warn(_('Could not determine key: %s') % err,
instance=instance)
self._initiator = None
return {
'ip': self.get_host_ip_addr(),
'initiator': self._initiator,
'host': self._hypervisor_hostname
}
@staticmethod
def get_host_ip_addr():
xs_url = urlparse.urlparse(CONF.xenapi_connection_url)
return xs_url.netloc
def attach_volume(self, connection_info, instance, mountpoint):
"""Attach volume storage to VM instance."""
return self._volumeops.attach_volume(connection_info,
instance['name'],
mountpoint)
def detach_volume(self, connection_info, instance, mountpoint):
"""Detach volume storage to VM instance."""
return self._volumeops.detach_volume(connection_info,
instance['name'],
mountpoint)
def get_console_pool_info(self, console_type):
xs_url = urlparse.urlparse(CONF.xenapi_connection_url)
return {'address': xs_url.netloc,
'username': CONF.xenapi_connection_username,
'password': CONF.xenapi_connection_password}
def get_available_resource(self, nodename):
"""Retrieve resource info.
This method is called when nova-compute launches, and
as part of a periodic task.
:param nodename: ignored in this driver
:returns: dictionary describing resources
"""
host_stats = self.get_host_stats(refresh=True)
# Updating host information
total_ram_mb = host_stats['host_memory_total'] / (1024 * 1024)
# NOTE(belliott) memory-free-computed is a value provided by XenServer
# for gauging free memory more conservatively than memory-free.
free_ram_mb = host_stats['host_memory_free_computed'] / (1024 * 1024)
total_disk_gb = host_stats['disk_total'] / (1024 * 1024 * 1024)
used_disk_gb = host_stats['disk_used'] / (1024 * 1024 * 1024)
dic = {'vcpus': 0,
'memory_mb': total_ram_mb,
'local_gb': total_disk_gb,
'vcpus_used': 0,
'memory_mb_used': total_ram_mb - free_ram_mb,
'local_gb_used': used_disk_gb}
return dic
import os
import numpy as np
import time
from scipy.signal import savgol_filter
import sys
import scipy.io as sio
import utils.utils as utils
def str2ind(categoryname, classlist):
return [i for i in range(len(classlist)) if categoryname == classlist[i]][0]
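# e.g. str2ind('CliffDiving', ['BaseballPitch', 'CliffDiving']) -> 1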
def filter_segments(segment_predict, videonames, ambilist, factor):
ind = np.zeros(np.shape(segment_predict)[0])
for i in range(np.shape(segment_predict)[0]):
vn = videonames[int(segment_predict[i, 0])]
for a in ambilist:
if a[0] == vn:
gt = range(int(round(float(a[2]) * factor)), int(round(float(a[3]) * factor)))
pd = range(int(segment_predict[i][1]), int(segment_predict[i][2]))
IoU = float(len(set(gt).intersection(set(pd)))) / float(len(set(gt).union(set(pd))))
if IoU > 0:
ind[i] = 1
s = [segment_predict[i, :] for i in range(np.shape(segment_predict)[0]) if ind[i] == 0]
return np.array(s)
def getActLoc(vid_preds, frm_preds, vid_lens, act_thresh_cas, annotation_path, args):
gtsegments = np.load(annotation_path + '/segments.npy')
gtlabels = np.load(annotation_path + '/labels.npy')
videoname = np.load(annotation_path + '/videoname.npy')
videoname = np.array([v.decode('utf-8') for v in videoname])
subset = np.load(annotation_path + '/subset.npy')
subset = np.array([s.decode('utf-8') for s in subset])
classlist = np.load(annotation_path + '/classlist.npy')
classlist = np.array([c.decode('utf-8') for c in classlist])
ambilist = annotation_path + '/Ambiguous_test.txt'
if os.path.isfile(ambilist):
ambilist = list(open(ambilist, 'r'))
ambilist = [a.strip('\n').split(' ') for a in ambilist]
else:
ambilist = None
if args.feature_type == 'UNT':
factor = 10.0 / 4.0
else:
factor = 25.0 / 16.0
# Keep only the test subset annotations
gts, gtl, vn, vp, fp, vl = [], [], [], [], [], []
for i, s in enumerate(subset):
if subset[i] == 'test':
gts.append(gtsegments[i])
gtl.append(gtlabels[i])
vn.append(videoname[i])
gtsegments = gts
gtlabels = gtl
videoname = vn
# keep ground truth and predictions for instances with temporal annotations
gtl, vn, vp, fp, vl = [], [], [], [], []
for i, s in enumerate(gtsegments):
if len(s):
gtl.append(gtlabels[i])
vn.append(videoname[i])
vp.append(vid_preds[i])
fp.append(frm_preds[i])
vl.append(vid_lens[i])
gtlabels = gtl
videoname = vn
# which categories have temporal labels ?
templabelcategories = sorted(list(set([l for gtl in gtlabels for l in gtl])))
# the number index for those categories.
templabelidx = []
for t in templabelcategories:
templabelidx.append(str2ind(t, classlist))
dataset_segment_predict = []
class_threshold = args.class_threshold
for c in templabelidx:
c_temp = []
# Get list of all predictions for class c
for i in range(len(fp)):
vid_cls_score = vp[i][c]
vid_cas = fp[i][:, c]
vid_cls_proposal = []
if vid_cls_score < class_threshold:
continue
for t in range(len(act_thresh_cas)):
thres = act_thresh_cas[t]
vid_pred = np.concatenate([np.zeros(1), (vid_cas > thres).astype('float32'), np.zeros(1)], axis=0)
vid_pred_diff = [vid_pred[idt] - vid_pred[idt - 1] for idt in range(1, len(vid_pred))]
s = [idk for idk, item in enumerate(vid_pred_diff) if item == 1]
e = [idk for idk, item in enumerate(vid_pred_diff) if item == -1]
for j in range(len(s)):
len_proposal = e[j] - s[j]
if len_proposal >= 3:
inner_score = np.mean(vid_cas[s[j]:e[j] + 1])
outer_s = max(0, int(s[j]- 0.25 * len_proposal))
outer_e = min(int(vid_cas.shape[0]-1), int(e[j] + 0.25 * len_proposal + 1))
outer_temp_list = list(range(outer_s, int(s[j]))) + list(range(int(e[j] + 1), outer_e))
if len(outer_temp_list) == 0:
outer_score = 0
else:
outer_score = np.mean(vid_cas[outer_temp_list])
c_score = inner_score - 0.6 * outer_score
vid_cls_proposal.append([i, s[j], e[j] + 1, c_score])
pick_idx = NonMaximumSuppression(np.array(vid_cls_proposal), 0.2)
nms_vid_cls_proposal = [vid_cls_proposal[k] for k in pick_idx]
c_temp += nms_vid_cls_proposal
if len(c_temp) > 0:
c_temp = np.array(c_temp)
if ambilist is not None:
c_temp = filter_segments(c_temp, videoname, ambilist, factor) # filtering segment in ambilist
dataset_segment_predict.append(c_temp)
return dataset_segment_predict
def IntergrateSegs(rgb_segs, flow_segs, th, args):
NUM_CLASS = args.class_num
NUM_VID = 212
segs = []
for i in range(NUM_CLASS):
class_seg = []
rgb_seg = rgb_segs[i]
flow_seg = flow_segs[i]
rgb_seg_ind = np.array(rgb_seg)[:, 0]
flow_seg_ind = np.array(flow_seg)[:, 0]
for j in range(NUM_VID):
rgb_find = np.where(rgb_seg_ind == j)
flow_find = np.where(flow_seg_ind == j)
if len(rgb_find[0]) == 0 and len(flow_find[0]) == 0:
continue
elif len(rgb_find[0]) != 0 and len(flow_find[0]) != 0:
rgb_vid_seg = rgb_seg[rgb_find[0]]
flow_vid_seg = flow_seg[flow_find[0]]
fuse_seg = np.concatenate([rgb_vid_seg, flow_vid_seg], axis=0)
pick_idx = NonMaximumSuppression(fuse_seg, th)
fuse_segs = fuse_seg[pick_idx]
class_seg.append(fuse_segs)
elif len(rgb_find[0]) != 0 and len(flow_find[0]) == 0:
vid_seg = rgb_seg[rgb_find[0]]
class_seg.append(vid_seg)
elif len(rgb_find[0]) == 0 and len(flow_find[0]) != 0:
vid_seg = flow_seg[flow_find[0]]
class_seg.append(vid_seg)
class_seg = np.concatenate(class_seg, axis=0)
segs.append(class_seg)
return segs
def NonMaximumSuppression(segs, overlapThresh):
# if there are no boxes, return an empty list
if len(segs) == 0:
return []
# if the bounding boxes are integers, convert them to floats --
# this is important since we'll be doing a bunch of divisions
if segs.dtype.kind == "i":
segs = segs.astype("float")
# initialize the list of picked indexes
pick = []
# grab the coordinates of the segments
s = segs[:, 1]
e = segs[:, 2]
scores = segs[:, 3]
# compute the area of the bounding boxes and sort the bounding
# boxes by the score of the bounding box
area = e - s + 1
idxs = np.argsort(scores)
# keep looping while some indexes still remain in the indexes
# list
while len(idxs) > 0:
# grab the last index in the indexes list and add the
# index value to the list of picked indexes
last = len(idxs) - 1
i = idxs[last]
pick.append(i)
# find the largest coordinates for the start of
# the segments and the smallest coordinates
# for the end of the segments
maxs = np.maximum(s[i], s[idxs[:last]])
mine = np.minimum(e[i], e[idxs[:last]])
# compute the length of the overlapping area
l = np.maximum(0, mine - maxs + 1)
# compute the ratio of overlap
overlap = l / area[idxs[:last]]
# delete segments beyond the threshold
idxs = np.delete(idxs, np.concatenate(([last], np.where(overlap > overlapThresh)[0])))
return pick
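# Illustrative check of NonMaximumSuppression (not part of the original
# evaluation code): three synthetic segments, two of them heavily overlapping.
# With an overlap threshold of 0.2 only the higher-scoring one of the
# overlapping pair survives, together with the disjoint segment.
def _nms_demo():
    demo = np.array([
        # [video_idx, start, end, score]
        [0, 10, 30, 0.9],
        [0, 12, 28, 0.5],  # almost fully covered by the first segment
        [0, 50, 70, 0.7],
    ])
    picked = NonMaximumSuppression(demo, 0.2)
    return sorted(picked)  # -> [0, 2]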
def getLocMAP(seg_preds, th, annotation_path, args):
gtsegments = np.load(annotation_path + '/segments.npy')
gtlabels = np.load(annotation_path + '/labels.npy')
videoname = np.load(annotation_path + '/videoname.npy')
videoname = np.array([v.decode('utf-8') for v in videoname])
subset = np.load(annotation_path + '/subset.npy')
subset = np.array([s.decode('utf-8') for s in subset])
classlist = np.load(annotation_path + '/classlist.npy')
classlist = np.array([c.decode('utf-8') for c in classlist])
if args.feature_type == 'UNT':
factor = 10.0 / 4.0
else:
factor = 25.0 / 16.0
# Keep only the test subset annotations
gts, gtl, vn = [], [], []
for i, s in enumerate(subset):
if subset[i] == 'test':
gts.append(gtsegments[i])
gtl.append(gtlabels[i])
vn.append(videoname[i])
gtsegments = gts
gtlabels = gtl
videoname = vn
# keep ground truth and predictions for instances with temporal annotations
gts, gtl, vn = [], [], []
for i, s in enumerate(gtsegments):
if len(s):
gts.append(gtsegments[i])
gtl.append(gtlabels[i])
vn.append(videoname[i])
gtsegments = gts
gtlabels = gtl
videoname = vn
# which categories have temporal labels ?
templabelcategories = sorted(list(set([l for gtl in gtlabels for l in gtl])))
# the number index for those categories.
templabelidx = []
for t in templabelcategories:
templabelidx.append(str2ind(t, classlist))
ap = []
for c in templabelidx:
segment_predict = seg_preds[c]
# Sort the list of predictions for class c based on score
if len(segment_predict) == 0:
return 0
segment_predict = segment_predict[np.argsort(-segment_predict[:, 3])]
# Create gt list
segment_gt = [[i, gtsegments[i][j][0], gtsegments[i][j][1]] for i in range(len(gtsegments)) for j in
range(len(gtsegments[i])) if str2ind(gtlabels[i][j], classlist) == c]
gtpos = len(segment_gt)
# Compare predictions and gt
tp, fp = [], []
for i in range(len(segment_predict)):
matched = False
best_iou = 0
for j in range(len(segment_gt)):
if segment_predict[i][0] == segment_gt[j][0]:
gt = range(int(round(segment_gt[j][1] * factor)), int(round(segment_gt[j][2] * factor)))
p = range(int(segment_predict[i][1]), int(segment_predict[i][2]))
IoU = float(len(set(gt).intersection(set(p)))) / float(len(set(gt).union(set(p))))
if IoU >= th:
matched = True
if IoU > best_iou:
best_iou = IoU
best_j = j
if matched:
del segment_gt[best_j]
tp.append(float(matched))
fp.append(1. - float(matched))
tp_c = np.cumsum(tp)
fp_c = np.cumsum(fp)
if sum(tp) == 0:
prc = 0.
else:
cur_prec = tp_c / (fp_c + tp_c)
cur_rec = tp_c / gtpos
prc = _ap_from_pr(cur_prec, cur_rec)
ap.append(prc)
if ap:
return 100 * np.mean(ap)
else:
return 0
# Inspired by Pascal VOC evaluation tool.
def _ap_from_pr(prec, rec):
mprec = np.hstack([[0], prec, [0]])
mrec = np.hstack([[0], rec, [1]])
for i in range(len(mprec) - 1)[::-1]:
mprec[i] = max(mprec[i], mprec[i + 1])
idx = np.where(mrec[1::] != mrec[0:-1])[0] + 1
ap = np.sum((mrec[idx] - mrec[idx - 1]) * mprec[idx])
return ap
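# Tiny worked example for _ap_from_pr (illustrative only): one true positive
# followed by one false positive over a single ground-truth instance gives
# precision [1.0, 0.5] at recall [1.0, 1.0], and the interpolated AP is 1.0.
def _ap_demo():
    prec = np.array([1.0, 0.5])
    rec = np.array([1.0, 1.0])
    return _ap_from_pr(prec, rec)  # -> 1.0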
def compute_iou(dur1, dur2):
# find the each edge of intersect rectangle
left_line = max(dur1[0], dur2[0])
right_line = min(dur1[1], dur2[1])
# judge if there is an intersect
if left_line >= right_line:
return 0
else:
intersect = right_line - left_line
union = max(dur1[1], dur2[1]) - min(dur1[0], dur2[0])
return intersect / union
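# Quick sanity example for compute_iou (illustrative only): two 10-second
# segments sharing 5 seconds have intersection 5 and union 15, so IoU = 1/3.
def _iou_demo():
    return compute_iou((0.0, 10.0), (5.0, 15.0))  # -> 0.333...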
def getSingleStreamDetectionMAP(vid_preds, frm_preds, vid_lens, annotation_path, args):
iou_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Digital Ocean Driver
"""
from libcloud.utils.py3 import httplib
from libcloud.common.base import ConnectionUserAndKey, ConnectionKey
from libcloud.common.base import JsonResponse
from libcloud.compute.types import Provider, NodeState, InvalidCredsError
from libcloud.compute.base import NodeDriver, Node
from libcloud.compute.base import NodeImage, NodeSize, NodeLocation, KeyPair
__all__ = [
'DigitalOceanNodeDriver',
'DigitalOcean_v1_NodeDriver',
'DigitalOcean_v2_NodeDriver'
]
class DigitalOceanNodeDriver(NodeDriver):
"""
DigitalOcean NodeDriver defaulting to using APIv2.
:keyword api_version: Specifies the API version to use. ``v1`` and
``v2`` are the only valid options. Defaults to
using ``v2`` (optional)
:type api_version: ``str``
"""
type = Provider.DIGITAL_OCEAN
name = 'DigitalOcean'
website = 'https://www.digitalocean.com'
def __new__(cls, key, secret=None, api_version='v2', **kwargs):
if cls is DigitalOceanNodeDriver:
if api_version == 'v1':
cls = DigitalOcean_v1_NodeDriver
elif api_version == 'v2':
cls = DigitalOcean_v2_NodeDriver
else:
raise NotImplementedError('Unsupported API version: %s' %
(api_version))
return super(DigitalOceanNodeDriver, cls).__new__(cls, **kwargs)
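# Illustrative usage (not part of this module): the api_version keyword passed
# at construction time selects the concrete subclass via __new__ above, e.g.
#
#   d1 = DigitalOceanNodeDriver('client_id', 'api_key', api_version='v1')
#   d2 = DigitalOceanNodeDriver('access_token', api_version='v2')
#
# d1 is a DigitalOcean_v1_NodeDriver and d2 a DigitalOcean_v2_NodeDriver.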
class DigitalOcean_v1_Response(JsonResponse):
def parse_error(self):
if self.status == httplib.FOUND and '/api/error' in self.body:
# Hacky, but DigitalOcean error responses are awful
raise InvalidCredsError(self.body)
elif self.status == httplib.UNAUTHORIZED:
body = self.parse_body()
raise InvalidCredsError(body['message'])
else:
body = self.parse_body()
if 'error_message' in body:
error = '%s (code: %s)' % (body['error_message'], self.status)
else:
error = body
return error
class DigitalOcean_v2_Response(JsonResponse):
valid_response_codes = [httplib.OK, httplib.ACCEPTED, httplib.CREATED,
httplib.NO_CONTENT]
def parse_error(self):
if self.status == httplib.UNAUTHORIZED:
body = self.parse_body()
raise InvalidCredsError(body['message'])
else:
body = self.parse_body()
if 'message' in body:
error = '%s (code: %s)' % (body['message'], self.status)
else:
error = body
return error
def success(self):
return self.status in self.valid_response_codes
class SSHKey(object):
def __init__(self, id, name, pub_key):
self.id = id
self.name = name
self.pub_key = pub_key
def __repr__(self):
return (('<SSHKey: id=%s, name=%s, pub_key=%s>') %
(self.id, self.name, self.pub_key))
class DigitalOcean_v1_Connection(ConnectionUserAndKey):
"""
Connection class for the DigitalOcean (v1) driver.
"""
host = 'api.digitalocean.com'
responseCls = DigitalOcean_v1_Response
def add_default_params(self, params):
"""
Add parameters that are necessary for every request
This method adds ``client_id`` and ``api_key`` to
the request.
"""
params['client_id'] = self.user_id
params['api_key'] = self.key
return params
class DigitalOcean_v2_Connection(ConnectionKey):
"""
Connection class for the DigitalOcean (v2) driver.
"""
host = 'api.digitalocean.com'
responseCls = DigitalOcean_v2_Response
def add_default_headers(self, headers):
"""
Add headers that are necessary for every request
This method adds ``token`` to the request.
"""
headers['Authorization'] = 'Bearer %s' % (self.key)
headers['Content-Type'] = 'application/json'
return headers
class DigitalOcean_v1_NodeDriver(DigitalOceanNodeDriver):
"""
DigitalOcean NodeDriver using v1 of the API.
"""
connectionCls = DigitalOcean_v1_Connection
NODE_STATE_MAP = {'new': NodeState.PENDING,
'off': NodeState.REBOOTING,
'active': NodeState.RUNNING}
def list_nodes(self):
data = self.connection.request('/v1/droplets').object['droplets']
return list(map(self._to_node, data))
def list_locations(self):
data = self.connection.request('/v1/regions').object['regions']
return list(map(self._to_location, data))
def list_images(self):
data = self.connection.request('/v1/images').object['images']
return list(map(self._to_image, data))
def list_sizes(self):
data = self.connection.request('/v1/sizes').object['sizes']
return list(map(self._to_size, data))
def create_node(self, name, size, image, location, ex_ssh_key_ids=None):
"""
Create a node.
:keyword ex_ssh_key_ids: A list of ssh key ids which will be added
to the server. (optional)
:type ex_ssh_key_ids: ``list`` of ``str``
:return: The newly created node.
:rtype: :class:`Node`
"""
params = {'name': name, 'size_id': size.id, 'image_id': image.id,
'region_id': location.id}
if ex_ssh_key_ids:
params['ssh_key_ids'] = ','.join(ex_ssh_key_ids)
data = self.connection.request('/v1/droplets/new', params=params)
# TODO: Handle this in the response class
status = data.object.get('status', 'OK')
if status == 'ERROR':
message = data.object.get('message', None)
error_message = data.object.get('error_message', message)
raise ValueError('Failed to create node: %s' % (error_message))
return self._to_node(data=data.object['droplet'])
def reboot_node(self, node):
res = self.connection.request('/v1/droplets/%s/reboot/' % (node.id))
return res.status == httplib.OK
def destroy_node(self, node):
params = {'scrub_data': '1'}
res = self.connection.request('/v1/droplets/%s/destroy/' % (node.id),
params=params)
return res.status == httplib.OK
def ex_rename_node(self, node, name):
params = {'name': name}
res = self.connection.request('/v1/droplets/%s/rename/' % (node.id),
params=params)
return res.status == httplib.OK
def ex_list_ssh_keys(self):
"""
List all the available SSH keys.
:return: Available SSH keys.
:rtype: ``list`` of :class:`SSHKey`
"""
data = self.connection.request('/v1/ssh_keys').object['ssh_keys']
return list(map(self._to_ssh_key, data))
def ex_create_ssh_key(self, name, ssh_key_pub):
"""
Create a new SSH key.
:param name: Key name (required)
:type name: ``str``
:param ssh_key_pub: Valid public key string (required)
:type ssh_key_pub: ``str``
"""
params = {'name': name, 'ssh_pub_key': ssh_key_pub}
data = self.connection.request('/v1/ssh_keys/new/', method='GET',
params=params).object
assert 'ssh_key' in data
return self._to_ssh_key(data=data['ssh_key'])
def ex_destroy_ssh_key(self, key_id):
"""
Delete an existing SSH key.
:param key_id: SSH key id (required)
:type key_id: ``str``
"""
res = self.connection.request('/v1/ssh_keys/%s/destroy/' % (key_id))
return res.status == httplib.OK
def _to_node(self, data):
extra_keys = ['backups_active', 'region_id', 'image_id', 'size_id']
if 'status' in data:
state = self.NODE_STATE_MAP.get(data['status'], NodeState.UNKNOWN)
else:
state = NodeState.UNKNOWN
if 'ip_address' in data and data['ip_address'] is not None:
public_ips = [data['ip_address']]
else:
public_ips = []
extra = {}
for key in extra_keys:
if key in data:
extra[key] = data[key]
node = Node(id=data['id'], name=data['name'], state=state,
public_ips=public_ips, private_ips=None, extra=extra,
driver=self)
return node
def _to_image(self, data):
extra = {'distribution': data['distribution']}
return NodeImage(id=data['id'], name=data['name'], extra=extra,
driver=self)
def _to_location(self, data):
return NodeLocation(id=data['id'], name=data['name'], country=None,
driver=self)
def _to_size(self, data):
ram = data['name'].lower()
if 'mb' in ram:
ram = int(ram.replace('mb', ''))
elif 'gb' in ram:
ram = int(ram.replace('gb', '')) * 1024
return NodeSize(id=data['id'], name=data['name'], ram=ram, disk=0,
bandwidth=0, price=0, driver=self)
def _to_ssh_key(self, data):
return SSHKey(id=data['id'], name=data['name'],
pub_key=data.get('ssh_pub_key', None))
class DigitalOcean_v2_NodeDriver(DigitalOceanNodeDriver):
"""
DigitalOcean NodeDriver using v2 of the API.
"""
connectionCls = DigitalOcean_v2_Connection
NODE_STATE_MAP = {'new': NodeState.PENDING,
'off': NodeState.STOPPED,
'active': NodeState.RUNNING,
'archive': NodeState.TERMINATED}
def list_nodes(self):
data = self._paginated_request('/v2/droplets', 'droplets')
return list(map(self._to_node, data))
def list_locations(self):
data = self.connection.request('/v2/regions').object['regions']
return list(map(self._to_location, data))
def list_images(self):
data = self._paginated_request('/v2/images', 'images')
return list(map(self._to_image, data))
def list_sizes(self):
data = self.connection.request('/v2/sizes').object['sizes']
return list(map(self._to_size, data))
def create_node(self, name, size, image, location, ex_ssh_key_ids=None):
"""
Create a node.
:keyword ex_ssh_key_ids: A list of ssh key ids which will be added
to the server. (optional)
:type ex_ssh_key_ids: ``list`` of ``str``
:return: The newly created node.
:rtype: :class:`Node`
"""
params = {'name': name, 'size': size.name, 'image': image.id,
'region': location.id}
if ex_ssh_key_ids:
params['ssh_key_ids'] = ','.join(ex_ssh_key_ids)
data = self.connection.request('/v2/droplets',
params=params, method='POST').object
# TODO: Handle this in the response class
status = data.get('status', 'OK')
if status == 'ERROR':
message = data.get('message', None)
error_message = data.get('error_message', message)
raise ValueError('Failed to create node: %s' % (error_message))
return self._to_node(data=data['droplet'])
def reboot_node(self, node):
params = {'type': 'reboot'}
res = self.connection.request('/v2/droplets/%s/actions' % (node.id),
params=params, method='POST')
return res.status == httplib.CREATED
def destroy_node(self, node):
res = self.connection.request('/v2/droplets/%s' % (node.id),
method='DELETE')
return res.status == httplib.NO_CONTENT
def get_image(self, image_id):
"""
Get an image based on an image_id
@inherits: :class:`NodeDriver.get_image`
:param image_id: Image identifier
:type image_id: ``int``
:return: A NodeImage object
:rtype: :class:`NodeImage`
"""
res = self.connection.request('/v2/images/%s' % (image_id))
data = res.object['image']
return self._to_image(data)
def create_image(self, node, name):
"""
Create an image from a Node.
@inherits: :class:`NodeDriver.create_image`
:param node: Node to use as base for image
:type node: :class:`Node`
:param name: Name for image
:type name: ``str``
:rtype: ``bool``
"""
params = {'type': 'snapshot', 'name': name}
res = self.connection.request('/v2/droplets/%s/actions' % (node.id),
params=params, method='POST')
return res.status == httplib.CREATED
def delete_image(self, image):
"""Delete an image for node.
@inherits: :class:`NodeDriver.delete_image`
:param image: the image to be deleted
:type image: :class:`NodeImage`
:rtype: ``bool``
"""
res = self.connection.request('/v2/images/%s' % (image.id),
method='DELETE')
return res.status == httplib.NO_CONTENT
def ex_rename_node(self, node, name):
params = {'type': 'rename', 'name': name}
res = self.connection.request('/v2/droplets/%s/actions' % (node.id),
params=params, method='POST')
return res.status == httplib.CREATED
def ex_shutdown_node(self, node):
params = {'type': 'shutdown'}
res = self.connection.request('/v2/droplets/%s/actions' % (node.id),
params=params, method='POST')
return res.status == httplib.CREATED
def ex_power_on_node(self, node):
params = {'type': 'power_on'}
res = self.connection.request('/v2/droplets/%s/actions' % (node.id),
params=params, method='POST')
return res.status == httplib.CREATED
def list_key_pairs(self):
"""
List all the available SSH keys.
:return: Available SSH keys.
:rtype: ``list`` of :class:`KeyPair`
"""
data = self.connection.request('/v2/account/keys').object['ssh_keys']
return list(map(self._to_key_pairs, data))
def create_key_pair(self, name, public_key):
"""
Create a new SSH key.
:param name: Key name (required)
:type name: ``str``
:param public_key: Valid public key string (required)
:type public_key: ``str``
"""
params = {'name': name, 'public_key': public_key}
data = self.connection.request('/v2/account/keys', method='POST',
params=params).object['ssh_key']
return self._to_key_pairs(data)
"""Base wayland abstractions
"""
# private variables used between classes in file
# pyright: reportPrivateUsage=false
from __future__ import annotations
import asyncio
import io
import logging
from mmap import mmap
import sys
import os
import socket
import secrets
from enum import Enum
from _posixshmem import shm_open, shm_unlink
from xml.etree import ElementTree # nosec
from abc import ABC, abstractmethod
from asyncio import Future
from collections import deque
from struct import Struct
from weakref import WeakSet
from typing import (
Any,
Callable,
ClassVar,
Deque,
Dict,
List,
NamedTuple,
NewType,
Optional,
Protocol as Proto,
Set,
Tuple,
Type,
TypeVar,
Union,
cast,
runtime_checkable,
)
__all__ = [
"Id",
"OpCode",
"Connection",
"Arg",
"ArgUInt",
"ArgInt",
"ArgFixed",
"ArgStr",
"ArgArray",
"ArgNewId",
"ArgObject",
"ArgFd",
"Interface",
"WRequest",
"WEvent",
"WEnum",
"Proxy",
"Protocol",
"Fd",
"FdFile",
"SharedMemory",
"PROXIES",
]
Id = NewType("Id", int)
OpCode = NewType("OpCode", int)
MSG_HEADER = Struct("IHH")
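# Message header layout: 32-bit object id, 16-bit opcode, 16-bit total size
# (header plus payload). With native byte order on a little-endian host this
# matches the Wayland wire format, where the second 32-bit word carries the
# opcode in its low 16 bits and the size in its high 16 bits.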
PROXIES: Dict[str, Type[Proxy]] = {}
P = TypeVar("P", bound="Proxy")
C = TypeVar("C", bound="Connection")
class Message(NamedTuple):
"""Wayland message"""
id: Id
opcode: OpCode
data: bytes
fds: List[Fd]
class Connection(ABC):
__slots__ = [
"_socket",
"_loop",
"_is_terminated",
"_is_server",
"_on_terminated",
"_write_buff",
"_write_fds",
"_write_queue",
"_write_done",
"_read_buff",
"_read_fds",
"_id_last",
"_id_free",
"_proxies",
"_futures",
"_debug",
]
_socket: Optional[socket.socket]
_loop: asyncio.AbstractEventLoop
_is_terminated: bool
_is_server: bool
_on_terminated: asyncio.Event
_debug: bool
_write_fds: List[Fd]
_write_buff: bytearray
_write_queue: Deque[Message]
_write_done: asyncio.Event
_read_buff: bytearray
_read_fds: Deque[Fd]
_id_last: Id
_id_free: List[Id]
_proxies: Dict[Id, "Proxy"]
def __init__(self, debug: Optional[bool] = None, is_server: bool = False) -> None:
self._socket = None
self._loop = asyncio.get_running_loop()
self._is_terminated = False
self._is_server = is_server
self._on_terminated = asyncio.Event()
self._debug = bool(os.getenv("WAYLAND_DEBUG")) if debug is None else debug
self._write_fds = []
self._write_buff = bytearray()
self._write_queue = deque()
self._write_done = asyncio.Event()
self._write_done.set()
self._read_fds = deque()
self._read_buff = bytearray()
self._id_last = Id(0)
self._id_free = []
self._proxies = {}
def create_proxy(self, proxy_type: Type[P]) -> P:
"""Create proxy by proxy type"""
if self._is_terminated:
raise RuntimeError("connection has already been terminated")
id = self._id_alloc()
proxy = proxy_type(id, self)
self._proxies[id] = proxy
return proxy
def create_proxy_by_interface(self, interface: Interface) -> Proxy:
"""Create new proxy object"""
if self._is_terminated:
raise RuntimeError("connection has already been terminated")
id = self._id_alloc()
proxy = Proxy(id, self, interface)
self._proxies[id] = proxy
return proxy
@property
def is_terminated(self) -> bool:
return self._is_terminated
async def on_terminated(self) -> None:
await self._on_terminated.wait()
def terminate(self, msg: Optional[Any] = None) -> None:
"""Terminate wayland connection"""
is_terminated, self._is_terminated = self._is_terminated, True
if is_terminated:
return
# disconnect
self._writer_disable()
self._reader_disable()
if self._socket is not None:
self._socket.close()
# detach all proxies
for proxy in self._proxies.values():
proxy._detach(msg if msg else "wayland connection terminated")
self._proxies.clear()
# notify termination
self._on_terminated.set()
@abstractmethod
async def _create_socket(self) -> socket.socket:
"""Create connected wayland socket"""
async def connect(self: C) -> C:
"""Start running wayland connection"""
if self._socket is not None:
raise RuntimeError("socket has already been set")
self._socket = await self._create_socket()
self._socket.setblocking(False)
self._writer_enable()
self._reader_enable()
return self
async def flush(self) -> None:
"""Wait for all pending events to be send"""
await self._write_done.wait()
async def __aenter__(self: C) -> C:
return await self.connect()
async def __aexit__(self, et: Any, *_: Any) -> None:
if et is None:
await self.on_terminated()
else:
self.terminate()
def _writer_enable(self) -> None:
if self._is_terminated:
raise RuntimeError("connection has been terminated")
self._write_done.clear()
if self._socket is not None:
self._loop.add_writer(self._socket, self._writer)
def _writer_disable(self) -> None:
if self._socket is not None:
self._loop.remove_writer(self._socket)
self._write_done.set()
def _writer(self) -> None:
"""Write pending messages"""
if self._is_terminated or self._socket is None:
self._writer_disable()
return
# pack queued messages
while self._write_queue:
message = self._write_queue.popleft()
self._write_buff.extend(
MSG_HEADER.pack(
message.id,
message.opcode,
MSG_HEADER.size + len(message.data),
)
)
self._write_buff.extend(message.data)
self._write_fds.extend(message.fds)
# send messages
offset = 0
try:
while offset < len(self._write_buff):
try:
fds: List[int] = []
for fd in self._write_fds:
if isinstance(fd, FdFile):
fds.append(fd.fileno())
else:
fds.append(fd)
offset += socket.send_fds(
self._socket, [self._write_buff[offset:]], fds
)
self._write_fds.clear()
except BlockingIOError:
break
except Exception:
error_msg = "failed to write to wayland socket"
logging.exception(error_msg)
self.terminate(error_msg)
finally:
self._write_buff = self._write_buff[offset:]
if not self._write_buff and not self._write_queue:
self._writer_disable()
def _reader_enable(self) -> None:
if self._is_terminated:
raise RuntimeError("connection has been terminated")
if self._socket is not None:
self._loop.add_reader(self._socket, self._reader)
def _reader_disable(self) -> None:
if self._socket is not None:
self._loop.remove_reader(self._socket)
def _reader(self) -> None:
"""Read incoming messages"""
if self._is_terminated or self._socket is None:
self._reader_disable()
return
# reading data
close = False
while True:
try:
data, fds, _, _ = socket.recv_fds(self._socket, 4096, 32)
if not data:
close = True
break
self._read_fds.extend(open(fd, "w+b") for fd in fds)
self._read_buff.extend(data)
except BlockingIOError:
break
except Exception:
error_msg = "failed to read from wayland socket"
logging.exception(error_msg)
self.terminate(error_msg)
return
while len(self._read_buff) >= MSG_HEADER.size:
# unpack message
id, opcode, size = MSG_HEADER.unpack(self._read_buff[: MSG_HEADER.size])
if len(self._read_buff) < size:
return
message = Message(
Id(id),
OpCode(opcode),
self._read_buff[MSG_HEADER.size : size],
[],
)
# consume data and reset size
self._read_buff = self._read_buff[size:]
# dispatch event
proxy = self._proxies.get(message.id)
if proxy is None:
logging.error("unhandled message: %s", message)
continue
args = proxy._interface.unpack(
self,
message.opcode,
message.data,
)
proxy._dispatch(opcode, args)
if close:
self.terminate("connection closed")
def _id_alloc(self) -> Id:
if self._id_free:
return self._id_free.pop()
else:
self._id_last = Id(self._id_last + 1)
return self._id_last
def _fd_recv(self) -> Optional[Fd]:
"""Pop next descriptor from file descriptor queue"""
if self._read_fds:
return self._read_fds.popleft()
return None
def _new_id_recv(self, id: Id, iface_name: str) -> Proxy:
"""Receive proxy with new_id command"""
proxy_type = PROXIES.get(iface_name)
if proxy_type is None:
raise ValueError(f"failed to resolve proxy type {iface_name}")
if self._is_server:
iface = proxy_type.interface.swap_events_and_requests()
proxy = Proxy(id, self, iface)
else:
proxy = proxy_type(id, self)
self._proxies[id] = proxy
proxy._is_attached = True
return proxy
def _delete_proxy(self, target: Union[Proxy, Id]) -> None:
"""Delete proxy"""
id = target._id if isinstance(target, Proxy) else target
proxy = self._proxies.pop(id, None)
if proxy is not None:
proxy._detach("deleted by server")
self._id_free.append(id)
def _message_submit(self, message: Message) -> None:
"""Submit message for writing"""
if message.id not in self._proxies:
raise RuntimeError("object has already been deleted")
self._write_queue.append(message)
self._writer_enable()
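# Note on the write path (descriptive summary of the Connection methods above):
# _message_submit() queues a Message and arms the socket writer through
# _writer_enable(); _writer() then drains the queue, prefixing each payload
# with MSG_HEADER(id, opcode, total size) and flushing the buffer together
# with any pending file descriptors via socket.send_fds(). On BlockingIOError
# the remainder stays buffered until the event loop reports the socket
# writable again.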
class Arg(ABC):
type_name: ClassVar[str]
name: str
def __init__(self, name: str):
self.name = name
@abstractmethod
def pack(self, write: io.BytesIO, value: Any) -> None:
pass
@abstractmethod
def unpack(
self,
read: io.BytesIO,
connection: Connection,
hint: Optional[Any] = None,
) -> Any:
pass
def __str__(self) -> str:
return f'{self.__class__.__name__}("{self.name}")'
def __repr__(self) -> str:
return str(self)
class ArgUInt(Arg):
type_name: ClassVar[str] = "int"
struct: ClassVar[Struct] = Struct("I")
enum: Optional[str]
def __init__(self, name: str, enum: Optional[str] = None):
super().__init__(name)
self.enum = enum
def pack(self, write: io.BytesIO, value: Any) -> None:
if isinstance(value, Enum):
write.write(self.struct.pack(value.value))
elif isinstance(value, int) and value >= 0:
write.write(self.struct.pack(value))
else:
raise TypeError(f"[{self.name}] unsigend integer expected")
def unpack(
self,
read: io.BytesIO,
connection: Connection,
hint: Optional[Any] = None,
) -> Any:
return self.struct.unpack(read.read(self.struct.size))[0]
def __str__(self) -> str:
if self.enum:
return f'ArgUInt("{self.name}", "{self.enum}")'
return f'ArgUInt("{self.name}")'
class ArgInt(Arg):
type_name: ClassVar[str] = "int"
struct: ClassVar[Struct] = Struct("i")
def pack(self, write: io.BytesIO, value: Any) -> None:
if not isinstance(value, int):
raise TypeError(f"[{self.name}] signed integer expected")
write.write(self.struct.pack(value))
def unpack(
self,
read: io.BytesIO,
connection: Connection,
hint: Optional[Any] = None,
) -> Any:
return self.struct.unpack(read.read(self.struct.size))[0]
class ArgFixed(Arg):
"""Signed 24.8 floating point value"""
type_name: ClassVar[str] = "float"
struct: ClassVar[Struct] = Struct("i")
def pack(self, write: io.BytesIO, value: Any) -> None:
if not isinstance(value, (int, float)):
raise TypeError(f"[{self.name}] float expected")
value = int(value * 256)  # 24.8 fixed point; also correct for negative values
write.write(self.struct.pack(value))
def unpack(
self,
read: io.BytesIO,
connection: Connection,
hint: Optional[Any] = None,
) -> Any:
value = self.struct.unpack(read.read(self.struct.size))[0]
return float(value >> 8) + ((value & 0xFF) / 256.0)
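# Illustrative sketch (added for clarity, not part of the original module):
# round-trips a value through the 24.8 fixed-point encoding used by ArgFixed.
def _fixed_point_example() -> None:
    arg = ArgFixed("example")
    buf = io.BytesIO()
    arg.pack(buf, 1.25)  # encoded as 320 (== 1.25 * 256)
    buf.seek(0)
    assert arg.unpack(buf, None) == 1.25  # type: ignore[arg-type]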
class ArgStr(Arg):
"""String argument
String is zero-terminated and 32-bit aligned
"""
type_name: ClassVar[str] = "str"
struct: ClassVar[Struct] = Struct("I")
def pack(self, write: io.BytesIO, value: Any) -> None:
data: bytes
if isinstance(value, str):
data = value.encode()
elif isinstance(value, bytes):
data = value
else:
raise TypeError(f"[{self.name}] string or bytes expected")
size = len(data) + 1 # null terminated length
write.write(self.struct.pack(size))
write.write(data)
# null terminated and padded to 32-bit
padding = (-size % 4) + 1
write.write(b"\x00" * padding)
def unpack(
self,
read: io.BytesIO,
connection: Connection,
hint: Optional[Any] = None,
) -> Any:
size = self.struct.unpack(read.read(self.struct.size))[0]
value = read.read(size - 1).decode()
read.read((-size % 4) + 1)
return value
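# Illustrative sketch (added for clarity, not part of the original module):
# shows the wire layout produced by ArgStr - a 32-bit length that includes
# the NUL terminator, followed by the bytes padded to a 32-bit boundary.
def _string_wire_format_example() -> None:
    arg = ArgStr("example")
    buf = io.BytesIO()
    arg.pack(buf, "hi")
    assert len(buf.getvalue()) == 8  # 4-byte length + "hi" + NUL + 1 pad byte
    buf.seek(0)
    assert arg.unpack(buf, None) == "hi"  # type: ignore[arg-type]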
class ArgArray(Arg):
"""Bytes argument
Bytes are 32-bit aligned
"""
type_name: ClassVar[str] = "bytes"
struct: ClassVar[Struct] = Struct("I")
def pack(self, write: io.BytesIO, value: Any) -> None:
data: bytes
if isinstance(value, str):
data = value.encode()
elif isinstance(value, bytes):
data = value
else:
raise TypeError(f"[{self.name}] string or bytes expected")
size = len(data)
write.write(self.struct.pack(size))
write.write(data)
write.write(b"\x00" * (-size % 4))
def unpack(
self,
read: io.BytesIO,
connection: Connection,
hint: Optional[Any] = None,
| |
"""
saltfactories.utils.processes.salts
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Salt's related daemon classes and CLI processes implementations
"""
import atexit
import json
import logging
import os
import pprint
import re
import stat
import subprocess
import sys
import tempfile
import textwrap
import time
import weakref
from collections import namedtuple
from operator import itemgetter
import psutil # pylint: disable=3rd-party-module-not-gated
import pytest
import salt.client
import salt.utils.path
from saltfactories.exceptions import FactoryTimeout as ProcessTimeout
from saltfactories.utils.processes import terminate_process
SALT_KEY_LOG_LEVEL_SUPPORTED = False
log = logging.getLogger(__name__)
class Popen(subprocess.Popen):
def __init__(self, *args, **kwargs):
for key in ("stdout", "stderr"):
if key in kwargs:
raise RuntimeError(
"{}.Popen() does not accept {} as a valid keyword argument".format(
__name__, key
)
)
stdout = tempfile.SpooledTemporaryFile(512000)
kwargs["stdout"] = stdout
stderr = tempfile.SpooledTemporaryFile(512000)
kwargs["stderr"] = stderr
super().__init__(*args, **kwargs)
self.__stdout = stdout
self.__stderr = stderr
weakref.finalize(self, stdout.close)
weakref.finalize(self, stderr.close)
def communicate(self, input=None): # pylint: disable=arguments-differ
super().communicate(input)
stdout = stderr = None
if self.__stdout:
self.__stdout.flush()
self.__stdout.seek(0)
stdout = self.__stdout.read()
# We want str type on Py3 and Unicode type on Py2
# pylint: disable=undefined-variable
stdout = stdout.decode(__salt_system_encoding__)
# pylint: enable=undefined-variable
if self.__stderr:
self.__stderr.flush()
self.__stderr.seek(0)
stderr = self.__stderr.read()
# We want str type on Py3 and Unicode type on Py2
# pylint: disable=undefined-variable
stderr = stderr.decode(__salt_system_encoding__)
# pylint: enable=undefined-variable
return stdout, stderr
class ProcessResult(
namedtuple("ProcessResult", ("exitcode", "stdout", "stderr", "cmdline"))
):
"""
This class serves the purpose of having a common result class which will hold the
resulting data from a subprocess command.
"""
__slots__ = ()
def __new__(cls, exitcode, stdout, stderr, cmdline=None):
if not isinstance(exitcode, int):
raise ValueError(
"'exitcode' needs to be an integer, not '{}'".format(type(exitcode))
)
return super().__new__(cls, exitcode, stdout, stderr, cmdline=cmdline)
# These are copied from the namedtuple verbose output in order to quiet down PyLint
exitcode = property(itemgetter(0), doc="ProcessResult exit code property")
stdout = property(itemgetter(1), doc="ProcessResult stdout property")
stderr = property(itemgetter(2), doc="ProcessResult stderr property")
cmdline = property(itemgetter(3), doc="ProcessResult cmdline property")
def __str__(self):
message = self.__class__.__name__
if self.cmdline:
message += "\n Command Line: {}".format(self.cmdline)
if self.exitcode is not None:
message += "\n Exitcode: {}".format(self.exitcode)
if self.stdout or self.stderr:
message += "\n Process Output:"
if self.stdout:
message += "\n >>>>> STDOUT >>>>>\n{}\n <<<<< STDOUT <<<<<".format(
self.stdout
)
if self.stderr:
message += "\n >>>>> STDERR >>>>>\n{}\n <<<<< STDERR <<<<<".format(
self.stderr
)
return message + "\n"
class ShellResult(
namedtuple("ShellResult", ("exitcode", "stdout", "stderr", "json", "cmdline"))
):
"""
This class serves the purpose of having a common result class which will hold the
resulting data from a subprocess command.
"""
__slots__ = ()
def __new__(cls, exitcode, stdout, stderr, json=None, cmdline=None):
if not isinstance(exitcode, int):
raise ValueError(
"'exitcode' needs to be an integer, not '{}'".format(type(exitcode))
)
return super().__new__(
cls, exitcode, stdout, stderr, json=json, cmdline=cmdline
)
# These are copied from the namedtuple verbose output in order to quiet down PyLint
exitcode = property(itemgetter(0), doc="ShellResult exit code property")
stdout = property(itemgetter(1), doc="ShellResult stdout property")
stderr = property(itemgetter(2), doc="ShellResult stderr property")
json = property(
itemgetter(3), doc="ShellResult stdout JSON decoded, when parseable."
)
cmdline = property(itemgetter(4), doc="ShellResult cmdline property")
def __str__(self):
message = self.__class__.__name__
if self.cmdline:
message += "\n Command Line: {}".format(self.cmdline)
if self.exitcode is not None:
message += "\n Exitcode: {}".format(self.exitcode)
if self.stdout or self.stderr:
message += "\n Process Output:"
if self.stdout:
message += "\n >>>>> STDOUT >>>>>\n{}\n <<<<< STDOUT <<<<<".format(
self.stdout
)
if self.stderr:
message += "\n >>>>> STDERR >>>>>\n{}\n <<<<< STDERR <<<<<".format(
self.stderr
)
if self.json:
message += "\n JSON Object:\n"
message += "".join(
" {}".format(line) for line in pprint.pformat(self.json)
)
return message + "\n"
def __eq__(self, other):
"""
Allow comparison against the parsed JSON or the output
"""
if self.json:
return self.json == other
return self.stdout == other
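# Illustrative sketch (added for clarity, not part of the original module):
# demonstrates how ShellResult compares against its parsed JSON when present,
# falling back to a plain stdout comparison otherwise.
def _shell_result_example():
    with_json = ShellResult(0, '{"ok": true}', "", json={"ok": True})
    assert with_json == {"ok": True}
    without_json = ShellResult(0, "plain output", "")
    assert without_json == "plain output"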
class FactoryProcess:
"""
Base class for subprocesses
"""
def __init__(
self,
cli_script_name,
slow_stop=True,
environ=None,
cwd=None,
base_script_args=None,
):
"""
Args:
cli_script_name(str):
This is the string containing the name of the binary to call on the subprocess, either the
full path to it, or the basename. In case of the basename, the directory containing the
basename must be in your ``$PATH`` variable.
slow_stop(bool):
Whether to terminate the processes by sending a :py:attr:`SIGTERM` signal or by calling
:py:meth:`~subprocess.Popen.terminate` on the sub-process.
When code coverage is enabled, one will want `slow_stop` set to `True` so that coverage data
can be written down to disk.
environ(dict):
A dictionary of `key`, `value` pairs to add to the environment.
cwd (str):
The path to the current working directory
base_script_args(list or tuple):
A list or tuple of the base arguments to use when building the command line to
launch the process
"""
self.cli_script_name = cli_script_name
self.slow_stop = slow_stop
self.environ = environ or os.environ.copy()
self.cwd = cwd or os.getcwd()
self._terminal = None
self._terminal_result = None
self._terminal_timeout = None
self._children = []
self._base_script_args = base_script_args
def get_display_name(self):
"""
Returns a name to show when process stats reports are enabled
"""
return self.cli_script_name
def get_log_prefix(self):
"""
Returns the log prefix that shall be used for a salt daemon forwarding log records.
It is also used by :py:func:`start_daemon` when starting the daemon subprocess.
"""
return "[{}] ".format(self.cli_script_name)
def get_script_path(self):
"""
Returns the path to the script to run
"""
if os.path.isabs(self.cli_script_name):
script_path = self.cli_script_name
else:
script_path = salt.utils.path.which(self.cli_script_name)
if not os.path.exists(script_path):
pytest.fail("The CLI script {!r} does not exist".format(script_path))
return script_path
def get_base_script_args(self):
"""
Returns any additional arguments to pass to the CLI script
"""
if self._base_script_args:
return list(self._base_script_args)
return []
def get_script_args(self): # pylint: disable=no-self-use
"""
Returns any additional arguments to pass to the CLI script
"""
return []
def build_cmdline(self, *args, **kwargs):
return (
[self.get_script_path()]
+ self.get_base_script_args()
+ self.get_script_args()
+ list(args)
)
def init_terminal(self, cmdline, **kwargs):
"""
Instantiate a terminal with the passed cmdline and kwargs and return it.
Additionally, it sets a reference to it in self._terminal and also collects
an initial listing of child processes which will be used when terminating the
terminal
"""
self._terminal = Popen(cmdline, **kwargs)
# A little sleep to allow the subprocess to start
time.sleep(0.125)
try:
for child in psutil.Process(self._terminal.pid).children(recursive=True):
if child not in self._children:
self._children.append(child)
except psutil.NoSuchProcess:
# The terminal process is gone
pass
atexit.register(self.terminate)
return self._terminal
def terminate(self):
"""
Terminate the started daemon
"""
if self._terminal is None:
return self._terminal_result
log.info("%sStopping %s", self.get_log_prefix(), self.__class__.__name__)
# Collect any child processes information before terminating the process
try:
for child in psutil.Process(self._terminal.pid).children(recursive=True):
if child not in self._children:
self._children.append(child)
except psutil.NoSuchProcess:
# The terminal process is gone
pass
# poll the terminal before trying to terminate it, running or not, so that
# the right returncode is set on the popen object
self._terminal.poll()
# Lets log and kill any child processes which salt left behind
terminate_process(
pid=self._terminal.pid,
kill_children=True,
children=self._children,
slow_stop=self.slow_stop,
)
stdout, stderr = self._terminal.communicate()
try:
log_message = "{}Terminated {}.".format(
self.get_log_prefix(), self.__class__.__name__
)
if stdout or stderr:
log_message += " Process Output:"
if stdout:
log_message += "\n>>>>> STDOUT >>>>>\n{}\n<<<<< STDOUT <<<<<".format(
stdout.strip()
)
if stderr:
log_message += "\n>>>>> STDERR >>>>>\n{}\n<<<<< STDERR <<<<<".format(
stderr.strip()
)
log_message += "\n"
log.info(log_message)
self._terminal_result = ProcessResult(
self._terminal.returncode, stdout, stderr, cmdline=self._terminal.args
)
return self._terminal_result
finally:
self._terminal = None
self._children = []
@property
def pid(self):
terminal = getattr(self, "_terminal", None)
if not terminal:
return
return terminal.pid
def __repr__(self):
return "<{} display_name='{}'>".format(
self.__class__.__name__, self.get_display_name()
)
class FactoryScriptBase(FactoryProcess):
"""
Base class for CLI scripts
"""
def __init__(self, *args, **kwargs):
"""
Base class for non-daemonic CLI processes
Check base class(es) for additional supported parameters
Args:
default_timeout(int):
The maximum number of seconds that a script should run
"""
default_timeout = kwargs.pop("default_timeout", None)
super().__init__(*args, **kwargs)
if default_timeout is None:
if not sys.platform.startswith(("win", "darwin")):
default_timeout = 30
else:
# Windows and macOS are just slower.
default_timeout = 120
self.default_timeout = default_timeout
self._terminal_timeout_set_explicitly = False
def run(self, *args, **kwargs):
"""
Run the given command synchronously
"""
start_time = time.time()
timeout = kwargs.pop("_timeout", None)
# Build the cmdline to pass to the terminal
# We set the _terminal_timeout attribute while calling build_cmdline in case it needs
# access to that information to build the command line
self._terminal_timeout = timeout or self.default_timeout
self._terminal_timeout_set_explicitly = timeout is not None
cmdline = self.build_cmdline(*args, **kwargs)
timeout_expire = time.time() + self._terminal_timeout
log.info(
"%sRunning %r in CWD: %s ...", self.get_log_prefix(), cmdline, self.cwd
)
terminal = self.init_terminal(cmdline, | |
import os
import json
import pathlib
import bpy
import pprint
from . import Global
from . import NodeArrange
from . import Versions
from . import MatDct
from . import Util
from . import BumpToNormal
# region top-level methods
def srgb_to_linear_rgb(srgb):
if srgb < 0:
return 0
elif srgb < 0.04045:
return srgb / 12.92
else:
return ((srgb + 0.055) / 1.055) ** 2.4
def hex_to_col(hex, normalize=True, precision=6):
col = []
it = iter(hex)
for char in it:
col.append(int(char + it.__next__(), 16))
if normalize:
col = map(lambda x: x / 255, col)
col = map(lambda x: round(x, precision), col)
return list(srgb_to_linear_rgb(c) for c in col)
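# Illustrative sketch (added for clarity, not part of the original add-on):
# converts a hex colour string into the linear-RGB list consumed by the
# shader setup below.
def _hex_to_col_example():
    red, green, blue = hex_to_col("ff8000")
    assert abs(red - 1.0) < 1e-9 and blue == 0.0
    assert 0.21 < green < 0.22  # 0x80 in sRGB is roughly 0.216 in linear space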
def getGroupNode(key):
for slot in Global.getBody().material_slots:
ROOT = bpy.data.materials[slot.name].node_tree.nodes
for n in ROOT:
if n.name.startswith("Group"):
if n.node_tree.name.startswith(key):
return n
def getGroupNodeTree(key):
rtn = getGroupNode(key)
if rtn is not None:
return rtn.node_tree
def default_material():
getGroupNodeTree("EyeDry")
getGroupNodeTree("EyeWet")
getGroupNodeTree("IrayUberSkin")
def forbitMinus():
pbsdf = "Principled BSDF"
for dobj in Util.myccobjs():
if dobj.type != "MESH" or dobj == Global.getBody():
continue
for slot in dobj.material_slots:
mat = bpy.data.materials.get(slot.name)
if mat is None or mat.node_tree is None:
continue
mat_nodes = mat.node_tree.nodes
for mat_node in mat_nodes:
if pbsdf not in mat_node.name:
continue
for node_input in mat_nodes[pbsdf].inputs:
if len(node_input.links) != 0:
continue
if type(node_input.default_value) is float:
if node_input.default_value < 0:
node_input.default_value = 0.0
if (
node_input.name == "Metallic"
and node_input.default_value == 1.0
):
node_input.default_value = 0.0
if (
node_input.name == "Specular"
and node_input.default_value == 2.0
):
node_input.default_value = 0.2
elif hasattr(node_input.default_value, "__len__"):
    # vector/color sockets: clamp any negative component to zero
    for index, component in enumerate(node_input.default_value):
        if isinstance(component, float) and component < 0:
            node_input.default_value[index] = 0.0
def adjust_material(kind, inc_value, isEye):
skincombi = [
["Base Color.Hue", 11, 0],
["Base Color.Saturation", 11, 1],
["Base Color.Value", 11, 2],
["Base Color.Bright", 8, 1],
["Base Color.Contrast", 8, 2],
["Specular", 9, 1],
["Roughness", 10, 1],
["Roughness.Contrast", 9, 2],
["Specular.Contrast", 10, 2],
["Subsurface.Scale", 14, 1],
["Subsurface.Scale", 13, 1],
["Normal.Strength", 5, 0],
["Bump.Strength", 6, 0],
["Bump.Distance", 6, 1],
["Displacement.Height", 4, 2], # 14
["Subsurface.Scale", 2, 2],
["Subsurface.Scale", 2, 1],
]
eyecombi = [
["Base Color.Bright", 1, 1],
["Base Color.Contrast", 1, 2],
["Normal.Strength", 3, 0],
["Bump.Strength", 4, 0],
["Bump.Distance", 4, 1],
["Base Color.Hue", 6, 0],
["Base Color.Saturation", 6, 1],
["Base Color.Value", 6, 2],
]
flg_skin = False
if isEye:
tree = getGroupNodeTree("EyeDry")
tbls = eyecombi
else:
tree = getGroupNodeTree("IrayUberSkin")
tbls = skincombi
flg_skin = True
if tree is None:
return
nds = tree.nodes
for tidx, tbl in enumerate(tbls):
if tbl[0] == kind:
t1 = getNidx(int(tbl[1]), nds)
dv = nds[t1].inputs[tbl[2]].default_value
cg = 1.0
if flg_skin:
if tidx > 8 and tidx < 16:
cg = cg * Global.getSize() * 0.01
if tidx == 9:
cg = cg * 3
elif tidx == 10:
cg = cg * 0.5
elif tidx == 16:
cg = cg * 0.2
# elif tidx==14:
# cg = cg * 12
# elif tidx>=11 or tidx<=13:
# cg = cg * 8
cg = cg * inc_value
if tidx == 15:
dv[0] += cg * 10
dv[1] += cg * 2
dv[2] += cg
else:
dv += cg
nds[t1].inputs[tbl[2]].default_value = dv
def getNidx(idx, nodes):
for nidx, n in enumerate(nodes):
if n.name.endswith("-" + str(idx)):
return nidx
return idx
# endregion top-level methods
class DtbShaders:
def __init__(self, dtu):
self.material_list = dtu.get_materials_list()
self.mat_data_dict = {}
self.mat_property_dict = {}
self.node_groups = []
self.is_Diffuse = False
self.is_Refract = False
self.is_Alpha = False
# TODO: Find a better way to create the dict
def make_dct(self):
mat_info_list = self.material_list
for mat_info in mat_info_list:
if mat_info["Asset Name"] == mat_info["Asset Label"]:
if mat_info["Asset Name"] in self.mat_data_dict.keys():
self.mat_data_dict[mat_info["Asset Name"]][
mat_info["Material Name"]
] = mat_info
else:
self.mat_data_dict[mat_info["Asset Name"]] = {}
self.mat_data_dict[mat_info["Asset Name"]][
mat_info["Material Name"]
] = mat_info
elif mat_info["Asset Name"] != mat_info["Asset Label"]:
if mat_info["Asset Name"] not in self.mat_data_dict.keys():
self.mat_data_dict[mat_info["Asset Name"]] = {}
self.mat_data_dict[mat_info["Asset Name"]][
mat_info["Material Name"]
] = mat_info
if mat_info["Asset Name"] in self.mat_data_dict.keys():
if (
mat_info["Material Name"]
not in self.mat_data_dict[mat_info["Asset Name"]]
):
self.mat_data_dict[mat_info["Asset Name"]][
mat_info["Material Name"]
] = mat_info
if mat_info["Asset Label"] in self.mat_data_dict.keys():
self.mat_data_dict[mat_info["Asset Label"]][
mat_info["Material Name"]
] = mat_info
if mat_info["Asset Label"] not in self.mat_data_dict.keys():
self.mat_data_dict[mat_info["Asset Label"]] = {}
self.mat_data_dict[mat_info["Asset Label"]][
mat_info["Material Name"]
] = mat_info
def load_shader_nodes(self):
file_path = os.path.join("dependencies", "link_library.blend")
file_dir = os.path.dirname(os.path.realpath(__file__))
file_path = os.path.join(file_dir, file_path)
# load node_groups from link_library.blend file
with bpy.data.libraries.load(file_path) as (data_from, data_to):
if len(bpy.data.node_groups) != len(data_from.node_groups):
self.node_groups = data_from.node_groups
data_to.node_groups = data_from.node_groups
def get_mat_properties(self, mat_data):
self.mat_property_dict = {}
# To deal with material names sometimes being undescriptive.
for mat_property in mat_data["Properties"]:
self.mat_property_dict[mat_property["Name"]] = mat_property
self.mat_property_dict[mat_property["Label"]] = mat_property
return self.mat_property_dict
def get_mat_type(self, material):
material_name = material["Material Name"]
material_type = material["Material Type"]
object_type = material["Value"]
if material_name in [
"Cornea",
"EyeMoisture",
"EyeMoisture.00",
"EylsMoisture",
"Tear",
]:
return "EyeWet"
elif material_name in ["Pupils", "Trises", "Sclera"]:
return "EyeDry"
elif "Eyelashes" in object_type:
return "Eyelashes"
elif material_type == "Iray Uber":
if object_type == "Actor/Character":
return "IrayUberSkin"
else:
return "IrayUber"
elif material_type == "AoA_Subsurface":
return "AoA_Subsurface"
elif material_type == "omUberSurface":
return "omUberSurface"
elif material_type == "PBRSkin":
return "IrayUberSkin"
elif ("Hair" in material_type) or ("Hair" in object_type):
return "IrayUber"
elif material_type == "DAZ Studio Default":
return "DAZ Studio Default"
else:
return "DefaultMaterial"
def optimize_materials(self, mat_slot):
mat = mat_slot.material
if "Genesis" in mat["Asset Name"]:
mat_name = mat["Asset Label"] + "_" + mat["Material Name"]
else:
mat_name = mat["Asset Name"] + "_" + mat["Material Name"]
if mat_name not in bpy.data.materials:
if mat["Asset Name"] != mat["Asset Label"]:
mat.name = mat["Asset Name"] + "_" + mat["Material Name"]
return
else:
return
material = bpy.data.materials[mat_name]
if mat_name != mat.name:
if mat["Asset Name"] == material["Asset Name"]:
mat_slot.material = material
bpy.data.materials.remove(mat)
return True
# TODO: Check for all Color Maps
def check_map_type(self, property_key):
if "Diffuse" in property_key:
self.is_Diffuse = True
else:
self.is_Diffuse = False
if "Opacity" in property_key:
self.is_Alpha = True
else:
self.is_Alpha = False
def check_refract(self):
if "Refraction Weight" in self.mat_property_dict.keys():
if self.mat_property_dict["Refraction Weight"]["Value"] > 0:
self.is_Refract = True
def set_eevee_alpha(self, mat):
if self.is_Alpha:
Versions.eevee_alpha(mat, "HASHED", 0)
else:
mat_name = mat["Material Name"]
if mat_name in [
"Cornea",
"EyeMoisture",
"EylsMoisture",
"Tear",
"Eyelashes",
"Glass",
]:
Versions.eevee_alpha(mat, "HASHED", 0)
def set_eevee_refract(self, mat):
if self.is_Refract:
mat.use_screen_refraction = True
mat.refraction_depth = 0.8 * Global.get_size()
def find_node_property(self, input_key, mat_property_dict):
property_key, property_type = input_key.split(": ")
property_info = mat_property_dict[property_key][property_type]
return property_key, property_type, property_info
def create_texture_input(self, tex_path, tex_image_node):
tex_path = os.path.abspath(tex_path)
tex_image = bpy.data.images.load(filepath=tex_path)
tex_image_node.image = tex_image
if not self.is_Diffuse:
Versions.to_color_space_non(tex_image_node)
def convert_color(self, color, shader_node):
color_hex = color.lstrip("#")
color_rgb = hex_to_col(color_hex)
color_rgb.append(1) # alpha
return color_rgb
# remove shader_node from convert_color()
def daz_color_to_rgb(self, color):
color_hex = color.lstrip("#")
color_rgb = hex_to_col(color_hex)
color_rgb.append(1) # alpha
return color_rgb
def setup_materials(self, obj):
for mat_slot in obj.material_slots:
mat = mat_slot.material
mat_name = mat.name
obj_name = obj.name.replace(".Shape", "")
obj_name = obj_name.split(".")[0]
if mat is None:
# Get or create a new material when slot is missing material
mat = bpy.data.materials.get(mat_slot.name) or bpy.data.materials.new(
name=mat_slot.name
)
mat_slot.material = mat
if obj_name not in self.mat_data_dict.keys():
continue
if mat_name not in self.mat_data_dict[obj_name].keys():
mat_name = mat.name.split(".")[0]
if mat_name not in self.mat_data_dict[obj_name].keys():
continue
mat_data = self.mat_data_dict[obj_name][mat_name]
self.mat_property_dict = self.get_mat_properties(mat_data)
# Set Custom Properties
for key in mat_data:
if not key == "Properties":
mat[key] = mat_data[key]
# Update Name
new_name = mat["Asset Label"] + "_" + mat["Material Name"]
if bpy.context.window_manager.combine_materials:
# To Deal with a duplicate being converted first.
if new_name in bpy.data.materials:
mat_slot.material = bpy.data.materials[new_name]
bpy.data.materials.remove(mat)
continue
mat.name = new_name
mat_name = mat.name
# To Deal with duplications
if self.optimize_materials(mat_slot):
continue
mat.use_nodes = True
mat_nodes = mat.node_tree.nodes
mat_links = mat.node_tree.links
# Remove all the nodes from the material
for mat_node in mat_nodes:
mat_nodes.remove(mat_node)
# Create material output nodes and set corresponding targets
out_node_cy = mat_nodes.new(type="ShaderNodeOutputMaterial")
out_node_cy.target = "CYCLES"
out_node_ev = mat_nodes.new(type="ShaderNodeOutputMaterial")
out_node_ev.target = "EEVEE"
# Create shader node and set links
shader_node = mat_nodes.new(type="ShaderNodeGroup")
node_group = self.get_mat_type(mat)
shader_node.node_tree = bpy.data.node_groups[node_group]
# Link corresponding nodes in the material
render_output = None
surface_input = out_node_cy.inputs["Surface"]
render_output = shader_node.outputs["Cycles"]
mat_links.new(render_output, surface_input)
mat_links.new(shader_node.outputs["EEVEE"], out_node_ev.inputs["Surface"])
# Find and Attach Node Input
for input_key in shader_node.inputs.keys():
if ("Texture" in input_key) or ("Value" in input_key):
# To deal with Gen 8.1 Not Share the Same info as Gen 8 "temp"
if input_key.split(": ")[0] in self.mat_property_dict.keys():
(
property_key,
property_type,
property_info,
) = | |
"""
Profile (Multi-perspective single-record view)
Copyright: 2009-2022 (c) Sahana Software Foundation
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3Profile",
)
from uuid import uuid4
from gluon import current, redirect
from gluon.html import *
from gluon.storage import Storage
from ..resource import FS
from ..tools import get_crud_string, s3_str
from ..ui import ICON
from .crud import S3CRUD
from .report import S3Report
# =============================================================================
class S3Profile(S3CRUD):
"""
Interactive Method Handler for Profile Pages
Configure widgets using s3db.configure(tablename, profile_widgets=[])
TODO Make more configurable:
- Currently uses internal widgets rather than CRUDMethod widgets
TODO unify datalist and datatable methods with the superclass
methods (requires re-design of the superclass methods)
TODO allow as default handler for interactive single-record-no-method
GET requests (include read/update from superclass)
"""
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
"""
API entry point
Args:
r: the CRUDRequest instance
attr: controller attributes for the request
"""
if r.http in ("GET", "POST", "DELETE"):
if r.record:
# Initialize CRUD form
self.settings = current.response.s3.crud
self.sqlform = sqlform = self.resource.get_config("crud_form")
if not sqlform:
from ..ui import S3SQLDefaultForm
self.sqlform = S3SQLDefaultForm()
# Render page
output = self.profile(r, **attr)
return output
elif r.representation not in ("dl", "aadata"):
# Redirect to the List View
redirect(r.url(method=""))
else:
# No point redirecting
r.error(404, current.ERROR.BAD_RECORD)
else:
r.error(405, current.ERROR.BAD_METHOD)
# -------------------------------------------------------------------------
def profile(self, r, **attr):
"""
Generate a Profile page
Args:
r: the CRUDRequest instance
attr: controller attributes for the request
"""
tablename = self.tablename
get_config = current.s3db.get_config
header = get_config(tablename, "profile_header")
# Get the page widgets
widgets = get_config(tablename, "profile_widgets")
if not widgets and not header:
# Profile page not configured:
if r.representation not in ("dl", "aadata"):
# Redirect to the Read View
redirect(r.url(method="read"))
else:
# No point redirecting
r.error(405, current.ERROR.BAD_METHOD)
# Index the widgets by their position in the config
for index, widget in enumerate(widgets):
widget["index"] = index
if r.representation == "dl":
# Ajax-update of one datalist
index = r.get_vars.get("update", None)
if index:
try:
index = int(index)
except ValueError:
datalist = ""
else:
# @ToDo: Check permissions to the Resource & do
# something different if no permission
datalist = self._datalist(r, widgets[index], **attr)
output = {"item": datalist}
elif r.representation == "aadata":
# Ajax-update of one datatable
index = r.get_vars.get("update", None)
if index:
try:
index = int(index)
except ValueError:
datalist = ""
else:
# @ToDo: Check permissions to the Resource & do
# something different if no permission
datatable = self._datatable(r, widgets[index], **attr)
return datatable
else:
# Default page-load
# Page Title
title = get_config(tablename, "profile_title")
if not title:
try:
title = r.record.name
except:
title = current.T("Profile Page")
elif callable(title):
title = title(r)
# Page Header
if not header:
header = H2(title, _class="profile-header")
elif callable(header):
header = header(r)
output = {"title": title,
"header": header,
}
# Update Form, if configured
update = get_config(tablename, "profile_update")
if update:
editable = get_config(tablename, "editable", True)
authorised = self._permitted(method="update")
if authorised and editable:
show = get_crud_string(tablename, "title_update")
hide = current.T("Hide Form")
form = self.update(r, **attr)["form"]
else:
show = get_crud_string(tablename, "title_display")
hide = current.T("Hide Details")
form = self.read(r, **attr)["item"]
if update == "visible":
hidden = False
label = hide
style_hide, style_show = None, "display:none"
else:
hidden = True
label = show
style_hide, style_show = "display:none", None
toggle = A(SPAN(label,
data = {"on": show,
"off": hide,
},
),
ICON("down", _style=style_show),
ICON("up", _style=style_hide),
data = {"hidden": hidden},
_class = "form-toggle action-lnk",
)
form.update(_style=style_hide)
output["form"] = DIV(toggle,
form,
_class = "profile-update",
)
else:
output["form"] = ""
# Widgets
response = current.response
rows = []
append = rows.append
row = None
cols = get_config(tablename, "profile_cols")
if not cols:
cols = 2
row_cols = 0
for widget in widgets:
# Render the widget
w_type = widget["type"]
if w_type == "comments":
w = self._comments(r, widget, **attr)
elif w_type == "datalist":
w = self._datalist(r, widget, **attr)
elif w_type == "datatable":
w = self._datatable(r, widget, **attr)
elif w_type == "form":
w = self._form(r, widget, **attr)
elif w_type == "map":
w = self._map(r, widget, widgets, **attr)
elif w_type == "report":
w = self._report(r, widget, **attr)
elif w_type == "organizer":
w = self._organizer(r, widget, **attr)
elif w_type == "custom":
w = self._custom(r, widget, **attr)
else:
if response.s3.debug:
raise SyntaxError("Unsupported widget type %s" %
w_type)
else:
# ignore
continue
if row is None:
# Start new row
row = DIV(_class="row profile")
row_cols = 0
# Append widget to row
row.append(w)
colspan = widget.get("colspan", 1)
row_cols += colspan
if row_cols == cols:
# Close this row
append(row)
row = None
if row:
# We have an incomplete row of widgets
append(row)
output["rows"] = rows
# Activate this if a project needs it
#response.view = get_config(tablename, "profile_view") or \
# self._view(r, "profile.html")
response.view = self._view(r, "profile.html")
return output
# -------------------------------------------------------------------------
@staticmethod
def _resolve_context(r, tablename, context):
"""
Resolve a context filter
Args:
r: the CRUDRequest instance (provides the master record ID)
tablename: the tablename of the target resource
context: the context (as a string or tuple, or None)
"""
record_id = r.id
if not record_id:
return None
if not context:
query = None
elif type(context) is tuple:
context, field = context
query = (FS(context) == r.record[field])
elif context == "location":
# Show records linked to this Location & all its Child Locations
s = "(location)$path"
# This version doesn't serialize_url
#m = ("%(id)s/*,*/%(id)s/*" % dict(id=id)).split(",")
#filter = (FS(s).like(m)) | (FS(s) == id)
m = ("%(id)s,%(id)s/*,*/%(id)s/*,*/%(id)s" % dict(id=record_id)).split(",")
m = [f.replace("*", "%") for f in m]
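# e.g. for record_id 42 this yields the like-patterns
# ["42", "42/%", "%/42/%", "%/42"], i.e. the location itself plus any
# path that starts with, contains, or ends with it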
query = (FS(s).like(m))
# @ToDo:
#elif context == "organisation":
# # Show records linked to this Organisation and all its Branches
# s = "(%s)" % context
# query = (FS(s) == id)
else:
# Normal: show just records linked directly to this master resource
s = "(%s)" % context
query = (FS(s) == record_id)
# Define target resource
resource = current.s3db.resource(tablename, filter=query)
r.customise_resource(tablename)
return resource, query
# -------------------------------------------------------------------------
def _comments(self, r, widget, **attr):
"""
Generate a Comments widget
Args:
r: the CRUDRequest instance
widget: the widget definition as dict
attr: controller attributes for the request
TODO Configurable to use either Disqus or internal Comments
"""
label = widget.get("label", "")
# Activate if-required
#if label and isinstance(label, str):
if label:
label = current.T(label)
icon = widget.get("icon", "")
if icon:
icon = ICON(icon)
_class = self._lookup_class(r, widget)
comments = "@ToDo"
# Render the widget
output = DIV(H4(icon,
label,
_class = "profile-sub-header",
),
DIV(comments,
_class = "card-holder",
),
_class = _class,
)
return output
# -------------------------------------------------------------------------
def _custom(self, r, widget, **attr):
"""
Generate a Custom widget
Args:
r: the CRUDRequest instance
widget: the widget definition as dict
attr: controller attributes for the request
"""
label = widget.get("label", "")
# Activate if-required
#if label and isinstance(label, str):
if label:
label = current.T(label)
icon = widget.get("icon", "")
if icon:
icon = ICON(icon)
_class = self._lookup_class(r, widget)
contents = widget["fn"](r, **attr)
# Render the widget
output = DIV(H4(icon,
label,
_class = "profile-sub-header",
),
DIV(contents,
_class = "card-holder",
),
_class = _class,
)
return output
# -------------------------------------------------------------------------
def _datalist(self, r, widget, **attr):
"""
Generate a data list
Args:
r: the CRUDRequest instance
widget: the widget definition as dict
attr: controller attributes for the request
"""
T = current.T
widget_get | |
reorganization, and hence
no move operation, so the `position` and `save` arguments are
ignored; they are present for regularity purposes with the rest of
the deletion preparation methods.
:param node: the :class:`CTENode` to prepare for deletion.
:param position: this is ignored, but present for regularity.
:param save: this is ignored, but present for regularity.
"""
# Django will take care of deleting the sub-tree through the reverse
# Foreign Key parent relation.
pass
def prepare_delete_grandmother(self, node, position = None, save = True):
""" Prepares a given :class:`CTENode` `node` for deletion, by executing
the :const:`DELETE_METHOD_GRANDMOTHER` semantics. Descendant nodes,
if present, will be moved; in this case the optional `position` can
be a ``callable`` which is invoked prior to each move operation (see
:meth:`move` for details).
By default, after each move operation, sub-tree nodes which were
moved will be saved through a call to :meth:`Model.save` unless
`save` is ``False``.
This method delegates move operations to :meth:`move`.
:param node: the :class:`CTENode` to prepare for deletion.
:param position: optionally, a ``callable`` to invoke prior to each
move operation.
:param save: flag indicating whether to save after each move
operation, ``True`` by default.
"""
# Move all children to the node's parent.
for child in node.children.all():
child.move(node.parent, position, save)
def prepare_delete_monarchy(self, node, position = None, save = True):
""" Prepares a given :class:`CTENode` `node` for deletion, by executing
the :const:`DELETE_METHOD_MONARCHY` semantics. Descendant nodes,
if present, will be moved; in this case the optional `position` can
be a ``callable`` which is invoked prior to each move operation (see
:meth:`move` for details).
By default, after each move operation, sub-tree nodes which were
moved will be saved through a call to :meth:`Model.save` unless
`save` is ``False``.
This method delegates move operations to :meth:`move`.
:param node: the :class:`CTENode` to prepare for deletion.
:param position: optionally, a ``callable`` to invoke prior to each
move operation.
:param save: flag indicating whether to save after each move
operation, ``True`` by default.
"""
# We are going to iterate all children, even though the first child is
# treated in a special way, because the query iterator may be custom, so
# we will avoid using slicing children[0] and children[1:].
first = None
for child in node.children.all():
if first is None:
first = child
first.move(node.parent, position, save)
else:
child.move(first, position, save)
def move(self, node, destination, position = None, save = False):
""" Moves the given :class:`CTENode` `node` and places it as a child
node of the `destination` :class:`CTENode` (or makes it a root node
if `destination` is ``None``).
Optionally, `position` can be a callable which is invoked prior to
placement of the `node` with the `node` and the `destination` node
as the sole two arguments; this can be useful in implementing
specific sibling ordering semantics.
Optionally, if `save` is ``True``, after the move operation
completes (after the :attr:`CTENode.parent` foreign key is updated
and the `position` callable is called if present), a call to
:meth:`Model.save` is made.
:param destination: the destination node of this move, ``None``
denoting that the node will become a root node.
:param position: optional callable invoked prior to placement for
purposes of custom sibling ordering semantics.
:param save: optional flag indicating whether this model's
:meth:`save` method should be invoked after the move.
:return: this node.
"""
# Allow custom positioning semantics to specify the position before
# setting the parent.
if position is not None:
position(node, destination)
node.parent = destination
if save:
node.save()
return node
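# Illustrative sketch (assumed field names, not part of the original module):
# a minimal concrete node model using the class-level parameters documented
# in the CTENode docstring below.
#
#   class Category(CTENode):
#       name = models.CharField(max_length = 100)
#       _cte_node_order_by = ("name",)
#
#   root = Category.objects.create(name = "top")
#   child = Category.objects.create(name = "sub", parent = root)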
class CTENode(Model):
""" Abstract :class:`Model` which implements a node in a CTE tree. This
model features a mandatory foreign key to the parent node (hence to
``self``), which, when ``None``, indicates a root node. Multiple nodes
with a ``None`` parent results in a forest, which can be constrained
either with custom SQL constraints or through application logic.
It is necessary for any custom :class:`Manager` of this model to inherit
from :class:`CTENodeManager`, as all functionality of the CTE tree is
implemented in the manager.
It is possible to manipulate individual nodes when not loaded through
the custom manager, or when freshly created either through the
:meth:`create` method or through the constructor, however, any operation
which requires tree information (the :attr:`depth`, :attr:`path`,
and :attr:`ordering` virtual fields) will not work, and any attempt to
invoke such methods will result in an :class:`ImproperlyConfigured`
exception being raised.
Many runtime properties of nodes are specified through a set of
parameters which are stored as attributes of the node class, and begin
with ``_cte_node_``. Before any of these parameters are used, the
manager will attempt to load and verify them, raising an
:class:`ImproperlyConfigured` exception if any errors are encountered.
All parameters have default values.
All :class:`QuerySet` objects involving CTE nodes use the
:meth:`QuerySet.extra` semantics in order to specify additional
``SELECT``, ``WHERE``, and ``ORDER_BY`` SQL semantics, therefore, they
cannot be combined through the ``OR`` operator (the ``|`` operator).
The following parameters can optionally be specified at the class level:
* _cte_node_traversal:
A string from one of :const:`TREE_TRAVERSAL_METHODS`, which
specifies the default tree traversal order. If this parameters is
``None`` or :const:`TREE_TRAVERSAL_NONE`, then
:const:`DEFAULT_TREE_TRAVERSAL` method is used (which is ``dfs``
for depth-first).
* _cte_node_order_by:
A list of strings or tuples specifying the ordering of siblings
during tree traversal (in the breadth-first method, siblings are
ordered depending on their parent and not the entire set of nodes at
the given depth of the tree).
The entries in this list can be any of the model fields, much like
the entries in the :attr:`ordering` of the model's :class:`Meta`
class or the arguments of the :meth:`order_by` method of
:class:`QuerySet`.
These entries may also contain the virtual field :attr:`depth`,
which cannot be used by the normal :class:`QuerySet` because Django
cannot recognize such virtual fields.
In case of multiple entries, they must all be of the same database
type. For VARCHAR fields, their values will be cast to TEXT, unless
otherwise specified. It is possible to specify the database type
into which the ordering field values are cast by providing tuples of
the form ``(fieldname, dbtype)`` in the ordering sequence.
Specifying cast types is necessary when combining different data
types in the ordering sequence, such as an int and a float (casting
the int into a float is probably the desired outcome in this
situation). In the worst case, TEXT can be specified for all casts.
* _cte_node_delete_method:
A string specifying the desired default deletion semantics, which
may be one of :const:`DELETE_METHODS`. If this parameter is missing
or ``None`` or :const:`DELETE_METHOD_NONE`, then the default
deletion semantics :const:`DEFAULT_DELETE_METHOD` will be used
(which is :const:`DELETE_METHOD_PHARAOH` or ``pharaoh`` for the
Pharaoh deletion semantics).
* _cte_node_parent:
A string referencing the name of the :class:`ForeignKey` field which
implements the parent relationship, typically called ``parent`` and
automatically inherited from this class.
If this parameter is missing, and no field with the name ``parent``
can be found, then the first :class:`ForeignKey` which relates to
this model will be used as the parent relationship field.
* _cte_node_children:
A string referencing the `related_name` attribute of the
:class:`ForeignKey` field which implements the parent relationship,
typically called ``parent`` (specified in
:const:`DEFAULT_CHILDREN_NAME`) and automatically
inherited from this class.
* _cte_node_table:
The name of the temporary table to use with the ``WITH`` CTE SQL
statement when compiling queries involving nodes. By default this is
:const:`DEFAULT_TABLE_NAME` (which is ``cte``).
* _cte_node_primary_key_type:
A string representing the database type of the primary key, if the
primary key is a non-standard type, and must be cast in order to be
used in the :attr:`path` or :attr:`ordering` virtual fields
(similarly to the :attr:`_cte_node_order_by` parameter above).
A ``VARCHAR`` primary key will be automatically cast to ``TEXT``,
unless explicitly specified otherwise through this parameter.
* _cte_node_path, _cte_node_depth, _cte_node_ordering:
Strings specifying the attribute names of the virtual fields
containing the path, depth, and ordering prefix of each node, by
default, respectively, | |
self.PL_cons_functor = self._lib.PL_cons_functor # FIXME:
# PL_EXPORT(void) PL_cons_functor_v(term_t h, functor_t fd, term_t a0);
self.PL_cons_functor_v = self._lib.PL_cons_functor_v
self.PL_cons_functor_v.argtypes = [term_t, functor_t, term_t]
self.PL_cons_functor_v.restype = None
# PL_EXPORT(void) PL_cons_list(term_t l, term_t h, term_t t);
self.PL_cons_list = self._lib.PL_cons_list
self.PL_cons_list.argtypes = [term_t, term_t, term_t]
self.PL_cons_list.restype = None
#
# term_t PL_exception(qid_t qid)
self.PL_exception = self._lib.PL_exception
self.PL_exception.argtypes = [qid_t]
self.PL_exception.restype = term_t
#
self.PL_register_foreign = self._lib.PL_register_foreign
self.PL_register_foreign = check_strings(0, None)(self.PL_register_foreign)
#
# PL_EXPORT(atom_t) PL_new_atom(const char *s);
self.PL_new_atom = self._lib.PL_new_atom
self.PL_new_atom.argtypes = [c_char_p]
self.PL_new_atom.restype = atom_t
self.PL_new_atom = check_strings(0, None)(self.PL_new_atom)
# PL_EXPORT(functor_t) PL_new_functor(atom_t f, int a);
self.PL_new_functor = self._lib.PL_new_functor
self.PL_new_functor.argtypes = [atom_t, c_int]
self.PL_new_functor.restype = functor_t
# /*******************************
# * COMPARE *
# *******************************/
#
# PL_EXPORT(int) PL_compare(term_t t1, term_t t2);
# PL_EXPORT(int) PL_same_compound(term_t t1, term_t t2);
self.PL_compare = self._lib.PL_compare
self.PL_compare.argtypes = [term_t, term_t]
self.PL_compare.restype = c_int
self.PL_same_compound = self._lib.PL_same_compound
self.PL_same_compound.argtypes = [term_t, term_t]
self.PL_same_compound.restype = c_int
# /*******************************
# * RECORDED DATABASE *
# *******************************/
#
# PL_EXPORT(record_t) PL_record(term_t term);
self.PL_record = self._lib.PL_record
self.PL_record.argtypes = [term_t]
self.PL_record.restype = record_t
# PL_EXPORT(void) PL_recorded(record_t record, term_t term);
self.PL_recorded = self._lib.PL_recorded
self.PL_recorded.argtypes = [record_t, term_t]
self.PL_recorded.restype = None
# PL_EXPORT(void) PL_erase(record_t record);
self.PL_erase = self._lib.PL_erase
self.PL_erase.argtypes = [record_t]
self.PL_erase.restype = None
#
# PL_EXPORT(char *) PL_record_external(term_t t, size_t *size);
# PL_EXPORT(int) PL_recorded_external(const char *rec, term_t term);
# PL_EXPORT(int) PL_erase_external(char *rec);
self.PL_new_module = self._lib.PL_new_module
self.PL_new_module.argtypes = [atom_t]
self.PL_new_module.restype = module_t
self.PL_is_initialised = self._lib.PL_is_initialised
# PL_EXPORT(IOSTREAM *) Sopen_string(IOSTREAM *s, char *buf, size_t sz, const char *m);
self.Sopen_string = self._lib.Sopen_string
self.Sopen_string.argtypes = [POINTER(IOSTREAM), c_char_p, c_size_t, c_char_p]
self.Sopen_string.restype = POINTER(IOSTREAM)
# PL_EXPORT(int) Sclose(IOSTREAM *s);
self.Sclose = self._lib.Sclose
self.Sclose.argtypes = [POINTER(IOSTREAM)]
# PL_EXPORT(int) PL_unify_stream(term_t t, IOSTREAM *s);
self.PL_unify_stream = self._lib.PL_unify_stream
self.PL_unify_stream.argtypes = [term_t, POINTER(IOSTREAM)]
self.func_names = set()
self.func = {}
self.unify = None
@staticmethod
def unifier(arity, *args):
assert arity == 2
# if PL_is_variable(args[0]):
# args[0].unify(args[1])
try:
return {args[0].value: args[1].value}
except AttributeError:
return {args[0].value: args[1]}
def __del__(self):
# only do something if prolog has been initialised
if self.PL_is_initialised(None, None):
# clean up the prolog system using the caught exit code
# if exit code is None, the program exits normally and we can use 0
# instead.
# TODO Prolog documentation says cleanup with code 0 may be interrupted
# If the program has come to an end the prolog system should not
# interfere with that. Therefore we may want to use 1 instead of 0.
self.PL_cleanup(int(_hook.exit_code or 0))
_isCleaned = True
@staticmethod
def _findSwiplPathFromFindLib():
"""
This function resorts to ctype's find_library to find the path to the
DLL. The biggest problem is that find_library does not give the path to the
resource file.
:returns:
A path to the swipl SO/DLL or None if it is not found.
:returns type:
{str, None}
"""
path = (find_library('swipl') or
find_library('pl') or
find_library('libswipl')) # This last one is for Windows
return path
@staticmethod
def _findSwiplFromExec():
"""
This function tries to use an executable on the path to find SWI-Prolog
SO/DLL and the resource file.
:returns:
A tuple of (path to the swipl DLL, path to the resource file)
:returns type:
({str, None}, {str, None})
"""
platform = sys.platform[:3]
fullName = None
swiHome = None
try: # try to get library path from swipl executable.
# We may have pl or swipl as the executable
try:
cmd = Popen(['swipl', '--dump-runtime-variables'], stdout=PIPE)
except OSError:
cmd = Popen(['pl', '--dump-runtime-variables'], stdout=PIPE)
ret = cmd.communicate()
# Parse the output into a dictionary
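# Example output of `swipl --dump-runtime-variables` (illustrative values):
#   PLBASE="/usr/lib/swi-prolog";
#   PLARCH="x86_64-linux";
#   PLLIB="-lswipl";
#   PLSOEXT="so";
#   PLSHARED="yes";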
ret = ret[0].decode().replace(';', '').splitlines()
ret = [line.split('=', 1) for line in ret]
rtvars = dict((name, value[1:-1]) for name, value in ret) # [1:-1] gets
# rid of the
# quotes
if rtvars['PLSHARED'] == 'no':
raise ImportError('SWI-Prolog is not installed as a shared '
'library.')
else: # PLSHARED == 'yes'
swiHome = rtvars['PLBASE'] # The environment is in PLBASE
if not os.path.exists(swiHome):
swiHome = None
# determine platform specific path
if platform == "win":
dllName = rtvars['PLLIB'][:-4] + '.' + rtvars['PLSOEXT']
path = os.path.join(rtvars['PLBASE'], 'bin')
fullName = os.path.join(path, dllName)
if not os.path.exists(fullName):
fullName = None
elif platform == "cyg":
# e.g. /usr/lib/pl-5.6.36/bin/i686-cygwin/cygpl.dll
dllName = 'cygpl.dll'
path = os.path.join(rtvars['PLBASE'], 'bin', rtvars['PLARCH'])
fullName = os.path.join(path, dllName)
if not os.path.exists(fullName):
fullName = None
elif platform == "dar":
dllName = 'lib' + rtvars['PLLIB'][2:] + '.' + rtvars['PLSOEXT']
path = os.path.join(rtvars['PLBASE'], 'lib', rtvars['PLARCH'])
baseName = os.path.join(path, dllName)
if os.path.exists(baseName):
fullName = baseName
else: # We will search for versions
fullName = None
else: # assume UNIX-like
# The SO name in some linuxes is of the form libswipl.so.5.10.2,
# so we have to use glob to find the correct one
dllName = 'lib' + rtvars['PLLIB'][2:] + '.' + rtvars['PLSOEXT']
path = os.path.join(rtvars['PLBASE'], 'lib', rtvars['PLARCH'])
baseName = os.path.join(path, dllName)
if os.path.exists(baseName):
fullName = baseName
else: # We will search for versions
pattern = baseName + '.*'
files = glob.glob(pattern)
if len(files) == 0:
fullName = None
elif len(files) == 1:
fullName = files[0]
else: # Will this ever happen?
fullName = None
except (OSError, KeyError): # KeyError from accessing rtvars
pass
return (fullName, swiHome)
@staticmethod
def _findSwiplWin():
"""
This function uses several heuristics to guess where SWI-Prolog is installed
in Windows. It always returns None as the path of the resource file because,
in Windows, the way to find it is more robust so the SWI-Prolog DLL is
always able to find it.
:returns:
A tuple of (path to the swipl DLL, path to the resource file)
:returns type:
({str, None}, {str, None})
"""
import re
dllNames = ('swipl.dll', 'libswipl.dll')
# First try: check the usual installation path (this is faster but
# hardcoded)
programFiles = os.getenv('ProgramFiles')
paths = [os.path.join(programFiles, r'pl\bin', dllName)
for dllName in dllNames]
for path in paths:
if os.path.exists(path):
return (path, None)
# Second try: use the find_library
path = SWIPl._findSwiplPathFromFindLib()
if path is not None and os.path.exists(path):
return (path, None)
# Third try: use reg.exe to find the installation path in the registry
# (reg should be installed in all Windows XPs)
try:
cmd = Popen(['reg', 'query',
r'HKEY_LOCAL_MACHINE\Software\SWI\Prolog',
'/v', 'home'], stdout=PIPE)
ret = cmd.communicate()
# Result is like:
# ! REG.EXE VERSION 3.0
#
# HKEY_LOCAL_MACHINE\Software\SWI\Prolog
# home REG_SZ C:\Program Files\pl
# (Note: spaces may be \t or spaces in the output)
ret = ret[0].splitlines()
ret = [line.decode("utf-8") for line in ret if len(line) > 0]
pattern = re.compile('[^h]*home[^R]*REG_SZ( |\t)*(.*)$')
match = pattern.match(ret[-1])
if match is not None:
path = match.group(2)
paths = [os.path.join(path, 'bin', dllName)
for dllName in dllNames]
for path in paths:
if os.path.exists(path):
return (path, None)
except OSError:
# reg.exe not found? Weird...
pass
# May the exec is on path?
(path, swiHome) = SWIPl._findSwiplFromExec()
if path is not None:
return (path, swiHome)
# Last try: maybe it is in the current dir
for dllName in dllNames:
if os.path.exists(dllName):
return (dllName, None)
return (None, None)
@staticmethod
def _findSwiplLin():
"""
This function uses several heuristics to guess where SWI-Prolog is
installed in Linuxes.
:returns:
A tuple of (path to the swipl so, path to the resource file)
:returns type:
({str, None}, {str, None})
"""
# Maybe the exec is on path?
(path, swiHome) = SWIPl._findSwiplFromExec()
if path is not None:
return (path, swiHome)
# If it is not, use find_library
path = SWIPl._findSwiplPathFromFindLib()
if path is not None:
return (path, swiHome)
# Our last try: some hardcoded paths.
paths = ['/lib', '/usr/lib', '/usr/local/lib', '.', './lib']
names = ['libswipl.so', 'libpl.so']
path = None
for name in names:
for try_ in paths:
try_ = os.path.join(try_, name)
if os.path.exists(try_):
path = try_
break
if path is not None:
return (path, swiHome)
return (None, None)
@staticmethod
def _findSwiplMacOSHome():
"""
This function guesses where SWI-Prolog is
installed in MacOS via .app.
:returns:
A tuple of (path to the swipl so, path to the resource file)
:returns type:
({str, None}, {str, None})
"""
# Need more help with MacOS
# That | |
# Copyright (c) 2012-2014 Turbulenz Limited
"""
This file contains all of the code generation, formatting and default
templates for the build tools. This includes the set of variables
used to render the html templates, the format of dependency
information and the set of shared options across the code build tools.
"""
from turbulenz_tools.utils.dependencies import find_file_in_dirs
from turbulenz_tools.utils.profiler import Profiler
from turbulenz_tools.tools.toolsexception import ToolsException
from turbulenz_tools.tools.templates import read_file_utf8
import os.path
import glob
from re import compile as re_compile
from logging import getLogger
__version__ = '1.1.4'
LOG = getLogger(__name__)
############################################################
DEFAULT_HTML_TEMPLATE = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<title>
/*{% block tz_app_title %}*//*{{ tz_app_title_var }}*//*{% endblock %}*/
</title>
<meta http-equiv="Content-Type" content="text/html;charset=utf-8" >
<style type="text/css">
html, body, div, span, object, iframe, h1, h2, p, a, img, ul, li, fieldset, form, label, legend, table, thead, tbody, tfoot, tr, th, td {
border: 0;
font-size: 100%;
margin: 0;
outline: 0;
padding: 0;
vertical-align: baseline;
}
</style>
<!-- block tz_app_header -->
/*{% block tz_app_header %}*//*{% endblock %}*/
<!-- end tz_app_header -->
</head>
<body style="background:#B4B4B4;font:normal normal normal 13px/1.231 Helvetica,Arial,sans-serif;text-shadow:1px 1px #F9F8F8;">
<div id="titlebar" style="position:fixed;height:65px;top:0;right:0;left:0;">
<strong style="font-size:24px;line-height:64px;margin:16px;">
<!-- block tz_app_title_name -->
/*{% block tz_app_title_name %}*/
/*{{ tz_app_title_name_var }}*/
/*{% endblock %}*/
<!-- end tz_app_title_name -->
</strong>
<div id="titlelogo"
style="float:right;width:27px;height:27px;margin:18px 24px;">
</div>
</div>
<div id="sidebar"
style="background:#B4B4B4;position:fixed;width:303px;top:65px;left:0;">
<!-- block tz_app_html_controls -->
/*{% block tz_app_html_controls %}*/
/*{% endblock %}*/
<!-- end tz_app_html_controls -->
</div>
<div id="engine" style="background:#939393;position:fixed;top:65px;
bottom:0;right:0;left:303px;
border-left:1px solid #898989;">
<!--
HTML to create a plugin or canvas instance.
Supplied by 'tz_engine_div' variable.
-->
/*{{ tz_engine_div }}*/
</div>
<!-- begin 'tz_include_js' variable -->
/*{{ tz_include_js }}*/
<!-- end 'tz_include_js' variable -->
<script type="text/javascript">
// ----------------------------------------
// Embedded code and startup code.
// Supplied by 'tz_startup_code' variable.
// ----------------------------------------
/*{{ tz_startup_code }}*/
</script>
</body>
</html>
"""
############################################################
def default_parser_options(parser):
"""
Command line options shared by make*.py tools
"""
parser.add_option("--version", action="store_true", dest="output_version",
default=False, help="output version number")
parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
default=False, help="verbose output")
parser.add_option("-s", "--silent", action="store_true", dest="silent",
default=False, help="silent running")
# Input / Output (input .html and .js files don't need a prefix)
parser.add_option("-o", "--output", action="store", dest="output",
help="output file to process")
parser.add_option("-t", "--templatedir", action="append", dest="templatedirs",
default=[], help="template directory (multiple allowed)")
# Dependency generation
parser.add_option("-M", "--dependency", action="store_true",
dest="dependency", default=False,
help="output dependencies")
parser.add_option("--MF", action="store", dest="dependency_file",
help="dependencies output to file")
# "use strict" options
parser.add_option("--use-strict", action="store_true", dest="use_strict",
default=False, help='enforce "use strict"; statement. '
'This adds a single "use strict"; line at the top of the '
'JavaScript code.')
parser.add_option("--include-use-strict", action="store_true",
dest="include_use_strict", default=False,
help='don\'t strip out "use strict"; statements. '
'By default all "use strict"; statements are removed '
'from the output file.')
# Hybrid
parser.add_option("--hybrid", action="store_true", dest="hybrid",
default=False, help="canvas, canvas_dev modes only. "
"Start up a plugin as well as a canvas-based "
"TurbulenzEngine. The plugin will be available as "
"TurbulenzEnginePlugin.")
# Profiling
def _enable_profiler(_options, _opt_str, _value, _parser):
Profiler.enable()
parser.add_option("--profile", action="callback", callback=_enable_profiler,
help="enable the collection and output of profiling "
"information")
# Injecting files
parser.add_option("--no-inject", action="store_true", dest="noinject",
default=False, help="Don't inject default library files")
############################################################
def render_js(context, options, templates_js, inject_js):
"""
Renders the templates in templates_js, as if the first template
began with include declarations for each of the files in
inject_js. Returns the result of rendering, and the list of
includes that were not inlined. (rendered_js, inc_js)
For dev modes, the list of includes is returned in inc_js as
relative paths from the output file. For release modes, includes
are all inlined (inc_js == []).
"""
regex_use_strict = re_compile('"use strict";')
out = []
inc_js = []
outfile_dir = os.path.abspath(os.path.dirname(options.output)) + os.sep
includes_seen = []
# Any headers
if options.use_strict:
out.append('"use strict";')
if options.mode in [ 'plugin', 'canvas' ]:
out.append('(function () {')
# Functions for handling includes
def _find_include_or_error(name):
try:
f = find_file_in_dirs(name, options.templatedirs)
except Exception, ex:
raise ToolsException(str(ex))
if f is None:
raise ToolsException("No file '%s' in any template dir" % name)
LOG.info(" resolved '%s' to path '%s'", name, f)
return f
def handle_javascript_dev(name):
file_path = _find_include_or_error(name)
if file_path in includes_seen:
LOG.info(" include '%s' (%s) already listed", name, file_path)
return ""
includes_seen.append(file_path)
# Calculate relative path name
# rel_path = file_path.replace(outfile_dir, '').replace('\\', '/')
# if rel_path == file_path:
# raise ToolsException("Included file '%s' found at '%s', which is "
# "not in a child directory of the output file "
# "'%s' in directory %s" % (name, file_path,
# options.output,
# outfile_dir))
rel_path = os.path.relpath(file_path, outfile_dir).replace('\\', '/')
inc_js.append(rel_path)
return ""
def handle_javascript_webworker_dev(name):
file_path = _find_include_or_error(name)
if file_path in includes_seen:
LOG.info(" include '%s' (%s) already listed", name, file_path)
return ""
includes_seen.append(file_path)
rel_path = os.path.relpath(file_path, outfile_dir).replace('\\', '/')
return ('importScripts("%s");' % rel_path).encode('utf-8')
def handle_javascript_release(name):
if options.stripdebug and os.path.basename(name) == "debug.js":
LOG.warning("App attempting to include debug.js. Removing.")
return ""
file_path = _find_include_or_error(name)
if file_path in includes_seen:
LOG.info(" include '%s' (%s) already listed", name, file_path)
return ""
includes_seen.append(file_path)
d = read_file_utf8(file_path)
if options.include_use_strict:
return d
else:
# strip out any "use strict"; lines
return regex_use_strict.sub('', d)
if options.mode in [ 'plugin', 'canvas', 'webworker' ]:
handle_javascript = handle_javascript_release
elif options.mode == 'webworker-debug':
handle_javascript = handle_javascript_webworker_dev
else:
handle_javascript = handle_javascript_dev
context['javascript'] = handle_javascript
# Inject any includes at the start, either embedding them or
# adding to the inc_js list.
for inj in inject_js:
js_line = handle_javascript(inj)
if js_line:
out.append(js_line)
# Render templates
out += [t.render(context) for t in templates_js]
del context['javascript']
# Any footer code
if options.mode == 'plugin':
out.append("""
if (!TurbulenzEngine.onload)
{
window.alert("Entry point 'TurbulenzEngine.onload' must be defined.");
return;
}
TurbulenzEngine.onload.call(this);
}());""")
if options.mode == 'canvas':
out.append('window.TurbulenzEngine = TurbulenzEngine;}());')
# Combine all parts into a single string
return ("\n".join(out), inc_js)
def render_js_extract_includes(context, options, templates_js, injects):
"""
Renders the templates in templates_js against the given context
and just collects the set of 'javascript('...')' includes. Will
optionally handle a list of files to be injected.
Returns an array of absolute paths, removing duplicates.
"""
includes = []
def _find_in_dirs_or_error(name):
file_path = find_file_in_dirs(name, options.templatedirs)
if file_path is None:
raise ToolsException("No file '%s' in any template dir" % name)
if file_path in includes:
LOG.info(" include '%s' (%s) already listed", name, file_path)
return
LOG.info(" resolved '%s' to path '%s'", name, file_path)
includes.append(file_path)
# In release mode, filter out debug.js
if options.mode in [ 'plugin', 'canvas' ] and options.stripdebug:
_do_find_in_dirs_or_error = _find_in_dirs_or_error
# pylint: disable=E0102
def _find_in_dirs_or_error(name):
if os.path.basename(name) == "debug.js":
LOG.warning("App attempting to include debug.js. Removing.")
return
_do_find_in_dirs_or_error(name)
# pylint: enable=E0102
# Deal with any injects
for i in injects:
_find_in_dirs_or_error(i)
# Use the templating engine to deal with remaining includes
    def handle_javascript_extract_includes(name):
_find_in_dirs_or_error(name)
return ""
    context['javascript'] = handle_javascript_extract_includes
for t in templates_js:
t.render(context)
del context['javascript']
return includes
############################################################
def output_dependency_info(dependency_file, output_file, dependencies):
"""
    This dependency writer outputs dependency information in a format
consistent with the GCC -M flags.
"""
try:
with open(dependency_file, "wb") as f:
f.write(output_file)
f.write(" ")
f.write(dependency_file)
f.write(" : \\\n")
for d in dependencies:
f.write(" ")
f.write(d)
f.write(" \\\n")
f.write("\n\n")
for d in dependencies:
f.write(d)
f.write(" :\n\n")
except IOError:
raise ToolsException("failed to write file: %s" % dependency_file)
############################################################
def context_from_options(options, title):
# Sanity check
if options.hybrid:
if options.mode not in [ 'canvas', 'canvas-debug' ]:
raise ToolsException("--hybrid option available only in canvas and "
"canvas_dev modes")
# Set up the context
context = {}
context['tz_app_title_name_var'] = title
context['tz_app_title_var'] = title
context['tz_development'] = options.mode in [ 'plugin-debug', 'canvas-debug', 'webworker-debug' ]
context['tz_canvas'] = options.mode in [ 'canvas', 'canvas-debug' ]
context['tz_webworker'] = options.mode in [ 'webworker', 'webworker-debug' ]
context['tz_hybrid'] = options.hybrid
return context
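# For example, a hypothetical options object with mode='canvas-debug' and
# hybrid=False yields tz_development=True, tz_canvas=True, tz_webworker=False
# and tz_hybrid=False, with both title variables set to the supplied title.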
############################################################
def inject_js_from_options(options):
"""
Given the build options, find (if necessary), all includes that
must be injected for canvas mode to work. This is done by
searching for webgl_engine_file in any of the
template directories, and collecting the list of all .js files
that reside there.
"""
inject_list = []
if options.noinject:
return inject_list
mode = options.mode
# Put debug.js at the top (if in debug mode), and ALWAYS include
# vmath.js
if mode in [ 'plugin-debug', 'canvas-debug', 'webworker-debug' ] or not options.stripdebug:
inject_list.append('jslib/debug.js')
inject_list.append('jslib/vmath.js')
# Include webgl includes in canvas mode
if mode in [ 'canvas', 'canvas-debug' ]:
LOG.info("Looking for jslib/webgl ...")
webgl_engine_file = 'jslib/webgl/turbulenzengine.js'
webgl_engine_dir = os.path.dirname(webgl_engine_file)
# Find absolute path of webgl_engine_file
webgl_abs_path = None
for t in options.templatedirs:
p = os.path.join(t, webgl_engine_file)
            if os.path.exists(p):
day = '19'
##############################
########## PARSING #########
##############################
def parse_input(day=day):
with open(f'2020/data/day_{day}.in', 'r', encoding='utf-8') as f:
rules, messages = f.read().strip().split('\n\n')
initial_rules, final_rules = {}, {}
for rule in rules.split('\n'):
if '"' in rule:
indeks = int(rule[ : rule.index(':')])
value = rule[rule.index('"')+1 : -1]
final_rules[indeks] = [value]
else:
indeks = int(rule[ : rule.index(':')])
rest = rule[rule.index(':')+2 : ]
initial_rules[indeks] = []
if '|' in rule:
rest = rest.split(' | ')
for opt in rest:
if ' ' in opt:
fst = int(opt[ : opt.index(' ')])
snd = int(opt[opt.index(' ')+1 : ])
initial_rules[indeks].append([fst, snd])
else:
initial_rules[indeks].append([int(opt)])
else:
if ' ' in rest:
fst = int(rest[ : rest.index(' ')])
snd = int(rest[rest.index(' ')+1 : ])
initial_rules[indeks] = [[fst, snd]]
else:
initial_rules[indeks].append([int(rest)])
return initial_rules, final_rules, messages.split('\n')
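# Illustrative sketch of the input shape this parser handles (not the actual
# puzzle input): rules with a quoted literal go straight into final_rules,
# while purely numeric rules (with or without '|') go into initial_rules.
#
#   0: 4 1
#   1: 2 3 | 3 2
#   4: "a"
#   5: "b"
#
#   ababbb
#   abbbab
#
# parse_input() would then return
#   ({0: [[4, 1]], 1: [[2, 3], [3, 2]]}, {4: ['a'], 5: ['b']}, ['ababbb', 'abbbab'])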
##############################
########## PART 1 ##########
##############################
def rule_is_executable(key, initial_rules, final_rules): # int -> bool
value = initial_rules[key]
needed_rules = []
for sez in value:
for num in sez:
needed_rules.append(num)
for needed_key in needed_rules:
if not needed_key in final_rules:
return False
return True
def execute_rule(key, initial_rules, final_rules): # int -> None
all_matches = set()
value = initial_rules[key]
for sez in value:
if len(sez) == 1: # sez[0]
for opt in final_rules[sez[0]]:
all_matches.add(opt)
elif len(sez) == 2: # sez[0] sez[1]
options0 = final_rules[sez[0]]
options1 = final_rules[sez[1]]
for opt0 in options0:
for opt1 in options1:
opt = opt0 + opt1
all_matches.add(opt)
final_rules[key] = all_matches
return ##
def execute_all_rules(initial_rules, final_rules):
while initial_rules:
executed_this_round = []
for rule_key in initial_rules:
if rule_is_executable(rule_key, initial_rules, final_rules):
execute_rule(rule_key, initial_rules, final_rules)
executed_this_round.append(rule_key)
for key in executed_this_round:
del initial_rules[key]
return ##
def part1():
initial_rules, final_rules, messages = parse_input()
execute_all_rules(initial_rules, final_rules)
return sum(1 for m in messages if m in final_rules[0])
##############################
########## PART 2 ##########
##############################
# DISCLAIMER: the code below is a horror; don't look, you do not want to see it
def part2():
    # at least eight on the left and sixteen on the right (then in multiples)
# 8: 42 | 42 8
# 11: 42 31 | 42 11 31
initial_rules, final_rules, all_messages = parse_input()
execute_all_rules(initial_rules, final_rules)
# lens = {len(m) for m in all_messages}
# # {24, 32, 40, 48, 56, 64, 72, 80}
messages = [m for m in all_messages if not m in final_rules[0]] # 249
def messages_of_len(n, messages=messages):
return [m for m in messages if len(m) == n]
mess24 = messages_of_len(24) # 9
mess32 = messages_of_len(32) # 67
mess40 = messages_of_len(40) # 71
mess48 = messages_of_len(48) # 36
mess56 = messages_of_len(56) # 32
mess64 = messages_of_len(64) # 18
mess72 = messages_of_len(72) # 7
mess80 = messages_of_len(80) # 5
mess88 = messages_of_len(88) # 1
mess96 = messages_of_len(96) # 3
a, b, c = final_rules[8], final_rules[42], final_rules[31]
finally_gucci_good_messages = {m for m in all_messages if m in final_rules[0]} # will be updated
# print(len(finally_gucci_good_messages)) # 149
for m in mess24: # 24 = 8+16
if (
(m[0:8] in a) and (m[8:16] in b) and (m[16:24] in c)
):
finally_gucci_good_messages.add(m)
# print(len(finally_gucci_good_messages)) # 149, +0 (od 9ih)
for m in mess32: # 32 = 8+8+16
if (
(m[0:8] in a) and (m[8:16] in a) and (m[16:24] in b) and (m[24:32] in c)
):
finally_gucci_good_messages.add(m)
# print(len(finally_gucci_good_messages)) # 202, +53 (od 67ih)
for m in mess40: # 40 = 8+8+8+16 | 8+16+16
if (
(m[0:8] in a) and (m[8:16] in a) and (m[16:24] in a) and (m[24:32] in b) and
(m[32:40] in c)
):
finally_gucci_good_messages.add(m)
elif (
(m[0:8] in a) and (m[8:16] in b) and (m[16:24] in b) and (m[24:32] in c) and
(m[32:40] in c)
):
finally_gucci_good_messages.add(m)
# print(len(finally_gucci_good_messages)) # 264, +62 (od 71ih)
for m in mess48: # 48 = 8+8+8+8+16 | 8+8+16+16
if (
(m[0:8] in a) and (m[8:16] in a) and (m[16:24] in a) and (m[24:32] in a) and
(m[32:40] in b) and (m[40:48] in c)
):
finally_gucci_good_messages.add(m)
elif (
(m[0:8] in a) and (m[8:16] in a) and (m[16:24] in b) and (m[24:32] in b) and
(m[32:40] in c) and (m[40:48] in c)
):
finally_gucci_good_messages.add(m)
# print(len(finally_gucci_good_messages)) # 294, +30 (od 36ih)
for m in mess56: # 56 = 8+8+8+8+8+16 | 8+8+8+16+16 | 8+16+16+16
if (
(m[0:8] in a) and (m[8:16] in a) and (m[16:24] in a) and (m[24:32] in a) and
(m[32:40] in a) and (m[40:48] in b) and (m[48:56] in c)
):
finally_gucci_good_messages.add(m)
elif (
(m[0:8] in a) and (m[8:16] in a) and (m[16:24] in a) and (m[24:32] in b) and
(m[32:40] in b) and (m[40:48] in c) and (m[48:56] in c)
):
finally_gucci_good_messages.add(m)
elif (
(m[0:8] in a) and (m[8:16] in b) and (m[16:24] in b) and (m[24:32] in b) and
(m[32:40] in c) and (m[40:48] in c) and (m[48:56] in c)
):
finally_gucci_good_messages.add(m)
# print(len(finally_gucci_good_messages)) # 316, +22 (od 32ih)
for m in mess64: # 64 = 8+8+8+8+8+8+16 | 8+8+8+8+16+16 | 8+8+16+16+16
if (
(m[0:8] in a) and (m[8:16] in a) and (m[16:24] in a) and (m[24:32] in a) and
(m[32:40] in a) and (m[40:48] in a) and (m[48:56] in b) and (m[56:64] in c)
):
finally_gucci_good_messages.add(m)
elif (
(m[0:8] in a) and (m[8:16] in a) and (m[16:24] in a) and (m[24:32] in a) and
(m[32:40] in b) and (m[40:48] in b) and (m[48:56] in c) and (m[56:64] in c)
):
finally_gucci_good_messages.add(m)
elif (
(m[0:8] in a) and (m[8:16] in a) and (m[16:24] in b) and (m[24:32] in b) and
(m[32:40] in b) and (m[40:48] in c) and (m[48:56] in c) and (m[56:64] in c)
):
finally_gucci_good_messages.add(m)
# print(len(finally_gucci_good_messages)) # 328, +12 (od 18ih)
for m in mess72: # 72 = 8+8+8+8+8+8+8+16 | 8+8+8+8+8+16+16 | 8+8+8+16+16+16 | 8+16+16+16+16
if (
(m[0:8] in a) and (m[8:16] in a) and (m[16:24] in a) and (m[24:32] in a) and
(m[32:40] in a) and (m[40:48] in a) and (m[48:56] in a) and (m[56:64] in b) and
(m[64:72] in c)
):
finally_gucci_good_messages.add(m)
elif (
(m[0:8] in a) and (m[8:16] in a) and (m[16:24] in a) and (m[24:32] in a) and
(m[32:40] in a) and (m[40:48] in b) and (m[48:56] in b) and (m[56:64] in c) and
(m[64:72] in c)
):
finally_gucci_good_messages.add(m)
elif (
(m[0:8] in a) and (m[8:16] in a) and (m[16:24] in a) and (m[24:32] in b) and
(m[32:40] in b) and (m[40:48] in b) and (m[48:56] in c) and (m[56:64] in c) and
(m[64:72] in c)
):
finally_gucci_good_messages.add(m)
elif (
(m[0:8] in a) and (m[8:16] in b) and (m[16:24] in b) and (m[24:32] in b) and
(m[32:40] in b) and (m[40:48] in c) and (m[48:56] in c) and (m[56:64] in c) and
(m[64:72] in c)
):
finally_gucci_good_messages.add(m)
# print(len(finally_gucci_good_messages)) # 331, +3 (od 7ih)
for m in mess80: # 80 = 8+8+8+8+8+8+8+8+16 | 8+8+8+8+8+8+16+16 | 8+8+8+8+16+16+16 | 8+8+16+16+16+16
if (
(m[0:8] in a) and (m[8:16] in a) and (m[16:24] in a) and (m[24:32] in a) and
(m[32:40] in a) and (m[40:48] in a) and (m[48:56] in a) and (m[56:64] in a) and
(m[64:72] in b) and (m[72:80] in c)
):
finally_gucci_good_messages.add(m)
elif (
(m[0:8] in a) and (m[8:16] in a) and (m[16:24] in a) and (m[24:32] in a) and
(m[32:40] in a) and (m[40:48] in a) and (m[48:56] in b) and (m[56:64] in b) and
(m[64:72] in c) and (m[72:80] in c)
):
finally_gucci_good_messages.add(m)
elif (
(m[0:8] in a) and (m[8:16] in a) and (m[16:24] in a) and (m[24:32] in a) and
(m[32:40] in b) and (m[40:48] in b) and (m[48:56] in b) and (m[56:64] in c) and
(m[64:72] in c) and (m[72:80] in c)
):
finally_gucci_good_messages.add(m)
elif (
(m[0:8] in a) and (m[8:16] in a) and (m[16:24] in b) and (m[24:32] in b) and
(m[32:40] in b) and (m[40:48] in b) and (m[48:56] in c) and (m[56:64] in c) and
(m[64:72] in c) and (m[72:80] in c)
):
finally_gucci_good_messages.add(m)
# print(len(finally_gucci_good_messages)) # 332, +1 (od 5ih)
for m in mess88: # 88 = 8+8+8+8+8+8+8+8+8+16 | 8+8+8+8+8+8+8+16+16 | 8+8+8+8+8+16+16+16 | 8+8+8+16+16+16+16 | 8+16+16+16+16+16
if (
(m[0:8] in a) and (m[8:16] in a) and (m[16:24] in a) and (m[24:32] in a) and
            (m[32:40] in a) and (m[40:48] in a) and (m[48:56] in a) and (m[56:64]
http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicromolePerGramOfCreatinine = CommonUCUMUnitsCode("umol/g{creat}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicromolePerGramOfHemoglobin = CommonUCUMUnitsCode("umol/g{Hb}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicromolePerGramOfHemoglobin = CommonUCUMUnitsCode("umol/g{hemoglobin}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Micromole_GramHgb = CommonUCUMUnitsCode("umol/g{Hgb}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicromolePerHour = CommonUCUMUnitsCode("umol/h")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Micromole_Hour_Gram = CommonUCUMUnitsCode("umol/h/g")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicromolePerHourPerLiter = CommonUCUMUnitsCode("umol/h/L")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicromolePerHourPerMilligramOfProtein = CommonUCUMUnitsCode("umol/h/mg{protein}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicromolePerKilogram = CommonUCUMUnitsCode("umol/kg")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicromolePerLiter = CommonUCUMUnitsCode("umol/L")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicromolePerLiterPerHour = CommonUCUMUnitsCode("umol/L/h")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicromolePerLiterOfRedBloodCells = CommonUCUMUnitsCode("umol/L{rbc}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Micromole_Meter = CommonUCUMUnitsCode("umol/m")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicromolePerMilligram = CommonUCUMUnitsCode("umol/mg")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Micromole_MilligramCre = CommonUCUMUnitsCode("umol/mg{Cre}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicromolePerMinute = CommonUCUMUnitsCode("umol/min")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicromolePerMinutePerGram = CommonUCUMUnitsCode("umol/min/g")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Micromole_Minute_GramProt = CommonUCUMUnitsCode("umol/min/g{prot}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicromolePerMinutePerGramOfProtein = CommonUCUMUnitsCode("umol/min/g{protein}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicromolePerMinutePerLiter = CommonUCUMUnitsCode("umol/min/L")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicromolePerMilliliter = CommonUCUMUnitsCode("umol/mL")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicromolePerMilliliterPerMinute = CommonUCUMUnitsCode("umol/mL/min")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicromolePerMillimole = CommonUCUMUnitsCode("umol/mmol")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicromolePerMole = CommonUCUMUnitsCode("umol/mol")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Micromole_MoleCre = CommonUCUMUnitsCode("umol/mol{Cre}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicromolePerMoleOfCreatinine = CommonUCUMUnitsCode("umol/mol{creat}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicromolePerMoleOfHemoglobin = CommonUCUMUnitsCode("umol/mol{Hb}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicromolePerMicromole = CommonUCUMUnitsCode("umol/umol")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicroOhm = CommonUCUMUnitsCode("uOhm")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Microsecond = CommonUCUMUnitsCode("us")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicroUnit = CommonUCUMUnitsCode("uU")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicroEnzymeUnitPerGram = CommonUCUMUnitsCode("uU/g")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicroEnzymeUnitPerLiter = CommonUCUMUnitsCode("uU/L")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicroEnzymeUnitPerMilliliter = CommonUCUMUnitsCode("uU/mL")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Microvolt = CommonUCUMUnitsCode("uV")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Volt = CommonUCUMUnitsCode("V")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Week = CommonUCUMUnitsCode("wk")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
_10MicronewtonSecondPerCentimeterToTheFifthPowerPerSquareMeter = (
CommonUCUMUnitsCode("10.uN.s/(cm5.m2)")
)
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
_10ThousandPerMicroliter = CommonUCUMUnitsCode("10*4/uL")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
_24Hour = CommonUCUMUnitsCode("24.h")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Amp_re = CommonUCUMUnitsCode("A")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
AspirinResponseUnit = CommonUCUMUnitsCode("{ARU}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
StandardAtmosphere = CommonUCUMUnitsCode("atm")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
AttogramPerCell = CommonUCUMUnitsCode("ag/{cell}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Becquerel = CommonUCUMUnitsCode("Bq")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
BindingIndex = CommonUCUMUnitsCode("{binding_index}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
BodanskyUnit = CommonUCUMUnitsCode("[bdsk'U]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
CAGTrinucleotideRepeats = CommonUCUMUnitsCode("{CAG_repeats}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Calorie = CommonUCUMUnitsCode("cal")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
CentimeterOfWaterPerSecondPerMeter = CommonUCUMUnitsCode("cm[H2O]/s/m")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
ChangeIn_delta_OpticalDensity = CommonUCUMUnitsCode("{delta_OD}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Copies = CommonUCUMUnitsCode("{copies}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Count = CommonUCUMUnitsCode("{count}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
CountsPerMinute = CommonUCUMUnitsCode("{CPM}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
CountsPerMinutePerThousandCells = CommonUCUMUnitsCode("{CPM}/10*3{cell}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
DekaliterPerMinute = CommonUCUMUnitsCode("daL/min")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
DekaliterPerMinutePerSquareMeter = CommonUCUMUnitsCode("daL/min/m2")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Dilution = CommonUCUMUnitsCode("{dilution}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
DyneSecondPerCentimeter = CommonUCUMUnitsCode("dyn.s/cm")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
DyneSecondPerCentimeterPerSquareMeter = CommonUCUMUnitsCode("dyn.s/(cm.m2)")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
EhrlichUnitPer100Gram = CommonUCUMUnitsCode("{Ehrlich'U}/100.g")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
EIAIndex = CommonUCUMUnitsCode("{EIA_index}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
EIATiter = CommonUCUMUnitsCode("{EIA_titer}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
EIAValue = CommonUCUMUnitsCode("{EV}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
EnzymeUnitPer10 = CommonUCUMUnitsCode("U/10")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
EnzymeUnitPer10Billion = CommonUCUMUnitsCode("U/10*10")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
EnzymeUnitPer10GramOfFeces = CommonUCUMUnitsCode("U/(10.g){feces}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
EnzymeUnitPerGramOfCreatinine = CommonUCUMUnitsCode("U/g{creat}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
EnzymeUnitPerGramOfProtein = CommonUCUMUnitsCode("U/g{protein}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
EnzymeUnitPerLiterAt25DegCelsius = CommonUCUMUnitsCode("U{25Cel}/L")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
EnzymeUnitPerLiterAt37DegCelsius = CommonUCUMUnitsCode("U{37Cel}/L")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
EnzymeUnitPerTrillionRedBloodCells = CommonUCUMUnitsCode("U/10*12{RBCs}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Farad = CommonUCUMUnitsCode("F")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
FemtomolePerMilligramOfCytosolProtein = CommonUCUMUnitsCode(
"fmol/mg{cytosol_protein}"
)
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
FemtomolePerMilligramOfProtein = CommonUCUMUnitsCode("fmol/mg{protein}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
FluorescentIntensityUnit = CommonUCUMUnitsCode("{FIU}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Fraction = CommonUCUMUnitsCode("{fraction}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
GAATrinucleotideRepeats = CommonUCUMUnitsCode("{GAA_repeats}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
GenomesPerMilliliter = CommonUCUMUnitsCode("{genomes}/mL")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Globules_drops_PerHighPowerField = CommonUCUMUnitsCode("{Globules}/[HPF]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
GramMeterPerHeartBeat = CommonUCUMUnitsCode("g.m/{beat}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
GramOfCreatinine = CommonUCUMUnitsCode("g{creat}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
GramOfHemoglobin = CommonUCUMUnitsCode("g{Hb}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
GramOfTotalNitrogen = CommonUCUMUnitsCode("g{total_nit}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
GramOfTotalProtein = CommonUCUMUnitsCode("g{total_prot}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
GramOfWetTissue = CommonUCUMUnitsCode("g{wet_tissue}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
GramPerKilogramPer8Hour = CommonUCUMUnitsCode("g/kg/(8.h)")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
GramPer8HourShift = CommonUCUMUnitsCode("g/(8.h){shift}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
GramPerCubicCentimeter = CommonUCUMUnitsCode("g/cm3")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
GramPerGramOfGlobulin = CommonUCUMUnitsCode("g/g{globulin}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
GramPerKilogramPer8HourShift = CommonUCUMUnitsCode("g/kg/(8.h){shift}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
GramPerKilogramPerHour = CommonUCUMUnitsCode("g/kg/h")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
GramPerKilogramPerMinute = CommonUCUMUnitsCode("g/kg/min")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
GramPerMoleOfCreatinine = CommonUCUMUnitsCode("g/mol{creat}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
GramPerSpecimen = CommonUCUMUnitsCode("g/{specimen}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
GramPerTotalOutput = CommonUCUMUnitsCode("g/{total_output}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
GramPerTotalWeight = CommonUCUMUnitsCode("g/{total_weight}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Gray = CommonUCUMUnitsCode("Gy")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
HeartBeatsPerMinute = CommonUCUMUnitsCode("{beats}/min")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Henry = CommonUCUMUnitsCode("H")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
HighPowerField = CommonUCUMUnitsCode("[HPF]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
IgGAnticardiolipinUnitPerMilliliter_ = CommonUCUMUnitsCode("[GPL'U]/mL")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
IgGAntiphosphatidylserineUnit = CommonUCUMUnitsCode("{GPS'U}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
IgMAnticardiolipinUnitPerMilliliter_ = CommonUCUMUnitsCode("[MPL'U]/mL")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
ImmuneStatusRatio = CommonUCUMUnitsCode("{ISR}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
ImmunofluorescenceAssayIndex = CommonUCUMUnitsCode("{IFA_index}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
ImmunofluorescenceAssayTiter = CommonUCUMUnitsCode("{IFA_titer}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Inch_international_OfWater = CommonUCUMUnitsCode("[in_i'H2O]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
IndexValue = CommonUCUMUnitsCode("{index_val}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
InfluenzaHemagglutinationTiter = CommonUCUMUnitsCode("{HA_titer}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
InternationalUnit = CommonUCUMUnitsCode("[IU]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
InternationalUnitPerLiterAt37DegreesCelsius = CommonUCUMUnitsCode("[IU]/L{37Cel}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
InternationalUnitPerMilligramOfCreatinine = CommonUCUMUnitsCode("[IU]/mg{creat}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Katal = CommonUCUMUnitsCode("kat")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
KiloEnzymeUnit = CommonUCUMUnitsCode("kU")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
KiloEnzymeUnitPerLiterClass = CommonUCUMUnitsCode("kU/L{class}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
KilocaloriePerDay = CommonUCUMUnitsCode("kcal/d")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
KilocaloriePerKilogramPer24Hour = CommonUCUMUnitsCode("kcal/kg/(24.h)")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
KilocaloriePerOunce_US_British_ = CommonUCUMUnitsCode("kcal/[oz_av]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
KingArmstrongUnit = CommonUCUMUnitsCode("[ka'U]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
LiterPer24Hour = CommonUCUMUnitsCode("L/(24.h)")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
LiterPerSecondPerSquareSecond = CommonUCUMUnitsCode("L/s/s2")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Log_base10_CopiesPerMilliliter = CommonUCUMUnitsCode("{Log_copies}/mL")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Log_base10_InternationalUnit = CommonUCUMUnitsCode("{Log_IU}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Log_base10_InternationalUnitPerMilliliter = CommonUCUMUnitsCode("{Log_IU}/mL")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
LogBase10 = CommonUCUMUnitsCode("{Log}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
LowPowerField = CommonUCUMUnitsCode("[LPF]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Lumen = CommonUCUMUnitsCode("lm")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
LumenSquareMeter = CommonUCUMUnitsCode("lm.m2")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
LymeIndexValue = CommonUCUMUnitsCode("{Lyme_index_value}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MacLaganUnit = CommonUCUMUnitsCode("[mclg'U]")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
Megasecond = CommonUCUMUnitsCode("Ms")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicrogramPerGramOfFeces = CommonUCUMUnitsCode("ug/g{feces}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicrogramFibrinogenEquivalentUnitPerMilliliter = CommonUCUMUnitsCode("ug{FEU}/mL")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicrogramPer100Gram = CommonUCUMUnitsCode("ug/(100.g)")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicrogramPerCubicMeter = CommonUCUMUnitsCode("ug/m3")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicrogramPerDeciliterOfRedBloodCells = CommonUCUMUnitsCode("ug/dL{RBCs}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicrogramPerGramOfDryTissue = CommonUCUMUnitsCode("ug/g{dry_tissue}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicrogramPerGramOfDryWeight = CommonUCUMUnitsCode("ug/g{dry_wt}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicrogramPerGramOfHair = CommonUCUMUnitsCode("ug/g{hair}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicrogramPerGramOfHemoglobin = CommonUCUMUnitsCode("ug/g{Hb}")
"""
From: http://hl7.org/fhir/ValueSet/ucum-common in valuesets.xml
"""
MicrogramPerGramOfTissue = CommonUCUMUnitsCode("ug/g{tissue}")
import numpy as np
from scipy import constants as con
from scipy.optimize import minimize, dual_annealing, differential_evolution, shgo
import matplotlib.pyplot as plt
import find_nearest as fn
from matplotlib.colors import BoundaryNorm
from matplotlib.ticker import MaxNLocator
from matplotlib import rcParams
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Arial']
from timeit import default_timer as timer
def as_si(x, ndp):
s = '{x:0.{ndp:d}e}'.format(x=x, ndp=ndp)
m, e = s.split('e')
return r'{m:s}\times 10^{{{e:d}}}'.format(m=m, e=int(e))
def Boltzmann_distribution(energy_diff, T):
'''Energy diff in Joule'''
population_ratio = 1./np.exp(energy_diff/(con.k*T))
#return population_ratio / (1 + population_ratio)
return population_ratio
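# Worked example with assumed numbers (not taken from the script): an energy
# deficit of 0.1 eV (about 1.602e-20 J) at T = 298.15 K gives
#   k*T ~ 1.381e-23 J/K * 298.15 K ~ 4.12e-21 J
#   ratio = exp(-1.602e-20 / 4.12e-21) ~ exp(-3.89) ~ 0.020
# i.e. roughly 2 % of the population is thermally excited enough to react.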
def spectra_integration(data, start, end):
'''Integration of provided 2D data array from start to end values'''
idx = fn.find_nearest(data, (start, end))
data_intv = data[idx[0]:idx[1]]
integral = np.trapz(data_intv[:,1], data_intv[:,0])
return integral
def nm(value):
'''Converts nm to J'''
return ((con.h*con.c)/(value/1e9))
def eV(value):
'''Converts eV to J'''
return value*1.602176e-19
def kcalmol(value):
'''Converts kcalmol to J'''
return (value * 4186.798188)/con.Avogadro
def Jmol(value):
'''Convert Jmol to J'''
return value / con.Avogadro
class Energy:
def __init__(self, value, unit):
        '''value is interpreted according to unit, which is one of the converter
        functions defined above (nm, eV, kcalmol or Jmol); the instance then
        exposes the value as J, eV, nm, kcal/mol and J/mol.'''
self.unit = unit.__name__
self.value = value
self.J = unit(self.value)
self.eV = self.convert_J_to_eV()
self.nm = self.convert_J_to_nm()
self.kcalmol = self.convert_J_to_kcalmol()
self.Jmol = self.convert_J_to_Jmol()
def convert_J_to_eV(self):
return self.J/1.602176e-19
def convert_J_to_nm(self):
return ((con.h*con.c)/self.J)*1e9
def convert_J_to_kcalmol(self):
return (self.J * con.Avogadro)/4186.798188
def convert_J_to_Jmol(self):
return self.J * con.Avogadro
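# Minimal usage sketch for the Energy class (illustrative only): the "unit"
# argument is one of the converter functions defined above (nm, eV, kcalmol,
# Jmol), and the instance then exposes the same value in every unit. The
# numbers below are examples, not values used elsewhere in this script.
def _example_energy_usage():
    green = Energy(532., nm)             # a 532 nm photon
    assert abs(green.eV - 2.33) < 0.01   # roughly 2.33 eV
    gibbs = Energy(237.1e3, Jmol)        # ~237 kJ/mol, water splitting per H2
    return green.eV, gibbs.kcalmol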
class Efficiency:
def __init__(self, gibbs_energy, excess_energy):
        '''Initializes the AM 1.5 G spectra (irradiance, flux and total irradiance) as well as the Gibbs energy of the
        considered reaction and the excess energy (energy that is lost to free-energy and kinetic losses).
        gibbs_energy and excess_energy are Energy class instances.'''
self.am15g_full = self.import_am15g_data()
self.am15g_irradiance = np.c_[self.am15g_full[:,0], self.am15g_full[:,2]]
self.am15g_flux = self.convert_spectrum_to_photon_count(self.am15g_irradiance)
self.total_irradiance = spectra_integration(self.am15g_irradiance, 0., 5000.)
self.gibbs_energy = gibbs_energy
self.excess_energy = excess_energy
def import_am15g_data(self):
'''AM1.5G ASTM G-173 reference data from https://www.nrel.gov/grid/solar-resource/spectra-am1.5.html'''
data = np.loadtxt('../Experimental_Data/ASTMG173.csv', skiprows = 2, delimiter = ',')
return data
def convert_spectrum_to_photon_count(self, spectrum):
'''Input spectrum in nm, W m^-2 nm^-1
returns photon count in photons m^-2 s^-1 nm^-1'''
photon_energies = Energy(spectrum[:,0], nm)
photon_count = spectrum[:,1]/photon_energies.J
return np.c_[spectrum[:,0], photon_count]
def STH_Efficiency(self,reactions_per_second, delta_G, irradiance, area):
'''Solar-to-hydrogen efficiency, reactions_per_second in mol/s, delta_G in J/mol,
irradiance in W/m^2, area in m^2
If delta_G is 237 kJ/mol, reactions_per_second refers to H2 mol/s
        If delta_G is 474 kJ/mol, reactions_per_second refers to 2 H2 mol/s (the input value is halved)
        An arbitrary delta_G value can be used to account for a partial water splitting reaction'''
return (reactions_per_second * delta_G)/(irradiance * area)
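    # Worked example with assumed numbers (not taken from the script):
    # reactions_per_second = 1e-3 mol/s, delta_G = 237000 J/mol,
    # irradiance = 1000 W/m^2 and area = 1 m^2 give
    #   STH = (1e-3 * 237000) / (1000 * 1) = 0.237, i.e. 23.7 %.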
def calc_efficiency(self, wavelengths, number_photons, negative = False, return_reactions = False):
'''Consecutive absorption of photons by a number of intermediates. Number of intermediates determined by
dimension of wavelengths array.
        The intermediate that absorbs the lowest number of photons is the bottleneck and determines the number of
        water splitting reactions per second.
        number_photons is an array of the same length as wavelengths, specifying the number of photons absorbed at the corresponding
        wavelength.
        Based on excess_energy and the provided wavelengths, the amount of available energy (variable "energy") is calculated.
        If the available energy is lower than gibbs_energy, the population of sufficiently energetic particles is calculated
        using a two-state Boltzmann model and this population is multiplied by min(photons_per_second_list) to obtain reactions_per_second.'''
energy = Energy(Energy(wavelengths, nm).eV - self.excess_energy.eV, eV)
energy_difference = Energy(self.gibbs_energy.eV - np.sum(number_photons * energy.eV), eV)
if energy_difference.eV > 0:
pop = Boltzmann_distribution(energy_difference.J, 298.15)
else:
pop = 1.
photons_per_second_list = []
for counter, (wavelength, photons) in enumerate(zip(wavelengths, number_photons)):
if counter == 0:
photons_per_second = spectra_integration(self.am15g_flux, 0., wavelength) / con.Avogadro
else:
photons_per_second = spectra_integration(self.am15g_flux, wavelengths[counter-1], wavelength) / con.Avogadro
photons_per_second_list.append(photons_per_second/photons)
reactions_per_second = pop * min(photons_per_second_list)
sth = self.STH_Efficiency(reactions_per_second, self.gibbs_energy.Jmol, self.total_irradiance, 1.)
if return_reactions is True:
return reactions_per_second
elif negative is True:
return -sth
else:
return sth
def plot_1d(self, wavelengths, number_photons):
sth_values = []
for i in wavelengths:
sth = self.calc_efficiency(np.array([i]), number_photons)
sth_values.append(sth)
sth_values = np.asarray(sth_values)
fig, ax = plt.subplots()
ax.plot(wavelengths, sth_values)
return fig, ax
def plot_2d_colormesh(self, wavelengths_a, wavelengths_b, number_photons, print_maximum = False):
x, y = np.meshgrid(wavelengths_a, wavelengths_b)
sth_values = []
for i in zip(np.ravel(x), np.ravel(y)):
sth = self.calc_efficiency(np.array([i[0], i[1]]), number_photons)
sth_values.append(sth)
sth_values = np.asarray(sth_values)
if print_maximum is True:
idx = np.argmax(sth_values)
print('Maximum STH:', sth_values[idx])
print('Optimal wavelengths:', np.ravel(x)[idx], np.ravel(y)[idx])
sth_values = np.reshape(sth_values, x.shape)
levels = MaxNLocator(nbins=200).tick_values(np.amin(sth_values), np.amax(sth_values))
cmap = plt.get_cmap('inferno')
norm = BoundaryNorm(levels, ncolors=cmap.N, clip = True)
fig, ax = plt.subplots()
im_a = ax.pcolormesh(x, y, sth_values, cmap=cmap, norm=norm)
im_a.set_edgecolor('face')
colorbar = fig.colorbar(im_a, ax=ax)
colorbar.set_label('STH')
ax.set_xlabel(r'Longest absorption wavelength [$\bf{A}$] / nm')
ax.set_ylabel(r'Longest absorption wavelength [$\bf{B}$] / nm')
return fig, ax
def plot_solar_irradiance(self, start, end, ax = plt, offsets = None):
        if offsets is None:
# offsets = np.array([-1, 0, 1]) * 80.
offsets = np.zeros(len(self.optimal_wavelengths))
if end < self.optimal_wavelengths[-1]:
idx_plot = fn.find_nearest(self.am15g_flux, (start, self.optimal_wavelengths[-1]))
else:
idx_plot = fn.find_nearest(self.am15g_flux, (start, end))
data = self.am15g_flux[idx_plot[0]:idx_plot[1]]
ax.plot(data[:,0], data[:,1], color = 'black', linewidth = 1., label = 'AM 1.5 G')
idx_segments = np.asarray(fn.find_nearest(self.am15g_flux, self.optimal_wavelengths))
full_idx_segments = np.insert(idx_segments, 0, start)
midpoints = (full_idx_segments[1:] + full_idx_segments[:-1]) / 2
midpoints = midpoints.astype(int)
segments = np.split(self.am15g_flux, idx_segments)
cmap = plt.get_cmap('inferno')
color_list = cmap(np.linspace(0, 1, 5))
for counter, (i, idx, idx_mid, wavelength, offset) in enumerate(zip(segments[:-1], idx_segments, midpoints, self.optimal_wavelengths, offsets)):
photon_flux = spectra_integration(i, 0., 5000.)
i = np.vstack((i, self.am15g_flux[idx]))
ax.fill_between(i[:,0], i[:,1], color = color_list[counter+1])
ax.annotate(r'$\lambda_{%s}$ =' % (counter + 1) + '\n' + ' %.0f nm' % wavelength + '\nFlux $(m^{-2} s^{-1})$ =\n' + r'${0:s}$'.format(as_si(photon_flux, 2)),
(self.am15g_flux[:,0][idx_mid] + offset, 5e18), ha = 'center', color = color_list[counter])
ax.annotate('Maximum STH:\n' + r'$\bf{%.2f}$' % (self.optimal_efficiency * 100) + r'$\bf{\%}$',
(0.81, 0.73), color = 'black', ha = 'center', xycoords = 'figure fraction')
if ax != plt:
ax.set_xlabel('Wavelength / nm')
ax.set_ylabel(r'Spectral photon flux / $m^{-2} nm^{-1} s^{-1}$')
ax.legend()
ax.set_ylim(0, 6.2e18)
def locate_optimum(self, p_guess, number_photons, method = 'Nelder-Mead', print_output = False):
p = minimize(fun=self.calc_efficiency, x0=p_guess, args=(number_photons, True), method = method)
self.optimal_wavelengths = p.x
self.optimal_efficiency = -p.fun
self.number_photons = number_photons
if print_output is True:
print('Photon sequence:', number_photons, 'Wavelengths (nm):', self.optimal_wavelengths, 'STH (%):', '%.2f' % (100.*self.optimal_efficiency))
def global_optimization(self, number_photons, method = 'differential evolution', print_output = False):
bounds = np.array([300., 3000.])
bounds = np.tile(bounds, (len(number_photons),1))
if method == 'dual annealing':
p = dual_annealing(func=self.calc_efficiency, bounds = bounds, args = (number_photons, True), maxiter = 1000, local_search_options = {'method': 'Nelder-Mead'})
elif method == 'shgo':
p = shgo(func=self.calc_efficiency, bounds = bounds, args =(number_photons, True))
else:
p = differential_evolution(func=self.calc_efficiency, bounds = bounds, args = (number_photons, True), workers = 1)
self.optimal_wavelengths = p.x
self.optimal_efficiency = -p.fun
self.number_photons = number_photons
if print_output is True:
print('Photon sequence:', number_photons, 'Wavelengths (nm):', np.around(self.optimal_wavelengths, 0) , 'STH (%):', '%.2f' % (100.*self.optimal_efficiency))
def monte_carlo_optimization(self, number_photons, samples, print_results = False):
results = []
for _ in range(0, samples):
p_guess = np.random.uniform(300., 3000., number_photons.shape)
p_guess = p_guess[np.argsort(p_guess)]
print(p_guess)
self.locate_optimum(p_guess, number_photons)
sth = self.optimal_efficiency
results.append(sth)
results = np.asarray(results)
results = results[np.argsort(results)]
if print_results is True:
print(results)
def thermal_contribution(self, complete_gibbs_energy, print_output = False):
reactions_per_second = self.calc_efficiency(self.optimal_wavelengths, self.number_photons, return_reactions = True)
thermal_energy = complete_gibbs_energy.Jmol - self.gibbs_energy.Jmol
sth = self.STH_Efficiency(reactions_per_second, thermal_energy, self.total_irradiance, 1.)
if print_output is True:
print('Photon sequence:', self.number_photons, 'Wavelengths (nm):', self.optimal_wavelengths, 'STH thermal contribution (%):', '%.2f' % (100.*sth))
print('Total STH (%):', '%.2f' % (100. * (sth + self.optimal_efficiency)))
def main():
#gibbs_energy = Energy(82., kcalmol)
gibbs_energy = Energy(4*1.229, eV)
excess_energy = Energy(17.5, kcalmol)
#excess_energy = Energy(1., eV)
efficiency = Efficiency(gibbs_energy, excess_energy)
efficiency.locate_optimum(p_guess = np.array([450.]), number_photons = np.array([2.]), print_output = True)
efficiency.locate_optimum(p_guess = np.array([450.]), number_photons = np.array([4.]), print_output = True)
efficiency.locate_optimum(p_guess = np.array([450., 550.]), number_photons = np.array([1., 1.]), print_output = True)
efficiency.locate_optimum(p_guess = np.array([450., 550.]), number_photons = np.array([2., 2.]), print_output = True)
efficiency.locate_optimum(p_guess = np.array([450., 550.]), number_photons = np.array([4., 4.]), print_output = True)
efficiency.locate_optimum(p_guess = np.array([450., 550., 650.]), number_photons = np.array([1., 1., 1.]), print_output = True)
efficiency.locate_optimum(p_guess = np.array([450., 550., 650.]), number_photons = np.array([2., 2., 2.]), print_output = True)
efficiency.locate_optimum(p_guess = np.array([450., 550., 650.]), number_photons = np.array([1., 1., 2.]), print_output = True)
efficiency.locate_optimum(p_guess = np.array([450., 550., 650., 750.]), number_photons = np.array([1., 1., 1., 1.]), print_output = True)
efficiency.locate_optimum(p_guess = np.array([450., 550., 650., 750.])*2., number_photons = np.array([2., 2., 2., 2.]), print_output = True)
#efficiency.locate_optimum(p_guess = np.array([450., 550., 650., 1750.]), number_photons = np.array([2., 2., 2., 2.]), print_output = True)
def secondary():
gibbs_energy = Energy(82.7, kcalmol)
excess_energy = Energy(17.5, kcalmol)
#excess_energy = Energy(1., eV)
efficiency = Efficiency(gibbs_energy, excess_energy)
wavelengths_a = np.linspace(300., 500., 100)
wavelengths_b = np.linspace(300., 1000., 100)
#print(spectra_integration(efficiency.am15g_flux, 0., 455.)/10000.)
fig, ax = efficiency.plot_2d_colormesh(wavelengths_a, wavelengths_b, number_photons = np.array([1., 1.]))
efficiency.locate_optimum(p_guess = np.array([450., 550.]), number_photons = np.array([1., 1.]))
efficiency.thermal_contribution(Energy(117.11, kcalmol), print_output = True)
ax.annotate('Maximum STH: %.2f' % (efficiency.optimal_efficiency * 100) + '%' + '\n%.0f nm' % efficiency.optimal_wavelengths[0] +
', %.0f nm' % efficiency.optimal_wavelengths[1], (0.6, 0.8), color = 'white', ha = 'center', xycoords = 'figure fraction')
return fig
def test():
'''Maximum STH for H2O2 pathway'''
#gibbs_energy = Energy(82.7, kcalmol)
# gibbs_energy = Energy(2*1.229, eV)
gibbs_energy = Energy(2*1.78, eV)
excess_energy = Energy(17.5, kcalmol)
efficiency = Efficiency(gibbs_energy, excess_energy)
efficiency.global_optimization(np.array([1., 1.]), method = 'differential evolution', print_output = True)
print(efficiency.calc_efficiency(np.array([455.30270576, 516.52728081]), np.array([1., 1.])))
print(efficiency.calc_efficiency(np.array([455.30270576, 516.52728081]), np.array([1., 1.]), return_reactions = True))
def tertiary():
gibbs_energy = Energy(4*1.229, eV)
excess_energy = Energy(17.5, kcalmol)
efficiency = Efficiency(gibbs_energy, excess_energy)
efficiency.locate_optimum(p_guess = np.array([450., 550., 650.]), number_photons = np.array([1., 1., 2.]), print_output = True)
fig, ax = plt.subplots()
efficiency.plot_solar_irradiance(200., 1000., ax = ax)
return fig
def quaternary():
gibbs_energy = Energy(4*1.229, eV)
#excess_energy = Energy(17.5, kcalmol)
excess_energy = Energy(1., eV)
efficiency = Efficiency(gibbs_energy, excess_energy)
efficiency.monte_carlo_optimization(np.array([1., 1., 1., 1., 1.])*1, 100, print_results = True)
def quinary():
method = 'differential evolution'
gibbs_energy = Energy(4*1.229, eV)
excess_energy = Energy(1., eV)
#excess_energy = Energy(17.5, kcalmol)
efficiency = Efficiency(gibbs_energy, excess_energy)
start = timer()
efficiency.global_optimization(np.array([2.]), method = method, print_output = True)
efficiency.global_optimization(np.array([4.]), method = method, print_output = True)
efficiency.global_optimization(np.array([1., 1.]), method = method, print_output = True)
efficiency.global_optimization(np.array([2., 2.]), method = method, print_output = True)
efficiency.global_optimization(np.array([4., 4.]), method = method, print_output = True)
efficiency.global_optimization(np.array([1., 1., 1.]), method = method, print_output = True)
efficiency.global_optimization(np.array([2., 2., 2.]), method = method, print_output = True)
    efficiency.global_optimization(np.array([1., 1., 2.]), method = method,
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" util unit tests
"""
import numpy
import random
import pytest
from typing import List, Any
from fqe import util
from fqe.wavefunction import Wavefunction
def permBetween(orig: List[Any], perm: List[Any]) -> int:
"""Checks the parity of the permutation between orig and perm
"""
perm = list(perm)
swaps = 0
for ii in range(len(orig) - 1):
p0 = orig[ii]
p1 = perm[ii]
if p0 != p1:
sii = perm[ii:].index(p0) + ii # Find position in perm
perm[ii], perm[sii] = p0, p1 # Swap in perm
swaps += 1
return swaps % 2
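# Quick illustration (not part of the original tests): permBetween returns the
# permutation parity, 0 for an even and 1 for an odd number of swaps, e.g.
#   permBetween([0, 1, 2], [1, 0, 2]) == 1   # a single transposition
#   permBetween([0, 1, 2], [1, 2, 0]) == 0   # a 3-cycle needs two transpositions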
def test_alpha_beta_electrons():
"""Check to make sure that the correct number of alpha and beta
electrons are parsed from the number and multiplicity
"""
assert (1, 1) == util.alpha_beta_electrons(2, 0)
assert (4, 1) == util.alpha_beta_electrons(5, 3)
assert (0, 5) == util.alpha_beta_electrons(5, -5)
def test_alpha_beta_error():
"""Check to make sure that alpha_beta_electrons() throws the right errors
"""
with pytest.raises(ValueError):
util.alpha_beta_electrons(-1, 0)
with pytest.raises(ValueError):
util.alpha_beta_electrons(2, 4)
with pytest.raises(ValueError):
util.alpha_beta_electrons(4, 1)
def test_bubblesort_order():
"""Check that bubble sort works.
"""
length = 5
test_list = [(length - 1 - i) for i in range(length)]
ordered_list = [i for i in range(length)]
util.bubblesort(test_list)
assert ordered_list == test_list
def test_bubblesort_permutation_count():
""" Make sure that we are counting the correct number of permutations
to sort the list
"""
length = 2
test_list = [(length - 1 - i) for i in range(length)]
assert 1 == util.bubblesort(test_list)
length = 3
test_list = [(length - 1 - i) for i in range(length)]
assert 3 == util.bubblesort(test_list)
test_list = [2, 0, 1]
assert 2 == util.bubblesort(test_list)
def test_reverse_bubblesort_permutation_count():
""" Make sure that we are counting the correct number of permutations
to sort the list
"""
test_list = [[0, 0], [1, 0]]
assert 1 == util.reverse_bubble_list(test_list)
test_list = [[0, 0], [1, 0], [2, 0]]
assert 3 == util.reverse_bubble_list(test_list)
test_list = [[0, 0], [2, 0], [1, 0]]
assert 2 == util.reverse_bubble_list(test_list)
def test_configuration_key_union_empty():
"""The union of no configuration keys should be an empty list
"""
assert [] == util.configuration_key_union()
def test_configuration_key_union():
"""The union of no configuration keys should be an empty list
"""
configs0 = [(2, 0), (3, 1)]
configs1 = [(2, 0), (5, 1), (6, -2)]
testset = set([(2, 0), (3, 1), (5, 1), (6, -2)])
assert testset == set(util.configuration_key_union(configs0, configs1))
def test_configuration_key_union_many():
"""The union of many different keys should be all of them
"""
configs0 = [(2, 0)]
configs1 = [(5, 1)]
configs2 = [(6, -2)]
configs3 = [(3, -3)]
refset = set([(2, 0), (5, 1), (6, -2), (3, -3)])
testset = set(
util.configuration_key_union(configs0, configs1, configs2, configs3))
assert testset == refset
def test_configuration_key_intersection_none():
"""If there are no keys in common the intersection should be zero
"""
assert [] == util.configuration_key_intersection([(2, 0)], [(2, 2)])
def test_configuration_key_intersection():
"""Check that the intersection returns the intersection
"""
configs0 = [(10, 0), (3, 1), (5, -1)]
configs1 = [(2, 0), (3, 1), (3, -1)]
configs2 = [(10, 0), (3, 1), (3, -1)]
assert [
(3, 1)
] == util.configuration_key_intersection(configs0, configs1, configs2)
def test_bitstring_groundstate():
"""The ground state bitstring has the n lowest bits flipped
"""
assert 15 == util.init_bitstring_groundstate(4)
def test_qubit_particle_number_sector():
"""Find the vectors which are the basis for a particular particle
number.
"""
zero = 0
one = 1
ref = [
numpy.array([zero, zero, one, zero], dtype=numpy.int32),
numpy.array([zero, one, zero, zero], dtype=numpy.int32)
]
test = util.qubit_particle_number_sector(2, 1)
for i, j in zip(test, ref):
assert i.all() == j.all()
def test_qubit_particle_number_index_spin():
"""Find the indexes which point to the correct coefficients in a qubit
particle number sector and return the total spin.
"""
ref = [(3, 0), (5, -2), (6, 0), (9, 0), (10, 2), (12, 0)]
test = util.qubit_particle_number_index_spin(4, 2)
assert ref == test
def test_qubit_config_sector():
"""Find the basis vectors for a particular particle number and spin
configuration
"""
zero = 0
one = 1
lowstate = [
zero, zero, one, zero, zero, zero, zero, zero, zero, zero, zero, zero,
zero, zero, zero, zero
]
highstate = [
zero, zero, zero, zero, zero, zero, zero, zero, one, zero, zero, zero,
zero, zero, zero, zero
]
ref = [
numpy.array(lowstate, dtype=numpy.int32),
numpy.array(highstate, dtype=numpy.int32)
]
test = util.qubit_config_sector(4, 1, 1)
for i, j in zip(test, ref):
assert i.all() == j.all()
ref = [numpy.array([zero, zero, zero, one], dtype=numpy.int32)]
test = util.qubit_config_sector(2, 2, 0)
for i, j in zip(test, ref):
assert i.all() == j.all()
def test_qubit_particle_number_index():
"""Find the indexes which point to the correct coefficients in a qubit
particle number sector and return the total spin.
"""
ref = [1, 2, 4, 8]
test = util.qubit_particle_number_index(4, 1)
assert ref == test
def test_qubit_vacuum():
"""The qubit vacuum is the first vector in the qubit basis.
"""
_gs = numpy.array([1. + .0j, 0. + .0j, 0. + .0j, 0. + .0j],
dtype=numpy.complex64)
assert list(_gs) == list(util.init_qubit_vacuum(2))
def test_sort_config_keys():
"""Keys are sorted by particle number and then by m_s
"""
ref = [(0, 0), (1, -1), (3, -3), (3, 1), (5, -2), (5, 1)]
keys = [(5, 1), (5, -2), (0, 0), (1, -1), (3, -3), (3, 1)]
test = util.sort_configuration_keys(keys)
assert test == ref
def test_validate_config():
"""Make sure that the configuration validation routine identifies
problematic values
"""
with pytest.raises(ValueError):
util.validate_config(0, 0, -1)
with pytest.raises(ValueError):
util.validate_config(3, 0, 2)
with pytest.raises(ValueError):
util.validate_config(0, 3, 2)
with pytest.raises(ValueError):
util.validate_config(-1, 1, 2)
with pytest.raises(ValueError):
util.validate_config(1, -1, 2)
assert util.validate_config(0, 0, 0) is None
assert util.validate_config(0, 0, 1) is None
def test_parity_sort_list():
"""Sort a list of lists according to the parity of the index in the 0th
element.
"""
test = [[x, -x, {'Unused': True}] for x in range(19)]
random.shuffle(test) # randomly shuffled array
test_copy = list(test)
test_even = [x for x in test if x[0] % 2 == 0]
test_odd = [x for x in test if x[0] % 2 == 1]
nswap, _ = util.paritysort_list(test)
assert test_even + test_odd == test # First even elements, then odds
assert permBetween(test, test_copy) == nswap % 2
def test_parity_sort_int():
"""Sort a list of ints according to the parity of the element.
"""
test = list(range(19))
random.shuffle(test) # randomly shuffled array
test_copy = list(test)
test_even = [x for x in test if x % 2 == 0]
test_odd = [x for x in test if x % 2 == 1]
nswap, _ = util.paritysort_int(test)
assert test_even + test_odd == test # First even elements, then odds
assert permBetween(test, test_copy) == nswap % 2
def test_rand_wfn():
"""Check rand_wfn
"""
adim = 10
bdim = 9
test = util.rand_wfn(adim, bdim)
assert test.shape == (adim, bdim)
assert test.dtype == numpy.complex128
def test_validate_tuple():
"""Check validate_tuple. assert is evaluated in the function.
"""
param = (numpy.zeros((2, 2)), numpy.zeros((2, 2, 2, 2)))
util.validate_tuple(param)
def test_dot():
numpy.random.seed(seed=409)
wfn1 = Wavefunction([[2, 0, 2]])
wfn1.set_wfn(strategy='random')
wfn2 = Wavefunction([[2, 0, 2]])
wfn2.set_wfn(strategy='random')
assert abs(util.dot(wfn1, wfn2) - (-0.1872999545144855+0.21646742443751746j)) \
< 1.0e-8
wfn3 = Wavefunction([[2, 2, 2]])
wfn3.set_wfn(strategy='random')
assert util.dot(wfn1, wfn3) == 0.0
def test_vdot():
numpy.random.seed(seed=409)
wfn1 = Wavefunction([[2, 0, 2]])
wfn1.set_wfn(strategy='random')
wfn2 = Wavefunction([[2, 0, 2]])
wfn2.set_wfn(strategy='random')
assert abs(util.vdot(wfn1, wfn2) - (-0.04163626246314951-0.43391345135564796j)) \
< 1.0e-8
wfn3 = Wavefunction([[2, 2, 2]])
wfn3.set_wfn(strategy='random')
assert util.vdot(wfn1, wfn3) == 0.0
@pytest.mark.parametrize("sz,norb", [[1, 10], [0, 5], [-4, 7], [7, 7]])
def test_map_broken_symmetry(sz, norb):
"""Checks map_broken_symmetry
"""
mapping = util.map_broken_symmetry(sz, norb)
assert set(k[0] - k[1] for k in mapping) == set([sz])
assert len(mapping) == norb - abs(sz) + 1
assert set(v[0] + v[1] for v in mapping.values()) == set([norb + sz])
def test_tensors_equal():
""" Test tensors_equal comparison function
"""
tensor1 = { '0' : numpy.zeros((2,2), dtype=numpy.complex128), \
'1' : numpy.zeros((2,2), dtype=numpy.complex128) }
tensor2 = { '0' : numpy.zeros((2,2), dtype=numpy.complex128), \
'1' : numpy.ones((2,2), dtype=numpy.complex128) }
tensor3 = {'0': numpy.zeros((2, 2), dtype=numpy.complex128)}
    assert util.tensors_equal(tensor1,
affected pixels
Returns
-------
Nothing, modifies DQ extension of `flt_file` in place.
"""
import scipy.ndimage as nd
flt = pyfits.open(flt_file, mode='update')
sat = (((flt['DQ'].data & 256) > 0) & ((flt['DQ'].data & 4) == 0))
## Don't flag pixels in lower right corner
sat[:80,-80:] = False
## Flag only if a number of nearby pixels also saturated
kern = np.ones((3,3))
sat_grow = nd.convolve(sat*1, kern)
sat_mask = (sat & (sat_grow > 2))[::-1,:]*1
NSAT = sat_mask.sum()
if verbose:
print('{0}: flagged {1:d} pixels affected by saturation pulldown'.format(flt_file, NSAT))
if NSAT > 0:
flt['DQ'].data[sat_mask > 0] |= dq_value
flt.flush()
def clip_lists(input, output, clip=20):
"""TBD
Clip [x,y] arrays of objects that don't have a match within `clip` pixels
in either direction
"""
import scipy.spatial
tree = scipy.spatial.cKDTree(input, 10)
### Forward
N = output.shape[0]
dist, ix = np.zeros(N), np.zeros(N, dtype=int)
for j in range(N):
dist[j], ix[j] = tree.query(output[j,:], k=1,
distance_upper_bound=np.inf)
ok = dist < clip
out_arr = output[ok]
if ok.sum() == 0:
print('No matches within `clip={0:f}`'.format(clip))
return False
### Backward
tree = scipy.spatial.cKDTree(out_arr, 10)
N = input.shape[0]
dist, ix = np.zeros(N), np.zeros(N, dtype=int)
for j in range(N):
dist[j], ix[j] = tree.query(input[j,:], k=1,
distance_upper_bound=np.inf)
ok = dist < clip
in_arr = input[ok]
return in_arr, out_arr
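def _demo_clip_lists():
    """Minimal sketch of `clip_lists` on synthetic [x, y] arrays (illustrative
    values only): mutually close points are kept, isolated points are dropped.
    """
    xy_a = np.array([[10., 10.], [50., 50.], [500., 500.]])
    xy_b = np.array([[12., 11.], [48., 52.]])
    in_arr, out_arr = clip_lists(xy_a, xy_b, clip=20)
    # The isolated (500, 500) point has no counterpart within 20 pixels and is
    # dropped from the first list; the two mutually close pairs survive.
    return in_arr, out_arr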
def match_lists(input, output, transform=None, scl=3600., simple=True,
outlier_threshold=5, toler=5, triangle_size_limit=[5, 800],
triangle_ba_max=0.9):
"""TBD
Compute matched objects and transformation between two [x,y] lists.
If `transform` is None, use Similarity transform (shift, scale, rot)
"""
import copy
from astropy.table import Table
import skimage.transform
from skimage.measure import ransac
import stsci.stimage
if transform is None:
transform = skimage.transform.SimilarityTransform
#print 'xyxymatch'
if (len(output) == 0) | (len(input) == 0):
print('No entries!')
return input, output, None, transform()
try:
from tristars import match
pair_ix = match.match_catalog_tri(input, output, maxKeep=10, auto_keep=3, auto_transform=transform, auto_limit=outlier_threshold, size_limit=triangle_size_limit, ignore_rot=False, ignore_scale=True, ba_max=triangle_ba_max)
input_ix = pair_ix[:,0]
output_ix = pair_ix[:,1]
print(' tristars.match: Nin={0}, Nout={1}, match={2}'.format(len(input), len(output), len(output_ix)))
#print('xxx Match from tristars!')
if False:
fig = match.match_diagnostic_plot(input, output, pair_ix, tf=None, new_figure=True)
fig.savefig('/tmp/xtristars.png')
plt.close(fig)
tform = match.get_transform(input, output, pair_ix, transform=transform, use_ransac=True)
except:
match = stsci.stimage.xyxymatch(copy.copy(input), copy.copy(output),
origin=np.median(input, axis=0),
mag=(1.0, 1.0), rotation=(0.0, 0.0),
ref_origin=np.median(input, axis=0),
algorithm='tolerance', tolerance=toler,
separation=0.5, nmatch=10, maxratio=10.0,
nreject=10)
m = Table(match)
output_ix = m['ref_idx'].data
input_ix = m['input_idx'].data
print(' xyxymatch.match: Nin={0}, Nout={1}, match={2}'.format(len(input), len(output), len(output_ix)))
tf = transform()
tf.estimate(input[input_ix,:], output[output_ix])
if not simple:
model, inliers = ransac((input[input_ix,:], output[output_ix,:]),
transform, min_samples=3,
residual_threshold=3, max_trials=100)
# Iterate
if inliers.sum() > 2:
m_i, in_i = ransac((input[input_ix[inliers],:], output[output_ix[inliers],:]),
transform, min_samples=3,
residual_threshold=3, max_trials=100)
if in_i.sum() > 2:
model = m_i
inliers[np.arange(len(inliers), dtype=np.int)[inliers][in_i]] = False
outliers = ~inliers
mout = model(input[input_ix,:])
dx = mout - output[output_ix]
else:
model = tf
### Compute statistics
if len(input_ix) > 10:
mout = tf(input[input_ix,:])
dx = mout - output[output_ix]
dr = np.sqrt(np.sum(dx**2, axis=1))
outliers = dr > outlier_threshold
else:
outliers = np.zeros(len(input_ix), dtype=bool)
return input_ix, output_ix, outliers, model
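def _demo_match_lists():
    """Sketch of `match_lists` on a point list matched against a shifted copy
    of itself (requires the optional `tristars` or `stsci.stimage` matchers;
    intended only to illustrate the calling convention). The fitted
    SimilarityTransform translation should come out close to (3, -2).
    """
    rng = np.random.RandomState(1)
    xy_in = rng.uniform(0, 1000, size=(50, 2))
    xy_out = xy_in + np.array([3., -2.])
    input_ix, output_ix, outliers, model = match_lists(xy_in, xy_out)
    return model.translation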
def align_drizzled_image(root='', mag_limits=[14,23], radec=None, NITER=3,
clip=20, log=True, outlier_threshold=5,
verbose=True, guess=[0., 0., 0., 1], simple=True,
rms_limit=2, use_guess=False,
triangle_size_limit=[5,1800],
triangle_ba_max=0.9, max_err_percentile=80):
"""TBD
"""
if not os.path.exists('{0}.cat.fits'.format(root)):
#cat = make_drz_catalog(root=root)
cat = make_SEP_catalog(root=root)
else:
cat = Table.read('{0}.cat.fits'.format(root))
if max_err_percentile < 100:
ecol = 'FLUXERR_APER_0'
if ecol in cat:
emax = np.percentile(cat[ecol], max_err_percentile)
cat = cat[cat[ecol] < emax]
if hasattr(radec, 'upper'):
rd_ref = np.loadtxt(radec)
radec_comment = radec
elif radec is False:
# Align to self, i.e., do nothing
so = np.argsort(cat['MAG_AUTO'])
rd_ref = np.array([cat['X_WORLD'], cat['Y_WORLD']]).T[so[:50],:]
radec_comment = 'self catalog'
else:
rd_ref = radec*1
radec_comment = 'input arrays (N={0})'.format(rd_ref.shape)
### Clip obviously distant files to speed up match
# rd_cat = np.array([cat['X_WORLD'], cat['Y_WORLD']])
# rd_cat_center = np.median(rd_cat, axis=1)
# cosdec = np.array([np.cos(rd_cat_center[1]/180*np.pi),1])
# dr_cat = np.sqrt(np.sum((rd_cat.T-rd_cat_center)**2*cosdec**2, axis=1))
#
# #print('xxx', rd_ref.shape, rd_cat_center.shape, cosdec.shape)
#
# dr = np.sqrt(np.sum((rd_ref-rd_cat_center)**2*cosdec**2, axis=1))
#
# rd_ref = rd_ref[dr < 1.1*dr_cat.max(),:]
ok = (cat['MAG_AUTO'] > mag_limits[0]) & (cat['MAG_AUTO'] < mag_limits[1])
if ok.sum() == 0:
print('{0}.cat: no objects found in magnitude range {1}'.format(root,
mag_limits))
return False
xy_drz = np.array([cat['X_IMAGE'][ok], cat['Y_IMAGE'][ok]]).T
drz_file = glob.glob('{0}_dr[zc]_sci.fits'.format(root))[0]
drz_im = pyfits.open(drz_file)
sh = drz_im[0].data.shape
drz_wcs = pywcs.WCS(drz_im[0].header, relax=True)
orig_wcs = drz_wcs.copy()
if use_guess:
drz_wcs = utils.transform_wcs(drz_wcs, guess[:2], guess[2], guess[3])
return orig_wcs, drz_wcs, guess[:2], guess[2]/np.pi*180, guess[3]
##########
# Only include reference objects in the DRZ footprint
ref_x, ref_y = drz_wcs.all_world2pix(rd_ref[:,0], rd_ref[:,1], 0)
ref_cut = (ref_x > -100) & (ref_x < drz_wcs._naxis1+100) & (ref_y > -100) & (ref_y < drz_wcs._naxis2+100)
if ref_cut.sum() == 0:
print('{0}: no reference objects found in the DRZ footprint'.format(root))
return False
rd_ref = rd_ref[ref_cut,:]
########
# Match surface density of drizzled and reference catalogs
icut = np.minimum(len(cat)-2, int(2*ref_cut.sum()))
cut = np.argsort(cat['MAG_AUTO'])[:icut]
xy_drz = np.array([cat['X_IMAGE'][cut], cat['Y_IMAGE'][cut]]).T
#out_shift, out_rot, out_scale = np.zeros(2), 0., 1.
out_shift, out_rot, out_scale = guess[:2], guess[2], guess[3]
drz_wcs = utils.transform_wcs(drz_wcs, out_shift, out_rot, out_scale)
print('{0} (guess) : {1:6.2f} {2:6.2f} {3:7.3f} {4:7.3f}'.format(root, guess[0], guess[1], guess[2]/np.pi*180, 1./guess[3]))
drz_crpix = drz_wcs.wcs.crpix
NGOOD, rms = 0, 0
for iter in range(NITER):
#print('xx iter {0} {1}'.format(iter, NITER))
xy = np.array(drz_wcs.all_world2pix(rd_ref, 0))
pix = np.cast[int](np.round(xy)).T
### Find objects where drz pixels are non-zero
okp = (pix[0,:] > 0) & (pix[1,:] > 0)
okp &= (pix[0,:] < sh[1]) & (pix[1,:] < sh[0])
ok2 = drz_im[0].data[pix[1,okp], pix[0,okp]] != 0
N = ok2.sum()
if clip > 0:
status = clip_lists(xy_drz-drz_crpix, xy+1-drz_crpix, clip=clip)
            if not status:
                # No mutual matches within `clip`: fall back to the unclipped
                # lists instead of trying to unpack the boolean return value
                print('No matches within clip={0}; using unclipped lists'.format(clip))
                input, output = xy_drz+0.-drz_crpix, xy+1-drz_crpix
            else:
                input, output = status
        else:
            input, output = xy_drz+0.-drz_crpix, xy+1-drz_crpix
#print np.sum(input) + np.sum(output)
toler=5
titer=0
while (titer < 3):
try:
res = match_lists(output, input, scl=1., simple=simple,
outlier_threshold=outlier_threshold, toler=toler,
triangle_size_limit=triangle_size_limit,
triangle_ba_max=triangle_ba_max)
output_ix, input_ix, outliers, tf = res
break
except:
toler += 5
titer += 1
#print(output.shape, output_ix.shape, output_ix.min(), output_ix.max(), titer, toler, input_ix.shape, input.shape)
titer = 0
while (len(input_ix)*1./len(input) < 0.1) & (titer < 3):
titer += 1
toler += 5
try:
res = match_lists(output, input, scl=1., simple=simple,
outlier_threshold=outlier_threshold,
toler=toler,
triangle_size_limit=triangle_size_limit,
triangle_ba_max=triangle_ba_max)
except:
pass
output_ix, input_ix, outliers, tf = res
#print(output.shape, output_ix.shape, output_ix.min(), output_ix.max(), titer, toler, input_ix.shape, input.shape)
tf_out = tf(output[output_ix])
dx = input[input_ix] - tf_out
rms = utils.nmad(np.sqrt((dx**2).sum(axis=1)))
#outliers = outliers | (np.sqrt((dx**2).sum(axis=1)) > 4*rms)
outliers = (np.sqrt((dx**2).sum(axis=1)) > 4*rms)
if outliers.sum() > 0:
res2 = match_lists(output[output_ix][~outliers],
input[input_ix][~outliers], scl=1.,
simple=simple,
outlier_threshold=outlier_threshold,
toler=toler,
triangle_size_limit=triangle_size_limit,
triangle_ba_max=triangle_ba_max)
output_ix2, input_ix2, outliers2, tf = res2
if verbose:
shift = tf.translation
NGOOD = (~outliers).sum()
print('{0} ({1:d}) {2:d}: {3:6.2f} {4:6.2f} {5:7.3f} {6:7.3f}'.format(root,iter,NGOOD,
shift[0], shift[1],
tf.rotation/np.pi*180,
1./tf.scale))
out_shift += tf.translation
out_rot -= tf.rotation
out_scale *= tf.scale
drz_wcs = utils.transform_wcs(drz_wcs, tf.translation, tf.rotation,
tf.scale)
# drz_wcs.wcs.crpix += tf.translation
# theta = -tf.rotation
# _mat = np.array([[np.cos(theta), -np.sin(theta)],
# [np.sin(theta), np.cos(theta)]])
#
# drz_wcs.wcs.cd = np.dot(drz_wcs.wcs.cd, _mat)/tf.scale
# Bad fit
if (rms > rms_limit) | (NGOOD < 3):
drz_wcs = orig_wcs
out_shift = [0,0]
out_rot = 0.
out_scale = 1.
log = False
if log:
tf_out = tf(output[output_ix][~outliers])
dx = input[input_ix][~outliers] - tf_out
rms = utils.nmad(np.sqrt((dx**2).sum(axis=1)))
interactive_status=plt.rcParams['interactive']
plt.ioff()
fig = plt.figure(figsize=[6.,6.])
ax = fig.add_subplot(111)
ax.scatter(dx[:,0], dx[:,1], alpha=0.5, color='b')
ax.scatter([0],[0], marker='+', color='red', s=40)
ax.set_xlabel(r'$dx$'); ax.set_ylabel(r'$dy$')
ax.set_title(root)
ax.set_xlim(-7*rms, 7*rms)
ax.set_ylim(-7*rms, 7*rms)
ax.grid()
fig.tight_layout(pad=0.1)
fig.savefig('{0}_wcs.png'.format(root))
plt.close()
if interactive_status:
plt.ion()
log_wcs(root, orig_wcs, out_shift, out_rot/np.pi*180, out_scale, rms,
n=NGOOD, initialize=False,
comment=['radec: {0}'.format(radec_comment)])
return orig_wcs, drz_wcs, out_shift, out_rot/np.pi*180, out_scale
def log_wcs(root, drz_wcs, shift, rot, scale, rms=0., n=-1, initialize=True, comment=[]):
"""Save WCS offset information to a file
"""
if (not os.path.exists('{0}_wcs.log'.format(root))) | initialize:
print('Initialize {0}_wcs.log'.format(root))
orig_hdul = pyfits.HDUList()
fp = open('{0}_wcs.log'.format(root), 'w')
fp.write('# ext xshift yshift rot scale rms N\n')
for c in comment:
fp.write('# {0}\n'.format(c))
fp.write('# {0}\n'.format(root))
count = 0
else:
orig_hdul = pyfits.open('{0}_wcs.fits'.format(root))
fp = open('{0}_wcs.log'.format(root), 'a')
count = len(orig_hdul)
hdu = drz_wcs.to_fits()[0]
orig_hdul.append(hdu)
orig_hdul.writeto('{0}_wcs.fits'.format(root), overwrite=True)
fp.write('{0:5d} {1:13.4f} {2:13.4f} {3:13.4f} {4:13.5f} {5:13.3f} {6:4d}\n'.format(
        count, shift[0], shift[1],
path or in a URI-like format,
including scheme. A dataPath argument may include a single '*' wildcard character in the filename.
dims: tuple of positive int
Dimensions of input image data, ordered with the fastest-changing dimension first.
ext: string, optional, default "stack"
Extension required on data files to be loaded.
blockSize: string formatted as e.g. "64M", "512k", "2G", or positive int. optional, default "150M"
Requested size of Series partitions in bytes (or kilobytes, megabytes, gigabytes).
dtype: dtype or dtype specifier, optional, default 'int16'
Numpy dtype of input stack data
newDtype: dtype or dtype specifier or string 'smallfloat' or None, optional, default 'smallfloat'
Numpy dtype of output series data. Most methods expect Series data to be floating-point. Input data will be
cast to the requested `newdtype` if not None - see Data `astype()` method.
casting: 'no'|'equiv'|'safe'|'same_kind'|'unsafe', optional, default 'safe'
Casting method to pass on to numpy's `astype()` method; see numpy documentation for details.
startIdx, stopIdx: nonnegative int. optional.
Indices of the first and last-plus-one data file to load, relative to the sorted filenames matching
`dataPath` and `ext`. Interpreted according to python slice indexing conventions.
recursive: boolean, default False
If true, will recursively descend directories rooted at dataPath, loading all files in the tree that
have an extension matching 'ext'. Recursive loading is currently only implemented for local filesystems
(not s3).
"""
seriesBlocks, npointsInSeries, newDtype = \
self._getSeriesBlocksFromStack(dataPath, dims, ext=ext, blockSize=blockSize, dtype=dtype,
newDtype=newDtype, casting=casting, startIdx=startIdx, stopIdx=stopIdx,
recursive=recursive)
return Series(seriesBlocks, dims=dims, dtype=newDtype, index=arange(npointsInSeries))
def fromTif(self, dataPath, ext="tif", blockSize="150M", newDtype='smallfloat', casting='safe',
startIdx=None, stopIdx=None, recursive=False):
"""Load a Series object from multipage tiff files.
Parameters
----------
dataPath: string
Path to data files or directory, specified as either a local filesystem path or in a URI-like format,
including scheme. A dataPath argument may include a single '*' wildcard character in the filename.
ext: string, optional, default "tif"
Extension required on data files to be loaded.
blockSize: string formatted as e.g. "64M", "512k", "2G", or positive int. optional, default "150M"
Requested size of Series partitions in bytes (or kilobytes, megabytes, gigabytes).
newDtype: dtype or dtype specifier or string 'smallfloat' or None, optional, default 'smallfloat'
Numpy dtype of output series data. Most methods expect Series data to be floating-point. Input data will be
cast to the requested `newdtype` if not None - see Data `astype()` method.
casting: 'no'|'equiv'|'safe'|'same_kind'|'unsafe', optional, default 'safe'
Casting method to pass on to numpy's `astype()` method; see numpy documentation for details.
startIdx, stopIdx: nonnegative int. optional.
Indices of the first and last-plus-one data file to load, relative to the sorted filenames matching
`dataPath` and `ext`. Interpreted according to python slice indexing conventions.
recursive: boolean, default False
If true, will recursively descend directories rooted at dataPath, loading all files in the tree that
have an extension matching 'ext'. Recursive loading is currently only implemented for local filesystems
(not s3).
"""
seriesBlocks, metadata = self._getSeriesBlocksFromMultiTif(dataPath, ext=ext, blockSize=blockSize,
newDtype=newDtype, casting=casting,
startIdx=startIdx, stopIdx=stopIdx,
recursive=recursive)
dims, npointsInSeries, dtype = metadata
return Series(seriesBlocks, dims=Dimensions.fromTuple(dims[::-1]), dtype=dtype,
index=arange(npointsInSeries))
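    # Usage sketch (assumes a SeriesLoader-style object `loader` built on a
    # SparkContext; the path and attribute names here are illustrative only):
    #
    #   series = loader.fromTif("path/to/tif_stack/", ext="tif",
    #                           blockSize="64M", newDtype="smallfloat")
    #   series.dims, series.index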
def __saveSeriesRdd(self, seriesBlocks, outputDirPath, dims, npointsInSeries, dtype, overwrite=False):
if not overwrite:
self._checkOverwrite(outputDirPath)
overwrite = True # prevent additional downstream checks for this path
writer = getParallelWriterForPath(outputDirPath)(outputDirPath, overwrite=overwrite,
awsCredentialsOverride=self.awsCredentialsOverride)
def blockToBinarySeries(kvIter):
label = None
keyPacker = None
buf = StringIO()
for seriesKey, series in kvIter:
if keyPacker is None:
keyPacker = struct.Struct('h'*len(seriesKey))
label = SimpleBlocks.getBinarySeriesNameForKey(seriesKey) + ".bin"
buf.write(keyPacker.pack(*seriesKey))
buf.write(series.tostring())
val = buf.getvalue()
buf.close()
return [(label, val)]
seriesBlocks.mapPartitions(blockToBinarySeries).foreach(writer.writerFcn)
writeSeriesConfig(outputDirPath, len(dims), npointsInSeries, valueType=dtype, overwrite=overwrite,
awsCredentialsOverride=self.awsCredentialsOverride)
def saveFromStack(self, dataPath, outputDirPath, dims, ext="stack", blockSize="150M", dtype='int16',
newDtype=None, casting='safe', startIdx=None, stopIdx=None, overwrite=False, recursive=False):
"""Write out data from binary image stack files in the Series data flat binary format.
Parameters
----------
dataPath: string
Path to data files or directory, specified as either a local filesystem path or in a URI-like format,
including scheme. A dataPath argument may include a single '*' wildcard character in the filename.
outputDirPath: string
Path to a directory into which to write Series file output. An outputdir argument may be either a path
on the local file system or a URI-like format, as in dataPath.
dims: tuple of positive int
Dimensions of input image data, ordered with the fastest-changing dimension first.
ext: string, optional, default "stack"
Extension required on data files to be loaded.
blockSize: string formatted as e.g. "64M", "512k", "2G", or positive int. optional, default "150M"
Requested size of Series partitions in bytes (or kilobytes, megabytes, gigabytes).
dtype: dtype or dtype specifier, optional, default 'int16'
Numpy dtype of input stack data
newDtype: floating-point dtype or dtype specifier or string 'smallfloat' or None, optional, default None
Numpy dtype of output series binary data. Input data will be cast to the requested `newdtype` if not None
- see Data `astype()` method.
casting: 'no'|'equiv'|'safe'|'same_kind'|'unsafe', optional, default 'safe'
Casting method to pass on to numpy's `astype()` method; see numpy documentation for details.
startIdx, stopIdx: nonnegative int. optional.
Indices of the first and last-plus-one data file to load, relative to the sorted filenames matching
`dataPath` and `ext`. Interpreted according to python slice indexing conventions.
overwrite: boolean, optional, default False
If true, the directory specified by outputdirpath will first be deleted, along with all its contents, if it
already exists. If false, a ValueError will be thrown if outputdirpath is found to already exist.
"""
if not overwrite:
self._checkOverwrite(outputDirPath)
overwrite = True # prevent additional downstream checks for this path
seriesBlocks, npointsInSeries, newDtype = \
self._getSeriesBlocksFromStack(dataPath, dims, ext=ext, blockSize=blockSize, dtype=dtype,
newDtype=newDtype, casting=casting, startIdx=startIdx, stopIdx=stopIdx,
recursive=recursive)
self.__saveSeriesRdd(seriesBlocks, outputDirPath, dims, npointsInSeries, newDtype, overwrite=overwrite)
def saveFromTif(self, dataPath, outputDirPath, ext="tif", blockSize="150M",
newDtype=None, casting='safe', startIdx=None, stopIdx=None,
overwrite=False, recursive=False):
"""Write out data from multipage tif files in the Series data flat binary format.
Parameters
----------
dataPath: string
Path to data files or directory, specified as either a local filesystem path or in a URI-like format,
including scheme. A dataPath argument may include a single '*' wildcard character in the filename.
        outputDirPath: string
Path to a directory into which to write Series file output. An outputdir argument may be either a path
on the local file system or a URI-like format, as in dataPath.
ext: string, optional, default "stack"
Extension required on data files to be loaded.
blockSize: string formatted as e.g. "64M", "512k", "2G", or positive int. optional, default "150M"
Requested size of Series partitions in bytes (or kilobytes, megabytes, gigabytes).
newDtype: floating-point dtype or dtype specifier or string 'smallfloat' or None, optional, default None
Numpy dtype of output series binary data. Input data will be cast to the requested `newdtype` if not None
- see Data `astype()` method.
casting: 'no'|'equiv'|'safe'|'same_kind'|'unsafe', optional, default 'safe'
Casting method to pass on to numpy's `astype()` method; see numpy documentation for details.
startIdx, stopIdx: nonnegative int. optional.
Indices of the first and last-plus-one data file to load, relative to the sorted filenames matching
`dataPath` and `ext`. Interpreted according to python slice indexing conventions.
overwrite: boolean, optional, default False
If true, the directory specified by outputdirpath will first be deleted, along with all its contents, if it
already exists. If false, a ValueError will be thrown if outputdirpath is found to already exist.
"""
if not overwrite:
self._checkOverwrite(outputDirPath)
overwrite = True # prevent additional downstream checks for this path
seriesBlocks, metadata = self._getSeriesBlocksFromMultiTif(dataPath, ext=ext, blockSize=blockSize,
newDtype=newDtype, casting=casting,
startIdx=startIdx, stopIdx=stopIdx,
recursive=recursive)
dims, npointsInSeries, dtype = metadata
self.__saveSeriesRdd(seriesBlocks, outputDirPath, dims, npointsInSeries, dtype, overwrite=overwrite)
def fromMatLocal(self, dataPath, varName, keyFile=None):
"""Loads Series data stored in a Matlab .mat file.
        `dataPath` must refer to a path visible to all workers, such as on NFS or similar mounted shared filesystem.
"""
data = loadmat(dataPath)[varName]
if data.ndim > 2:
raise IOError('Input data must be one or two dimensional')
if keyFile:
keys = map(lambda x: tuple(x), loadmat(keyFile)['keys'])
else:
keys = arange(0, data.shape[0])
rdd = Series(self.sc.parallelize(zip(keys, data), self.minPartitions), dtype=str(data.dtype))
return rdd
def fromNpyLocal(self, dataPath, keyFile=None):
"""Loads Series data stored in the numpy save() .npy format.
`datafile` must refer to a path visible to | |
import math
from functools import reduce
import torch
import torch.nn as nn
import pytorch_acdc as dct
from torch.utils.checkpoint import checkpoint
class ACDC(nn.Module):
"""
A structured efficient layer, consisting of four steps:
1. Scale by diagonal matrix
2. Discrete Cosine Transform
3. Scale by diagonal matrix
4. Inverse Discrete Cosine Transform
"""
def __init__(self, in_features, out_features, groups=1, bias=True):
super(ACDC, self).__init__()
self.in_features, self.out_features = in_features, out_features
assert in_features == out_features, "output size must equal input"
self.A = nn.Parameter(torch.Tensor(1, in_features))
self.D = nn.Parameter(torch.Tensor(1, out_features))
if bias:
self.bias = nn.Parameter(torch.Tensor(1,out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
self.groups = groups
self.pack, self.unpack = PackGroups(groups), UnPackGroups(groups)
self.riffle = Riffle()
def reset_parameters(self):
# used in original code: https://github.com/mdenil/acdc-torch/blob/master/FastACDC.lua
self.A.data.normal_(1., 1e-2)
self.D.data.normal_(1., 1e-2)
if self.bias is not None:
stdv = 1. / math.sqrt(self.out_features)
self.bias.data.uniform_(-stdv, stdv)
def forward(self, x):
n, d = x.size()
x = self.A*x # first diagonal matrix
x = self.pack(x)
x = dct.dct(x) # forward DCT
x = self.unpack(x)
x = self.D*x # second diagonal matrix
x = self.pack(x)
x = self.riffle(x)
x = dct.idct(x) # inverse DCT
x = self.unpack(x)
if self.bias is not None:
return x + self.bias
else:
return x
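# Minimal usage sketch (illustrative; assumes the `pytorch_acdc` dct module
# imported above is available): a square ACDC layer applied to a batch of flat
# feature vectors. Shapes follow directly from the forward pass above.
def _demo_acdc():
    layer = ACDC(64, 64, groups=4, bias=True)
    x = torch.randn(8, 64)
    y = layer(x)
    assert y.shape == (8, 64)
    return y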
class BlockDiagonalACDC(nn.Module):
def __init__(self, in_features, out_features, groups=1, bias=True):
super(BlockDiagonalACDC, self).__init__()
self.in_features, self.out_features = in_features, out_features
self.groups = groups
assert in_features == out_features, "output size must equal input"
c = self.in_features
self.A = nn.Conv1d(c, c, 1, bias=False, groups=groups)
self.D = nn.Conv1d(c, c, 1, bias=False, groups=groups)
if bias:
self.bias = nn.Parameter(torch.Tensor(1,out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
self.riffle = Riffle()
def reset_parameters(self):
if self.bias is not None:
stdv = 1. / math.sqrt(self.out_features)
self.bias.data.uniform_(-stdv, stdv)
def forward(self, x):
n, d = x.size()
x = self.A(x.view(n,d,1)) # first block diagonal matrix
x = dct.dct(x.view(n,d)) # forward DCT
x = self.D(x.view(n,d,1)) # second block diagonal matrix
x = dct.idct(x.view(n,d)) # inverse DCT
x = self.riffle(x)
if self.bias is not None:
return x + self.bias
else:
return x
class LinearACDC(nn.Linear):
"""Implement an ACDC layer in one matrix multiply (but more matrix
operations for the parameterisation of the matrix)."""
def __init__(self, in_features, out_features, bias=False, original=False):
#assert in_features == out_features, "output size must equal input"
        assert out_features >= in_features, "out_features (%i) must be greater than or equal to in_features (%i)"%(out_features, in_features)
assert out_features%in_features == 0
self.expansion = out_features//in_features
super(LinearACDC, self).__init__(in_features, out_features, bias=bias)
self.riffle = Riffle()
self.original = original # whether to use original parameterisation
def reset_parameters(self):
super(LinearACDC, self).reset_parameters()
# this is probably not a good way to do this
if 'A' not in self.__dict__.keys():
self.A = nn.Parameter(torch.Tensor(self.out_features, 1))
self.D = nn.Parameter(torch.Tensor(self.out_features, 1))
self.A.data.normal_(1., 1e-2)
self.D.data.normal_(1., 1e-2)
        # cache the DCT/IDCT matrices for speed; they are stored as plain
        # tensors and moved to the correct device on demand in forward()
N = self.out_features
self.dct = dct.dct(torch.eye(N))
self.idct = dct.idct(torch.eye(N))
# remove weight Parameter
del self.weight
def forward(self, x):
n, d = x.size()
if self.expansion > 1:
x = x.repeat(1, self.expansion)
self.dct = self.dct.to(self.A.device)
AC = self.A*self.dct
self.idct = self.idct.to(self.D.device)
DC = self.D*self.idct
if self.original:
ACDC = torch.matmul(AC,DC)
else:
ACDC = torch.matmul(self.riffle(AC),DC)
self.weight = ACDC.t() # monkey patch
return super(LinearACDC, self).forward(x)
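# Minimal usage sketch (illustrative): LinearACDC acts like nn.Linear with a
# structured weight; out_features must be an integer multiple of in_features.
def _demo_linear_acdc():
    layer = LinearACDC(32, 64)          # 2x expansion handled by repeating x
    x = torch.randn(4, 32)
    y = layer(x)
    assert y.shape == (4, 64)
    return y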
def kernel_matrix_to_weights(W, c_out, c_in, k):
"""Maps to 4D weight tensor from the kernel matrix used in im2col."""
assert k == 1 # yeah this function is quite pointless now
return W.view(c_out, c_in, k, k)
class ConvACDC(nn.Conv2d):
"""Implements an ACDC convolutional layer by replacing the weights in a
convolutional layer with the effective weights of an ACDC layer. After
replacing the weights it operates precisely like a convolutional layer."""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=False, original=False):
        assert out_channels >= in_channels, "out_channels (%i) must be greater than or equal to in_channels (%i)"%(out_channels, in_channels)
assert out_channels%in_channels == 0
assert bias == False # likely to accidentally set this and break things
assert groups == 1
self.expansion = out_channels//in_channels
if kernel_size == 1:
super(ConvACDC, self).__init__(in_channels, out_channels,
kernel_size, stride=stride, padding=padding,
dilation=dilation, groups=groups, bias=bias)
elif kernel_size > 1:
super(ConvACDC, self).__init__(out_channels, out_channels, 1,
groups=1, bias=bias)
if kernel_size > 1:
self.grouped = nn.Conv2d(in_channels, in_channels, kernel_size,
stride=stride, padding=padding, dilation=dilation,
groups=in_channels, bias=False)
self.riffle = Riffle()
self.original = original
def reset_parameters(self):
super(ConvACDC, self).reset_parameters()
# this is probably not a good way to do this
assert self.kernel_size[0] == self.kernel_size[1], "%s"%self.kernel_size
N = self.out_channels
if 'A' not in self.__dict__.keys():
self.A = nn.Parameter(torch.Tensor(N, 1))
self.D = nn.Parameter(torch.Tensor(N, 1))
self.A.data.normal_(1., 1e-2)
self.D.data.normal_(1., 1e-2)
# initialise DCT matrices
self.dct = dct.dct(torch.eye(N))
self.idct = dct.idct(torch.eye(N))
# remove weight Parameter
del self.weight
def acdc(self, device):
k = self.kernel_size[0]
c_out = self.out_channels
# check our stored DCT matrices are on the right device
if self.dct.device != device:
self.dct = self.dct.to(device)
self.idct = self.idct.to(device)
AC = self.A*self.dct
DC = self.D*self.idct
if self.original:
return torch.matmul(AC, DC)
else:
return torch.matmul(self.riffle(AC), DC)
def forward(self, x):
if hasattr(self, 'grouped'):
x = self.grouped(x)
n, c_in, h, w = x.size()
k = self.kernel_size[0]
c_in, c_out = self.in_channels, self.out_channels
if self.expansion > 1:
x = x.repeat(1, self.expansion, 1, 1)
ACDC = self.acdc(x.device)
self.weight = kernel_matrix_to_weights(ACDC, c_out, c_in, k)
return super(ConvACDC, self).forward(x)
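# Minimal usage sketch (illustrative): for kernel_size > 1 a grouped depthwise
# convolution is applied first, followed by the structured 1x1 ACDC convolution.
def _demo_conv_acdc():
    conv = ConvACDC(16, 32, kernel_size=3, padding=1)
    x = torch.randn(2, 16, 8, 8)
    y = conv(x)
    assert y.shape == (2, 32, 8, 8)
    return y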
class Riffle(nn.Module):
def forward(self, x):
# based on shufflenet shuffle
# and https://en.wikipedia.org/wiki/Shuffling#Riffle
dim = x.dim()
if dim == 2:
n, d = x.data.size()
assert d%2 == 0, "dim must be even, was %i"%d
groups = d//2
x = x.view(n, groups, 2).permute(0,2,1).contiguous()
return x.view(n, d)
elif dim == 4:
N,C,H,W = x.size()
g = 2
return x.view(N,g,C//g,H,W).permute(0,2,1,3,4).contiguous().view(N,C,H,W)
else:
raise ValueError("Shape of x not supported: %s"%x.size())
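# Minimal sketch of the riffle shuffle on a 2D batch: the two interleaved
# halves of the feature dimension are separated, e.g.
# [0, 1, 2, 3, 4, 5] -> [0, 2, 4, 1, 3, 5].
def _demo_riffle():
    x = torch.arange(6.).view(1, 6)
    return Riffle()(x)   # tensor([[0., 2., 4., 1., 3., 5.]])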
class Permute(nn.Module):
"""Assuming 2d input, permutes along last dimension using a fixed
permutation."""
def __init__(self, d):
self.d = d
super(Permute, self).__init__()
self.reset_parameters()
def reset_parameters(self):
self.permute_idxs = torch.randperm(self.d)
def to(self, device):
        # Tensor.to() returns a new tensor, so reassign instead of discarding it
        self.permute_idxs = self.permute_idxs.to(device)
        return super(Permute, self).to(device)
def forward(self, x):
return x[:,self.permute_idxs]
class PackGroups(nn.Module):
def __init__(self, groups):
super(PackGroups, self).__init__()
self.groups = groups
def forward(self, x):
n, d = x.size()
return x.view(n*self.groups, d//self.groups)
class UnPackGroups(nn.Module):
def __init__(self, groups):
super(UnPackGroups, self).__init__()
self.groups = groups
def forward(self, x):
n, d = x.size()
return x.view(n//self.groups, d*self.groups)
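# Minimal sketch: PackGroups/UnPackGroups reshape a (n, d) batch into
# (n * groups, d // groups) and back, so the DCT acts on each group separately.
def _demo_pack_unpack():
    x = torch.randn(4, 12)
    packed = PackGroups(3)(x)            # shape (12, 4)
    restored = UnPackGroups(3)(packed)   # shape (4, 12)
    assert torch.equal(x, restored)
    return restored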
class PadLinearTo(nn.Linear):
"""Pad by concatenating a linear layer."""
def __init__(self, input_features, to):
super(PadLinearTo, self).__init__(input_features, to-input_features, bias=False)
def forward(self, x):
pad = super(PadLinearTo, self).forward(x)
return torch.cat([x, pad], 1)
class DropLinearTo(nn.Linear):
"""Drop dimensions after providing shortcut by Linear Layer. Not expecting
to use this much."""
def __init__(self, input_features, to):
super(DropLinearTo, self).__init__(input_features-to, to, bias=False)
self.to = to
def forward(self, x):
#residual = super(DropLinearTo, self).forward(x[:,self.to:])
return x[:, :self.to] #+ residual
class StackedACDC(nn.Module):
"""
    A series of ACDC layers with riffle shuffles in between (the batchnorm and
    ReLU steps are currently commented out below).
Input is divided into groups, groups are rounded to nearest power of 2 and
padding or dropping groups is used to map between different input sizes.
"""
def __init__(self, in_features, out_features, n_layers, groups=1):
super(StackedACDC, self).__init__()
self.in_features, self.out_features = in_features, out_features
self.n_layers = n_layers
# for non-matching input/output sizes
if in_features != out_features:
# nearest power of 2 in input groups
group_size = 2**(math.ceil(math.log(in_features//groups,2)))
# number of groups we'll need at output (minimum)
n_groups_out = math.ceil(float(out_features)/group_size)
# how many more is this than we start with?
n_groups_in = math.ceil(float(in_features)/group_size)
n_groups_diff = n_groups_out - n_groups_in
# evenly spread the steps in groups over the number of layers we have
steps = [n_groups_in+round(n_groups_diff*i/float(n_layers+1))
for i in range(1,n_layers+1)]
# steps in dimensionality
dim_steps = [group_size*s for s in steps]
else:
dim_steps = [in_features]*n_layers
layers = []
d = in_features
for n, d_to in zip(range(n_layers), dim_steps):
if d_to > d:
layers.append(PadLinearTo(d, d_to))
elif d_to < d:
layers.append(DropLinearTo(d, d_to))
d = d_to
acdc = ACDC(d, d, groups=groups, bias=False)
#bn = nn.BatchNorm1d(d, affine=False)
riffle = Riffle()
#relu = nn.ReLU()
layers += [acdc, riffle]
# remove the last relu
#_ = layers.pop(-1)
if self.out_features < d:
layers.append(DropLinearTo(d, self.out_features))
self.layers = nn.Sequential(*layers)
def forward(self, x):
return self.layers(x)
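# Minimal usage sketch (illustrative) with matching input/output sizes;
# mismatched sizes are handled by the PadLinearTo/DropLinearTo steps built in
# the constructor above.
def _demo_stacked_acdc():
    net = StackedACDC(in_features=64, out_features=64, n_layers=3, groups=4)
    x = torch.randn(8, 64)
    y = net(x)
    assert y.shape == (8, 64)
    return y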
class StackedLinearACDC(nn.Module):
def __init__(self, in_features, out_features, n_layers, bias=False,
original=False):
super(StackedLinearACDC, self).__init__()
self.in_features, self.out_features = in_features, out_features
assert out_features%in_features == 0
self.n_layers = n_layers
layers = []
d = in_features
for n in range(n_layers):
acdc = LinearACDC(d, out_features,
bias=False if n < n_layers-1 else bias, original=original)
d = out_features
permute = Riffle()
relu = nn.ReLU()
layers += [acdc, permute]
# remove the last relu
# _ = layers.pop(-1)
self.layers = nn.Sequential(*layers)
def forward(self, x):
return self.layers(x)
class StackedConvACDC(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, n_layers,
# src/whoosh/util/times.py
# Copyright 2010 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
import calendar
import copy
from datetime import date, datetime, timedelta
from whoosh.compat import iteritems
class TimeError(Exception):
pass
def relative_days(current_wday, wday, dir):
"""Returns the number of days (positive or negative) to the "next" or
"last" of a certain weekday. ``current_wday`` and ``wday`` are numbers,
i.e. 0 = monday, 1 = tuesday, 2 = wednesday, etc.
>>> # Get the number of days to the next tuesday, if today is Sunday
>>> relative_days(6, 1, 1)
2
:param current_wday: the number of the current weekday.
:param wday: the target weekday.
:param dir: -1 for the "last" (past) weekday, 1 for the "next" (future)
weekday.
"""
if current_wday == wday:
return 7 * dir
if dir == 1:
return (wday + 7 - current_wday) % 7
else:
return (current_wday + 7 - wday) % 7 * -1
def timedelta_to_usecs(td):
total = td.days * 86400000000 # Microseconds in a day
total += td.seconds * 1000000 # Microseconds in a second
total += td.microseconds
return total
def datetime_to_long(dt):
"""Converts a datetime object to a long integer representing the number
of microseconds since ``datetime.min``.
"""
return timedelta_to_usecs(dt.replace(tzinfo=None) - dt.min)
def long_to_datetime(x):
"""Converts a long integer representing the number of microseconds since
``datetime.min`` to a datetime object.
"""
days = x // 86400000000 # Microseconds in a day
x -= days * 86400000000
seconds = x // 1000000 # Microseconds in a second
x -= seconds * 1000000
return datetime.min + timedelta(days=days, seconds=seconds, microseconds=x)
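# Round-trip sketch (follows directly from the two definitions above):
#
#   >>> dt = datetime(2010, 6, 15, 12, 30, 0, 5)
#   >>> long_to_datetime(datetime_to_long(dt)) == dt
#   True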
# Ambiguous datetime object
class adatetime(object):
"""An "ambiguous" datetime object. This object acts like a
``datetime.datetime`` object but can have any of its attributes set to
None, meaning unspecified.
"""
units = frozenset(("year", "month", "day", "hour", "minute", "second",
"microsecond"))
def __init__(self, year=None, month=None, day=None, hour=None, minute=None,
second=None, microsecond=None):
if isinstance(year, datetime):
dt = year
self.year, self.month, self.day = dt.year, dt.month, dt.day
self.hour, self.minute, self.second = dt.hour, dt.minute, dt.second
self.microsecond = dt.microsecond
else:
if month is not None and (month < 1 or month > 12):
raise TimeError("month must be in 1..12")
if day is not None and day < 1:
raise TimeError("day must be greater than 1")
if (year is not None and month is not None and day is not None
and day > calendar.monthrange(year, month)[1]):
raise TimeError("day is out of range for month")
if hour is not None and (hour < 0 or hour > 23):
raise TimeError("hour must be in 0..23")
if minute is not None and (minute < 0 or minute > 59):
raise TimeError("minute must be in 0..59")
if second is not None and (second < 0 or second > 59):
raise TimeError("second must be in 0..59")
if microsecond is not None and (microsecond < 0
or microsecond > 999999):
raise TimeError("microsecond must be in 0..999999")
self.year, self.month, self.day = year, month, day
self.hour, self.minute, self.second = hour, minute, second
self.microsecond = microsecond
def __eq__(self, other):
if not other.__class__ is self.__class__:
if not is_ambiguous(self) and isinstance(other, datetime):
return fix(self) == other
else:
return False
return all(getattr(self, unit) == getattr(other, unit)
for unit in self.units)
def __repr__(self):
return "%s%r" % (self.__class__.__name__, self.tuple())
def tuple(self):
"""Returns the attributes of the ``adatetime`` object as a tuple of
``(year, month, day, hour, minute, second, microsecond)``.
"""
return (self.year, self.month, self.day, self.hour, self.minute,
self.second, self.microsecond)
def date(self):
return date(self.year, self.month, self.day)
def copy(self):
return adatetime(year=self.year, month=self.month, day=self.day,
hour=self.hour, minute=self.minute, second=self.second,
microsecond=self.microsecond)
def replace(self, **kwargs):
"""Returns a copy of this object with the attributes given as keyword
arguments replaced.
>>> adt = adatetime(year=2009, month=10, day=31)
>>> adt.replace(year=2010)
        adatetime(2010, 10, 31, None, None, None, None)
"""
newadatetime = self.copy()
for key, value in iteritems(kwargs):
if key in self.units:
setattr(newadatetime, key, value)
else:
raise KeyError("Unknown argument %r" % key)
return newadatetime
def floor(self):
"""Returns a ``datetime`` version of this object with all unspecified
(None) attributes replaced by their lowest values.
This method raises an error if the ``adatetime`` object has no year.
>>> adt = adatetime(year=2009, month=5)
>>> adt.floor()
        datetime.datetime(2009, 5, 1, 0, 0)
"""
y, m, d, h, mn, s, ms = (self.year, self.month, self.day, self.hour,
self.minute, self.second, self.microsecond)
if y is None:
raise ValueError("Date has no year")
if m is None:
m = 1
if d is None:
d = 1
if h is None:
h = 0
if mn is None:
mn = 0
if s is None:
s = 0
if ms is None:
ms = 0
return datetime(y, m, d, h, mn, s, ms)
def ceil(self):
"""Returns a ``datetime`` version of this object with all unspecified
(None) attributes replaced by their highest values.
This method raises an error if the ``adatetime`` object has no year.
>>> adt = adatetime(year=2009, month=5)
        >>> adt.ceil()
        datetime.datetime(2009, 5, 31, 23, 59, 59, 999999)
"""
y, m, d, h, mn, s, ms = (self.year, self.month, self.day, self.hour,
self.minute, self.second, self.microsecond)
if y is None:
raise ValueError("Date has no year")
if m is None:
m = 12
if d is None:
d = calendar.monthrange(y, m)[1]
if h is None:
h = 23
if mn is None:
mn = 59
if s is None:
s = 59
if ms is None:
ms = 999999
return datetime(y, m, d, h, mn, s, ms)
def disambiguated(self, basedate):
"""Returns either a ``datetime`` or unambiguous ``timespan`` version
of this object.
Unless this ``adatetime`` object is full specified down to the
microsecond, this method will return a timespan built from the "floor"
and "ceil" of this object.
This method raises an error if the ``adatetime`` object has no year.
>>> adt = adatetime(year=2009, month=10, day=31)
        >>> adt.disambiguated(basedate=datetime.now())
        timespan(datetime(2009, 10, 31, 0, 0, 0, 0), datetime(2009, 10, 31, 23, 59, 59, 999999))
"""
dt = self
if not is_ambiguous(dt):
return fix(dt)
return timespan(dt, dt).disambiguated(basedate)
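# Usage sketch, consistent with the doctests above: unspecified fields span a
# range of concrete datetimes, recovered with floor() and ceil().
#
#   >>> adt = adatetime(year=2010, month=2, day=14)
#   >>> adt.floor()
#   datetime.datetime(2010, 2, 14, 0, 0)
#   >>> adt.ceil()
#   datetime.datetime(2010, 2, 14, 23, 59, 59, 999999)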
# Time span class
class timespan(object):
"""A span of time between two ``datetime`` or ``adatetime`` objects.
"""
def __init__(self, start, end):
"""
:param start: a ``datetime`` or ``adatetime`` object representing the
start of the time span.
:param end: a ``datetime`` or ``adatetime`` object representing the
end of the time span.
"""
if not isinstance(start, (datetime, adatetime)):
raise TimeError("%r is not a datetime object" % start)
if not isinstance(end, (datetime, adatetime)):
raise TimeError("%r is not a datetime object" % end)
self.start = copy.copy(start)
self.end = copy.copy(end)
def __eq__(self, other):
if not other.__class__ is self.__class__:
return False
return self.start == other.start and self.end == other.end
def __repr__(self):
return "%s(%r, %r)" % (self.__class__.__name__, self.start, self.end)
def disambiguated(self, basedate, debug=0):
"""Returns an unambiguous version of this object.
>>> start = adatetime(year=2009, month=2)
>>> end = adatetime(year=2009, month=10)
>>> ts = timespan(start, end)
>>> ts
        timespan(adatetime(2009, 2, None,
has a color map
try:
self.cmap = src.colormap(1)
except ValueError:
pass
if crs is None:
crs = src.crs
if res is None:
res = src.res[0]
with WarpedVRT(src, crs=crs) as vrt:
minx, miny, maxx, maxy = vrt.bounds
except rasterio.errors.RasterioIOError:
# Skip files that rasterio is unable to read
continue
else:
mint: float = 0
maxt: float = sys.maxsize
if "date" in match.groupdict():
date = match.group("date")
mint, maxt = disambiguate_timestamp(date, self.date_format)
coords = (minx, maxx, miny, maxy, mint, maxt)
self.index.insert(i, coords, filepath)
i += 1
if i == 0:
raise FileNotFoundError(
f"No {self.__class__.__name__} data was found in '{root}'"
)
self._crs = cast(CRS, crs)
self.res = cast(float, res)
def __getitem__(self, query: BoundingBox) -> Dict[str, Any]:
"""Retrieve image/mask and metadata indexed by query.
Args:
query: (minx, maxx, miny, maxy, mint, maxt) coordinates to index
Returns:
sample of image/mask and metadata at that index
Raises:
IndexError: if query is not found in the index
"""
hits = self.index.intersection(tuple(query), objects=True)
filepaths = [hit.object for hit in hits]
if not filepaths:
raise IndexError(
f"query: {query} not found in index with bounds: {self.bounds}"
)
if self.separate_files:
data_list: List[Tensor] = []
filename_regex = re.compile(self.filename_regex, re.VERBOSE)
for band in getattr(self, "bands", self.all_bands):
band_filepaths = []
for filepath in filepaths:
filename = os.path.basename(filepath)
directory = os.path.dirname(filepath)
match = re.match(filename_regex, filename)
if match:
if "date" in match.groupdict():
start = match.start("band")
end = match.end("band")
filename = filename[:start] + band + filename[end:]
if "resolution" in match.groupdict():
start = match.start("resolution")
end = match.end("resolution")
filename = filename[:start] + "*" + filename[end:]
filepath = glob.glob(os.path.join(directory, filename))[0]
band_filepaths.append(filepath)
data_list.append(self._merge_files(band_filepaths, query))
data = torch.cat(data_list) # type: ignore[attr-defined]
else:
data = self._merge_files(filepaths, query)
key = "image" if self.is_image else "mask"
sample = {key: data, "crs": self.crs, "bbox": query}
if self.transforms is not None:
sample = self.transforms(sample)
return sample
def _merge_files(self, filepaths: Sequence[str], query: BoundingBox) -> Tensor:
"""Load and merge one or more files.
Args:
filepaths: one or more files to load and merge
query: (minx, maxx, miny, maxy, mint, maxt) coordinates to index
Returns:
image/mask at that index
"""
if self.cache:
vrt_fhs = [self._cached_load_warp_file(fp) for fp in filepaths]
else:
vrt_fhs = [self._load_warp_file(fp) for fp in filepaths]
bounds = (query.minx, query.miny, query.maxx, query.maxy)
if len(vrt_fhs) == 1:
src = vrt_fhs[0]
out_width = int(round((query.maxx - query.minx) / self.res))
out_height = int(round((query.maxy - query.miny) / self.res))
out_shape = (src.count, out_height, out_width)
dest = src.read(
out_shape=out_shape, window=from_bounds(*bounds, src.transform)
)
else:
dest, _ = rasterio.merge.merge(vrt_fhs, bounds, self.res)
dest = dest.astype(np.int32)
tensor: Tensor = torch.tensor(dest) # type: ignore[attr-defined]
return tensor
@functools.lru_cache(maxsize=128)
def _cached_load_warp_file(self, filepath: str) -> DatasetReader:
"""Cached version of :meth:`_load_warp_file`.
Args:
filepath: file to load and warp
Returns:
file handle of warped VRT
"""
return self._load_warp_file(filepath)
def _load_warp_file(self, filepath: str) -> DatasetReader:
"""Load and warp a file to the correct CRS and resolution.
Args:
filepath: file to load and warp
Returns:
file handle of warped VRT
"""
src = rasterio.open(filepath)
# Only warp if necessary
if src.crs != self.crs:
vrt = WarpedVRT(src, crs=self.crs)
src.close()
return vrt
else:
return src
def plot(self, data: Tensor) -> None:
"""Plot a data sample.
Args:
data: the data to plot
Raises:
AssertionError: if ``is_image`` is True and ``data`` has a different number
of channels than expected
"""
array = data.squeeze().numpy()
if self.is_image:
bands = getattr(self, "bands", self.all_bands)
assert array.shape[0] == len(bands)
# Only plot RGB bands
if bands and self.rgb_bands:
indices = np.array([bands.index(band) for band in self.rgb_bands])
array = array[indices]
# Convert from CxHxW to HxWxC
array = np.rollaxis(array, 0, 3)
if self.cmap:
# Convert from class labels to RGBA values
cmap = np.array([self.cmap[i] for i in range(len(self.cmap))])
array = cmap[array]
if self.stretch:
# Stretch to the range of 2nd to 98th percentile
per02 = np.percentile(array, 2) # type: ignore[no-untyped-call]
per98 = np.percentile(array, 98) # type: ignore[no-untyped-call]
array = (array - per02) / (per98 - per02)
array = np.clip(array, 0, 1)
# Plot the data
ax = plt.axes()
ax.imshow(array)
ax.axis("off")
plt.show()
plt.close()
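# Usage sketch for a RasterDataset subclass. `MyRasters` and the data path are
# illustrative names, and BoundingBox is assumed to be the spatiotemporal
# bounding-box type accepted by __getitem__ above:
#
#   ds = MyRasters(root="data/rasters")
#   bbox = BoundingBox(ds.bounds.minx, ds.bounds.minx + 256 * ds.res,
#                      ds.bounds.miny, ds.bounds.miny + 256 * ds.res,
#                      ds.bounds.mint, ds.bounds.maxt)
#   sample = ds[bbox]            # {"image" or "mask", "crs", "bbox"}
#   ds.plot(sample["image"])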
class VectorDataset(GeoDataset):
"""Abstract base class for :class:`GeoDataset` stored as vector files."""
#: Glob expression used to search for files.
#:
#: This expression should be specific enough that it will not pick up files from
#: other datasets. It should not include a file extension, as the dataset may be in
#: a different file format than what it was originally downloaded as.
filename_glob = "*"
def __init__(
self,
root: str = "data",
crs: Optional[CRS] = None,
res: float = 0.0001,
transforms: Optional[Callable[[Dict[str, Any]], Dict[str, Any]]] = None,
) -> None:
"""Initialize a new Dataset instance.
Args:
root: root directory where dataset can be found
crs: :term:`coordinate reference system (CRS)` to warp to
(defaults to the CRS of the first file found)
res: resolution of the dataset in units of CRS
transforms: a function/transform that takes input sample and its target as
entry and returns a transformed version
Raises:
FileNotFoundError: if no files are found in ``root``
"""
super().__init__(transforms)
self.root = root
self.res = res
# Populate the dataset index
i = 0
pathname = os.path.join(root, "**", self.filename_glob)
for filepath in glob.iglob(pathname, recursive=True):
try:
with fiona.open(filepath) as src:
if crs is None:
crs = CRS.from_dict(src.crs)
minx, miny, maxx, maxy = src.bounds
(minx, maxx), (miny, maxy) = fiona.transform.transform(
src.crs, crs.to_dict(), [minx, maxx], [miny, maxy]
)
except fiona.errors.FionaValueError:
# Skip files that fiona is unable to read
continue
else:
mint = 0
maxt = sys.maxsize
coords = (minx, maxx, miny, maxy, mint, maxt)
self.index.insert(i, coords, filepath)
i += 1
if i == 0:
raise FileNotFoundError(
f"No {self.__class__.__name__} data was found in '{root}'"
)
self._crs = crs
def __getitem__(self, query: BoundingBox) -> Dict[str, Any]:
"""Retrieve image/mask and metadata indexed by query.
Args:
query: (minx, maxx, miny, maxy, mint, maxt) coordinates to index
Returns:
sample of image/mask and metadata at that index
Raises:
IndexError: if query is not found in the index
"""
hits = self.index.intersection(tuple(query), objects=True)
filepaths = [hit.object for hit in hits]
if not filepaths:
raise IndexError(
f"query: {query} not found in index with bounds: {self.bounds}"
)
shapes = []
for filepath in filepaths:
with fiona.open(filepath) as src:
# We need to know the bounding box of the query in the source CRS
(minx, maxx), (miny, maxy) = fiona.transform.transform(
self.crs.to_dict(),
src.crs,
[query.minx, query.maxx],
[query.miny, query.maxy],
)
# Filter geometries to those that intersect with the bounding box
for feature in src.filter(bbox=(minx, miny, maxx, maxy)):
# Warp geometries to requested CRS
shape = fiona.transform.transform_geom(
src.crs, self.crs.to_dict(), feature["geometry"]
)
shapes.append(shape)
# Rasterize geometries
width = (query.maxx - query.minx) / self.res
height = (query.maxy - query.miny) / self.res
transform = rasterio.transform.from_bounds(
query.minx, query.miny, query.maxx, query.maxy, width, height
)
masks = rasterio.features.rasterize(
shapes, out_shape=(int(height), int(width)), transform=transform
)
sample = {
"mask": torch.tensor(masks), # type: ignore[attr-defined]
"crs": self.crs,
"bbox": query,
}
if self.transforms is not None:
sample = self.transforms(sample)
return sample
def plot(self, data: Tensor) -> None:
"""Plot a data sample.
Args:
data: the data to plot
"""
array = data.squeeze().numpy()
# Plot the image
ax = plt.axes()
ax.imshow(array)
ax.axis("off")
plt.show()
plt.close()
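# Usage sketch for a VectorDataset subclass (illustrative names): the query
# bounding box is rasterized into a mask at the dataset resolution `res`.
#
#   ds = MyShapes(root="data/shapes", res=0.0001)
#   sample = ds[bbox]            # {"mask", "crs", "bbox"}
#   ds.plot(sample["mask"])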
class VisionDataset(Dataset[Dict[str, Any]], abc.ABC):
"""Abstract base class for datasets lacking geospatial information.
This base class is designed for datasets with pre-defined image chips.
"""
@abc.abstractmethod
def __getitem__(self, index: int) -> Dict[str, Any]:
"""Return an index within the dataset.
Args:
index: index to return
Returns:
data and labels at that index
Raises:
IndexError: if index is out of range of the dataset
"""
@abc.abstractmethod
def __len__(self) -> int:
"""Return the length of the dataset.
Returns:
length of the dataset
"""
def __str__(self) -> str:
"""Return the informal string representation of the object.
Returns:
informal string representation
"""
return f"""\
{self.__class__.__name__} Dataset
type: VisionDataset
size: {len(self)}"""
class VisionClassificationDataset(VisionDataset, ImageFolder): # type: ignore[misc]
"""Abstract base class for classification datasets lacking geospatial information.
This base class is designed for datasets with pre-defined image chips which
are separated into separate folders per class.
"""
def __init__(
self,
root: str,
transforms: Optional[Callable[[Dict[str, Tensor]], Dict[str, Tensor]]] = None,
loader: Optional[Callable[[str], Any]] = pil_loader,
        is_valid_file:
<reponame>citrix-openstack-build/neutron
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: <NAME>
import random
import string
import mox
import netaddr
from neutron import context
from neutron.openstack.common import uuidutils
from neutron.plugins.nec.common import ofc_client as ofc
from neutron.plugins.nec.db import api as ndb
from neutron.plugins.nec.db import models as nmodels
from neutron.plugins.nec import drivers
from neutron.tests import base
class TestConfig(object):
"""Configuration for this test."""
host = '127.0.0.1'
port = 8888
use_ssl = False
key_file = None
cert_file = None
def _ofc(id):
"""OFC ID converter."""
return "ofc-%s" % id
class PFCDriverTestBase(base.BaseTestCase):
driver = 'neutron.plugins.nec.drivers.pfc.PFCDriverBase'
def setUp(self):
super(PFCDriverTestBase, self).setUp()
self.mox = mox.Mox()
self.driver = drivers.get_driver(self.driver)(TestConfig)
self.mox.StubOutWithMock(ofc.OFCClient, 'do_request')
self.addCleanup(self.mox.UnsetStubs)
def get_ofc_item_random_params(self):
"""create random parameters for ofc_item test."""
tenant_id = uuidutils.generate_uuid()
network_id = uuidutils.generate_uuid()
port_id = uuidutils.generate_uuid()
portinfo = nmodels.PortInfo(id=port_id, datapath_id="0x123456789",
port_no=1234, vlan_id=321,
mac="11:22:33:44:55:66")
return tenant_id, network_id, portinfo
def _generate_ofc_tenant_id(self, tenant_id):
fields = tenant_id.split('-')
# Strip 1st character (UUID version) of 3rd field
fields[2] = fields[2][1:]
return ''.join(fields)
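# Worked example with a made-up UUID: '0123a456-789b-4cde-8f01-23456789abcd'
# loses the leading version nibble of its third field ('4cde' -> 'cde') and all
# hyphens, giving '0123a456789bcde8f0123456789abcd' (31 characters).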
def get_ofc_description(self, desc):
"""OFC description consists of [A-Za-z0-9_]."""
return desc.replace('-', '_').replace(' ', '_')
def _create_tenant(self, t, ofc_t, post_id=False, post_desc=False):
tenant_path = '/tenants/%s' % ofc_t
path = "/tenants"
description = "desc of %s" % t
body = {}
if post_desc:
ofc_description = self.get_ofc_description(description)
body['description'] = ofc_description
if post_id:
body['id'] = ofc_t
ofc.OFCClient.do_request("POST", path, body=body)
else:
ofc.OFCClient.do_request("POST", path, body=body).\
AndReturn({'id': ofc_t})
self.mox.ReplayAll()
ret = self.driver.create_tenant(description, t)
self.mox.VerifyAll()
self.assertEqual(ret, tenant_path)
def testa_create_tenant(self):
t, n, p = self.get_ofc_item_random_params()
ofc_t = self._generate_ofc_tenant_id(t)
self._create_tenant(t, ofc_t, post_id=True)
def testc_delete_tenant(self):
t, n, p = self.get_ofc_item_random_params()
path = "/tenants/%s" % _ofc(t)
ofc.OFCClient.do_request("DELETE", path)
self.mox.ReplayAll()
self.driver.delete_tenant(path)
self.mox.VerifyAll()
def testd_create_network(self):
t, n, p = self.get_ofc_item_random_params()
description = "desc of %s" % n
ofc_description = self.get_ofc_description(description)
tenant_path = "/tenants/%s" % _ofc(t)
post_path = "%s/networks" % tenant_path
body = {'description': ofc_description}
network = {'id': _ofc(n)}
ofc.OFCClient.do_request("POST", post_path, body=body).\
AndReturn(network)
self.mox.ReplayAll()
ret = self.driver.create_network(tenant_path, description, n)
self.mox.VerifyAll()
net_path = "/tenants/%s/networks/%s" % (_ofc(t), _ofc(n))
self.assertEqual(ret, net_path)
def testf_delete_network(self):
t, n, p = self.get_ofc_item_random_params()
net_path = "/tenants/%s/networks/%s" % (_ofc(t), _ofc(n))
ofc.OFCClient.do_request("DELETE", net_path)
self.mox.ReplayAll()
self.driver.delete_network(net_path)
self.mox.VerifyAll()
def testg_create_port(self):
t, n, p = self.get_ofc_item_random_params()
net_path = "/tenants/%s/networks/%s" % (_ofc(t), _ofc(n))
post_path = "%s/ports" % net_path
port_path = "/tenants/%s/networks/%s/ports/%s" % (_ofc(t), _ofc(n),
_ofc(p.id))
body = {'datapath_id': p.datapath_id,
'port': str(p.port_no),
'vid': str(p.vlan_id)}
port = {'id': _ofc(p.id)}
ofc.OFCClient.do_request("POST", post_path, body=body).AndReturn(port)
self.mox.ReplayAll()
ret = self.driver.create_port(net_path, p, p.id)
self.mox.VerifyAll()
self.assertEqual(ret, port_path)
def testh_delete_port(self):
t, n, p = self.get_ofc_item_random_params()
port_path = "/tenants/%s/networks/%s/ports/%s" % (_ofc(t), _ofc(n),
_ofc(p.id))
ofc.OFCClient.do_request("DELETE", port_path)
self.mox.ReplayAll()
self.driver.delete_port(port_path)
self.mox.VerifyAll()
def test_filter_supported(self):
self.assertFalse(self.driver.filter_supported())
class PFCDriverBaseTest(PFCDriverTestBase):
pass
class PFCV3DriverTest(PFCDriverTestBase):
driver = 'pfc_v3'
def testa_create_tenant(self):
t, n, p = self.get_ofc_item_random_params()
self.mox.ReplayAll()
ret = self.driver.create_tenant('dummy_desc', t)
self.mox.VerifyAll()
ofc_t_path = "/tenants/" + self._generate_ofc_tenant_id(t)
self.assertEqual(ofc_t_path, ret)
def testc_delete_tenant(self):
t, n, p = self.get_ofc_item_random_params()
path = "/tenants/%s" % _ofc(t)
# There is no API call.
self.mox.ReplayAll()
self.driver.delete_tenant(path)
self.mox.VerifyAll()
class PFCV4DriverTest(PFCDriverTestBase):
driver = 'pfc_v4'
class PFCV5DriverTest(PFCDriverTestBase):
driver = 'pfc_v5'
def test_create_router(self):
t = uuidutils.generate_uuid()
r = uuidutils.generate_uuid()
description = 'dummy_router_desc'
tenant_path = "/tenants/%s" % _ofc(t)
post_path = "%s/routers" % tenant_path
router = {'id': _ofc(r)}
ofc.OFCClient.do_request("POST", post_path,
body=None).AndReturn(router)
self.mox.ReplayAll()
ret = self.driver.create_router(tenant_path, description, r)
self.mox.VerifyAll()
router_path = "/tenants/%s/routers/%s" % (_ofc(t), _ofc(r))
self.assertEqual(ret, router_path)
def test_delete_router(self):
t = uuidutils.generate_uuid()
r = uuidutils.generate_uuid()
router_path = "/tenants/%s/routers/%s" % (_ofc(t), _ofc(r))
ofc.OFCClient.do_request("DELETE", router_path)
self.mox.ReplayAll()
self.driver.delete_router(router_path)
self.mox.VerifyAll()
def test_add_router_interface(self):
t = uuidutils.generate_uuid()
r = uuidutils.generate_uuid()
n = uuidutils.generate_uuid()
p = uuidutils.generate_uuid()
router_path = "/tenants/%s/routers/%s" % (_ofc(t), _ofc(r))
infs_path = router_path + "/interfaces"
net_path = "/tenants/%s/networks/%s" % (_ofc(t), _ofc(n))
ip_address = '10.1.1.1/24'
mac_address = '11:22:33:44:55:66'
body = {'net_id': _ofc(n),
'ip_address': ip_address,
'mac_address': mac_address}
inf = {'id': _ofc(p)}
ofc.OFCClient.do_request("POST", infs_path,
body=body).AndReturn(inf)
self.mox.ReplayAll()
ret = self.driver.add_router_interface(router_path, net_path,
ip_address, mac_address)
self.mox.VerifyAll()
inf_path = "%s/interfaces/%s" % (router_path, _ofc(p))
self.assertEqual(ret, inf_path)
def test_update_router_interface(self):
t = uuidutils.generate_uuid()
r = uuidutils.generate_uuid()
p = uuidutils.generate_uuid()
router_path = "/tenants/%s/routers/%s" % (_ofc(t), _ofc(r))
inf_path = "%s/interfaces/%s" % (router_path, _ofc(p))
ip_address = '10.1.1.1/24'
mac_address = '11:22:33:44:55:66'
body = {'ip_address': ip_address,
'mac_address': mac_address}
ofc.OFCClient.do_request("PUT", inf_path, body=body)
body = {'ip_address': ip_address}
ofc.OFCClient.do_request("PUT", inf_path, body=body)
body = {'mac_address': mac_address}
ofc.OFCClient.do_request("PUT", inf_path, body=body)
self.mox.ReplayAll()
self.driver.update_router_interface(inf_path, ip_address, mac_address)
self.driver.update_router_interface(inf_path, ip_address=ip_address)
self.driver.update_router_interface(inf_path, mac_address=mac_address)
self.mox.VerifyAll()
def test_delete_router_interface(self):
t = uuidutils.generate_uuid()
r = uuidutils.generate_uuid()
p = uuidutils.generate_uuid()
router_path = "/tenants/%s/routers/%s" % (_ofc(t), _ofc(r))
inf_path = "%s/interfaces/%s" % (router_path, _ofc(p))
ofc.OFCClient.do_request("DELETE", inf_path)
self.mox.ReplayAll()
self.driver.delete_router_interface(inf_path)
self.mox.VerifyAll()
def _get_route_id(self, dest, nexthop):
dest = netaddr.IPNetwork(dest)
return '-'.join([str(dest.network), nexthop, str(dest.netmask)])
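# For the values used in these tests, _get_route_id('10.1.1.0/24', '192.168.100.10')
# returns '10.1.1.0-192.168.100.10-255.255.255.0' (network-nexthop-netmask).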
def test_add_router_route(self):
t = uuidutils.generate_uuid()
r = uuidutils.generate_uuid()
router_path = "/tenants/%s/routers/%s" % (_ofc(t), _ofc(r))
routes_path = router_path + "/routes"
dest = '10.1.1.0/24'
nexthop = '192.168.100.10'
body = {'destination': dest, 'nexthop': nexthop}
route_id = self._get_route_id(dest, nexthop)
ofc.OFCClient.do_request("POST", routes_path,
body=body).AndReturn({'id': route_id})
self.mox.ReplayAll()
ret = self.driver.add_router_route(router_path, '10.1.1.0/24',
'192.168.100.10')
self.mox.VerifyAll()
route_path = routes_path + '/' + route_id
self.assertEqual(ret, route_path)
def test_delete_router_route(self):
t = uuidutils.generate_uuid()
r = uuidutils.generate_uuid()
router_path = "/tenants/%s/routers/%s" % (_ofc(t), _ofc(r))
routes_path = router_path + "/routes"
route_id = self._get_route_id('10.1.1.0/24', '192.168.100.10')
route_path = routes_path + '/' + route_id
ofc.OFCClient.do_request("DELETE", route_path)
self.mox.ReplayAll()
self.driver.delete_router_route(route_path)
self.mox.VerifyAll()
def test_list_router_routes(self):
t = uuidutils.generate_uuid()
r = uuidutils.generate_uuid()
router_path = "/tenants/%s/routers/%s" % (_ofc(t), _ofc(r))
routes_path = router_path + "/routes"
routes = [('10.1.1.0/24', '192.168.100.10'),
('10.2.2.0/20', '192.168.100.20')]
data = {'routes': [{'id': self._get_route_id(route[0], route[1]),
'destination': route[0], 'nexthop': route[1]}
for route in routes]}
ofc.OFCClient.do_request("GET", routes_path).AndReturn(data)
self.mox.ReplayAll()
ret = self.driver.list_router_routes(router_path)
self.mox.VerifyAll()
expected = [{'id': (routes_path + "/" +
self._get_route_id(route[0], route[1])),
'destination': route[0], 'nexthop': route[1]}
for route in routes]
self.assertEqual(len(routes), len(ret))
self.assertEqual(expected, ret)
class PFCDriverStringTest(base.BaseTestCase):
driver = 'neutron.plugins.nec.drivers.pfc.PFCDriverBase'
def setUp(self):
super(PFCDriverStringTest, self).setUp()
self.driver = drivers.get_driver(self.driver)(TestConfig)
def test_generate_pfc_id_uuid(self):
id_str = uuidutils.generate_uuid()
exp_str = (id_str[:14] + id_str[15:]).replace('-', '')[:31]
ret_str = self.driver._generate_pfc_id(id_str)
self.assertEqual(exp_str, ret_str)
def test_generate_pfc_id_uuid_no_hyphen(self):
# Keystone tenant_id style uuid
id_str = uuidutils.generate_uuid()
id_no_hyphen = id_str.replace('-', '')
exp_str = (id_str[:14] + id_str[15:]).replace('-', '')[:31]
ret_str = self.driver._generate_pfc_id(id_no_hyphen)
self.assertEqual(exp_str, ret_str)
def test_generate_pfc_id_string(self):
id_str = uuidutils.generate_uuid() + 'x'
exp_str = id_str[:31].replace('-', '_')
ret_str = self.driver._generate_pfc_id(id_str)
self.assertEqual(exp_str, ret_str)
def test_generate_pfc_desc(self):
random_list = [random.choice(string.printable) for x in range(128)]
random_str = ''.join(random_list)
accept_letters = string.letters + string.digits
exp_list = [x if x in accept_letters else '_' for x in random_list]
exp_str = ''.join(exp_list)[:127]
ret_str = self.driver._generate_pfc_description(random_str)
self.assertEqual(exp_str, ret_str)
class PFCIdConvertTest(base.BaseTestCase):
driver = 'neutron.plugins.nec.drivers.pfc.PFCDriverBase'
def setUp(self):
super(PFCIdConvertTest, self).setUp()
self.mox = mox.Mox()
self.driver = drivers.get_driver(self.driver)(TestConfig)
self.ctx = self.mox.CreateMock(context.Context)
self.ctx.session = "session"
self.mox.StubOutWithMock(ndb, 'get_ofc_id_lookup_both')
self.addCleanup(self.mox.UnsetStubs)
def generate_random_ids(self, count=1):
if count == 1:
return uuidutils.generate_uuid()
else:
return [uuidutils.generate_uuid() for _ in xrange(count)]
def test_convert_tenant_id(self):
ofc_t_id = self.generate_random_ids(1)
ret = self.driver.convert_ofc_tenant_id(self.ctx, ofc_t_id)
self.assertEqual(ret, '/tenants/%s' % ofc_t_id)
def test_convert_tenant_id_noconv(self):
ofc_t_id = '/tenants/%s' % self.generate_random_ids(1)
ret = self.driver.convert_ofc_tenant_id(self.ctx, ofc_t_id)
self.assertEqual(ret, ofc_t_id)
def test_convert_network_id(self):
t_id, ofc_t_id, ofc_n_id = self.generate_random_ids(3)
ndb.get_ofc_id_lookup_both(
self.ctx.session, 'ofc_tenant', t_id).AndReturn(ofc_t_id)
self.mox.ReplayAll()
ret = self.driver.convert_ofc_network_id(self.ctx, ofc_n_id, t_id)
self.assertEqual(ret, ('/tenants/%(tenant)s/networks/%(network)s' %
{'tenant': ofc_t_id, 'network': ofc_n_id}))
self.mox.VerifyAll()
def test_convert_network_id_with_new_tenant_id(self):
t_id, ofc_t_id, ofc_n_id = self.generate_random_ids(3)
ofc_t_path = '/tenants/%s' % ofc_t_id
ndb.get_ofc_id_lookup_both(
self.ctx.session, 'ofc_tenant', t_id).AndReturn(ofc_t_path)
self.mox.ReplayAll()
ret = self.driver.convert_ofc_network_id(self.ctx, ofc_n_id, t_id)
self.assertEqual(ret, ('/tenants/%(tenant)s/networks/%(network)s' %
{'tenant': ofc_t_id, 'network': ofc_n_id}))
self.mox.VerifyAll()
def test_convert_network_id_noconv(self):
t_id = 'dummy'
ofc_t_id, ofc_n_id = self.generate_random_ids(2)
ofc_n_id = ('/tenants/%(tenant)s/networks/%(network)s' %
{'tenant': ofc_t_id, 'network': ofc_n_id})
ret = self.driver.convert_ofc_network_id(self.ctx, ofc_n_id, t_id)
self.assertEqual(ret, ofc_n_id)
def test_convert_port_id(self):
t_id, n_id = self.generate_random_ids(2)
ofc_t_id, ofc_n_id, ofc_p_id = self.generate_random_ids(3)
ndb.get_ofc_id_lookup_both(
self.ctx.session, 'ofc_network', n_id).AndReturn(ofc_n_id)
ndb.get_ofc_id_lookup_both(
self.ctx.session, 'ofc_tenant', t_id).AndReturn(ofc_t_id)
self.mox.ReplayAll()
ret = self.driver.convert_ofc_port_id(self.ctx, ofc_p_id, t_id, n_id)
exp = ('/tenants/%(tenant)s/networks/%(network)s/ports/%(port)s' %
{'tenant': ofc_t_id, 'network': ofc_n_id, 'port': ofc_p_id})
self.assertEqual(ret, exp)
self.mox.VerifyAll()
def test_convert_port_id_with_new_tenant_id(self):
t_id, n_id = self.generate_random_ids(2)
ofc_t_id, ofc_n_id, ofc_p_id = self.generate_random_ids(3)
ofc_t_path = '/tenants/%s' % ofc_t_id
ndb.get_ofc_id_lookup_both(
self.ctx.session, 'ofc_network', n_id).AndReturn(ofc_n_id)
ndb.get_ofc_id_lookup_both(
self.ctx.session, 'ofc_tenant', t_id).AndReturn(ofc_t_path)
self.mox.ReplayAll()
ret = self.driver.convert_ofc_port_id(self.ctx, ofc_p_id, t_id, n_id)
exp = ('/tenants/%(tenant)s/networks/%(network)s/ports/%(port)s' %
{'tenant': ofc_t_id, 'network': ofc_n_id, 'port': ofc_p_id})
self.assertEqual(ret, exp)
self.mox.VerifyAll()
def test_convert_port_id_with_new_network_id(self):
t_id, n_id = self.generate_random_ids(2)
ofc_t_id, ofc_n_id, ofc_p_id = self.generate_random_ids(3)
ofc_n_path = ('/tenants/%(tenant)s/networks/%(network)s' %
{'tenant': ofc_t_id, 'network': ofc_n_id})
ndb.get_ofc_id_lookup_both(
self.ctx.session, 'ofc_network', n_id).AndReturn(ofc_n_path)
self.mox.ReplayAll()
ret = self.driver.convert_ofc_port_id(self.ctx, ofc_p_id, t_id, n_id)
exp = ('/tenants/%(tenant)s/networks/%(network)s/ports/%(port)s' %
{'tenant': ofc_t_id, 'network': ofc_n_id, 'port': ofc_p_id})
self.assertEqual(ret, exp)
self.mox.VerifyAll()
def test_convert_port_id_noconv(self):
t_id = n_id = 'dummy'
<reponame>MichaelKohler/mozfest-event-app-data-processor
import argparse, os, sys, traceback
import github3
import gspread
import io
import json
import logging
import os
import requests
import base64
from datetime import datetime, timedelta
from logging.config import dictConfig
from oauth2client.client import SignedJwtAssertionCredentials
from helper import parseListFromEnvVar
# #####
# Look for optional environment variables.
# If not set, use default values.
# #####
# TARGET_DIR. Default value ''.
TARGET_DIR = os.environ['TARGET_DIR'] + "/" if 'TARGET_DIR' in os.environ and os.environ['TARGET_DIR'] != '' else ''
# TARGET_BRANCHES. Default value ['gh-pages'].
TARGET_BRANCHES = parseListFromEnvVar(os.environ['TARGET_BRANCHES']) if 'TARGET_BRANCHES' in os.environ else ['gh-pages']
# FETCH_MULTIPLE_WORKSHEETS. Default value True.
FETCH_MULTIPLE_WORKSHEETS = os.environ['FETCH_MULTIPLE_WORKSHEETS'] if 'FETCH_MULTIPLE_WORKSHEETS' in os.environ else True
# MAKE_LOCAL_JSON. Default value True.
MAKE_LOCAL_JSON = os.environ['MAKE_LOCAL_JSON'] if 'MAKE_LOCAL_JSON' in os.environ else True
# COMMIT_JSON_TO_GITHUB. Default value False.
COMMIT_JSON_TO_GITHUB = True if 'COMMIT_JSON_TO_GITHUB' in os.environ and os.environ['COMMIT_JSON_TO_GITHUB'] == 'True' else False
# SESSIONS_WORKSHEETS_TO_FETCH. Default value [].
SESSIONS_WORKSHEETS_TO_FETCH = parseListFromEnvVar(os.environ['SESSIONS_WORKSHEETS_TO_FETCH']) if 'SESSIONS_WORKSHEETS_TO_FETCH' in os.environ else []
# PROMPT_BEFORE_COMMIT_TO_GITHUB. Default value False.
PROMPT_BEFORE_COMMIT_TO_GITHUB = True if 'PROMPT_BEFORE_COMMIT_TO_GITHUB' in os.environ and os.environ['PROMPT_BEFORE_COMMIT_TO_GITHUB'] == 'True' else False
# custom [category] label in source spreadsheet, e.g., for MozFest 2016 this spreadsheet column is called "space"
CUSTOM_CATEGORY_LABEL = os.environ['CUSTOM_CATEGORY_LABEL'] if 'CUSTOM_CATEGORY_LABEL' in os.environ and len(os.environ['CUSTOM_CATEGORY_LABEL']) > 0 else 'category'
# custom [tags] label in source spreadsheet, e.g., for MozFest 2016 this spreadsheet column is called "pathways"
CUSTOM_TAGS_LABEL = os.environ['CUSTOM_TAGS_LABEL'] if 'CUSTOM_TAGS_LABEL' in os.environ and len(os.environ['CUSTOM_TAGS_LABEL']) > 0 else 'tags'
GITHUB_CONFIG = {
'TOKEN': os.environ['GITHUB_TOKEN'],
'REPO_OWNER': os.environ['REPO_OWNER'],
'REPO_NAME': os.environ['REPO_NAME'],
'TARGET_DIR': TARGET_DIR,
'TARGET_FILE': 'sessions.json',
'TARGET_BRANCHES': TARGET_BRANCHES
}
GOOGLE_API_CONFIG = {
'CLIENT_EMAIL': os.environ['GOOGLE_API_CLIENT_EMAIL'],
'PRIVATE_KEY': os.environ['GOOGLE_API_PRIVATE_KEY'].decode('unicode_escape'),
'SCOPE': ['https://spreadsheets.google.com/feeds']
}
GOOGLE_SPREADSHEET_KEY = os.environ['GOOGLE_SPREADSHEET_KEY']
def authenticate_with_google():
'''
Connect to Google Spreadsheet with gspread library.
'''
credentials = SignedJwtAssertionCredentials(
GOOGLE_API_CONFIG['CLIENT_EMAIL'], GOOGLE_API_CONFIG['PRIVATE_KEY'], GOOGLE_API_CONFIG['SCOPE']
)
google_api_conn = gspread.authorize(credentials)
return google_api_conn
def open_google_spreadsheet():
'''
Authenticate and return spreadsheet by `GOOGLE_SPREADSHEET_KEY`.
'''
google_api_conn = authenticate_with_google()
spreadsheet = google_api_conn.open_by_key(GOOGLE_SPREADSHEET_KEY)
return spreadsheet
def fetch_data(multiple_sheets=False, sessions_worksheets_to_fetch=[]):
spreadsheet = open_google_spreadsheet()
data = {
'timeblocks': fetch_worksheets(spreadsheet, multiple_sheets, ['* Timeblock Values']),
'sessions': fetch_worksheets(spreadsheet, multiple_sheets, sessions_worksheets_to_fetch)
}
return data
def fetch_worksheets(spreadsheet, multiple_sheets=False, sessions_worksheets_to_fetch=[]):
if not multiple_sheets:
# Return data from first worksheet in Google spreadsheet.
worksheet = spreadsheet.get_worksheet(0)
data = worksheet.get_all_records(empty2zero=False)
else:
# Return data from all worksheets in sessions_worksheets_to_fetch
data = []
worksheet_list = [
sheet for sheet in spreadsheet.worksheets() if sheet.title in sessions_worksheets_to_fetch
]
for worksheet in worksheet_list:
worksheet.title
data.extend(worksheet.get_all_records(empty2zero=False))
return data
def slugify_timeblock(timeblock):
# "slugified" version of timeblock
timeblock = timeblock.strip().lower().replace(' ','-').replace(',','').replace(':','-').replace('*','').replace('&','-').replace('(','-').replace(')','-')
return timeblock
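# Worked example with an illustrative timeblock string: 'Saturday, 10:45'
# lower-cases to 'saturday, 10:45', spaces and colons become '-', the comma is
# dropped, and the result is 'saturday-10-45'.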
def transform_timeblock_data(data):
def _transform_response_item(item, skip=False):
# make sure vars are strings
_transformed_item = {k: unicode(v) for k, v in item.iteritems() if k}
# remove rows that are blank or used for providing instructions
if _transformed_item['day'] and _transformed_item['day'].find('select from dropdown') == -1 and _transformed_item['start time']:
skip = False
else:
skip = True
# transform `Auto Generated. Do Not Modify.` column name into `key` key
if 'Auto Generated. Do Not Modify.' in _transformed_item:
_transformed_item['key'] = slugify_timeblock(_transformed_item.pop('Auto Generated. Do Not Modify.', ''))
# if we've triggered the skip flag anywhere, drop this record
if skip:
_transformed_item = None
return _transformed_item
def _add_timeblock_order(item, index, skip=False):
# make sure vars are strings
_transformed_item = {k: unicode(v) for k, v in item.iteritems() if k}
# +1 because we want order to start from 1
_transformed_item['order'] = index+1
return _transformed_item
dayOrder = {
'Monday': 1,
'Tuesday': 2,
'Wednesday': 3,
'Thursday': 4,
'Friday': 5,
'Saturday': 6,
'Sunday': 7
}
def dayComparator(x, y):
if dayOrder[x['day']] < dayOrder[y['day']]:
return -1
elif dayOrder[x['day']] > dayOrder[y['day']]:
return 1
else:
return 0
# pass data through the transformer
transformed_data = filter(None, [_transform_response_item(item) for item in data])
# sort timeblocks by start time in a day
transformed_data = sorted(transformed_data, key=lambda timeblock:(timeblock['start time']))
# sort timeblocks again by day in week
transformed_data.sort(dayComparator)
# assign 'order' to timeblock
transformed_data = filter(None, [_add_timeblock_order(item,index) for index, item in enumerate(transformed_data)])
return transformed_data
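# Worked example with three hypothetical rows -- (Tuesday 09:00), (Monday 13:00),
# (Monday 09:00): sorting by start time and then stably by weekday yields
# Monday 09:00 (order 1), Monday 13:00 (order 2), Tuesday 09:00 (order 3).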
def transform_session_data(data):
'''
Transforms data and filters individual schedule items for fields we want
to publish. Currently, this:
* ensures that all variables going into the JSON are strings
* removes `proposalSpreadsheetRowNumber` to make JSON smaller
* transforms column name `name` into JSON key `title`
* transforms column name `id` into JSON key `id`
* removes any rows that don't have a numeric `id`
* creates a `facilitators` dict
* creates a `facilitators_names` name list
* transform column name "CUSTOM_CATEGORY_LABEL" into JSON key `category`
* remove invalid tag labels that were used for GitHub workflow and transform column name "CUSTOM_TAGS_LABEL" into JSON key `tags`
* creates a `timeblock` key based on data in `time` column
* creates Saturday and Sunday versions of sessions marked 'all-weekend'
* infers a `day` and `start` key based on data in `time` column
* labels `programmatic` session
* validates `notes url` so only one url is included
'''
def _transform_response_item(item, skip=False):
# make sure vars are strings
_transformed_item = {k: unicode(v) for k, v in item.iteritems() if k}
# transform `name` column name into `title` key
if 'name' in _transformed_item:
_transformed_item['title'] = _transformed_item.pop('name', '')
if not _transformed_item['title']:
skip = True
# set `id` key
# (and skip rows without a valid id)
if 'session id' in _transformed_item:
_transformed_item['id'] = _transformed_item.pop('session id', '').strip()
# remove rows with `session id` that is blank or provides instructions
if len(_transformed_item['id']) == 0 or len(_transformed_item['id'].split(" ")) != 1:
skip = True
# create `facilitators` key
facilitators = {}
facilitators_names = []
for key in _transformed_item.keys():
if key.startswith('facilitator'):
facilitatorObj = {}
wordList = key.split(" ")
facilitatorNumber = wordList[1]
metaType = wordList[2]
metaValue = _transformed_item.pop(key)
if facilitatorNumber not in facilitators:
facilitators[facilitatorNumber] = facilitatorObj
else:
facilitatorObj = facilitators[facilitatorNumber]
if metaType == 'name':
facilitatorObj['name'] = metaValue
facilitators_names.insert(int(facilitatorNumber)-1,metaValue)
elif metaType == 'twitter':
facilitatorObj['twitter'] = metaValue
elif metaType == 'affiliated':
facilitatorObj['affiliated org'] = metaValue
facilitators[facilitatorNumber] = facilitatorObj
_transformed_item['facilitators'] = facilitators
_transformed_item['facilitators_names'] = facilitators_names
# transform column name "CUSTOM_CATEGORY_LABEL" into JSON key `category`
_transformed_item['category'] = _transformed_item.pop(CUSTOM_CATEGORY_LABEL, '')
# remove invalid tag labels that were used for GitHub workflow and transform column name "CUSTOM_TAGS_LABEL" into JSON key `tags`
tags = _transformed_item[CUSTOM_TAGS_LABEL]
tag_skip_keywords = ['accepted','consideration','stipend','sample']
accepted_tags = ['AR, VR & Machine Learning','Advocacy & Web Literacy','Digital Inclusion','The Future of the Web','Multi-lingual MozFest','Storytelling']
# Since "AR, VR & Machine Learning" contains a comma,
# it makes it troublesome to turn the string into a list using the split(",") method
# Let's remove it from the string first and add it back to the list after
troublesome_tag = 'AR, VR & Machine Learning'
hasArVrMachineLearning = False
if troublesome_tag in tags:
hasArVrMachineLearning = True
tags = tags.replace(troublesome_tag,"")
# We wrap each value with double quotes in the spreadsheet.
# Double quotes are irrelevant to the actual value, let's remove them.
# filter(None, list) removes empty item from list
tag_list = filter(None,tags.replace("\"","").split(','))
# we don't want random [tags] to be included
# (build a new list instead of removing from tag_list while iterating over it,
# which would skip the element that follows every removal)
accepted_lower = [accepted_tag.lower() for accepted_tag in accepted_tags]
tag_list = [name.strip() for name in tag_list if name.lower().strip() in accepted_lower]
if hasArVrMachineLearning is True:
tag_list.append(troublesome_tag)
_transformed_item['tags'] = tag_list
_transformed_item.pop(CUSTOM_TAGS_LABEL, '')
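# Worked example with an illustrative spreadsheet cell of
# '"Digital Inclusion","Storytelling"': the quotes are stripped, the string is
# split on ',', both names match accepted_tags, so the session's `tags` key
# becomes ['Digital Inclusion', 'Storytelling'].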
# create `timeblock` key based on `timeblock`
time_data = _transformed_item.pop('timeblock', '')
timeblock = slugify_timeblock(time_data)
_transformed_item['timeblock'] = timeblock
# infer session day
if 'Monday' in time_data:
_transformed_item['day'] = 'Monday'
if 'Tuesday' in time_data:
_transformed_item['day'] = 'Tuesday'
if 'Wednesday' in time_data:
_transformed_item['day'] = 'Wednesday'
if 'Thursday' in time_data:
_transformed_item['day'] = 'Thursday'
if 'Friday' in time_data:
_transformed_item['day'] = 'Friday'
if 'Saturday' in time_data:
_transformed_item['day'] = 'Saturday'
if 'Sunday' in time_data:
_transformed_item['day'] = 'Sunday'
# start time
if len(time_data) > 1:
start_time = time_data.split('(')
start_time = start_time[len(start_time)-1].strip(')')[-5:] # return the last 5 character
duration = _transformed_item.pop('duration', '') or 0
try:
# attempt to coerce to 12-hour format
d = datetime.strptime(start_time, "%H:%M")
start_time = d.strftime("%I:%M").lstrip('0') + d.strftime('%p').lower()
duration = int(duration)
end_time = d + timedelta(minutes=duration)
end_time = end_time.strftime("%I:%M").lstrip('0') + end_time.strftime('%p').lower()
if start_time[0] == '0':
# strip leading 0
start_time = start_time[1:]
except:
start_time = ''
end_time = ''
pass
_transformed_item['start'] = start_time
_transformed_item['end'] = end_time
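# Worked example with an illustrative timeblock string ending in '(14:30)' and a
# duration of 60: start_time becomes '2:30pm' and end_time becomes '3:30pm'.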
# prepend `location` with the word 'Floor'
# if _transformed_item['location'] and not _transformed_item['location'].startswith('Floor'):
# _transformed_item['location'] = 'Floor {0}'.format(_transformed_item['location'])
# mark as "programmatic" session if session's category is 'Programmatic Pieces'
# clear category meta since 'Programmatic Pieces' isn't a real [category] (e.g., a MozFest Space)
| |
Nu: u.m**2/u.s
:param Roughness: roughness of channel
:type Roughness: u.m
:return: major head loss in general channel
:rtype: u.m
"""
ut.check_range([Length.magnitude, ">0", "Length"])
return (fric_channel(Area, PerimWetted, Vel, Nu, Roughness) * Length
/ (4 * radius_hydraulic_channel(Area, PerimWetted))
* Vel**2 / (2*u.gravity)
).to(u.m)
@ut.list_handler()
def headloss_exp_general(Vel, KMinor):
"""
.. deprecated::
`headloss_exp_general` is deprecated; use `headloss_minor_channel` instead.
"""
warnings.warn('headloss_exp_general` is deprecated; use `headloss_minor_channel` instead',
UserWarning)
return headloss_minor_channel(Vel, KMinor)
@ut.list_handler()
def headloss_minor_channel(Vel, KMinor):
"""Return the minor head loss due to expansion in a general channel.
This equation applies to both laminar and turbulent flows.
:param Vel: velocity of fluid
:type Vel: u.m/u.s
:param KMinor: minor loss coefficient
:type KMinor: u.dimensionless or unitless
:return: minor head loss in general channel
:rtype: u.m
"""
ut.check_range([Vel.magnitude, ">0", "Velocity"],
[KMinor, '>=0', 'K minor'])
return (KMinor * Vel**2 / (2*u.gravity)).to(u.m)
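# Quick sanity check with illustrative values: Vel = 1 * u.m/u.s and KMinor = 2
# give K * V**2 / (2 * g) = 2 / (2 * 9.81), i.e. roughly 0.10 m of head loss.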
@ut.list_handler()
def headloss_gen(Area, Vel, PerimWetted, Length, KMinor, Nu, PipeRough):
"""
.. deprecated::
`headloss_gen` is deprecated; use `headloss_channel` instead.
"""
warnings.warn('headloss_gen` is deprecated; use `headloss_channel` instead',
UserWarning)
return headloss_channel(Area, Vel, PerimWetted, Length, KMinor, Nu, PipeRough)
@ut.list_handler()
def headloss_channel(Area, Vel, PerimWetted, Length, KMinor, Nu, Roughness):
"""Return the total head loss from major and minor losses in a general
channel.
This equation applies to both laminar and turbulent flows.
:param Area: cross sectional area of channel
:type Area: u.m**2
:param Vel: velocity of fluid
:type Vel: u.m/u.s
:param PerimWetted: wetted perimeter of channel
:type PerimWetted: u.m
:param Length: length of channel
:type Length: u.m
:param KMinor: minor loss coefficient
:type KMinor: u.dimensionless or unitless
:param Nu: kinematic viscosity of fluid
:type Nu: u.m**2/u.s
:param Roughness: roughness of channel
:type Roughness: u.m
:return: total head loss in general channel
:rtype: u.m
"""
return (headloss_minor_channel(Vel, KMinor)
+ headloss_major_channel(Area, PerimWetted, Vel,
Length, Nu, Roughness)).to(u.m)
@ut.list_handler()
def headloss_manifold(FlowRate, Diam, Length, KMinor, Nu, Roughness=None, NumOutlets=None, *, PipeRough=None):
"""Return the total head loss through the manifold.
:param FlowRate: flow rate through manifold
:type FlowRate: u.m**3/u.s
:param Diam: diameter of manifold
:type Diam: u.m
:param Length: length of manifold
:type Length: u.m
:param KMinor: minor loss coefficient
:type KMinor: u.dimensionless or unitless
:param Nu: kinematic viscosity of fluid
:type Nu: u.m**2/u.s
:param Roughness: roughness of manifold
:type Roughness: u.m
:param NumOutlets: number of outlets from manifold
:type NumOutlets: u.dimensionless or unitless
:param PipeRough: deprecated; use Roughness instead
:return: total headloss through manifold
:rtype: u.m
"""
ut.check_range([NumOutlets, ">0, int", 'Number of outlets'])
if Roughness is not None and PipeRough is not None:
raise TypeError("headloss_manifold received both Roughness and PipeRough")
elif Roughness is None and PipeRough is None:
raise TypeError("headloss_manifold missing Roughness argument")
elif NumOutlets is None:
raise TypeError("headloss_manifold missing NumOutlets argument")
elif PipeRough is not None:
warnings.warn("PipeRough is deprecated; use Roughness instead.",
UserWarning)
Roughness = PipeRough
return (headloss_pipe(FlowRate, Diam, Length, Nu, Roughness, KMinor)
* ((1/3)
+ (1 / (2*NumOutlets))
+ (1 / (6*NumOutlets**2))
)
).to(u.m)
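# Hedged usage sketch (all values are illustrative, not from the source):
#
#     hl = headloss_manifold(FlowRate=0.01 * u.m**3/u.s, Diam=0.10 * u.m,
#                            Length=2 * u.m, KMinor=0.5, Nu=1e-6 * u.m**2/u.s,
#                            Roughness=0.0001 * u.m, NumOutlets=6)
#
# The bracketed factor (1/3 + 1/(2*N) + 1/(6*N**2)) scales the straight-pipe head
# loss to account for the flow rate decreasing along the manifold as successive
# outlets draw water off.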
@ut.list_handler()
def elbow_minor_loss(q, id_, k):
"""
.. deprecated::
`elbow_minor_loss` is deprecated; use `headloss_minor_elbow` instead.
"""
warnings.warn('elbow_minor_loss is deprecated; use headloss_minor_elbow instead',
UserWarning)
return headloss_minor_elbow(q, id_, k)
@ut.list_handler()
def headloss_minor_elbow(FlowRate, Diam, KMinor):
"""Return the minor head loss (due to changes in geometry) in an elbow.
:param FlowRate: flow rate through pipe
:type FlowRate: u.m**3/u.s
:param Diam: diameter of pipe
:type Diam: u.m
:param KMinor: minor loss coefficient
:type KMinor: u.dimensionless or unitless
:return: minor head loss in pipe
:rtype: u.m
"""
vel = FlowRate / area_circle(Diam)
minor_loss = KMinor * vel ** 2 / (2 * u.gravity)
return minor_loss.to(u.m)
######################### Orifices #########################
@ut.list_handler()
def flow_orifice(Diam, Height, RatioVCOrifice):
"""Return the flow rate of the orifice.
:param Diam: diameter of orifice
:type Diam: u.m
:param Height: piezometric height of orifice
:type Height: u.m
:param RatioVCOrifice: vena contracta ratio of orifice
:type RatioVCOrifice: u.dimensionless or unitless
:return: flow rate of orifice
:rtype: u.m**3/u.s
"""
ut.check_range([Diam.magnitude, ">0", "Diameter"],
[RatioVCOrifice, "0-1", "VC orifice ratio"])
if Height.magnitude > 0:
return (RatioVCOrifice * area_circle(Diam)
* np.sqrt(2 * u.gravity * Height)).to(u.m**3/u.s)
else:
return 0 * u.m**3/u.s
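# Hedged usage sketch (illustrative values): a 2 cm orifice under 0.4 m of head
# with a vena contracta ratio of 0.62:
#
#     q = flow_orifice(Diam=0.02 * u.m, Height=0.4 * u.m, RatioVCOrifice=0.62)
#
# which evaluates the standard orifice equation Q = Cv * A * sqrt(2 * g * h).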
@ut.list_handler()
def flow_orifice_vert(Diam, Height, RatioVCOrifice):
"""Return the vertical flow rate of the orifice.
:param Diam: diameter of orifice
:type Diam: u.m
:param Height: piezometric height of orifice
:type Height: u.m
:param RatioVCOrifice: vena contracta ratio of orifice
:type RatioVCOrifice: u.dimensionless or unitless
:return: vertical flow rate of orifice
:rtype: u.m**3/u.s
"""
ut.check_range([RatioVCOrifice, "0-1", "VC orifice ratio"])
Diam = Diam.to(u.m)
Height = Height.to(u.m)
if Height > -Diam / 2:
flow_vert = integrate.quad(lambda z: (Diam*np.sin(np.arccos(z*u.m/(Diam/2)))
* np.sqrt(Height - z*u.m)
).magnitude,
- Diam.magnitude / 2,
min(Diam/2, Height).magnitude)
return (flow_vert[0] * u.m**2.5 * RatioVCOrifice *
np.sqrt(2 * u.gravity)).to(u.m**3/u.s)
else:
return 0 * u.m**3/u.s
@ut.list_handler()
def head_orifice(Diam, RatioVCOrifice, FlowRate):
"""Return the piezometric head of the orifice.
:param Diam: diameter of orifice
:type Diam: u.m
:param RatioVCOrifice: vena contracta ratio of orifice
:type RatioVCOrifice: u.dimensionless or unitless
:param FlowRate: flow rate of orifice
:type FlowRate: u.m**3/u.s
:return: head of orifice
:rtype: u.m
"""
ut.check_range([Diam.magnitude, ">0", "Diameter"],
[FlowRate.magnitude, ">0", "Flow rate"],
[RatioVCOrifice, "0-1", "VC orifice ratio"])
return ((FlowRate
/ (RatioVCOrifice * area_circle(Diam))
)**2
/ (2*u.gravity)
).to(u.m)
@ut.list_handler()
def area_orifice(Height, RatioVCOrifice, FlowRate):
"""Return the area of the orifice.
:param Height: piezometric height of orifice
:type Height: u.m
:param RatioVCOrifice: vena contracta ratio of orifice
:type RatioVCOrifice: u.dimensionless or unitless
:param FlowRate: flow rate of orifice
:type FlowRate: u.m**3/u.s
:return: area of orifice
:rtype: u.m**2
"""
ut.check_range([Height.magnitude, ">0", "Height"],
[FlowRate.magnitude, ">0", "Flow rate"],
[RatioVCOrifice, "0-1, >0", "VC orifice ratio"])
return (FlowRate / (RatioVCOrifice * np.sqrt(2 * u.gravity *
Height))).to(u.m**2)
@ut.list_handler()
def num_orifices(FlowRate, RatioVCOrifice, HeadLossOrifice, DiamOrifice):
"""Return the number of orifices.
:param FlowRate: flow rate of orifice
:type FlowRate: u.m**3/u.s
:param RatioVCOrifice: vena contracta ratio of orifice
:type RatioVCOrifice: u.dimensionless or unitless
:param HeadLossOrifice: head loss of orifice
:type HeadLossOrifice: u.m
:param DiamOrifice: diameter of orifice
:type DiamOrifice: u.m
:return: number of orifices
:rtype: u.dimensionless
"""
return np.ceil(area_orifice(HeadLossOrifice, RatioVCOrifice, FlowRate)
/ area_circle(DiamOrifice)).to(u.dimensionless)
########################### Flows ###########################
@ut.list_handler()
def flow_transition(Diam, Nu):
"""Return the flow rate for the laminar/turbulent transition.
:param Diam: diameter of pipe
:type Diam: u.m
:param Nu: kinematic viscosity of fluid
:type Nu: u.m**2/u.s
:return: flow rate for laminar/turbulent transition
:rtype: u.m**3/u.s
"""
ut.check_range([Diam.magnitude, ">0", "Diameter"],
[Nu.magnitude, ">0", "Nu"])
return (np.pi * Diam * RE_TRANSITION_PIPE * Nu / 4).to(u.m**3/u.s)
@ut.list_handler()
def flow_hagen(Diam, HeadLossMajor=None, Length=None, Nu=None, *, HeadLossFric=None):
"""Return the flow rate for laminar flow with only major losses.
:param Diam: diameter of pipe
:type Diam: u.m
:param HeadLossMajor: head loss due to friction
:type HeadLossMajor: u.m
:param Length: length of pipe
:type Length: u.m
:param Nu: kinematic viscosity of fluid
:type Nu: u.m**2/u.s
:param HeadLossFric: deprecated; use HeadLossMajor instead
:return: flow rate for laminar flow with only major losses
:rtype: u.m**3/u.s
"""
if HeadLossMajor is not None and HeadLossFric is not None:
raise TypeError("flow_hagen received both HeadLossMajor and HeadLossFric")
elif HeadLossMajor is None and HeadLossFric is None:
raise TypeError("flow_hagen missing HeadLossMajor argument")
elif Length is None:
raise TypeError("flow_hagen missing Length argument")
elif Nu is None:
raise TypeError("flow_hagen missing Nu argument")
elif HeadLossFric is not None:
warnings.warn("HeadLossFric is deprecated; use HeadLossMajor instead.",
UserWarning)
HeadLossMajor = HeadLossFric
ut.check_range([Diam.magnitude, ">0", "Diameter"],
[Length.magnitude, ">0", "Length"],
[HeadLossMajor.magnitude, ">=0", "Headloss due to friction"],
[Nu.magnitude, ">0", "Nu"])
return ((np.pi*Diam**4) / (128*Nu) * u.gravity * HeadLossMajor
/ Length).to(u.m**3/u.s)
@ut.list_handler()
def flow_swamee(Diam, HeadLossMajor=None, Length=None, Nu=None, Roughness=None, *, HeadLossFric=None, PipeRough=None):
"""Return the flow rate for turbulent flow with only major losses.
:param Diam: diameter of pipe
:type Diam: u.m
:param HeadLossMajor: head loss due to friction
:type HeadLossMajor: u.m
:param Length: length of pipe
:type Length: u.m
:param Nu: kinematic viscosity of fluid
:type Nu: u.m**2/u.s
:param Roughness: roughness of pipe
:type Roughness: u.m
:param HeadLossFric: deprecated; use HeadLossMajor instead
:param PipeRough: deprecated; use Roughness instead
:return: flow rate for turbulent flow with only major losses
:rtype: u.m**3/u.s
"""
if HeadLossMajor is not None and HeadLossFric is not None:
raise TypeError("flow_swamee received both HeadLossMajor and HeadLossFric")
elif HeadLossMajor is None and HeadLossFric is None:
raise TypeError("flow_swamee missing HeadLossMajor argument")
elif Length is None:
raise TypeError("flow_swamee missing Length argument")
elif Nu is None:
raise TypeError("flow_swamee missing Nu argument")
elif Roughness is not None and PipeRough is not None:
raise TypeError("flow_swamee received both Roughness and PipeRough")
elif Roughness is None and PipeRough is None:
raise TypeError("flow_swamee missing Roughness argument")
<gh_stars>1000+
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Name: tree/verticality.py
# Purpose: Object for dealing with vertical simultaneities in a
# fast way w/o Chord's overhead
#
# Authors: <NAME>
# <NAME>
#
# Copyright: Copyright © 2013-16 <NAME> and the music21
# Project
# License: BSD, see license.txt
# ----------------------------------------------------------------------------
'''
Object for dealing with vertical simultaneities in a fast way w/o Chord's overhead.
'''
import collections.abc
import copy
import itertools
import unittest
from music21 import chord
from music21 import common
from music21 import environment
from music21 import exceptions21
from music21 import note
from music21 import prebase
from music21 import tie
# from music21 import key
# from music21 import pitch
from music21.tree import spans
environLocal = environment.Environment('tree.verticality')
class VerticalityException(exceptions21.TreeException):
pass
class Verticality(prebase.ProtoM21Object):
r'''
A collection of information about elements that are sounding at a given
offset or just finished at that offset or are continuing from before, etc..
Create a timespan-stream from a score:
>>> score = corpus.parse('bwv66.6')
>>> scoreTree = tree.fromStream.asTimespans(score, flatten=True,
... classList=(note.Note, chord.Chord))
Find the verticality at offset 6.5, or beat 2.5 of measure 2 (there's a one
beat pickup)
>>> verticality = scoreTree.getVerticalityAt(6.5)
>>> verticality
<music21.tree.verticality.Verticality 6.5 {E3 D4 G#4 B4}>
The representation of a verticality gives the pitches from lowest to
highest (in sounding notes).
A verticality knows its offset, but because elements might end at
different times, it doesn't know its endTime
>>> verticality.offset
6.5
>>> verticality.endTime
Traceback (most recent call last):
AttributeError: 'Verticality' object has no attribute 'endTime'
However, we can find when the next verticality starts by looking at the nextVerticality
>>> nv = verticality.nextVerticality
>>> nv
<music21.tree.verticality.Verticality 7.0 {A2 C#4 E4 A4}>
>>> nv.offset
7.0
Or more simply:
>>> verticality.nextStartOffset
7.0
(There is also a previousVerticality, but not a previousStartOffset)
What we just demonstrated is actually very powerful: a Verticality keeps a
record of exactly where it is in the timespanTree -- scores can be
recreated with this information.
Getting back to the task at hand, we can find all the PitchedTimespans (and
from there the elements) that start at exactly 6.5. There's one, it's a
passing tone D in the tenor and it lasts from offset 6.5 to offset 7.0,
with respect to the beginning of the score, not to the beginning of the
measure. That is to say, it's an eighth note
>>> verticality.startTimespans
(<PitchedTimespan (6.5 to 7.0) <music21.note.Note D>>,)
And we can get all the PitchedTimespans that were already sounding at the
moment (that is to say, the non-passing tones):
>>> verticality.overlapTimespans
(<PitchedTimespan (6.0 to 7.0) <music21.note.Note B>>,
<PitchedTimespan (6.0 to 7.0) <music21.note.Note G#>>,
<PitchedTimespan (6.0 to 7.0) <music21.note.Note E>>)
And we can get all the things that stop right at this moment. It's the E
in the tenor preceding the passing tone D:
>>> verticality.stopTimespans
(<PitchedTimespan (6.0 to 6.5) <music21.note.Note E>>,)
'''
# CLASS VARIABLES #
__slots__ = (
'timespanTree',
'overlapTimespans',
'startTimespans',
'offset',
'stopTimespans',
)
_DOC_ATTR = {
'timespanTree': r'''
Returns the timespanTree initially set.
''',
'overlapTimespans': r'''
Gets timespans overlapping the start offset of a verticality.
>>> score = corpus.parse('bwv66.6')
>>> scoreTree = tree.fromStream.asTimespans(score, flatten=True,
... classList=(note.Note, chord.Chord))
>>> verticality = scoreTree.getVerticalityAt(0.5)
>>> verticality
<music21.tree.verticality.Verticality 0.5 {G#3 B3 E4 B4}>
>>> verticality.overlapTimespans
(<PitchedTimespan (0.0 to 1.0) <music21.note.Note E>>,)
''',
'startTimespans': r'''
Gets the timespans starting at a verticality's start offset.
>>> score = corpus.parse('bwv66.6')
>>> scoreTree = tree.fromStream.asTimespans(score, flatten=True,
... classList=(note.Note, chord.Chord))
>>> verticality = scoreTree.getVerticalityAt(1.0)
>>> verticality
<music21.tree.verticality.Verticality 1.0 {F#3 C#4 F#4 A4}>
>>> for timespan in verticality.startTimespans:
... timespan
...
<PitchedTimespan (1.0 to 2.0) <music21.note.Note A>>
<PitchedTimespan (1.0 to 2.0) <music21.note.Note F#>>
<PitchedTimespan (1.0 to 2.0) <music21.note.Note C#>>
<PitchedTimespan (1.0 to 2.0) <music21.note.Note F#>>
''',
'offset': r'''
Gets the start offset of a verticality.
>>> score = corpus.parse('bwv66.6')
>>> scoreTree = tree.fromStream.asTimespans(score, flatten=True,
... classList=(note.Note, chord.Chord))
>>> verticality = scoreTree.getVerticalityAt(1.0)
>>> verticality
<music21.tree.verticality.Verticality 1.0 {F#3 C#4 F#4 A4}>
>>> verticality.offset
1.0
''',
'stopTimespans': r'''
Gets the timespans stopping at a verticality's start offset.
>>> score = corpus.parse('bwv66.6')
>>> scoreTree = tree.fromStream.asTimespans(score, flatten=True,
... classList=(note.Note, chord.Chord))
>>> verticality = scoreTree.getVerticalityAt(1.0)
>>> verticality
<music21.tree.verticality.Verticality 1.0 {F#3 C#4 F#4 A4}>
Note that none of the elements in the stopTimespans are listed in
the repr for the Verticality
>>> for timespan in verticality.stopTimespans:
... timespan
...
<PitchedTimespan (0.0 to 1.0) <music21.note.Note E>>
<PitchedTimespan (0.5 to 1.0) <music21.note.Note B>>
<PitchedTimespan (0.5 to 1.0) <music21.note.Note B>>
<PitchedTimespan (0.5 to 1.0) <music21.note.Note G#>>
''',
}
# INITIALIZER #
def __init__(self,
offset=None,
overlapTimespans=None,
startTimespans=None,
stopTimespans=None,
timespanTree=None,
):
from music21.tree import trees
if timespanTree is not None and not isinstance(timespanTree, trees.OffsetTree):
raise VerticalityException(
f'timespanTree {timespanTree!r} is not a OffsetTree or None')
self.timespanTree = timespanTree
self.offset = offset
if not isinstance(startTimespans, tuple):
raise VerticalityException(f'startTimespans must be a tuple, not {startTimespans!r}')
if not isinstance(stopTimespans, (tuple, type(None))):
raise VerticalityException(
f'stopTimespans must be a tuple or None, not {stopTimespans!r}')
if not isinstance(overlapTimespans, (tuple, type(None))):
raise VerticalityException(
f'overlapTimespans must be a tuple or None, not {overlapTimespans!r}')
self.startTimespans = startTimespans
self.stopTimespans = stopTimespans
self.overlapTimespans = overlapTimespans
# SPECIAL METHODS #
def _reprInternal(self):
sortedPitches = sorted(self.pitchSet)
enclosedNames = '{' + ' '.join(x.nameWithOctave for x in sortedPitches) + '}'
return f'{self.offset} {enclosedNames}'
# PUBLIC PROPERTIES #
@property
def bassTimespan(self):
r'''
Gets the bass timespan in this verticality.
>>> score = corpus.parse('bwv66.6')
>>> scoreTree = tree.fromStream.asTimespans(score, flatten=True,
... classList=(note.Note, chord.Chord))
>>> verticality = scoreTree.getVerticalityAt(1.0)
>>> verticality
<music21.tree.verticality.Verticality 1.0 {F#3 C#4 F#4 A4}>
>>> verticality.bassTimespan
<PitchedTimespan (1.0 to 2.0) <music21.note.Note F#>>
'''
overallLowestPitch = None
lowestTimespan = None
for ts in self.startAndOverlapTimespans:
if not hasattr(ts, 'pitches'):
continue
tsPitches = ts.pitches
if not tsPitches:
continue
lowestPitch = sorted(tsPitches)[0]
if overallLowestPitch is None:
overallLowestPitch = lowestPitch
lowestTimespan = ts
if lowestPitch <= overallLowestPitch:
overallLowestPitch = lowestPitch
lowestTimespan = ts
return lowestTimespan
@property
def beatStrength(self):
r'''
Gets the beat strength of a verticality.
>>> score = corpus.parse('bwv66.6')
>>> scoreTree = tree.fromStream.asTimespans(score, flatten=True,
... classList=(note.Note, chord.Chord))
>>> verticality = scoreTree.getVerticalityAt(1.0)
>>> verticality.beatStrength
1.0
Note that it will return None if there are no startTimespans at this point:
>>> verticality = scoreTree.getVerticalityAt(1.25)
>>> verticality
<music21.tree.verticality.Verticality 1.25 {F#3 C#4 F#4 A4}>
>>> verticality.startTimespans
()
>>> verticality.beatStrength is None
True
'''
try:
thisTimespan = self.startTimespans[0]
except IndexError:
return None
return thisTimespan.element.beatStrength
def toChord(self):
'''
creates a chord.Chord object of default length (1.0 or
the duration of some note object) from the verticality.
Does nothing about ties, etc. -- a very dumb chord, but useful
for querying consonance, etc. See makeElement() for the smart version.
It may be a zero- or one-pitch chord.
>>> score = corpus.parse('bwv66.6')
>>> scoreTree = score.asTimespans()
>>> verticality = scoreTree.getVerticalityAt(4.0)
>>> verticality.toChord()
<music21.chord.Chord G#3 B3 E4 E5>
'''
c = chord.Chord(sorted(self.pitchSet))
return c
@property
def measureNumber(self):
r'''
Gets the measure number of the verticality's starting elements.
>>> score = corpus.parse('bwv66.6')
>>> scoreTree = tree.fromStream.asTimespans(score, flatten=True,
... classList=(note.Note, chord.Chord))
>>> verticality = scoreTree.getVerticalityAt(7.0)
>>> verticality.measureNumber
2
'''
return self.startTimespans[0].measureNumber
@property
def nextStartOffset(self):
r'''
Gets the next start-offset in the verticality's offset-tree.
>>> score = corpus.parse('bwv66.6')
>>> scoreTree = tree.fromStream.asTimespans(score, flatten=True,
... classList=(note.Note, chord.Chord))
>>> verticality = scoreTree.getVerticalityAt(1.0)
>>> verticality.nextStartOffset
2.0
If a verticality has no tree attached, then it will return None
'''
tree = self.timespanTree
if tree is None:
return None
offset = tree.getPositionAfter(self.offset)
return offset
@property
def nextVerticality(self):
r'''
Gets the next verticality after a verticality.
>>> score = corpus.parse('bwv66.6')
>>> scoreTree = tree.fromStream.asTimespans(score, flatten=True,
... classList=(note.Note, chord.Chord))
>>> verticality = scoreTree.getVerticalityAt(1.0)
>>> print(verticality)
<music21.tree.verticality.Verticality 1.0 {F#3 C#4 F#4 A4}>
>>> nextVerticality = verticality.nextVerticality
>>> print(nextVerticality)
<music21.tree.verticality.Verticality 2.0 {G#3 B3 E4 B4}>
Verticality objects created by an offset-tree hold a reference back to
that offset-tree. This means that they determine their next or previous
verticality dynamically based on the state of the offset-tree only when
asked. Because of this, it is safe to mutate the offset-tree by
inserting or removing timespans while iterating over it.
>>> scoreTree.removeTimespanList(nextVerticality.startTimespans)
>>> verticality.nextVerticality
<music21.tree.verticality.Verticality 3.0 {A3 E4 C#5}>
'''
tree = self.timespanTree
if tree is None:
return None
offset = tree.getPositionAfter(self.offset)
if offset is None:
return None
return tree.getVerticalityAt(offset)
| |
# This file was automatically created by FeynRules 1.7.53
# Mathematica version: 8.0 for Linux x86 (64-bit) (February 23, 2011)
# Date: Tue 31 Jul 2012 19:55:14
from object_library import all_parameters, Parameter
from function_library import complexconjugate, re, im, csc, sec, acsc, asec
# This is a default parameter object representing 0.
ZERO = Parameter(name = 'ZERO',
nature = 'internal',
type = 'real',
value = '0.0',
texname = '0')
# User-defined parameters.
RRd1x1 = Parameter(name = 'RRd1x1',
nature = 'external',
type = 'real',
value = 1.,
texname = '\\text{RRd1x1}',
lhablock = 'DSQMIX',
lhacode = [ 1, 1 ])
RRd2x2 = Parameter(name = 'RRd2x2',
nature = 'external',
type = 'real',
value = 1.,
texname = '\\text{RRd2x2}',
lhablock = 'DSQMIX',
lhacode = [ 2, 2 ])
RRd3x3 = Parameter(name = 'RRd3x3',
nature = 'external',
type = 'real',
value = 0.938737896,
texname = '\\text{RRd3x3}',
lhablock = 'DSQMIX',
lhacode = [ 3, 3 ])
RRd3x6 = Parameter(name = 'RRd3x6',
nature = 'external',
type = 'real',
value = 0.344631925,
texname = '\\text{RRd3x6}',
lhablock = 'DSQMIX',
lhacode = [ 3, 6 ])
RRd4x4 = Parameter(name = 'RRd4x4',
nature = 'external',
type = 'real',
value = 1.,
texname = '\\text{RRd4x4}',
lhablock = 'DSQMIX',
lhacode = [ 4, 4 ])
RRd5x5 = Parameter(name = 'RRd5x5',
nature = 'external',
type = 'real',
value = 1.,
texname = '\\text{RRd5x5}',
lhablock = 'DSQMIX',
lhacode = [ 5, 5 ])
RRd6x3 = Parameter(name = 'RRd6x3',
nature = 'external',
type = 'real',
value = -0.344631925,
texname = '\\text{RRd6x3}',
lhablock = 'DSQMIX',
lhacode = [ 6, 3 ])
RRd6x6 = Parameter(name = 'RRd6x6',
nature = 'external',
type = 'real',
value = 0.938737896,
texname = '\\text{RRd6x6}',
lhablock = 'DSQMIX',
lhacode = [ 6, 6 ])
alp = Parameter(name = 'alp',
nature = 'external',
type = 'real',
value = -0.11382521,
texname = '\\alpha',
lhablock = 'FRALPHA',
lhacode = [ 1 ])
RMUH = Parameter(name = 'RMUH',
nature = 'external',
type = 'real',
value = 357.680977,
texname = '\\text{RMUH}',
lhablock = 'HMIX',
lhacode = [ 1 ])
tb = Parameter(name = 'tb',
nature = 'external',
type = 'real',
value = 9.74862403,
texname = 't_b',
lhablock = 'HMIX',
lhacode = [ 2 ])
MA2 = Parameter(name = 'MA2',
nature = 'external',
type = 'real',
value = 166439.065,
texname = 'm_A^2',
lhablock = 'HMIX',
lhacode = [ 4 ])
RmD21x1 = Parameter(name = 'RmD21x1',
nature = 'external',
type = 'real',
value = 273684.674,
texname = '\\text{RmD21x1}',
lhablock = 'MSD2',
lhacode = [ 1, 1 ])
RmD22x2 = Parameter(name = 'RmD22x2',
nature = 'external',
type = 'real',
value = 273684.674,
texname = '\\text{RmD22x2}',
lhablock = 'MSD2',
lhacode = [ 2, 2 ])
RmD23x3 = Parameter(name = 'RmD23x3',
nature = 'external',
type = 'real',
value = 270261.969,
texname = '\\text{RmD23x3}',
lhablock = 'MSD2',
lhacode = [ 3, 3 ])
RmE21x1 = Parameter(name = 'RmE21x1',
nature = 'external',
type = 'real',
value = 18630.6287,
texname = '\\text{RmE21x1}',
lhablock = 'MSE2',
lhacode = [ 1, 1 ])
RmE22x2 = Parameter(name = 'RmE22x2',
nature = 'external',
type = 'real',
value = 18630.6287,
texname = '\\text{RmE22x2}',
lhablock = 'MSE2',
lhacode = [ 2, 2 ])
RmE23x3 = Parameter(name = 'RmE23x3',
nature = 'external',
type = 'real',
value = 17967.6406,
texname = '\\text{RmE23x3}',
lhablock = 'MSE2',
lhacode = [ 3, 3 ])
RmL21x1 = Parameter(name = 'RmL21x1',
nature = 'external',
type = 'real',
value = 38155.67,
texname = '\\text{RmL21x1}',
lhablock = 'MSL2',
lhacode = [ 1, 1 ])
RmL22x2 = Parameter(name = 'RmL22x2',
nature = 'external',
type = 'real',
value = 38155.67,
texname = '\\text{RmL22x2}',
lhablock = 'MSL2',
lhacode = [ 2, 2 ])
RmL23x3 = Parameter(name = 'RmL23x3',
nature = 'external',
type = 'real',
value = 37828.6769,
texname = '\\text{RmL23x3}',
lhablock = 'MSL2',
lhacode = [ 3, 3 ])
RMx1 = Parameter(name = 'RMx1',
nature = 'external',
type = 'real',
value = 101.396534,
texname = '\\text{RMx1}',
lhablock = 'MSOFT',
lhacode = [ 1 ])
RMx2 = Parameter(name = 'RMx2',
nature = 'external',
type = 'real',
value = 191.504241,
texname = '\\text{RMx2}',
lhablock = 'MSOFT',
lhacode = [ 2 ])
RMx3 = Parameter(name = 'RMx3',
nature = 'external',
type = 'real',
value = 588.263031,
texname = '\\text{RMx3}',
lhablock = 'MSOFT',
lhacode = [ 3 ])
mHd2 = Parameter(name = 'mHd2',
nature = 'external',
type = 'real',
value = 32337.4943,
texname = 'm_{H_d}^2',
lhablock = 'MSOFT',
lhacode = [ 21 ])
mHu2 = Parameter(name = 'mHu2',
nature = 'external',
type = 'real',
value = -128800.134,
texname = 'm_{H_u}^2',
lhablock = 'MSOFT',
lhacode = [ 22 ])
RmQ21x1 = Parameter(name = 'RmQ21x1',
nature = 'external',
type = 'real',
value = 299836.701,
texname = '\\text{RmQ21x1}',
lhablock = 'MSQ2',
lhacode = [ 1, 1 ])
RmQ22x2 = Parameter(name = 'RmQ22x2',
nature = 'external',
type = 'real',
value = 299836.701,
texname = '\\text{RmQ22x2}',
lhablock = 'MSQ2',
lhacode = [ 2, 2 ])
RmQ23x3 = Parameter(name = 'RmQ23x3',
nature = 'external',
type = 'real',
value = 248765.367,
texname = '\\text{RmQ23x3}',
lhablock = 'MSQ2',
lhacode = [ 3, 3 ])
RmU21x1 = Parameter(name = 'RmU21x1',
nature = 'external',
type = 'real',
value = 280382.106,
texname = '\\text{RmU21x1}',
lhablock = 'MSU2',
lhacode = [ 1, 1 ])
RmU22x2 = Parameter(name = 'RmU22x2',
nature = 'external',
type = 'real',
value = 280382.106,
texname = '\\text{RmU22x2}',
lhablock = 'MSU2',
lhacode = [ 2, 2 ])
RmU23x3 = Parameter(name = 'RmU23x3',
nature = 'external',
type = 'real',
value = 179137.072,
texname = '\\text{RmU23x3}',
lhablock = 'MSU2',
lhacode = [ 3, 3 ])
RNN1x1 = Parameter(name = 'RNN1x1',
nature = 'external',
type = 'real',
value = 0.98636443,
texname = '\\text{RNN1x1}',
lhablock = 'NMIX',
lhacode = [ 1, 1 ])
RNN1x2 = Parameter(name = 'RNN1x2',
nature = 'external',
type = 'real',
value = -0.0531103553,
texname = '\\text{RNN1x2}',
lhablock = 'NMIX',
lhacode = [ 1, 2 ])
RNN1x3 = Parameter(name = 'RNN1x3',
nature = 'external',
type = 'real',
value = 0.146433995,
texname = '\\text{RNN1x3}',
lhablock = 'NMIX',
lhacode = [ 1, 3 ])
RNN1x4 = Parameter(name = 'RNN1x4',
nature = 'external',
type = 'real',
value = -0.0531186117,
texname = '\\text{RNN1x4}',
lhablock = 'NMIX',
lhacode = [ 1, 4 ])
RNN2x1 = Parameter(name = 'RNN2x1',
nature = 'external',
type = 'real',
value = 0.0993505358,
texname = '\\text{RNN2x1}',
lhablock = 'NMIX',
lhacode = [ 2, 1 ])
RNN2x2 = Parameter(name = 'RNN2x2',
nature = 'external',
type = 'real',
value = 0.944949299,
texname = '\\text{RNN2x2}',
lhablock = 'NMIX',
lhacode = [ 2, 2 ])
RNN2x3 = Parameter(name = 'RNN2x3',
nature = 'external',
type = 'real',
value = -0.26984672,
texname = '\\text{RNN2x3}',
lhablock = 'NMIX',
lhacode = [ 2, 3 ])
RNN2x4 = Parameter(name = 'RNN2x4',
nature = 'external',
type = 'real',
value = 0.156150698,
texname = '\\text{RNN2x4}',
lhablock = 'NMIX',
lhacode = [ 2, 4 ])
RNN3x1 = Parameter(name = 'RNN3x1',
nature = 'external',
type = 'real',
value = -0.0603388002,
texname = '\\text{RNN3x1}',
lhablock = 'NMIX',
lhacode = [ 3, 1 ])
RNN3x2 = Parameter(name = 'RNN3x2',
nature = 'external',
type = 'real',
value = 0.0877004854,
texname = '\\text{RNN3x2}',
lhablock = 'NMIX',
lhacode = [ 3, 2 ])
RNN3x3 = Parameter(name = 'RNN3x3',
nature = 'external',
type = 'real',
value = 0.695877493,
texname = '\\text{RNN3x3}',
lhablock = 'NMIX',
lhacode = [ 3, 3 ])
RNN3x4 = Parameter(name = 'RNN3x4',
nature = 'external',
type = 'real',
value = 0.710226984,
texname = '\\text{RNN3x4}',
lhablock = 'NMIX',
lhacode = [ 3, 4 ])
RNN4x1 = Parameter(name = 'RNN4x1',
nature = 'external',
type = 'real',
value = -0.116507132,
texname = '\\text{RNN4x1}',
lhablock = 'NMIX',
lhacode = [ 4, 1 ])
RNN4x2 = Parameter(name = 'RNN4x2',
nature = 'external',
type = 'real',
value = 0.310739017,
texname = '\\text{RNN4x2}',
lhablock = 'NMIX',
lhacode = [ 4, 2 ])
RNN4x3 = Parameter(name = 'RNN4x3',
nature = 'external',
type = 'real',
value = 0.64922596,
texname = '\\text{RNN4x3}',
lhablock = 'NMIX',
lhacode = [ 4, 3 ])
RNN4x4 = Parameter(name = 'RNN4x4',
nature = 'external',
type = 'real',
value = -0.684377823,
texname = '\\text{RNN4x4}',
lhablock = 'NMIX',
lhacode = [ 4, 4 ])
RRl1x1 = Parameter(name = 'RRl1x1',
nature = 'external',
type = 'real',
value = 1.,
texname = '\\text{RRl1x1}',
lhablock = 'SELMIX',
lhacode = [ 1, 1 ])
RRl2x2
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nicira, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: <NAME>, Nicira Networks, Inc.
# @author: <NAME>, Nicira Networks, Inc.
# @author: <NAME>, Nicira Networks, Inc.
import hashlib
import logging
import webob.exc
from quantum.api.v2 import attributes
from quantum.api.v2 import base
from quantum.common import constants
from quantum.common import exceptions as q_exc
from quantum.common import rpc as q_rpc
from quantum.common import topics
from quantum.db import api as db
from quantum.db import db_base_plugin_v2
from quantum.db import dhcp_rpc_base
# NOTE: quota_db cannot be removed, it is for db model
from quantum.db import quota_db
from quantum.extensions import providernet as pnet
from quantum.openstack.common import cfg
from quantum.openstack.common import rpc
from quantum import policy
from quantum.plugins.nicira.nicira_nvp_plugin.common import config
from quantum.plugins.nicira.nicira_nvp_plugin.common import (exceptions
as nvp_exc)
from quantum.plugins.nicira.nicira_nvp_plugin import nicira_db
from quantum.plugins.nicira.nicira_nvp_plugin import NvpApiClient
from quantum.plugins.nicira.nicira_nvp_plugin import nvplib
from quantum.plugins.nicira.nicira_nvp_plugin import nvp_cluster
from quantum.plugins.nicira.nicira_nvp_plugin.nvp_plugin_version import (
PLUGIN_VERSION)
LOG = logging.getLogger("QuantumPlugin")
# Provider network extension - allowed network types for the NVP Plugin
class NetworkTypes:
""" Allowed provider network types for the NVP Plugin """
STT = 'stt'
GRE = 'gre'
FLAT = 'flat'
VLAN = 'vlan'
def parse_config():
"""Parse the NVP plugin configuration.
:returns: A tuple (nvp_options, clusters_options). 'nvp_options' holds the
plugin-wide options from the [NVP] section of nvp.ini; 'clusters_options' is a
list of dictionaries, one per configured cluster, with its connection parameters.
"""
nvp_options = cfg.CONF.NVP
nvp_conf = config.ClusterConfigOptions(cfg.CONF)
cluster_names = config.register_cluster_groups(nvp_conf)
nvp_conf.log_opt_values(LOG, logging.DEBUG)
clusters_options = []
for cluster_name in cluster_names:
clusters_options.append(
{'name': cluster_name,
'default_tz_uuid':
nvp_conf[cluster_name].default_tz_uuid,
'nvp_cluster_uuid':
nvp_conf[cluster_name].nvp_cluster_uuid,
'nova_zone_id':
nvp_conf[cluster_name].nova_zone_id,
'nvp_controller_connection':
nvp_conf[cluster_name].nvp_controller_connection, })
LOG.debug(_("Cluster options: %s"), clusters_options)
return nvp_options, clusters_options
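# Illustrative sketch, not taken from the original plugin: assuming an nvp.ini with a
# hypothetical cluster section like the one below, parse_config() would return one entry
# per cluster in clusters_options. The exact spelling of the connection string fields is
# an assumption made for illustration only.
#
#   [CLUSTER:example]
#   default_tz_uuid = 11111111-2222-3333-4444-555555555555
#   nvp_cluster_uuid = 66666666-7777-8888-9999-aaaaaaaaaaaa
#   nova_zone_id = zone-1
#   nvp_controller_connection = 10.0.0.2:443:admin:secret:30:10:2:2
#
# Expected shape of the second return value:
#   [{'name': 'example',
#     'default_tz_uuid': '11111111-2222-3333-4444-555555555555',
#     'nvp_cluster_uuid': '66666666-7777-8888-9999-aaaaaaaaaaaa',
#     'nova_zone_id': 'zone-1',
#     'nvp_controller_connection': ['10.0.0.2:443:admin:secret:30:10:2:2']}]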
class NVPRpcCallbacks(dhcp_rpc_base.DhcpRpcCallbackMixin):
# Set RPC API version to 1.0 by default.
RPC_API_VERSION = '1.0'
def create_rpc_dispatcher(self):
'''Get the rpc dispatcher for this manager.
If a manager would like to set an rpc API version, or support more than
one class as the target of rpc messages, override this method.
'''
return q_rpc.PluginRpcDispatcher([self])
class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
"""
NvpPluginV2 is a Quantum plugin that provides L2 Virtual Network
functionality using NVP.
"""
supported_extension_aliases = ["provider", "quotas"]
# Default controller cluster
default_cluster = None
def __init__(self, loglevel=None):
if loglevel:
logging.basicConfig(level=loglevel)
nvplib.LOG.setLevel(loglevel)
NvpApiClient.LOG.setLevel(loglevel)
self.nvp_opts, self.clusters_opts = parse_config()
self.clusters = {}
# Cache mapping nova zone ids to clusters; populated lazily by _novazone_to_cluster()
self.novazone_cluster_map = {}
for c_opts in self.clusters_opts:
# Password is guaranteed to be the same across all controllers
# in the same NVP cluster.
cluster = nvp_cluster.NVPCluster(c_opts['name'])
for controller_connection in c_opts['nvp_controller_connection']:
args = controller_connection.split(':')
try:
args.extend([c_opts['default_tz_uuid'],
c_opts['nvp_cluster_uuid'],
c_opts['nova_zone_id']])
cluster.add_controller(*args)
except Exception:
LOG.exception(_("Invalid connection parameters for "
"controller %(conn)s in cluster %(name)s"),
{'conn': controller_connection,
'name': c_opts['name']})
raise nvp_exc.NvpInvalidConnection(
conn_params=controller_connection)
api_providers = [(x['ip'], x['port'], True)
for x in cluster.controllers]
cluster.api_client = NvpApiClient.NVPApiHelper(
api_providers, cluster.user, cluster.password,
request_timeout=cluster.request_timeout,
http_timeout=cluster.http_timeout,
retries=cluster.retries,
redirects=cluster.redirects,
concurrent_connections=self.nvp_opts['concurrent_connections'],
nvp_gen_timeout=self.nvp_opts['nvp_gen_timeout'])
if len(self.clusters) == 0:
first_cluster = cluster
self.clusters[c_opts['name']] = cluster
def_cluster_name = self.nvp_opts.default_cluster_name
if def_cluster_name and def_cluster_name in self.clusters:
self.default_cluster = self.clusters[def_cluster_name]
else:
first_cluster_name = self.clusters.keys()[0]
if not def_cluster_name:
LOG.info(_("Default cluster name not specified. "
"Using first cluster:%s"), first_cluster_name)
elif def_cluster_name not in self.clusters:
LOG.warning(_("Default cluster name %(def_cluster_name)s not found. "
"Using first cluster: %(first_cluster_name)s"),
locals())
# otherwise set 1st cluster as default
self.default_cluster = self.clusters[first_cluster_name]
db.configure_db()
# Extend the fault map
self._extend_fault_map()
# Set up RPC interface for DHCP agent
self.setup_rpc()
def _extend_fault_map(self):
""" Extends the Quantum Fault Map
Exceptions specific to the NVP Plugin are mapped to standard
HTTP Exceptions
"""
base.FAULT_MAP.update({nvp_exc.NvpInvalidNovaZone:
webob.exc.HTTPBadRequest,
nvp_exc.NvpNoMorePortsException:
webob.exc.HTTPBadRequest})
def _novazone_to_cluster(self, novazone_id):
if novazone_id in self.novazone_cluster_map:
return self.novazone_cluster_map[novazone_id]
LOG.debug(_("Looking for nova zone: %s"), novazone_id)
for x in self.clusters.itervalues():
LOG.debug(_("Looking for nova zone %(novazone_id)s in "
"cluster: %(x)s"), locals())
if x.zone == str(novazone_id):
self.novazone_cluster_map[x.zone] = x
return x
LOG.error(_("Unable to find cluster config entry for nova zone: %s"),
novazone_id)
raise nvp_exc.NvpInvalidNovaZone(nova_zone=novazone_id)
def _find_target_cluster(self, resource):
""" Return cluster where configuration should be applied
If the resource being configured has a parameter expressing
the zone id (nova_id), then select corresponding cluster,
otherwise return default cluster.
"""
if 'nova_id' in resource:
return self._novazone_to_cluster(resource['nova_id'])
else:
return self.default_cluster
def _check_provider_view_auth(self, context, network):
return policy.check(context,
"extension:provider_network:view",
network)
def _enforce_provider_set_auth(self, context, network):
return policy.enforce(context,
"extension:provider_network:set",
network)
def _handle_provider_create(self, context, attrs):
# NOTE(salvatore-orlando): This method has been borrowed from
# the Open vSwitch plugin, although changed to match NVP specifics.
network_type = attrs.get(pnet.NETWORK_TYPE)
physical_network = attrs.get(pnet.PHYSICAL_NETWORK)
segmentation_id = attrs.get(pnet.SEGMENTATION_ID)
network_type_set = attributes.is_attr_set(network_type)
physical_network_set = attributes.is_attr_set(physical_network)
segmentation_id_set = attributes.is_attr_set(segmentation_id)
if not (network_type_set or physical_network_set or
segmentation_id_set):
return
# Authorize before exposing plugin details to client
self._enforce_provider_set_auth(context, attrs)
err_msg = None
if not network_type_set:
err_msg = _("%s required") % pnet.NETWORK_TYPE
elif network_type in (NetworkTypes.GRE, NetworkTypes.STT,
NetworkTypes.FLAT):
if segmentation_id_set:
err_msg = _("Segmentation ID cannot be specified with "
"flat network type")
elif network_type == NetworkTypes.VLAN:
if not segmentation_id_set:
err_msg = _("Segmentation ID must be specified with "
"vlan network type")
elif (segmentation_id_set and
(segmentation_id < 1 or segmentation_id > 4094)):
err_msg = _("%s out of range (1 to 4094)") % segmentation_id
else:
# Verify segment is not already allocated
binding = nicira_db.get_network_binding_by_vlanid(
context.session, segmentation_id)
if binding:
raise q_exc.VlanIdInUse(vlan_id=segmentation_id,
physical_network=physical_network)
else:
err_msg = _("%(net_type_param)s %(net_type_value)s not "
"supported") % {'net_type_param': pnet.NETWORK_TYPE,
'net_type_value': network_type}
if err_msg:
raise q_exc.InvalidInput(error_message=err_msg)
# TODO(salvatore-orlando): Validate transport zone uuid
# which should be specified in physical_network
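# Illustrative sketch (not from the original source): the attrs dict validated above
# would look roughly like this for a VLAN provider network, using the provider
# extension keys exposed through pnet; the literal values are assumptions.
#   attrs = {pnet.NETWORK_TYPE: 'vlan',
#            pnet.PHYSICAL_NETWORK: '<transport-zone-uuid>',
#            pnet.SEGMENTATION_ID: 1000}
# For 'flat', 'gre' and 'stt' networks segmentation_id must be left unset; for 'vlan'
# it must be present, lie in the range 1-4094, and not already be bound elsewhere.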
def _extend_network_dict_provider(self, context, network, binding=None):
if self._check_provider_view_auth(context, network):
if not binding:
binding = nicira_db.get_network_binding(context.session,
network['id'])
# With NVP plugin 'normal' overlay networks will have no binding
# TODO(salvatore-orlando) make sure users can specify a distinct
# tz_uuid as 'provider network' for STT net type
if binding:
network[pnet.NETWORK_TYPE] = binding.binding_type
network[pnet.PHYSICAL_NETWORK] = binding.tz_uuid
network[pnet.SEGMENTATION_ID] = binding.vlan_id
def _handle_lswitch_selection(self, cluster, network,
network_binding, max_ports,
allow_extra_lswitches):
lswitches = nvplib.get_lswitches(cluster, network.id)
try:
# TODO find main_ls too!
return [ls for ls in lswitches
if (ls['_relations']['LogicalSwitchStatus']
['lport_count'] < max_ports)].pop(0)
except IndexError:
# Too bad, no switch available
LOG.debug(_("No switch has available ports (%d checked)"),
len(lswitches))
if allow_extra_lswitches:
main_ls = [ls for ls in lswitches if ls['uuid'] == network.id]
tag_dict = dict((x['scope'], x['tag']) for x in main_ls[0]['tags'])
if 'multi_lswitch' not in tag_dict:
nvplib.update_lswitch(cluster,
main_ls[0]['uuid'],
main_ls[0]['display_name'],
network['tenant_id'],
tags=[{'tag': 'True',
'scope': 'multi_lswitch'}])
selected_lswitch = nvplib.create_lswitch(
cluster, network.tenant_id,
"%s-ext-%s" % (network.name, len(lswitches)),
network_binding.binding_type,
network_binding.tz_uuid,
network_binding.vlan_id,
network.id)
return selected_lswitch
else:
LOG.error(_("Maximum number of logical ports reached for "
"logical network %s"), network.id)
raise nvp_exc.NvpNoMorePortsException(network=network.id)
def setup_rpc(self):
# RPC support for dhcp
self.topic = topics.PLUGIN
self.conn = rpc.create_connection(new=True)
self.dispatcher = NVPRpcCallbacks().create_rpc_dispatcher()
self.conn.create_consumer(self.topic, self.dispatcher,
fanout=False)
# Consume from all consumers in a thread
self.conn.consume_in_thread()
def get_all_networks(self, tenant_id, **kwargs):
networks = []
for c in self.clusters:
networks.extend(nvplib.get_all_networks(c, tenant_id, networks))
LOG.debug(_("get_all_networks() completed for tenant "
"%(tenant_id)s: %(networks)s"), locals())
return networks
def create_network(self, context, network):
net_data = network['network'].copy()
# Process the provider network extension
self._handle_provider_create(context, net_data)
# Replace ATTR_NOT_SPECIFIED with None before sending to NVP
for attr, value in network['network'].iteritems():
if value is attributes.ATTR_NOT_SPECIFIED:
net_data[attr] = None
# FIXME(arosen) implement admin_state_up = False in NVP
if net_data['admin_state_up'] is False:
LOG.warning(_("Network with admin_state_up=False are not yet "
"supported by this plugin. Ignoring setting for "
"network %s"), net_data.get('name', '<unknown>'))
tenant_id = self._get_tenant_id_for_create(context, net_data)
target_cluster = self._find_target_cluster(net_data)
lswitch = nvplib.create_lswitch(target_cluster,
tenant_id,
net_data.get('name'),
net_data.get(pnet.NETWORK_TYPE),
net_data.get(pnet.PHYSICAL_NETWORK),
net_data.get(pnet.SEGMENTATION_ID))
network['network']['id'] = lswitch['uuid']
with context.session.begin(subtransactions=True):
new_net = super(NvpPluginV2, self).create_network(context,
network)
if net_data.get(pnet.NETWORK_TYPE):
net_binding = nicira_db.add_network_binding(
context.session, new_net['id'],
net_data.get(pnet.NETWORK_TYPE),
net_data.get(pnet.PHYSICAL_NETWORK),
net_data.get(pnet.SEGMENTATION_ID))
self._extend_network_dict_provider(context, new_net,
net_binding)
return new_net
def delete_network(self, context, id):
super(NvpPluginV2, self).delete_network(context, id)
# FIXME(salvatore-orlando): Failures here might lead NVP
# and quantum state to diverge
pairs = self._get_lswitch_cluster_pairs(id, context.tenant_id)
for (cluster, switches) in pairs:
nvplib.delete_networks(cluster, id, switches)
LOG.debug(_("delete_network completed for tenant: %s"),
context.tenant_id)
def _get_lswitch_cluster_pairs(self, netw_id, tenant_id):
"""Figure out the set of lswitches on each cluster that maps to this
network id"""
pairs = []
for c in self.clusters.itervalues():
lswitches = []
try:
results = nvplib.get_lswitches(c, netw_id)
lswitches.extend([ls['uuid'] for ls in results])
except q_exc.NetworkNotFound:
continue
pairs.append((c, lswitches))
if len(pairs) == 0:
raise q_exc.NetworkNotFound(net_id=netw_id)
LOG.debug(_("Returning pairs for network: %s"), pairs)
return pairs
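# Illustrative return value (made-up identifiers): one tuple per cluster that still
# knows about the network, e.g.
#   [(<NVPCluster 'cluster1'>, ['ls-uuid-a', 'ls-uuid-b'])]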
value of the `status_detail` property.
"""
self._status_detail = value
@property
def power_management(self):
"""
Returns the value of the `power_management` property.
"""
return self._power_management
@power_management.setter
def power_management(self, value):
"""
Sets the value of the `power_management` property.
"""
Struct._check_type('power_management', value, PowerManagement)
self._power_management = value
@property
def nics(self):
"""
Returns the value of the `nics` property.
"""
return self._nics
@nics.setter
def nics(self, value):
"""
Sets the value of the `nics` property.
"""
self._nics = value
@property
def device_passthrough(self):
"""
Returns the value of the `device_passthrough` property.
"""
return self._device_passthrough
@device_passthrough.setter
def device_passthrough(self, value):
"""
Sets the value of the `device_passthrough` property.
"""
Struct._check_type('device_passthrough', value, HostDevicePassthrough)
self._device_passthrough = value
@property
def unmanaged_networks(self):
"""
Returns the value of the `unmanaged_networks` property.
"""
return self._unmanaged_networks
@unmanaged_networks.setter
def unmanaged_networks(self, value):
"""
Sets the value of the `unmanaged_networks` property.
"""
self._unmanaged_networks = value
@property
def protocol(self):
"""
Returns the value of the `protocol` property.
"""
return self._protocol
@protocol.setter
def protocol(self, value):
"""
Sets the value of the `protocol` property.
"""
Struct._check_type('protocol', value, HostProtocol)
self._protocol = value
@property
def root_password(self):
"""
Returns the value of the `root_password` property.
"""
return self._root_password
@root_password.setter
def root_password(self, value):
"""
Sets the value of the `root_password` property.
"""
self._root_password = value
@property
def network_attachments(self):
"""
Returns the value of the `network_attachments` property.
"""
return self._network_attachments
@network_attachments.setter
def network_attachments(self, value):
"""
Sets the value of the `network_attachments` property.
"""
self._network_attachments = value
@property
def max_scheduling_memory(self):
"""
Returns the value of the `max_scheduling_memory` property.
"""
return self._max_scheduling_memory
@max_scheduling_memory.setter
def max_scheduling_memory(self, value):
"""
Sets the value of the `max_scheduling_memory` property.
"""
self._max_scheduling_memory = value
@property
def display(self):
"""
Returns the value of the `display` property.
"""
return self._display
@display.setter
def display(self, value):
"""
Sets the value of the `display` property.
"""
Struct._check_type('display', value, Display)
self._display = value
@property
def auto_numa_status(self):
"""
Returns the value of the `auto_numa_status` property.
"""
return self._auto_numa_status
@auto_numa_status.setter
def auto_numa_status(self, value):
"""
Sets the value of the `auto_numa_status` property.
"""
Struct._check_type('auto_numa_status', value, AutoNumaStatus)
self._auto_numa_status = value
@property
def cpu(self):
"""
Returns the value of the `cpu` property.
"""
return self._cpu
@cpu.setter
def cpu(self, value):
"""
Sets the value of the `cpu` property.
"""
Struct._check_type('cpu', value, Cpu)
self._cpu = value
@property
def external_status(self):
"""
Returns the value of the `external_status` property.
"""
return self._external_status
@external_status.setter
def external_status(self, value):
"""
Sets the value of the `external_status` property.
"""
Struct._check_type('external_status', value, ExternalStatus)
self._external_status = value
@property
def agents(self):
"""
Returns the value of the `agents` property.
"""
return self._agents
@agents.setter
def agents(self, value):
"""
Sets the value of the `agents` property.
"""
self._agents = value
@property
def spm(self):
"""
Returns the value of the `spm` property.
"""
return self._spm
@spm.setter
def spm(self, value):
"""
Sets the value of the `spm` property.
"""
Struct._check_type('spm', value, Spm)
self._spm = value
@property
def libvirt_version(self):
"""
Returns the value of the `libvirt_version` property.
"""
return self._libvirt_version
@libvirt_version.setter
def libvirt_version(self, value):
"""
Sets the value of the `libvirt_version` property.
"""
Struct._check_type('libvirt_version', value, Version)
self._libvirt_version = value
@property
def iscsi(self):
"""
Returns the value of the `iscsi` property.
"""
return self._iscsi
@iscsi.setter
def iscsi(self, value):
"""
Sets the value of the `iscsi` property.
"""
Struct._check_type('iscsi', value, IscsiDetails)
self._iscsi = value
@property
def cpu_units(self):
"""
Returns the value of the `cpu_units` property.
"""
return self._cpu_units
@cpu_units.setter
def cpu_units(self, value):
"""
Sets the value of the `cpu_units` property.
"""
self._cpu_units = value
@property
def status(self):
"""
Returns the value of the `status` property.
"""
return self._status
@status.setter
def status(self, value):
"""
Sets the value of the `status` property.
"""
Struct._check_type('status', value, HostStatus)
self._status = value
@property
def hardware_information(self):
"""
Returns the value of the `hardware_information` property.
"""
return self._hardware_information
@hardware_information.setter
def hardware_information(self, value):
"""
Sets the value of the `hardware_information` property.
"""
Struct._check_type('hardware_information', value, HardwareInformation)
self._hardware_information = value
@property
def katello_errata(self):
"""
Returns the value of the `katello_errata` property.
"""
return self._katello_errata
@katello_errata.setter
def katello_errata(self, value):
"""
Sets the value of the `katello_errata` property.
"""
self._katello_errata = value
@property
def memory(self):
"""
Returns the value of the `memory` property.
"""
return self._memory
@memory.setter
def memory(self, value):
"""
Sets the value of the `memory` property.
"""
self._memory = value
@property
def cluster(self):
"""
Returns the value of the `cluster` property.
"""
return self._cluster
@cluster.setter
def cluster(self, value):
"""
Sets the value of the `cluster` property.
"""
Struct._check_type('cluster', value, Cluster)
self._cluster = value
@property
def transparent_huge_pages(self):
"""
Returns the value of the `transparent_huge_pages` property.
"""
return self._transparent_huge_pages
@transparent_huge_pages.setter
def transparent_huge_pages(self, value):
"""
Sets the value of the `transparent_huge_pages` property.
"""
Struct._check_type('transparent_huge_pages', value, TransparentHugePages)
self._transparent_huge_pages = value
@property
def certificate(self):
"""
Returns the value of the `certificate` property.
"""
return self._certificate
@certificate.setter
def certificate(self, value):
"""
Sets the value of the `certificate` property.
"""
Struct._check_type('certificate', value, Certificate)
self._certificate = value
@property
def type(self):
"""
Returns the value of the `type` property.
"""
return self._type
@type.setter
def type(self, value):
"""
Sets the value of the `type` property.
"""
Struct._check_type('type', value, HostType)
self._type = value
@property
def external_host_provider(self):
"""
Returns the value of the `external_host_provider` property.
"""
return self._external_host_provider
@external_host_provider.setter
def external_host_provider(self, value):
"""
Sets the value of the `external_host_provider` property.
"""
Struct._check_type('external_host_provider', value, ExternalHostProvider)
self._external_host_provider = value
@property
def vgpu_placement(self):
"""
Returns the value of the `vgpu_placement` property.
"""
return self._vgpu_placement
@vgpu_placement.setter
def vgpu_placement(self, value):
"""
Sets the value of the `vgpu_placement` property.
"""
Struct._check_type('vgpu_placement', value, VgpuPlacement)
self._vgpu_placement = value
@property
def network_operation_in_progress(self):
"""
Returns the value of the `network_operation_in_progress` property.
"""
return self._network_operation_in_progress
@network_operation_in_progress.setter
def network_operation_in_progress(self, value):
"""
Sets the value of the `network_operation_in_progress` property.
"""
self._network_operation_in_progress = value
class HostCpuUnit(Identified):
def __init__(
self,
comment=None,
core_id=None,
cpu_id=None,
description=None,
id=None,
name=None,
runs_vdsm=None,
socket_id=None,
vms=None,
):
super(HostCpuUnit, self).__init__(
comment=comment,
description=description,
id=id,
name=name,
)
self.core_id = core_id
self.cpu_id = cpu_id
self.runs_vdsm = runs_vdsm
self.socket_id = socket_id
self.vms = vms
@property
def socket_id(self):
"""
Returns the value of the `socket_id` property.
"""
return self._socket_id
@socket_id.setter
def socket_id(self, value):
"""
Sets the value of the `socket_id` property.
"""
self._socket_id = value
@property
def runs_vdsm(self):
"""
Returns the value of the `runs_vdsm` property.
"""
return self._runs_vdsm
@runs_vdsm.setter
def runs_vdsm(self, value):
"""
Sets the value of the `runs_vdsm` property.
"""
self._runs_vdsm = value
@property
def core_id(self):
"""
Returns the value of the `core_id` property.
"""
return self._core_id
@core_id.setter
def core_id(self, value):
"""
Sets the value of the `core_id` property.
"""
self._core_id = value
@property
def cpu_id(self):
"""
Returns the value of the `cpu_id` property.
"""
return self._cpu_id
@cpu_id.setter
def cpu_id(self, value):
"""
Sets the value of the `cpu_id` property.
"""
self._cpu_id = value
@property
def vms(self):
"""
Returns the value of the `vms` property.
"""
return self._vms
@vms.setter
def vms(self, value):
"""
Sets the value of the `vms` property.
"""
self._vms = value
class HostDevice(Identified):
def __init__(
self,
capability=None,
comment=None,
description=None,
driver=None,
host=None,
id=None,
iommu_group=None,
m_dev_types=None,
name=None,
parent_device=None,
physical_function=None,
placeholder=None,
product=None,
vendor=None,
virtual_functions=None,
vm=None,
):
super(HostDevice, self).__init__(
comment=comment,
description=description,
id=id,
name=name,
)
self.capability = capability
self.driver = driver
self.host = host
self.iommu_group = iommu_group
self.m_dev_types = m_dev_types
self.parent_device = parent_device
self.physical_function = physical_function
self.placeholder = placeholder
self.product = product
self.vendor = vendor
self.virtual_functions = virtual_functions
self.vm = vm
@property
def product(self):
"""
Returns the value of the `product` property.
"""
return self._product
@product.setter
def product(self, value):
"""
Sets the value of the `product` property.
"""
Struct._check_type('product', value, Product)
self._product = value
@property
def vm(self):
"""
Returns the value of the `vm` property.
"""
return self._vm
@vm.setter
def vm(self, value):
"""
Sets the value of the `vm` property.
"""
Struct._check_type('vm', value, Vm)
self._vm = value
@property
def host(self):
"""
Returns the value of the `host` property.
"""
return self._host
@host.setter
def host(self, value):
"""
Sets the value of the `host` property.
"""
Struct._check_type('host', value, Host)
self._host = value
@property
def virtual_functions(self):
"""
Returns the value of the `virtual_functions` property.
"""
return self._virtual_functions
@virtual_functions.setter
def virtual_functions(self, value):
"""
Sets the value of the `virtual_functions` property.
"""
self._virtual_functions = value
linger_rate
else:
run_time = run_time_base
rate_func = base_rate
opt_line_anim = ShowCreation(colored_line) if draw_line else empty_animation
line_draw_anim = AnimationGroup(
opt_line_anim,
walker_anim,
run_time = run_time,
rate_func = rate_func)
return (line_draw_anim, rebased_winder(1))
wind_so_far = 0
anim = empty_animation
sides = [
rect.get_top(),
rect.get_right(),
rect.get_bottom(),
rect.get_left()
]
for (i, (start, end)) in enumerate(sides):
(next_anim, wind_so_far) = draw_line_return_wind(start, end, wind_so_far,
should_linger = i == len(sides) - 1,
draw_line = i in sides_to_draw)
anim = Succession(anim, next_anim)
if self.show_cursor:
cursor = cursor_base.copy()
center_x, center_y = rect.get_center()
width = rect.get_width()
height = rect.get_height()
cursor.move_to(num_plane.coords_to_point(center_x, center_y) + 10 * IN)
cursor.scale(min(width, height))
# Do a quick FadeIn, wait, and quick FadeOut on the cursor, matching rectangle-drawing time
cursor_anim = Succession(
FadeIn(cursor, run_time = 0.1),
Animation(cursor, run_time = 3.8),
FadeOut(cursor, run_time = 0.1)
)
anim = AnimationGroup(anim, cursor_anim)
override_wind = head(manual_wind_override)
if override_wind is not None:
total_wind = override_wind
else:
total_wind = round(wind_so_far)
if total_wind == 0:
coords = [
rect.get_top_left(),
rect.get_top_right(),
rect.get_bottom_right(),
rect.get_bottom_left()
]
points = np.array([num_plane.coords_to_point(x, y) for (x, y) in coords]) + 3 * IN
# TODO: Maybe use diagonal lines or something to fill in rectangles indicating
# their "Nothing here" status?
# Or draw a large X or something
fill_rect = Polygon(*points, fill_opacity = 0.8, color = GREY_D)
return EquationSolver2dNode(Succession(anim, FadeIn(fill_rect)))
else:
(sub_rect1, sub_rect2) = rect.splits_on_dim(dim_to_split)
if dim_to_split == 0:
sub_rect_and_sides = [(sub_rect1, 1), (sub_rect2, 3)]
else:
sub_rect_and_sides = [(sub_rect1, 2), (sub_rect2, 0)]
children = [
Animate2dSolver(
cur_depth = cur_depth + 1,
rect = sub_rect,
dim_to_split = 1 - dim_to_split,
sides_to_draw = [side_to_draw],
manual_wind_override = child(manual_wind_override, index)
)
for (index, (sub_rect, side_to_draw)) in enumerate(sub_rect_and_sides)
]
mid_line_coords = rect.split_line_on_dim(dim_to_split)
mid_line_points = [num_plane.coords_to_point(x, y) + 2 * IN for (x, y) in mid_line_coords]
mid_line = DashedLine(*mid_line_points)
return EquationSolver2dNode(Succession(anim, ShowCreation(mid_line)), children)
lower_x = self.initial_lower_x
upper_x = self.initial_upper_x
lower_y = self.initial_lower_y
upper_y = self.initial_upper_y
x_interval = (lower_x, upper_x)
y_interval = (lower_y, upper_y)
rect = RectangleData(x_interval, y_interval)
print("Starting to compute anim")
node = Animate2dSolver(
cur_depth = 0,
rect = rect,
dim_to_split = 0,
sides_to_draw = [],
manual_wind_override = self.manual_wind_override
)
print("Done computing anim")
if self.display_in_parallel:
anim = node.display_in_parallel()
elif self.display_in_bfs:
anim = node.display_in_bfs()
else:
anim = node.display_in_series()
# Keep timing details here in sync with details above
rect_points = [
rect.get_top_left(),
rect.get_top_right(),
rect.get_bottom_right(),
rect.get_bottom_left(),
]
border = Polygon(*[num_plane.coords_to_point(*x) + IN for x in rect_points])
match_style_with_bg(border, base_line)
rect_time_without_linger = 4 * run_time_base
rect_time_with_linger = 3 * run_time_base + run_time_with_lingering
def rect_rate(alpha):
time_in = alpha * rect_time_with_linger
if time_in < 3 * run_time_base:
return fdiv(time_in, 4 * run_time_base)
else:
time_in_last_leg = time_in - 3 * run_time_base
alpha_in_last_leg = fdiv(time_in_last_leg, run_time_with_lingering)
return interpolate(0.75, 1, linger_rate(alpha_in_last_leg))
border_anim = ShowCreation(
border,
run_time = rect_time_with_linger,
rate_func = rect_rate
)
print("About to do the big Play; for reference, the current time is ", time.strftime("%H:%M:%S"))
if self.use_separate_plays:
node.play_in_bfs(self, border_anim)
else:
self.play(anim, border_anim)
print("All done; for reference, the current time is ", time.strftime("%H:%M:%S"))
self.wait()
# TODO: Perhaps have option for bullets (pulses) to fade out and in at ends of line, instead of
# jarringly popping out and in?
#
# TODO: Perhaps have bullets change color corresponding to a function of their coordinates?
# This could involve some merging of functionality with PiWalker
class LinePulser(ContinualAnimation):
def __init__(self, line, bullet_template, num_bullets, pulse_time, output_func = None, **kwargs):
self.line = line
self.num_bullets = num_bullets
self.pulse_time = pulse_time
self.bullets = [bullet_template.copy() for i in range(num_bullets)]
self.output_func = output_func
ContinualAnimation.__init__(self, VGroup(*self.bullets), **kwargs)
def update_mobject(self, dt):
alpha = self.external_time % self.pulse_time
start = self.line.get_start()
end = self.line.get_end()
for i in range(self.num_bullets):
position = interpolate(start, end,
fdiv((i + alpha),(self.num_bullets)))
self.bullets[i].move_to(position)
if self.output_func:
position_2d = (position[0], position[1])
rev = point_to_rev(self.output_func(position_2d))
color = rev_to_color(rev)
self.bullets[i].set_color(color)
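# Illustrative usage sketch (assumption, not from the original scenes): inside a
# Scene.construct() one might attach a pulser to a line like so; example_plane_func
# is a hypothetical map from 2d input points to 2d output points.
#   pulser = LinePulser(line=Line(ORIGIN, RIGHT), bullet_template=Dot(radius=0.05),
#                       num_bullets=4, pulse_time=1, output_func=example_plane_func)
#   self.add(pulser)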
class ArrowCircleTest(Scene):
def construct(self):
circle_radius = 3
circle = Circle(radius = circle_radius, color = WHITE)
self.add(circle)
base_arrow = Arrow(circle_radius * 0.7 * RIGHT, circle_radius * 1.3 * RIGHT)
def rev_rotate(x, revs):
x.rotate(revs * TAU, about_point = ORIGIN)
x.set_color(rev_to_color(revs))
return x
num_arrows = 8 * 3
# 0.5 - fdiv below so as to get a clockwise rotation from left
arrows = [rev_rotate(base_arrow.copy(), 0.5 - (fdiv(i, num_arrows))) for i in range(num_arrows)]
arrows_vgroup = VGroup(*arrows)
self.play(ShowCreation(arrows_vgroup), run_time = 2.5, rate_func=linear)
self.wait()
class FuncRotater(Animation):
CONFIG = {
"rev_func" : lambda x : x, # Func from alpha to CCW revolutions,
}
# Perhaps abstract this out into an "Animation updating from original object" class
def interpolate_submobject(self, submobject, starting_submobject, alpha):
submobject.set_points(starting_submobject.get_points())
def interpolate_mobject(self, alpha):
Animation.interpolate_mobject(self, alpha)
angle_revs = self.rev_func(alpha)
self.mobject.rotate(
angle_revs * TAU,
about_point = ORIGIN
)
self.mobject.set_color(rev_to_color(angle_revs))
class TestRotater(Scene):
def construct(self):
test_line = Line(ORIGIN, RIGHT)
self.play(FuncRotater(
test_line,
rev_func = lambda x : x % 0.25,
run_time = 10))
# TODO: Be careful about clockwise vs. counterclockwise convention throughout!
# Make sure this is correct everywhere in resulting video.
class OdometerScene(ColorMappedObjectsScene):
CONFIG = {
# "func" : lambda p : 100 * p # Full coloring, essentially
"rotate_func" : lambda x : 2 * np.sin(2 * x * TAU), # This is given in terms of CW revs
"run_time" : 40,
"dashed_line_angle" : None,
"biased_display_start" : None,
"pure_odometer_background" : False
}
def construct(self):
ColorMappedObjectsScene.construct(self)
radius = ODOMETER_RADIUS
circle = Circle(center = ORIGIN, radius = radius)
circle.stroke_width = ODOMETER_STROKE_WIDTH
circle.color_using_background_image(self.background_image_file)
self.add(circle)
if self.pure_odometer_background:
# Just display this background circle, for compositing in Premiere with PiWalker odometers
self.wait()
return
if self.dashed_line_angle:
dashed_line = DashedLine(ORIGIN, radius * RIGHT)
# Clockwise rotation
dashed_line.rotate(-self.dashed_line_angle * TAU, about_point = ORIGIN)
self.add(dashed_line)
num_display = DecimalNumber(0, include_background_rectangle = False)
num_display.move_to(2 * DOWN)
caption = TexText("turns clockwise")
caption.next_to(num_display, DOWN)
self.add(caption)
display_val_bias = 0
if self.biased_display_start is not None:
display_val_bias = self.biased_display_start - self.rotate_func(0)
display_func = lambda alpha : self.rotate_func(alpha) + display_val_bias
base_arrow = Arrow(ORIGIN, RIGHT, buff = 0)
self.play(
FuncRotater(base_arrow, rev_func = lambda x : -self.rotate_func(x)),
ChangingDecimal(num_display, display_func),
run_time = self.run_time,
rate_func=linear)
#############
# Above are mostly general tools; here, we list, in order, finished or near-finished scenes
class FirstSqrtScene(EquationSolver1d):
CONFIG = {
"x_min" : 0,
"x_max" : 2.5,
"y_min" : 0,
"y_max" : 2.5**2,
"graph_origin" : 2.5*DOWN + 5.5*LEFT,
"x_axis_width" : 12,
"zoom_factor" : 3,
"zoomed_canvas_center" : 2.25 * UP + 1.75 * LEFT,
"func" : lambda x : x**2,
"targetX" : np.sqrt(2),
"targetY" : 2,
"initial_lower_x" : 1,
"initial_upper_x" : 2,
"num_iterations" : 5,
"iteration_at_which_to_start_zoom" : 3,
"graph_label" : "y = x^2",
"show_target_line" : True,
"x_tick_frequency" : 0.25
}
class TestFirstSqrtScene(FirstSqrtScene):
CONFIG = {
"num_iterations" : 1,
}
FirstSqrtSceneConfig = FirstSqrtScene.CONFIG
shiftVal = FirstSqrtSceneConfig["targetY"]
class SecondSqrtScene(FirstSqrtScene):
CONFIG = {
"graph_label" : FirstSqrtSceneConfig["graph_label"] + " - " + str(shiftVal),
"show_y_as_deviation" : True,
}
class TestSecondSqrtScene(SecondSqrtScene):
CONFIG = {
"num_iterations" : 1
}
class GuaranteedZeroScene(SecondSqrtScene):
CONFIG = {
# Manual config values, not automatically synced to anything above
"initial_lower_x" : 1.75,
"initial_upper_x" : 2
}
class TestGuaranteedZeroScene(GuaranteedZeroScene):
CONFIG = {
"num_iterations" : 1
}
# TODO: Pi creatures intrigued
class RewriteEquation(Scene):
def construct(self):
# Can maybe use get_center() to perfectly center Groups before and after transform
f_old = Tex("f(x)")
f_new = f_old.copy()
equals_old = Tex("=")
equals_old_2 = equals_old.copy()
equals_new = equals_old.copy()
g_old = Tex("g(x)")
g_new = g_old.copy()
minus_new = Tex("-")
zero_new = Tex("0")
f_old.next_to(equals_old, LEFT)
g_old.next_to(equals_old, RIGHT)
minus_new.next_to(g_new, LEFT)
f_new.next_to(minus_new, LEFT)
equals_new.next_to(g_new, RIGHT)
zero_new.next_to(equals_new, RIGHT)
# where_old = TexText("Where does ")
# where_old.next_to(f_old, LEFT)
# where_new = where_old.copy()
# where_new.next_to(f_new, LEFT)
# qmark_old = TexText("?")
# qmark_old.next_to(g_old, RIGHT)
# qmark_new = qmark_old.copy()
# qmark_new.next_to(zero_new, RIGHT)
self.add(f_old, equals_old, equals_old_2, g_old) #, where_old, qmark_old)
self.wait()
self.play(
ReplacementTransform(f_old, f_new),
ReplacementTransform(equals_old, equals_new),
ReplacementTransform(g_old, g_new),
ReplacementTransform(equals_old_2, minus_new),
ShowCreation(zero_new),
# ReplacementTransform(where_old, where_new),
# ReplacementTransform(qmark_old, qmark_new),
)
self.wait()
class SignsExplanation(Scene):
def construct(self):
num_line = NumberLine()
largest_num = 10
num_line.add_numbers(*list(range(-largest_num, largest_num + 1)))
self.add(num_line)
self.wait()
pos_num = 3
neg_num = -pos_num
pos_arrow = Arrow(
num_line.number_to_point(0),
num_line.number_to_point(pos_num),
buff = 0,
color = positive_color)
neg_arrow = Arrow(
num_line.number_to_point(0),
num_line.number_to_point(neg_num),
buff = 0,
color = negative_color)
plus_sign = Tex("+", fill_color = positive_color)
minus_sign = Tex("-", fill_color = negative_color)
plus_sign.next_to(pos_arrow, UP)
minus_sign.next_to(neg_arrow, UP)
#num_line.add_numbers(pos_num)
self.play(ShowCreation(pos_arrow), FadeIn(plus_sign))
#num_line.add_numbers(neg_num)
self.play(ShowCreation(neg_arrow), FadeIn(minus_sign))
class VectorField(Scene):
CONFIG = {
"func" | |
# Exit a parse tree produced by PlSqlParser#dependent_handling_clause.
def exitDependent_handling_clause(self, ctx:PlSqlParser.Dependent_handling_clauseContext):
pass
# Enter a parse tree produced by PlSqlParser#dependent_exceptions_part.
def enterDependent_exceptions_part(self, ctx:PlSqlParser.Dependent_exceptions_partContext):
pass
# Exit a parse tree produced by PlSqlParser#dependent_exceptions_part.
def exitDependent_exceptions_part(self, ctx:PlSqlParser.Dependent_exceptions_partContext):
pass
# Enter a parse tree produced by PlSqlParser#create_type.
def enterCreate_type(self, ctx:PlSqlParser.Create_typeContext):
pass
# Exit a parse tree produced by PlSqlParser#create_type.
def exitCreate_type(self, ctx:PlSqlParser.Create_typeContext):
pass
# Enter a parse tree produced by PlSqlParser#type_definition.
def enterType_definition(self, ctx:PlSqlParser.Type_definitionContext):
pass
# Exit a parse tree produced by PlSqlParser#type_definition.
def exitType_definition(self, ctx:PlSqlParser.Type_definitionContext):
pass
# Enter a parse tree produced by PlSqlParser#object_type_def.
def enterObject_type_def(self, ctx:PlSqlParser.Object_type_defContext):
pass
# Exit a parse tree produced by PlSqlParser#object_type_def.
def exitObject_type_def(self, ctx:PlSqlParser.Object_type_defContext):
pass
# Enter a parse tree produced by PlSqlParser#object_as_part.
def enterObject_as_part(self, ctx:PlSqlParser.Object_as_partContext):
pass
# Exit a parse tree produced by PlSqlParser#object_as_part.
def exitObject_as_part(self, ctx:PlSqlParser.Object_as_partContext):
pass
# Enter a parse tree produced by PlSqlParser#object_under_part.
def enterObject_under_part(self, ctx:PlSqlParser.Object_under_partContext):
pass
# Exit a parse tree produced by PlSqlParser#object_under_part.
def exitObject_under_part(self, ctx:PlSqlParser.Object_under_partContext):
pass
# Enter a parse tree produced by PlSqlParser#nested_table_type_def.
def enterNested_table_type_def(self, ctx:PlSqlParser.Nested_table_type_defContext):
pass
# Exit a parse tree produced by PlSqlParser#nested_table_type_def.
def exitNested_table_type_def(self, ctx:PlSqlParser.Nested_table_type_defContext):
pass
# Enter a parse tree produced by PlSqlParser#sqlj_object_type.
def enterSqlj_object_type(self, ctx:PlSqlParser.Sqlj_object_typeContext):
pass
# Exit a parse tree produced by PlSqlParser#sqlj_object_type.
def exitSqlj_object_type(self, ctx:PlSqlParser.Sqlj_object_typeContext):
pass
# Enter a parse tree produced by PlSqlParser#type_body.
def enterType_body(self, ctx:PlSqlParser.Type_bodyContext):
pass
# Exit a parse tree produced by PlSqlParser#type_body.
def exitType_body(self, ctx:PlSqlParser.Type_bodyContext):
pass
# Enter a parse tree produced by PlSqlParser#type_body_elements.
def enterType_body_elements(self, ctx:PlSqlParser.Type_body_elementsContext):
pass
# Exit a parse tree produced by PlSqlParser#type_body_elements.
def exitType_body_elements(self, ctx:PlSqlParser.Type_body_elementsContext):
pass
# Enter a parse tree produced by PlSqlParser#map_order_func_declaration.
def enterMap_order_func_declaration(self, ctx:PlSqlParser.Map_order_func_declarationContext):
pass
# Exit a parse tree produced by PlSqlParser#map_order_func_declaration.
def exitMap_order_func_declaration(self, ctx:PlSqlParser.Map_order_func_declarationContext):
pass
# Enter a parse tree produced by PlSqlParser#subprog_decl_in_type.
def enterSubprog_decl_in_type(self, ctx:PlSqlParser.Subprog_decl_in_typeContext):
pass
# Exit a parse tree produced by PlSqlParser#subprog_decl_in_type.
def exitSubprog_decl_in_type(self, ctx:PlSqlParser.Subprog_decl_in_typeContext):
pass
# Enter a parse tree produced by PlSqlParser#proc_decl_in_type.
def enterProc_decl_in_type(self, ctx:PlSqlParser.Proc_decl_in_typeContext):
pass
# Exit a parse tree produced by PlSqlParser#proc_decl_in_type.
def exitProc_decl_in_type(self, ctx:PlSqlParser.Proc_decl_in_typeContext):
pass
# Enter a parse tree produced by PlSqlParser#func_decl_in_type.
def enterFunc_decl_in_type(self, ctx:PlSqlParser.Func_decl_in_typeContext):
pass
# Exit a parse tree produced by PlSqlParser#func_decl_in_type.
def exitFunc_decl_in_type(self, ctx:PlSqlParser.Func_decl_in_typeContext):
pass
# Enter a parse tree produced by PlSqlParser#constructor_declaration.
def enterConstructor_declaration(self, ctx:PlSqlParser.Constructor_declarationContext):
pass
# Exit a parse tree produced by PlSqlParser#constructor_declaration.
def exitConstructor_declaration(self, ctx:PlSqlParser.Constructor_declarationContext):
pass
# Enter a parse tree produced by PlSqlParser#modifier_clause.
def enterModifier_clause(self, ctx:PlSqlParser.Modifier_clauseContext):
pass
# Exit a parse tree produced by PlSqlParser#modifier_clause.
def exitModifier_clause(self, ctx:PlSqlParser.Modifier_clauseContext):
pass
# Enter a parse tree produced by PlSqlParser#object_member_spec.
def enterObject_member_spec(self, ctx:PlSqlParser.Object_member_specContext):
pass
# Exit a parse tree produced by PlSqlParser#object_member_spec.
def exitObject_member_spec(self, ctx:PlSqlParser.Object_member_specContext):
pass
# Enter a parse tree produced by PlSqlParser#sqlj_object_type_attr.
def enterSqlj_object_type_attr(self, ctx:PlSqlParser.Sqlj_object_type_attrContext):
pass
# Exit a parse tree produced by PlSqlParser#sqlj_object_type_attr.
def exitSqlj_object_type_attr(self, ctx:PlSqlParser.Sqlj_object_type_attrContext):
pass
# Enter a parse tree produced by PlSqlParser#element_spec.
def enterElement_spec(self, ctx:PlSqlParser.Element_specContext):
pass
# Exit a parse tree produced by PlSqlParser#element_spec.
def exitElement_spec(self, ctx:PlSqlParser.Element_specContext):
pass
# Enter a parse tree produced by PlSqlParser#element_spec_options.
def enterElement_spec_options(self, ctx:PlSqlParser.Element_spec_optionsContext):
pass
# Exit a parse tree produced by PlSqlParser#element_spec_options.
def exitElement_spec_options(self, ctx:PlSqlParser.Element_spec_optionsContext):
pass
# Enter a parse tree produced by PlSqlParser#subprogram_spec.
def enterSubprogram_spec(self, ctx:PlSqlParser.Subprogram_specContext):
pass
# Exit a parse tree produced by PlSqlParser#subprogram_spec.
def exitSubprogram_spec(self, ctx:PlSqlParser.Subprogram_specContext):
pass
# Enter a parse tree produced by PlSqlParser#type_procedure_spec.
def enterType_procedure_spec(self, ctx:PlSqlParser.Type_procedure_specContext):
pass
# Exit a parse tree produced by PlSqlParser#type_procedure_spec.
def exitType_procedure_spec(self, ctx:PlSqlParser.Type_procedure_specContext):
pass
# Enter a parse tree produced by PlSqlParser#type_function_spec.
def enterType_function_spec(self, ctx:PlSqlParser.Type_function_specContext):
pass
# Exit a parse tree produced by PlSqlParser#type_function_spec.
def exitType_function_spec(self, ctx:PlSqlParser.Type_function_specContext):
pass
# Enter a parse tree produced by PlSqlParser#constructor_spec.
def enterConstructor_spec(self, ctx:PlSqlParser.Constructor_specContext):
pass
# Exit a parse tree produced by PlSqlParser#constructor_spec.
def exitConstructor_spec(self, ctx:PlSqlParser.Constructor_specContext):
pass
# Enter a parse tree produced by PlSqlParser#map_order_function_spec.
def enterMap_order_function_spec(self, ctx:PlSqlParser.Map_order_function_specContext):
pass
# Exit a parse tree produced by PlSqlParser#map_order_function_spec.
def exitMap_order_function_spec(self, ctx:PlSqlParser.Map_order_function_specContext):
pass
# Enter a parse tree produced by PlSqlParser#pragma_clause.
def enterPragma_clause(self, ctx:PlSqlParser.Pragma_clauseContext):
pass
# Exit a parse tree produced by PlSqlParser#pragma_clause.
def exitPragma_clause(self, ctx:PlSqlParser.Pragma_clauseContext):
pass
# Enter a parse tree produced by PlSqlParser#pragma_elements.
def enterPragma_elements(self, ctx:PlSqlParser.Pragma_elementsContext):
pass
# Exit a parse tree produced by PlSqlParser#pragma_elements.
def exitPragma_elements(self, ctx:PlSqlParser.Pragma_elementsContext):
pass
# Enter a parse tree produced by PlSqlParser#type_elements_parameter.
def enterType_elements_parameter(self, ctx:PlSqlParser.Type_elements_parameterContext):
pass
# Exit a parse tree produced by PlSqlParser#type_elements_parameter.
def exitType_elements_parameter(self, ctx:PlSqlParser.Type_elements_parameterContext):
pass
# Enter a parse tree produced by PlSqlParser#drop_sequence.
def enterDrop_sequence(self, ctx:PlSqlParser.Drop_sequenceContext):
pass
# Exit a parse tree produced by PlSqlParser#drop_sequence.
def exitDrop_sequence(self, ctx:PlSqlParser.Drop_sequenceContext):
pass
# Enter a parse tree produced by PlSqlParser#alter_sequence.
def enterAlter_sequence(self, ctx:PlSqlParser.Alter_sequenceContext):
pass
# Exit a parse tree produced by PlSqlParser#alter_sequence.
def exitAlter_sequence(self, ctx:PlSqlParser.Alter_sequenceContext):
pass
# Enter a parse tree produced by PlSqlParser#create_sequence.
def enterCreate_sequence(self, ctx:PlSqlParser.Create_sequenceContext):
pass
# Exit a parse tree produced by PlSqlParser#create_sequence.
def exitCreate_sequence(self, ctx:PlSqlParser.Create_sequenceContext):
pass
# Enter a parse tree produced by PlSqlParser#sequence_spec.
def enterSequence_spec(self, ctx:PlSqlParser.Sequence_specContext):
pass
# Exit a parse tree produced by PlSqlParser#sequence_spec.
def exitSequence_spec(self, ctx:PlSqlParser.Sequence_specContext):
pass
# Enter a parse tree produced by PlSqlParser#sequence_start_clause.
def enterSequence_start_clause(self, ctx:PlSqlParser.Sequence_start_clauseContext):
pass
# Exit a parse tree produced by PlSqlParser#sequence_start_clause.
def exitSequence_start_clause(self, ctx:PlSqlParser.Sequence_start_clauseContext):
pass
# Enter a parse tree produced by PlSqlParser#create_index.
def enterCreate_index(self, ctx:PlSqlParser.Create_indexContext):
pass
# Exit a parse tree produced by PlSqlParser#create_index.
def exitCreate_index(self, ctx:PlSqlParser.Create_indexContext):
pass
# Enter a parse tree produced by PlSqlParser#alter_index.
def enterAlter_index(self, ctx:PlSqlParser.Alter_indexContext):
pass
# Exit a parse tree produced by PlSqlParser#alter_index.
def exitAlter_index(self, ctx:PlSqlParser.Alter_indexContext):
pass
# Enter a parse tree produced by PlSqlParser#create_user.
def enterCreate_user(self, ctx:PlSqlParser.Create_userContext):
pass
# Exit a parse tree produced by PlSqlParser#create_user.
def exitCreate_user(self, ctx:PlSqlParser.Create_userContext):
pass
# Enter a parse tree produced by PlSqlParser#alter_user.
def enterAlter_user(self, ctx:PlSqlParser.Alter_userContext):
pass
# Exit a parse tree produced by PlSqlParser#alter_user.
def exitAlter_user(self, ctx:PlSqlParser.Alter_userContext):
pass
# Enter a parse tree produced by PlSqlParser#alter_identified_by.
def enterAlter_identified_by(self, ctx:PlSqlParser.Alter_identified_byContext):
pass
# Exit a parse tree produced by PlSqlParser#alter_identified_by.
def exitAlter_identified_by(self, ctx:PlSqlParser.Alter_identified_byContext):
pass
# Enter a parse tree produced by PlSqlParser#identified_by.
def enterIdentified_by(self, ctx:PlSqlParser.Identified_byContext):
pass
# Exit a parse tree produced by PlSqlParser#identified_by.
def exitIdentified_by(self, ctx:PlSqlParser.Identified_byContext):
pass
# Enter a parse tree produced by PlSqlParser#identified_other_clause.
def enterIdentified_other_clause(self, ctx:PlSqlParser.Identified_other_clauseContext):
pass
# Exit a parse tree produced by PlSqlParser#identified_other_clause.
def exitIdentified_other_clause(self, ctx:PlSqlParser.Identified_other_clauseContext):
pass
# Enter a parse tree produced by PlSqlParser#user_tablespace_clause.
def enterUser_tablespace_clause(self, ctx:PlSqlParser.User_tablespace_clauseContext):
pass
# Exit a parse tree produced by PlSqlParser#user_tablespace_clause.
def exitUser_tablespace_clause(self, ctx:PlSqlParser.User_tablespace_clauseContext):
pass
# Enter a parse tree produced by PlSqlParser#quota_clause.
def enterQuota_clause(self, ctx:PlSqlParser.Quota_clauseContext):
pass
# Exit a parse tree produced by PlSqlParser#quota_clause.
def exitQuota_clause(self, ctx:PlSqlParser.Quota_clauseContext):
pass
# Enter a parse tree produced by PlSqlParser#profile_clause.
def enterProfile_clause(self, ctx:PlSqlParser.Profile_clauseContext):
pass
# Exit a parse tree produced by PlSqlParser#profile_clause.
def exitProfile_clause(self, ctx:PlSqlParser.Profile_clauseContext):
pass
# Enter a parse tree produced by PlSqlParser#role_clause.
def enterRole_clause(self, ctx:PlSqlParser.Role_clauseContext):
pass
# Exit a parse tree produced by PlSqlParser#role_clause.
def exitRole_clause(self, ctx:PlSqlParser.Role_clauseContext):
pass
# Enter a parse tree produced by PlSqlParser#user_default_role_clause.
def enterUser_default_role_clause(self, ctx:PlSqlParser.User_default_role_clauseContext):
pass
# Exit a parse tree produced by PlSqlParser#user_default_role_clause.
def exitUser_default_role_clause(self, ctx:PlSqlParser.User_default_role_clauseContext):
pass
# Enter a parse tree produced by PlSqlParser#password_expire_clause.
def enterPassword_expire_clause(self, ctx:PlSqlParser.Password_expire_clauseContext):
pass
# Exit a parse tree produced by PlSqlParser#password_expire_clause.
def exitPassword_expire_clause(self, ctx:PlSqlParser.Password_expire_clauseContext):
pass
# Enter a parse tree produced by PlSqlParser#user_lock_clause.
def enterUser_lock_clause(self, ctx:PlSqlParser.User_lock_clauseContext):
pass
# Exit a parse tree produced by PlSqlParser#user_lock_clause.
def exitUser_lock_clause(self, ctx:PlSqlParser.User_lock_clauseContext):
pass
# Enter a parse tree produced by PlSqlParser#user_editions_clause.
def enterUser_editions_clause(self, ctx:PlSqlParser.User_editions_clauseContext):
pass
# Exit a parse tree produced by PlSqlParser#user_editions_clause.
def exitUser_editions_clause(self, ctx:PlSqlParser.User_editions_clauseContext):
pass
# Enter a parse tree produced by PlSqlParser#alter_user_editions_clause.
def enterAlter_user_editions_clause(self, ctx:PlSqlParser.Alter_user_editions_clauseContext):
pass
# Exit a parse tree produced by PlSqlParser#alter_user_editions_clause.
def exitAlter_user_editions_clause(self, ctx:PlSqlParser.Alter_user_editions_clauseContext):
pass
# Enter a parse tree produced by PlSqlParser#proxy_clause.
def enterProxy_clause(self, ctx:PlSqlParser.Proxy_clauseContext):
pass
# Exit a parse tree produced by PlSqlParser#proxy_clause.
def exitProxy_clause(self, ctx:PlSqlParser.Proxy_clauseContext):
pass
# Enter a parse tree produced by PlSqlParser#container_names.
def enterContainer_names(self, ctx:PlSqlParser.Container_namesContext):
pass
# Exit a parse tree produced by PlSqlParser#container_names.
def exitContainer_names(self, ctx:PlSqlParser.Container_namesContext):
pass
# Enter a parse tree produced by PlSqlParser#set_container_data.
def enterSet_container_data(self, ctx:PlSqlParser.Set_container_dataContext):
pass
# Exit a parse tree produced by PlSqlParser#set_container_data.
def exitSet_container_data(self, ctx:PlSqlParser.Set_container_dataContext):
pass
# Enter a parse tree produced by PlSqlParser#add_rem_container_data.
def enterAdd_rem_container_data(self, ctx:PlSqlParser.Add_rem_container_dataContext):
pass
# Exit a parse tree produced by PlSqlParser#add_rem_container_data.
def exitAdd_rem_container_data(self, ctx:PlSqlParser.Add_rem_container_dataContext):
pass
(255, 255, 255, 255),
526: (71, 186, 255, 255),
527: (255, 174, 0, 255),
528: (139, 95, 0, 255),
529: (48, 214, 255, 255),
530: (61, 101, 125, 255),
531: (255, 130, 97, 255),
532: (128, 74, 59, 255),
533: (255, 200, 200, 255),
534: (174, 0, 0, 255),
535: (255, 255, 192, 255),
536: (160, 88, 0, 255),
537: (255, 192, 255, 255),
538: (64, 0, 160, 255),
539: (255, 81, 81, 255),
540: (255, 182, 25, 255),
541: (192, 91, 254, 255),
542: (0, 150, 255, 255),
543: (0, 24, 255, 255),
544: (255, 86, 110, 255),
545: (180, 0, 0, 255),
546: (96, 184, 250, 255),
547: (196, 57, 99, 255),
548: (176, 116, 0, 255),
549: (156, 107, 71, 255),
550: (247, 223, 181, 0),
551: (93, 158, 0, 255),
552: (247, 223, 181, 0),
553: (82, 132, 231, 255),
554: (247, 223, 181, 0),
555: (130, 95, 201, 255),
556: (247, 223, 181, 0),
557: (232, 104, 12, 255),
558: (247, 223, 181, 0),
559: (207, 151, 0, 255),
560: (247, 223, 181, 0),
561: (222, 95, 146, 255),
562: (247, 223, 181, 0),
563: (202, 122, 47, 255),
564: (255, 183, 101, 0),
565: (255, 255, 255, 255),
566: (71, 186, 255, 255),
567: (255, 255, 255, 255),
568: (230, 167, 58, 255),
569: (255, 255, 255, 255),
570: (0, 204, 34, 255),
571: (76, 52, 47, 255),
572: (255, 243, 231, 255),
573: (143, 117, 0, 255),
574: (255, 239, 56, 255),
575: (1, 154, 91, 255),
576: (5, 164, 198, 255),
577: (211, 114, 0, 255),
578: (232, 67, 185, 255),
579: (128, 240, 240, 255),
580: (0, 32, 32, 255),
700: (255, 255, 255, 255),
701: (100, 100, 100, 255),
702: (0, 0, 0, 255),
703: (255, 212, 216, 255),
704: (204, 55, 55, 255),
705: (233, 98, 76, 255),
706: (240, 142, 55, 255),
707: (0, 153, 255, 255),
708: (170, 129, 255, 255),
709: (85, 52, 194, 255),
710: (245, 235, 103, 255),
}, {
0: (0, 0, 0, 0),
1: (255, 255, 255, 255),
2: (204, 204, 204, 255),
3: (160, 160, 160, 255),
4: (128, 128, 128, 255),
5: (110, 110, 110, 255),
6: (87, 87, 87, 255),
7: (12, 0, 56, 255),
8: (255, 255, 255, 255),
9: (193, 123, 183, 255),
10: (255, 212, 216, 255),
11: (202, 117, 87, 255),
12: (255, 153, 153, 255),
13: (169, 96, 60, 255),
14: (204, 55, 55, 255),
15: (233, 98, 76, 255),
16: (229, 0, 79, 255),
17: (220, 0, 0, 255),
18: (178, 46, 46, 255),
19: (143, 7, 7, 255),
20: (195, 188, 165, 255),
21: (162, 154, 135, 255),
22: (177, 216, 224, 255),
23: (165, 231, 224, 255),
24: (255, 248, 176, 255),
25: (255, 255, 102, 255),
26: (215, 203, 70, 255),
27: (141, 136, 0, 255),
28: (230, 167, 58, 255),
29: (152, 109, 10, 255),
30: (153, 77, 44, 255),
31: (243, 204, 100, 255),
32: (240, 142, 55, 255),
33: (211, 238, 233, 255),
34: (159, 238, 241, 255),
35: (121, 199, 236, 255),
36: (55, 140, 205, 255),
37: (0, 153, 255, 255),
38: (19, 125, 255, 255),
39: (148, 206, 199, 255),
40: (136, 220, 174, 255),
41: (69, 178, 174, 255),
42: (151, 220, 96, 255),
43: (140, 255, 90, 255),
44: (117, 187, 0, 255),
45: (0, 204, 34, 255),
46: (40, 130, 70, 255),
47: (1, 114, 35, 255),
48: (200, 90, 255, 255),
49: (133, 85, 255, 255),
50: (255, 255, 255, 255),
51: (0, 0, 0, 255),
52: (19, 204, 163, 255),
53: (10, 105, 146, 255),
54: (142, 106, 12, 255),
55: (51, 51, 51, 255),
56: (158, 168, 255, 255),
57: (76, 190, 255, 255),
58: (26, 214, 214, 255),
59: (46, 209, 133, 255),
60: (88, 214, 34, 255),
61: (193, 229, 46, 255),
62: (245, 235, 103, 255),
63: (241, 183, 91, 255),
64: (255, 255, 255, 255),
65: (240, 142, 55, 255),
66: (255, 192, 80, 255),
67: (160, 248, 116, 255),
68: (76, 52, 47, 255),
500: (255, 123, 26, 255),
501: (0, 0, 0, 255),
502: (64, 191, 255, 255),
503: (0, 0, 0, 255),
504: (0, 204, 34, 255),
505: (0, 0, 0, 255),
506: (255, 255, 102, 255),
507: (0, 0, 0, 255),
508: (255, 128, 128, 255),
509: (0, 0, 0, 255),
510: (255, 232, 232, 255),
511: (192, 48, 0, 255),
512: (190, 205, 255, 255),
513: (27, 43, 158, 255),
514: (225, 197, 0, 238),
515: (255, 232, 232, 255),
516: (192, 48, 0, 255),
517: (102, 210, 255, 255),
518: (255, 40, 4, 255),
519: (255, 255, 255, 255),
520: (198, 154, 65, 255),
521: (255, 255, 255, 255),
522: (196, 119, 234, 255),
523: (255, 235, 234, 255),
524: (255, 112, 146, 255),
525: (255, 255, 255, 255),
526: (71, 186, 255, 255),
527: (255, 174, 0, 255),
528: (139, 95, 0, 255),
529: (48, 214, 255, 255),
530: (61, 101, 125, 255),
531: (255, 130, 97, 255),
532: (128, 74, 59, 255),
533: (255, 200, 200, 255),
534: (174, 0, 0, 255),
535: (255, 255, 192, 255),
536: (160, 88, 0, 255),
537: (255, 192, 255, 255),
538: (64, 0, 160, 255),
539: (255, 81, 81, 255),
540: (255, 182, 25, 255),
541: (192, 91, 254, 255),
542: (0, 150, 255, 255),
543: (0, 24, 255, 255),
544: (255, 86, 110, 255),
545: (180, 0, 0, 255),
546: (96, 184, 250, 255),
547: (196, 57, 99, 255),
548: (241, 198, 0, 255),
549: (243, 243, 243, 255),
550: (0, 0, 0, 255),
551: (0, 255, 0, 255),
552: (0, 64, 0, 255),
553: (0, 255, 255, 255),
554: (0, 0, 255, 255),
555: (179, 140, 255, 255),
556: (128, 0, 0, 255),
557: (255, 166, 102, 255),
558: (0, 0, 0, 255),
559: (255, 225, 25, 255),
560: (0, 0, 0, 255),
561: (250, 137, 182, 255),
562: (0, 0, 0, 255),
563: (233, 194, 156, 255),
564: (0, 0, 0, 255),
565: (255, 255, 255, 255),
566: (71, 186, 255, 255),
567: (255, 255, 255, 255),
568: (230, 167, 58, 255),
569: (255, 255, 255, 255),
570: (0, 204, 34, 255),
571: (255, 255, 255, 255),
572: (0, 0, 0, 255),
573: (249, 232, 70, 255),
574: (0, 0, 0, 255),
575: (55, 222, 153, 255),
576: (91, 226, 255, 255),
577: (255, 194, 64, 255),
578: (255, 90, 208, 255),
579: (128, 240, 240, 255),
580: (0, 32, 32, 255),
700: (255, 255, 255, 255),
701: (100, 100, 100, 255),
702: (0, 0, 0, 255),
703: (255, 212, 216, 255),
704: (204, 55, 55, 255),
705: (233, 98, 76, 255),
706: (240, 142, 55, 255),
707: (0, 153, 255, 255),
708: (170, 129, 255, 255),
709: (85, 52, 194, 255),
710: (245, 235, 103, 255),
}, {
0: (0, 0, 0, 0),
1: (0, 0, 0, 0),
2: (0, 0, 0, 0),
3: (0, 0, 0, 0),
4: (0, 0, 0, 0),
5: (0, 0, 0, 0),
6: (0, 0, 0, 0),
7: (0, 0, 0, 0),
8: (0, 0, 0, 0),
9: (0, 0, 0, 0),
10: (0, 0, 0, 0),
11: (0, 0, 0, 0),
12: (0, 0, 0, 0),
13: (0, 0, 0, 0),
14: (0, 0, 0, 0),
15: (0, 0, 0, 0),
16: (0, 0, 0, 0),
17: (0, 0, 0, 0),
18: (0, 0, 0, 0),
19: (0, 0, 0, 0),
20: (0, 0, 0, 0),
21: (0, 0, 0, 0),
22: (0, 0, 0, 0),
23: (0, 0, 0, 0),
24: (0, 0, 0, 0),
25: (0, 0, 0, 0),
26: (0, 0, 0, 0),
27: (0, 0, 0, 0),
<filename>rtg/module/rnnmt.py
import random
from typing import Optional, Callable
import torch
import torch.nn.functional as F
from torch import nn
from tqdm import tqdm
from rtg import log, TranslationExperiment as Experiment
from rtg import my_tensor as tensor, device
from rtg.data.dataset import Batch, BatchIterable, padded_sequence_mask
from rtg.data.codec import Field
from rtg.module import NMTModel
from rtg.module.trainer import TrainerState, SteppedTrainer
from rtg.registry import register, MODEL
PAD_IDX = Field.pad_idx
class Embedder(nn.Embedding):
"""
This module takes words (word IDs, not the text ) and creates vectors.
For the inverse operation see `Generator` module
"""
def __init__(self, name: str, vocab_size: int, emb_size: int,
weights: Optional[torch.Tensor] = None, freeze: bool = False, pad_idx=PAD_IDX):
self.name = name
self.vocab_size = vocab_size
self.emb_size = emb_size
super(Embedder, self).__init__(self.vocab_size, self.emb_size, padding_idx=pad_idx,
_weight=weights)
self.weight.requires_grad = not freeze
class Generator(nn.Module):
"""
This module takes vectors and produces word ids.
Note: In theory, this is not inverse of `Embedder`, however in practice it is an approximate
inverse operation of `Embedder`
"""
def __init__(self, name: str, vec_size: int, vocab_size: int):
super(Generator, self).__init__()
self.name = name
self.vec_size = vec_size
self.vocab_size = vocab_size
self.proj = nn.Linear(self.vec_size, self.vocab_size)
def forward(self, x, log_probs=True):
x_feats = self.proj(x)
return (F.log_softmax if log_probs else F.softmax)(x_feats, dim=-1)
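# Editorial sketch (not part of the original module): a minimal round trip through
# Embedder and Generator with assumed toy sizes (vocabulary of 10 ids, 8-dim vectors).
# Wrapped in a helper so nothing runs on import; the helper name is illustrative only.
def _demo_embedder_generator():
    emb = Embedder('demo_src', vocab_size=10, emb_size=8)
    gen = Generator('demo_gen', vec_size=8, vocab_size=10)
    word_ids = torch.tensor([[1, 4, 7]])    # [batch=1, seq_len=3]
    vectors = emb(word_ids)                 # [1, 3, 8] embedding vectors
    log_probs = gen(vectors)                # [1, 3, 10] log-probabilities over the vocab
    return log_probs.argmax(dim=-1)         # greedy ids, shape [1, 3]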
class SeqEncoder(nn.Module):
def __init__(self, embedder: Embedder, hid_size: int, n_layers: int,
bidirectional: bool = True, dropout=0.5, ext_embedder: Embedder = None):
super().__init__()
self.emb: Embedder = embedder
self.dropout = nn.Dropout(dropout)
# Input size of RNN, which is same as embedding vector size
self.emb_size = self.emb.emb_size
# the output size of RNN, ie. hidden representation size
self.hid_size = hid_size
self.n_layers = n_layers
self.bidirectional = bidirectional
hid_size = self.hid_size
if self.bidirectional:
assert hid_size % 2 == 0
hid_size = hid_size // 2
self.rnn_node = nn.LSTM(self.emb_size, hid_size,
num_layers=self.n_layers,
bidirectional=self.bidirectional,
batch_first=True,
dropout=dropout if n_layers > 1 else 0)
# if external embeddings are provided
self.ext_embedder = ext_embedder
# The size of the output feature vectors
self.out_size = self.hid_size + (self.ext_embedder.emb_size if ext_embedder else 0)
def forward(self, input_seqs: torch.Tensor, input_lengths, hidden=None, pre_embedded=False):
assert len(input_seqs) == len(input_lengths)
if pre_embedded:
embedded = input_seqs
batch_size, seq_len, emb_size = input_seqs.shape
assert emb_size == self.emb_size
else:
batch_size, seq_len = input_seqs.shape
embs = self.emb(input_seqs)
embedded = embs.view(batch_size, seq_len, self.emb_size)
embedded = self.dropout(embedded)
packed = nn.utils.rnn.pack_padded_sequence(embedded, input_lengths, batch_first=True)
outputs, hidden = self.rnn_node(packed, hidden)
outputs, output_lengths = nn.utils.rnn.pad_packed_sequence(outputs, batch_first=True,
padding_value=PAD_IDX)
# Sum bidirectional outputs
# outputs = outputs[:, :, :self.hid_size] + outputs[:, :, self.hid_size:]
dec_state = self.to_dec_state(hidden)
if self.ext_embedder is not None:
ext_embs = self.ext_embedder(input_seqs).view(batch_size, seq_len,
self.ext_embedder.emb_size)
ext_embs = self.dropout(ext_embs)
outputs = torch.cat((outputs, ext_embs), dim=-1)
return outputs, dec_state
def to_dec_state(self, enc_state):
# get the last layer's last time step output
# lnhn / lncn: the last layer's hidden and cell state at the final time step
hns, cns = enc_state
if self.bidirectional:
# cat bidirectional
lnhn = hns.view(self.n_layers, 2, hns.shape[1], hns.shape[-1])[-1]
lnhn = torch.cat([lnhn[0], lnhn[1]], dim=1)
lncn = cns.view(self.n_layers, 2, cns.shape[1], cns.shape[-1])[-1]
lncn = torch.cat([lncn[0], lncn[1]], dim=1)
else:
lnhn = hns.view(self.n_layers, hns.shape[1], hns.shape[-1])[-1]
lncn = cns.view(self.n_layers, cns.shape[1], cns.shape[-1])[-1]
# lnhn and lncn hold compact representation
# duplicate for decoder layers
return (lnhn.expand(self.n_layers, *lnhn.shape).contiguous(),
lncn.expand(self.n_layers, *lncn.shape).contiguous())
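# Editorial sketch (not part of the original module): shape walkthrough for SeqEncoder
# with assumed sizes -- a bidirectional, 2-layer LSTM with hid_size=16 over a batch of
# 2 sequences of length 5. to_dec_state() concatenates the two directions of the last
# layer and repeats the result for every decoder layer. Helper name is illustrative only.
def _demo_seq_encoder_shapes():
    enc = SeqEncoder(Embedder('demo_src', vocab_size=10, emb_size=8),
                     hid_size=16, n_layers=2, bidirectional=True, dropout=0.0)
    seqs = torch.tensor([[1, 2, 3, 4, 5], [6, 7, 8, 9, 1]])   # [B=2, S=5]
    outs, (h, c) = enc(seqs, [5, 5])
    # outs: [2, 5, 16]; h and c: [n_layers=2, B=2, hid_size=16]
    return outs.shape, h.shape, c.shape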
class SeqDecoder(nn.Module):
def __init__(self, prev_emb_node: Embedder, generator: Generator, n_layers: int, dropout=0.5):
super(SeqDecoder, self).__init__()
self.prev_emb = prev_emb_node
self.dropout = nn.Dropout(dropout)
self.generator: Generator = generator
self.n_layers = n_layers
self.emb_size = self.prev_emb.emb_size
self.hid_size = self.generator.vec_size
self.rnn_node = nn.LSTM(self.emb_size, self.hid_size, num_layers=self.n_layers,
bidirectional=False, batch_first=True,
dropout=dropout if n_layers > 1 else 0)
def forward(self, enc_outs: Optional, prev_out, last_hidden, gen_probs=True, log_probs=True):
# Note: we run this one step at a time
# Get the embedding of the current input word (last output word)
batch_size = prev_out.size(0)
if enc_outs is not None:
assert len(enc_outs) == batch_size
# S=B x 1 x N
embedded = self.prev_emb(prev_out).view(batch_size, 1, self.prev_emb.emb_size)
embedded = self.dropout(embedded)
# Get current hidden state from input word and last hidden state
rnn_output, hidden = self.rnn_node(embedded, last_hidden)
# [B x N ] <- [B x S=1 x N]
rnn_output = rnn_output.squeeze(1)
if gen_probs:
# Finally predict next token
next_word_distr = self.generator(rnn_output, log_probs=log_probs)
# Return final output, hidden state, and attention weights (for visualization)
return next_word_distr, hidden, None
else:
return rnn_output, hidden, None
class AttnModel(nn.Module):
"""
Attention model
"""
def __init__(self, inp_size, out_size=None, att_type='dot'):
"""
:param inp_size: Input size on which the attention is computed
:param out_size: Output of attention
"""
super(AttnModel, self).__init__()
self.inp_size = inp_size
self.out_size = out_size if out_size is not None else inp_size
if att_type == 'dot':
assert self.inp_size == self.out_size
elif att_type == 'general':
self.attn_W = nn.Linear(self.inp_size, self.out_size)
self.attn_type = att_type
self.attn_func = {
'dot': self.dot_attn,
'general': self.general_attn
}[self.attn_type]
@staticmethod
def dot_attn(this_rnn_out, encoder_outs):
# this_rnn_out: [B, D]
# encoder_out : [B, S, D]
# [B, D] --> [B, S, D] ;; repeat hidden sequence_len times
this_run_out = this_rnn_out.unsqueeze(1).expand_as(encoder_outs)
# A batched dot product implementation using element wise product followed by sum
# [B, S] <-- [B, S, D]
# element wise multiply, then sum along the last dim (i.e. model_dim)
weights = (encoder_outs * this_run_out).sum(dim=-1)
return weights
def general_attn(self, this_rnn_out, encoder_outs):
# First map the encoder_outs to new vector space using attn_W
mapped_enc_outs = self.attn_W(encoder_outs)
# Then compute the dot
return self.dot_attn(this_rnn_out, mapped_enc_outs)
def forward(self, this_rnn_out, encoder_outs):
assert encoder_outs.shape[-1] == self.inp_size
assert this_rnn_out.shape[-1] == self.out_size
weights = self.attn_func(this_rnn_out, encoder_outs)
# Normalize energies to weights in range 0 to 1
return F.softmax(weights, dim=1)
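# Editorial sketch (not part of the original module): dot-product attention over assumed
# sizes -- a batch of 2 decoder states of dim 16 attending over 7 encoder positions.
# The result is one normalized weight per source position. Helper name is illustrative only.
def _demo_dot_attention():
    attn = AttnModel(inp_size=16, att_type='dot')
    enc_outs = torch.randn(2, 7, 16)      # [B, S, D]
    dec_state = torch.randn(2, 16)        # [B, D]
    weights = attn(dec_state, enc_outs)   # [B, S], rows sum to 1 after softmax
    return weights.sum(dim=1)             # ~tensor([1., 1.])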
class AttnSeqDecoder(SeqDecoder):
def __init__(self, prev_emb_node: Embedder, generator: Generator, n_layers: int,
ctx_size: Optional[int] = None,
dropout: float = 0.5, attention='dot'):
super(AttnSeqDecoder, self).__init__(prev_emb_node, generator, n_layers, dropout=dropout)
if attention and type(attention) is bool:
# historical reasons, it was boolean in the beginning
attention = 'dot'
ctx_size = ctx_size if ctx_size else self.hid_size
self.attn = AttnModel(inp_size=ctx_size, out_size=self.hid_size, att_type=attention)
# Output from decoder rnn + ctx
self.merge = nn.Linear(self.hid_size + ctx_size, self.hid_size)
def forward(self, enc_outs, prev_out, last_hidden, gen_probs=True, log_probs=True):
# Note: we run this one step at a time
# Get the embedding of the current input word (last output word)
batch_size = prev_out.size(0)
embedded = self.prev_emb(prev_out)
embedded = embedded.view(batch_size, 1, self.prev_emb.emb_size)
embedded = self.dropout(embedded)
# Get current hidden state from input word and last hidden state
rnn_output, hidden = self.rnn_node(embedded, last_hidden)
# [B x N ] <- [B x S=1 x N]
rnn_output = rnn_output.squeeze(1)
# Calculate attention from current RNN state and all encoder outputs;
# apply to encoder outputs to get weighted average
attn_weights = self.attn(rnn_output, enc_outs) # B x S
# attn_weights : B x S --> B x 1 x S
# enc_outs : B x S x N
# Batch multiply : [B x 1 x S] [B x S x N] --> [B x 1 x N]
context = attn_weights.unsqueeze(1).bmm(enc_outs)
# Attentional vector using the RNN hidden state and context vector
# concatenated together (Luong eq. 5)
# rnn_output = rnn_output.squeeze(0) # S=1 x B x N -> B x N
context = context.squeeze(1) # B x S=1 x N -> B x N
concat_input = torch.cat((rnn_output, context), 1)
concat_output = torch.tanh(self.merge(concat_input))
if gen_probs:
# predict next token
output_probs = self.generator(concat_output, log_probs=log_probs)
# Return final output, hidden state, and attention weights (for visualization)
return output_probs, hidden, attn_weights
else:
return concat_output, hidden, attn_weights
class Seq2SeqBridge(nn.Module):
"""Vector to Vector (a slightly different setup than seq2seq
starts with a decoder, then an encoder (short-circuits (or skips) embedder and generator)
"""
def __init__(self, dec: SeqDecoder, enc: SeqEncoder):
super().__init__()
self.dec = dec
self.enc = enc
self.inp_size = dec.hid_size
self.out_size = enc.hid_size
def forward(self, enc_outs, enc_hids, max_len, bos_idx):
batch_size = len(enc_outs)
assert batch_size == enc_hids[0].shape[1] == enc_hids[1].shape[1]
dec_inps = tensor([[bos_idx]] * batch_size, dtype=torch.long)
dec_hids = enc_hids
result = torch.zeros((batch_size, max_len, self.dec.hid_size), device=device)
for t in range(max_len):
dec_outs, dec_hids, _ = self.dec(enc_outs, dec_inps, dec_hids, gen_probs=False)
result[:, t, :] = dec_outs
# TODO: check how hidden state flows
enc_outs, enc_hids = self.enc(result, [max_len] * batch_size, pre_embedded=True)
return enc_outs, enc_hids
@register(MODEL, 'rnnmt')
class RNNMT(NMTModel):
def __init__(self, enc: SeqEncoder, dec: SeqDecoder, bridge: Seq2SeqBridge = None):
super(RNNMT, self).__init__()
self.enc: SeqEncoder = enc
self.dec: SeqDecoder = dec
if bridge:
# enc --> bridge.dec --> bridge.enc --> dec
assert enc.hid_size == bridge.inp_size
assert bridge.out_size == dec.hid_size
else:
# enc --> dec
assert enc.hid_size == dec.hid_size
self.bridge = bridge
def init_src_embedding(self,
" `roundcube`@`{0}` IDENTIFIED BY "
"' '".format(self.app.config.get(
'mysql', 'grant-host')))
EEMysql.execute(self, "grant all privileges"
" on `roundcubemail`.* to "
" `roundcube`@`{0}` IDENTIFIED BY "
"'{1}'".format(self.app.config.get(
'mysql', 'grant-host'),
rc_passwd))
EEShellExec.cmd_exec(self, "mysql roundcubemail < {0}"
"roundcubemail/htdocs/SQL/mysql"
".initial.sql"
.format(EEVariables.ee_webroot))
shutil.copyfile("{0}roundcubemail/htdocs/config/"
"config.inc.php.sample"
.format(EEVariables.ee_webroot),
"{0}roundcubemail/htdocs/config/"
"config.inc.php"
.format(EEVariables.ee_webroot))
EEShellExec.cmd_exec(self, "sed -i \"s\'mysql://roundcube:"
"pass@localhost/roundcubemail\'mysql://"
"roundcube:{0}@{1}/"
"roundcubemail\'\" {2}roundcubemail"
"/htdocs/config/config."
"inc.php"
.format(rc_passwd,
EEVariables.ee_mysql_host,
EEVariables.ee_webroot))
# Sieve plugin configuration in roundcube
EEShellExec.cmd_exec(self, "bash -c \"sed -i \\\"s:\$config\["
"\'plugins\'\] "
"= array(:\$config\['plugins'\] = "
"array(\\n \'sieverules\',:\\\" "
"{0}roundcubemail/htdocs/config"
.format(EEVariables.ee_webroot)
+ "/config.inc.php\"")
EEShellExec.cmd_exec(self, "echo \"\$config['sieverules_port']"
"=4190;\" >> {0}roundcubemail"
.format(EEVariables.ee_webroot)
+ "/htdocs/config/config.inc.php")
data = dict(site_name='webmail', www_domain='webmail',
static=False,
basic=True, wp=False, w3tc=False, wpfc=False,
wpsc=False, multisite=False, wpsubdir=False,
webroot=EEVariables.ee_webroot, ee_db_name='',
ee_db_user='', ee_db_pass='', ee_db_host='',
rc=True)
Log.debug(self, 'Writing the nginx configuration for '
'RoundCubemail')
ee_rc = open('/etc/nginx/sites-available/webmail',
encoding='utf-8', mode='w')
self.app.render((data), 'virtualconf.mustache',
out=ee_rc)
ee_rc.close()
# Create Symbolic link for webmail
EEFileUtils.create_symlink(self, ['/etc/nginx/sites-available'
'/webmail',
'/etc/nginx/sites-enabled/'
'webmail'])
# Create log folder and softlinks
if not os.path.exists('{0}roundcubemail/logs'
.format(EEVariables.ee_webroot)):
os.makedirs('{0}roundcubemail/logs'
.format(EEVariables.ee_webroot))
EEFileUtils.create_symlink(self, ['/var/log/nginx/'
'webmail.access.log',
'{0}roundcubemail/'
'logs/access.log'
.format(EEVariables.ee_webroot)])
EEFileUtils.create_symlink(self, ['/var/log/nginx/'
'webmail.error.log',
'{0}roundcubemail/'
'logs/error.log'
.format(EEVariables.ee_webroot)])
# Remove roundcube installer
EEService.reload_service(self, 'nginx')
EEFileUtils.remove(self, ["{0}roundcubemail/htdocs/installer"
.format(EEVariables.ee_webroot)])
EEFileUtils.chown(self, '{0}roundcubemail'
.format(EEVariables.ee_webroot),
EEVariables.ee_php_user,
EEVariables.ee_php_user,
recursive=True)
if any('/tmp/pra.tar.gz' == x[1]
for x in packages):
Log.debug(self, 'Extracting file /tmp/pra.tar.gz to '
'location /tmp/')
EEExtract.extract(self, '/tmp/pra.tar.gz', '/tmp/')
if not os.path.exists('{0}22222/htdocs/cache/redis'
.format(EEVariables.ee_webroot)):
Log.debug(self, "Creating new directory "
"{0}22222/htdocs/cache/redis"
.format(EEVariables.ee_webroot))
os.makedirs('{0}22222/htdocs/cache/redis'
.format(EEVariables.ee_webroot))
shutil.move('/tmp/phpRedisAdmin-master/',
'{0}22222/htdocs/cache/redis/phpRedisAdmin'
.format(EEVariables.ee_webroot))
Log.debug(self, 'Extracting file /tmp/predis.tar.gz to '
'location /tmp/')
EEExtract.extract(self, '/tmp/predis.tar.gz', '/tmp/')
shutil.move('/tmp/predis-1.0.1/',
'{0}22222/htdocs/cache/redis/phpRedisAdmin/vendor'
.format(EEVariables.ee_webroot))
Log.debug(self, 'Setting ownership and permissions on '
'{0}22222/htdocs/cache/ '
.format(EEVariables.ee_webroot))
EEFileUtils.chown(self, '{0}22222'
.format(EEVariables.ee_webroot),
EEVariables.ee_php_user,
EEVariables.ee_php_user,
recursive=True)
@expose(help="Install packages")
def install(self, packages=[], apt_packages=[], disp_msg=True):
"""Start installation of packages"""
if self.app.pargs.pagespeed:
Log.error(self, "Pagespeed support has been dropped since EasyEngine v3.6.0",False)
Log.error(self, "Please run command again without `--pagespeed`",False)
Log.error(self, "For more details, read - https://easyengine.io/blog/disabling-pagespeed/")
self.msg = []
try:
# Default action for stack installation
if ((not self.app.pargs.web) and (not self.app.pargs.admin) and
(not self.app.pargs.mail) and (not self.app.pargs.nginx) and
(not self.app.pargs.php) and (not self.app.pargs.mysql) and
(not self.app.pargs.postfix) and (not self.app.pargs.wpcli) and
(not self.app.pargs.phpmyadmin) and (not self.app.pargs.hhvm)
and (not self.app.pargs.pagespeed) and
(not self.app.pargs.adminer) and (not self.app.pargs.utils) and
(not self.app.pargs.mailscanner) and (not self.app.pargs.all)
and (not self.app.pargs.redis) and
(not self.app.pargs.phpredisadmin) and (not self.app.pargs.php7)):
self.app.pargs.web = True
self.app.pargs.admin = True
if self.app.pargs.all:
self.app.pargs.web = True
self.app.pargs.admin = True
self.app.pargs.mail = True
if self.app.pargs.web:
self.app.pargs.nginx = True
self.app.pargs.php = True
self.app.pargs.mysql = True
self.app.pargs.wpcli = True
self.app.pargs.postfix = True
if self.app.pargs.admin:
self.app.pargs.nginx = True
self.app.pargs.php = True
self.app.pargs.mysql = True
self.app.pargs.adminer = True
self.app.pargs.phpmyadmin = True
self.app.pargs.utils = True
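# Worked example (editorial note; flag names taken from the parser above):
# `--admin` alone therefore expands to nginx + php + mysql + adminer +
# phpmyadmin + utils, while invoking the installer with no flags at all
# falls back to both --web and --admin (see the defaults at the top of
# this method).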
if self.app.pargs.mail:
self.app.pargs.nginx = True
self.app.pargs.php = True
self.app.pargs.mysql = True
self.app.pargs.postfix = True
if not EEAptGet.is_installed(self, 'dovecot-core'):
check_fqdn(self,
os.popen("hostname -f | tr -d '\n'").read())
Log.debug(self, "Setting apt_packages variable for mail")
apt_packages = apt_packages + EEVariables.ee_mail
#Backup before changing repo to private
# packages = packages + [["https://github.com/opensolutions/"
# "ViMbAdmin/archive/{0}.tar.gz"
# .format(EEVariables.ee_vimbadmin),
# "/tmp/vimbadmin.tar.gz",
# "ViMbAdmin"],
# ["https://github.com/roundcube/"
# "roundcubemail/releases/download/"
# "{0}/roundcubemail-{0}.tar.gz"
# .format(EEVariables.ee_roundcube),
# "/tmp/roundcube.tar.gz",
# "Roundcube"]]
# https://github.com/EasyEngine/ViMbAdmin/archive/3.0.13.tar.gz
packages = packages + [["https://github.com/EasyEngine/"
"ViMbAdmin/archive/{0}.tar.gz"
.format(EEVariables.ee_vimbadmin),
"/tmp/vimbadmin.tar.gz",
"ViMbAdmin"],
["https://github.com/roundcube/"
"roundcubemail/releases/download/"
"{0}/roundcubemail-{0}.tar.gz"
.format(EEVariables.ee_roundcube),
"/tmp/roundcube.tar.gz",
"Roundcube"]]
if EEVariables.ee_ram > 1024:
self.app.pargs.mailscanner = True
else:
Log.info(self, "System RAM is less than 1GB\nMail "
"scanner packages are not going to install"
" automatically")
else:
Log.info(self, "Mail server is already installed")
if self.app.pargs.redis:
if not EEAptGet.is_installed(self, 'redis-server'):
apt_packages = apt_packages + EEVariables.ee_redis
self.app.pargs.php = True
else:
Log.info(self, "Redis already installed")
if self.app.pargs.nginx:
Log.debug(self, "Setting apt_packages variable for Nginx")
if not (EEAptGet.is_installed(self, 'nginx-custom')):
if not (EEAptGet.is_installed(self, 'nginx-plus') or EEAptGet.is_installed(self, 'nginx')):
apt_packages = apt_packages + EEVariables.ee_nginx
else:
if EEAptGet.is_installed(self, 'nginx-plus'):
Log.info(self, "NGINX PLUS Detected ...")
apt = ["nginx-plus"] + EEVariables.ee_nginx
self.post_pref(apt, packages)
elif EEAptGet.is_installed(self, 'nginx'):
Log.info(self, "EasyEngine detected a previously installed Nginx package. "
"It may or may not have required modules. "
"\nIf you need help, please create an issue at https://github.com/EasyEngine/easyengine/issues/ \n")
apt = ["nginx"] + EEVariables.ee_nginx
self.post_pref(apt, packages)
else:
Log.debug(self, "Nginx Stable already installed")
if self.app.pargs.php:
Log.debug(self, "Setting apt_packages variable for PHP")
if not (EEAptGet.is_installed(self, 'php5-fpm') or EEAptGet.is_installed(self, 'php5.6-fpm')):
if (EEVariables.ee_platform_codename == 'trusty' or EEVariables.ee_platform_codename == 'xenial' or EEVariables.ee_platform_codename == 'bionic'):
apt_packages = apt_packages + EEVariables.ee_php5_6 + EEVariables.ee_php_extra
else:
apt_packages = apt_packages + EEVariables.ee_php
else:
Log.debug(self, "PHP already installed")
Log.info(self, "PHP already installed")
#PHP 7.0 for Debian (jessie+)
if self.app.pargs.php7 and EEVariables.ee_platform_distro == 'debian':
if (EEVariables.ee_platform_codename == 'jessie'):
Log.debug(self, "Setting apt_packages variable for PHP 7.0")
if not EEAptGet.is_installed(self, 'php7.0-fpm') :
apt_packages = apt_packages + EEVariables.ee_php7_0
if not EEAptGet.is_installed(self, 'php5-fpm'):
apt_packages = apt_packages + EEVariables.ee_php
else:
Log.debug(self, "PHP 7.0 already installed")
Log.info(self, "PHP 7.0 already installed")
else:
Log.debug(self, "PHP 7.0 Not Available for your Distribution")
Log.info(self, "PHP 7.0 Not Available for your Distribution")
#PHP 7.0 for Ubuntu
if self.app.pargs.php7 and not EEVariables.ee_platform_distro == 'debian':
if (EEVariables.ee_platform_codename == 'trusty' or EEVariables.ee_platform_codename == 'xenial' or EEVariables.ee_platform_codename == 'bionic'):
Log.debug(self, "Setting apt_packages variable for PHP 7.0")
if not EEAptGet.is_installed(self, 'php7.0-fpm') :
apt_packages = apt_packages + EEVariables.ee_php7_0 + EEVariables.ee_php_extra
if not EEAptGet.is_installed(self, 'php5.6-fpm'):
apt_packages = apt_packages + EEVariables.ee_php5_6 + EEVariables.ee_php_extra
else:
Log.debug(self, "PHP 7.0 already installed")
Log.info(self, "PHP 7.0 already installed")
else:
Log.debug(self, "PHP 7.0 Not Available for your Distribution")
Log.info(self, "PHP 7.0 Not Available for your Distribution")
if self.app.pargs.hhvm:
Log.debug(self, "Setting apt packages variable for HHVM")
if platform.architecture()[0] == '32bit':
Log.error(self, "HHVM is not supported by 32bit system")
if not EEAptGet.is_installed(self, 'hhvm'):
apt_packages = apt_packages + EEVariables.ee_hhvm
else:
Log.debug(self, "HHVM already installed")
Log.info(self, "HHVM already installed")
if self.app.pargs.mysql:
Log.debug(self, "Setting apt_packages variable for MySQL")
if not EEShellExec.cmd_exec(self, "mysqladmin ping"):
apt_packages = apt_packages + EEVariables.ee_mysql
packages = packages + [["https://raw."
"githubusercontent.com/"
"major/MySQLTuner-perl"
"/master/mysqltuner.pl",
"/usr/bin/mysqltuner",
"MySQLTuner"]]
else:
Log.debug(self, "MySQL connection is already alive")
Log.info(self, "MySQL connection is already alive")
if self.app.pargs.postfix:
Log.debug(self, "Setting apt_packages variable for Postfix")
if not EEAptGet.is_installed(self, 'postfix'):
apt_packages = apt_packages + EEVariables.ee_postfix
else:
Log.debug(self, "Postfix is already installed")
Log.info(self, "Postfix is already installed")
if self.app.pargs.wpcli:
Log.debug(self, "Setting packages variable for WP-CLI")
if not EEShellExec.cmd_exec(self, "which wp"):
packages = packages + [["https://github.com/wp-cli/wp-cli/"
"releases/download/v{0}/"
"wp-cli-{0}.phar"
"".format(EEVariables.ee_wp_cli),
"/usr/bin/wp",
"WP-CLI"]]
else:
Log.debug(self, "WP-CLI is already installed")
Log.info(self, "WP-CLI is already installed")
if self.app.pargs.phpmyadmin:
Log.debug(self, "Setting packages varible for phpMyAdmin ")
packages = packages + [["https://github.com/phpmyadmin/"
"phpmyadmin/archive/STABLE.tar.gz",
"/tmp/pma.tar.gz", "phpMyAdmin"]]
if self.app.pargs.phpredisadmin:
Log.debug(self, "Setting packages varible for phpRedisAdmin")
packages = packages + [["https://github.com/ErikDubbelboer/"
"phpRedisAdmin/archive/master.tar.gz",
"/tmp/pra.tar.gz","phpRedisAdmin"],
["https://github.com/nrk/predis/"
"archive/v1.0.1.tar.gz",
"/tmp/predis.tar.gz", "Predis"]]
if self.app.pargs.adminer:
Log.debug(self, "Setting packages variable for Adminer ")
packages = packages + [["https://www.adminer.org/static/download/"
"{0}/adminer-{0}.php"
"".format(EEVariables.ee_adminer),
"{0}22222/"
"htdocs/db/adminer/index.php"
.format(EEVariables.ee_webroot),
"Adminer"]]
if self.app.pargs.mailscanner:
if not EEAptGet.is_installed(self, 'amavisd-new'):
if (EEAptGet.is_installed(self, 'dovecot-core') or
self.app.pargs.mail):
apt_packages = (apt_packages +
EEVariables.ee_mailscanner)
else:
Log.error(self, "Failed to find installed Dovecot")
else:
Log.error(self, "Mail scanner already installed")
if self.app.pargs.utils:
Log.debug(self, "Setting packages variable for utils")
packages = packages + [["https://storage.googleapis.com/google-code-archive-downloads/"
"v2/code.google.com/phpmemcacheadmin/"
"phpMemcachedAdmin-1.2.2-r262.tar.gz", '/tmp/memcache.tar.gz',
'phpMemcachedAdmin'],
["https://raw.githubusercontent.com"
"/rtCamp/eeadmin/master/cache/nginx/"
"clean.php",
"{0}22222/htdocs/cache/"
"nginx/clean.php"
.format(EEVariables.ee_webroot),
"clean.php"],
["https://raw.github.com/rlerdorf/"
"opcache-status/master/opcache.php",
"{0}22222/htdocs/cache/"
"opcache/opcache.php"
.format(EEVariables.ee_webroot),
"opcache.php"],
["https://raw.github.com/amnuts/"
"opcache-gui/master/index.php",
"{0}22222/htdocs/"
"cache/opcache/opgui.php"
.format(EEVariables.ee_webroot),
"Opgui"],
["https://gist.github.com/ck-on/4959032"
"/raw/0b871b345fd6cfcd6d2be030c1f33d1"
"ad6a475cb/ocp.php",
"{0}22222/htdocs/cache/"
"opcache/ocp.php"
.format(EEVariables.ee_webroot),
"OCP.php"],
["https://github.com/jokkedk/webgrind/"
"archive/master.tar.gz",
'/tmp/webgrind.tar.gz', 'Webgrind'],
["http://bazaar.launchpad.net/~"
"percona-toolkit-dev/percona-toolkit/"
"2.1/download/head:/ptquerydigest-"
"20110624220137-or26tn4"
"expb9ul2a-16/pt-query-digest",
"/usr/bin/pt-query-advisor",
"pt-query-advisor"],
["https://github.com/box/Anemometer/"
"archive/master.tar.gz",
'/tmp/anemometer.tar.gz', 'Anemometer']
]
except Exception as e:
Log.debug(self, "Package selection failed: {0}".format(e))
if len(apt_packages) or len(packages):
Log.debug(self, "Calling pre_pref")
self.pre_pref(apt_packages)
if len(apt_packages):
EESwap.add(self)
Log.info(self, "Updating apt-cache, please wait...")
EEAptGet.update(self)
Log.info(self, "Installing packages, please wait...")
EEAptGet.install(self, apt_packages)
if len(packages):
Log.debug(self, "Downloading following: {0}".format(packages))
EEDownload.download(self, packages)
Log.debug(self, "Calling post_pref")
self.post_pref(apt_packages, packages)
if 'redis-server' in apt_packages:
# set redis.conf parameter
# set maxmemory 10% for ram below 512MB and 20% for others
# set maxmemory-policy allkeys-lru
if os.path.isfile("/etc/redis/redis.conf"):
if EEVariables.ee_ram < 512:
Log.debug(self, "Setting maxmemory variable to {0} in redis.conf"
.format(int(EEVariables.ee_ram*1024*1024*0.1)))
EEShellExec.cmd_exec(self, "sed -i 's/# maxmemory <bytes>/maxmemory {0}/' /etc/redis/redis.conf"
.format(int(EEVariables.ee_ram*1024*1024*0.1)))
Log.debug(self, "Setting maxmemory-policy variable to allkeys-lru in redis.conf")
EEShellExec.cmd_exec(self, "sed -i 's/# maxmemory-policy.*/maxmemory-policy allkeys-lru/' "
"/etc/redis/redis.conf")
EEService.restart_service(self, 'redis-server')
else:
Log.debug(self, "Setting maxmemory variable to {0} in redis.conf"
.format(int(EEVariables.ee_ram*1024*1024*0.2)))
EEShellExec.cmd_exec(self, "sed -i 's/# maxmemory <bytes>/maxmemory {0}/' /etc/redis/redis.conf"
.format(int(EEVariables.ee_ram*1024*1024*0.2)))
Log.debug(self, "Setting maxmemory-policy variable to allkeys-lru in redis.conf")
EEShellExec.cmd_exec(self, "sed -i 's/# maxmemory-policy.*/maxmemory-policy allkeys-lru/' "
"/etc/redis/redis.conf")
EEService.restart_service(self, 'redis-server')
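# Worked example (editorial note; RAM value assumed): with
# EEVariables.ee_ram = 2048 (MB), the 20% branch above yields
# maxmemory = int(2048 * 1024 * 1024 * 0.2) = 429496729 bytes (~410 MB),
# so the sed call rewrites '# maxmemory <bytes>' to 'maxmemory 429496729'
# in /etc/redis/redis.conf before redis-server is restarted.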
if disp_msg:
if len(self.msg):
for msg in self.msg:
Log.info(self, Log.ENDC + msg)
Log.info(self, "Successfully installed packages")
else:
return self.msg
@expose(help="Remove packages")
def remove(self):
"""Start removal of packages"""
apt_packages = []
packages = []
if self.app.pargs.pagespeed:
Log.error(self, "Pagespeed | |
msg)
return
try:
func_name = self.instance.get(p[2])
sub_terms = p[3]
p[0] = func_name(*sub_terms)
except tsk.LanguageError as e:
msg = "Error parsing expression, function '{}' is not declared".format(p[1])
raise SemanticError(self.lexer.lineno(), msg)
def p_binary_op(self, p):
'''
binary_op : multi_op
| MINUS
| DIV'''
p[0] = p[1]
def p_multi_op(self, p):
'''
multi_op : PLUS
| TIMES'''
p[0] = p[1]
def p_f_comp(self, p):
'''f_comp : LPAREN binary_comp f_exp f_exp RPAREN'''
if p[2] == '<':
p[0] = p[3] < p[4]
elif p[2] == '<=':
p[0] = p[3] <= p[4]
elif p[2] == '>':
p[0] = p[3] > p[4]
elif p[2] == '>=':
p[0] = p[3] >= p[4]
elif p[2] == '=':
#print('equality: =({}/{}, {}/{})'.format(p[3], type(p[3]), p[4], type(p[4])))
p[0] = p[3] == p[4]
else:
assert False
def p_binary_comp(self, p):
'''
binary_comp : GT
| LT
| EQUA
| GEQ
| LEQ'''
p[0] = p[1]
def p_effect(self, p):
'''
effect : LPAREN rwAND list_of_c_effect RPAREN
| c_effect
'''
if len(p) == 2:
p[0] = [p[1]]
return
p[0] = p[3]
def p_list_of_c_effect(self, p):
'''
list_of_c_effect : c_effect list_of_c_effect
| empty'''
if p[1] is None:
p[0] = []
return
p[0] = [p[1]] + p[2]
def p_c_effect(self, p):
'''
c_effect : LPAREN rwFORALL LPAREN list_of_typed_variables RPAREN effect RPAREN
| LPAREN rwWHEN GD cond_effect RPAREN
| p_effect'''
if len(p) == 2:
p[0] = p[1]
return
if p[2] == self.lexer.symbols.rwFORALL:
msg = "Error parsing effect: universally quantified effects are not supported at the moment"
raise UnsupportedFeature(self.lexer.lineno(), msg)
if p[2] == self.lexer.symbols.rwWHEN:
msg = "Error parsing effect: conditional effects are not supported at the moment"
raise UnsupportedFeature(self.lexer.lineno(), msg)
def p_p_effect(self, p):
'''
p_effect : LPAREN rwNOT atomic_formula_of_term RPAREN
| atomic_formula_of_term
| LPAREN assign_op f_head f_exp RPAREN
| LPAREN rwASSIGN function_term term RPAREN
| LPAREN rwASSIGN function_term rwUNDEFINED RPAREN'''
if len(p) == 2:
atomic_formula = p[1]
p[0] = AssignmentEffectData(lhs=atomic_formula['lhs'], rhs=atomic_formula['rhs'])
return
if p[2] == self.lexer.symbols.rwNOT:
atomic_formula = p[3]
if atomic_formula['boolean'] == False:
msg = "Error parsing action effect: negated literals over non-Boolean functions are not supported"
raise SemanticError(self.lexer.lineno(), msg)
p[0] = AssignmentEffectData(lhs=atomic_formula['lhs'], rhs=0)
return
if p[2] == self.lexer.symbols.rwASSIGN:
if isinstance(p[4], Term):
p[0] = AssignmentEffectData(lhs=p[3], rhs=p[4])
return
if p[4] == self.lexer.symbols.rwUNDEFINED:
msg = "Error parsing action effect: 'undefined' special constant is not supported at the moment"
raise UnsupportedFeature(self.lexer.lineno(), msg)
return
msg = "Error parsing action effect: special assignment operators are not supported at the moment"
raise UnsupportedFeature(self.lexer.lineno(), msg)
def p_cond_effect(self, p):
'''
cond_effect : LPAREN rwAND list_of_p_effect RPAREN
| p_effect'''
if len(p) == 2:
p[0] = [p[1]]
return
p[0] = p[3]
def p_list_of_p_effect(self, p):
'''
list_of_p_effect : p_effect list_of_p_effect
| empty'''
if p[1] is None:
p[0] = []
return
p[0] = [p[1]] + p[2]
def p_assign_op(self, p):
'''
assign_op : rwASSIGN
| rwSCALE_UP
| rwSCALE_DOWN
| rwINCREASE
| rwDECREASE'''
p[0] = p[1]
def p_durative_action_def(self, p):
'''durative_action_def : LPAREN rwDURATIVE_ACTION da_symbol action_parameters da_def_body RPAREN'''
name = p[3]
parameters = p[4]
body_data = p[5]
self.instance.process_action_skeleton(name, parameters, body_data)
# clear up scope
for entry in parameters:
del self.var_dict[entry['var']]
def p_da_symbol(self, p):
'''da_symbol : ID'''
p[0] = p[1]
def p_da_def_body(self, p):
'''da_def_body : rwDURATION duration_constraint rwCONDITION empty_or_da_GD rwEFFECT empty_or_da_effect'''
p[0] = {
'duration': p[2],
'precondition': p[4],
'effect': p[6]
}
def p_empty_or_da_GD(self, p):
'''
empty_or_da_GD : da_GD
| LPAREN RPAREN'''
if len(p) == 3:
p[0] = None
return
if not isinstance(p[1], list):
# MRJ: we do this in order to capture singleton conditions
p[0] = [p[1]]
else:
p[0] = p[1]
def p_empty_or_da_effect(self, p):
'''
empty_or_da_effect : da_effect
| LPAREN RPAREN'''
if len(p) == 3:
p[0] = None
return
if not isinstance(p[1], list):
# MRJ: we do this in order to capture singleton effects
p[0] = [p[1]]
else:
p[0] = p[1]
def p_list_of_da_GD(self, p):
'''
list_of_da_GD : da_GD list_of_da_GD
| empty'''
if p[1] is None:
p[0] = []
return
p[0] = [p[1]] + p[2]
def p_da_GD(self, p):
'''
da_GD : pref_timed_GD
| LPAREN rwAND list_of_da_GD RPAREN
| LPAREN rwFORALL LPAREN list_of_typed_variables RPAREN da_GD RPAREN'''
if len(p) == 2:
p[0] = p[1]
return
if p[2] == self.lexer.symbols.rwAND:
# MRJ: Note that we do not attempt to convert the preconditions of durative actions into a
# conjunctive formula just yet
p[0] = p[3]
return
msg = "Error parsing precondition of durative action: universally quantified preconditions are not supported at the moment"
raise UnsupportedFeature(self.lexer.lineno(), msg)
def p_pref_timed_GD(self, p):
'''
pref_timed_GD : timed_GD
| LPAREN rwPREFERENCE timed_GD RPAREN
| LPAREN rwPREFERENCE pref_name timed_GD RPAREN'''
if len(p) > 2:
assert p[2] == self.lexer.symbols.rwPREFERENCE
msg = "Error parsing precondition of durative action: preferences are not supported"
raise UnsupportedFeature(self.lexer.lineno(), msg)
p[0] = p[1]
def p_timed_GD(self, p):
'''
timed_GD : LPAREN rwAT time_specifier GD RPAREN
| LPAREN rwOVER interval GD RPAREN'''
if p[2] == self.lexer.symbols.rwAT:
p[0] = {
'type': 'instant',
'offset': p[3],
'formula': p[4]
}
return
p[0] = {
'type': 'interval',
'offset': None,
'formula': p[4]
}
def p_time_specifier(self, p):
'''
time_specifier : rwSTART
| rwEND'''
p[0] = p[1]
def p_interval(self, p):
'''interval : rwALL'''
p[0] = p[1]
def p_duration_constraint(self, p):
'''
duration_constraint : LPAREN rwAND list_of_simple_duration_constraint RPAREN
| LPAREN RPAREN
| simple_duration_constraint'''
if len(p) == 2:
p[0] = p[1]
return
if len(p) == 3:
return
msg = "Error parsing durative action: complex duration constraints are not supported"
raise UnsupportedFeature(self.lexer.lineno(), msg)
def p_list_of_simple_duration_constraint(self, p):
'''
list_of_simple_duration_constraint : simple_duration_constraint list_of_simple_duration_constraint
| empty'''
if p[1] is None:
p[0] = []
return
p[0] = [p[1]] + p[2]
def p_simple_duration_constraint(self, p):
'''
simple_duration_constraint : LPAREN d_op VAR d_value RPAREN
| LPAREN rwAT time_specifier simple_duration_constraint RPAREN'''
# Note that the VAR in the first rule needs to be ?duration
if len(p) > 6:
msg = "Error parsing duration of durative action: instant-specific duration constraints are not supported"
raise UnsupportedFeature(self.lexer.lineno(), msg)
if p[2] != '=':
msg = "Error parsing duration of durative action: inequality expressions in duration constraints are not supported"
raise UnsupportedFeature(self.lexer.lineno(), msg)
variable = p[3]
if variable != '?duration':
msg = "Error parsing duration of durative action: found variable '{}' rather than '?duration'".format(variable)
raise ParseError(self.lexer.lineno(), msg)
p[0] = p[4]
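# Editorial example: the only duration form accepted by the rule above is an
# exact equality on ?duration, e.g. ':duration (= ?duration 10)', which comes
# through the first production with d_op '=', VAR '?duration' and d_value 10.
# Inequalities such as (<= ?duration 10) and instant-qualified forms such as
# (at start (= ?duration 10)) raise UnsupportedFeature instead.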
def p_d_op(self, p):
'''
d_op : LEQ
| GEQ
| EQUA'''
p[0] = p[1]
def p_d_value(self, p):
'''
d_value : f_exp'''
p[0] = p[1]
def p_da_effect(self, p):
'''
da_effect : LPAREN rwAND list_of_da_effect RPAREN
| timed_effect
| LPAREN rwFORALL LPAREN list_of_typed_variables RPAREN da_effect RPAREN
| LPAREN rwWHEN da_GD timed_effect RPAREN'''
if len(p) == 2:
p[0] = p[1]
return
if p[2] == self.lexer.symbols.rwAND:
p[0] = p[3]
return
if p[2] == self.lexer.symbols.rwFORALL:
msg = "Error parsing effect of durative action: universally quantified effects are not supported at the moment"
raise UnsupportedFeature(self.lexer.lineno(), msg)
if p[2] == self.lexer.symbols.rwWHEN:
msg = "Error parsing effect of durative action: conditional effects are not supported at the moment"
raise UnsupportedFeature(self.lexer.lineno(), msg)
def p_list_of_da_effect(self, p):
'''
list_of_da_effect : da_effect list_of_da_effect
| empty'''
if p[1] is None:
p[0] = []
return
p[0] = [p[1]] + p[2]
def p_timed_effect(self, p):
'''
timed_effect : LPAREN rwAT time_specifier cond_effect RPAREN
| LPAREN rwAT time_specifier f_assign_da RPAREN
| LPAREN assign_op_t f_head f_exp_t RPAREN'''
if p[2] == self.lexer.symbols.rwAT:
p[0] = {
'type': 'timed_effect',
'instant': p[3],
'effect': p[4]
}
return
msg = "Error parsing effects of durative actions: extended assignment operators are not supported"
raise UnsupportedFeature(self.lexer.lineno(), msg)
def p_f_assign_da(self, p):
'''f_assign_da : LPAREN assign_op f_head f_exp_da RPAREN'''
if p[2] != self.lexer.symbols.rwASSIGN:
msg = "Error parsing effects of durative actions: assignment operators other than 'assign' are not supported at the moment"
raise UnsupportedFeature(self.lexer.lineno(), msg)
p[0] = AssignmentEffectData(lhs=p[3], rhs=p[4])
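# Editorial example: a timed effect such as (at end (assign (counter ?x) 0))
# is accepted by p_timed_effect above and becomes
# {'type': 'timed_effect', 'instant': 'end', 'effect': AssignmentEffectData(...)},
# whereas increase/decrease assignments and continuous (assign_op_t) effects
# raise UnsupportedFeature.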
def p_f_exp_da(self, p):
'''
f_exp_da : LPAREN assign_op f_head f_exp_da RPAREN
| LPAREN binary_op f_exp_da f_exp_da RPAREN
| LPAREN multi_op f_exp_da list_of_f_exp_da RPAREN
| LPAREN MINUS f_exp_da RPAREN
| VAR
| f_exp'''
# NOTE: the VAR above must be ?duration
msg = "Error parsing effects of durative actions: arithmetic expressions are not supported at the moment"
raise SemanticError(self.lexer.lineno(), msg)
def p_list_of_exp_da(self, p):
'''
list_of_f_exp_da : f_exp_da list_of_f_exp_da
| empty'''
if p[1] is None:
p[0] = []
return
p[0] = [p[1]] + p[2]
def p_assign_op_t(self, p):
'''
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
def test_list_intents_flattened_error():
client = IntentsClient(credentials=credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_intents(
intent.ListIntentsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_intents_flattened_async():
client = IntentsAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client._client._transport.list_intents), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = intent.ListIntentsResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
intent.ListIntentsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_intents(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_intents_flattened_error_async():
client = IntentsAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_intents(
intent.ListIntentsRequest(), parent="parent_value",
)
def test_list_intents_pager():
client = IntentsClient(credentials=credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client._transport.list_intents), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
intent.ListIntentsResponse(
intents=[intent.Intent(), intent.Intent(), intent.Intent(),],
next_page_token="abc",
),
intent.ListIntentsResponse(intents=[], next_page_token="def",),
intent.ListIntentsResponse(
intents=[intent.Intent(),], next_page_token="ghi",
),
intent.ListIntentsResponse(intents=[intent.Intent(), intent.Intent(),],),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_intents(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, intent.Intent) for i in results)
def test_list_intents_pages():
client = IntentsClient(credentials=credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client._transport.list_intents), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
intent.ListIntentsResponse(
intents=[intent.Intent(), intent.Intent(), intent.Intent(),],
next_page_token="abc",
),
intent.ListIntentsResponse(intents=[], next_page_token="def",),
intent.ListIntentsResponse(
intents=[intent.Intent(),], next_page_token="ghi",
),
intent.ListIntentsResponse(intents=[intent.Intent(), intent.Intent(),],),
RuntimeError,
)
pages = list(client.list_intents(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_intents_async_pager():
client = IntentsAsyncClient(credentials=credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client._client._transport.list_intents),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
intent.ListIntentsResponse(
intents=[intent.Intent(), intent.Intent(), intent.Intent(),],
next_page_token="abc",
),
intent.ListIntentsResponse(intents=[], next_page_token="def",),
intent.ListIntentsResponse(
intents=[intent.Intent(),], next_page_token="ghi",
),
intent.ListIntentsResponse(intents=[intent.Intent(), intent.Intent(),],),
RuntimeError,
)
async_pager = await client.list_intents(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, intent.Intent) for i in responses)
@pytest.mark.asyncio
async def test_list_intents_async_pages():
client = IntentsAsyncClient(credentials=credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client._client._transport.list_intents),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
intent.ListIntentsResponse(
intents=[intent.Intent(), intent.Intent(), intent.Intent(),],
next_page_token="abc",
),
intent.ListIntentsResponse(intents=[], next_page_token="def",),
intent.ListIntentsResponse(
intents=[intent.Intent(),], next_page_token="ghi",
),
intent.ListIntentsResponse(intents=[intent.Intent(), intent.Intent(),],),
RuntimeError,
)
pages = []
async for page_ in (await client.list_intents(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
def test_get_intent(transport: str = "grpc", request_type=intent.GetIntentRequest):
client = IntentsClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client._transport.get_intent), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = intent.Intent(
name="name_value",
display_name="display_name_value",
priority=898,
is_fallback=True,
)
response = client.get_intent(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == intent.GetIntentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, intent.Intent)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.priority == 898
assert response.is_fallback is True
def test_get_intent_from_dict():
test_get_intent(request_type=dict)
@pytest.mark.asyncio
async def test_get_intent_async(transport: str = "grpc_asyncio"):
client = IntentsAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = intent.GetIntentRequest()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client._client._transport.get_intent), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
intent.Intent(
name="name_value",
display_name="display_name_value",
priority=898,
is_fallback=True,
)
)
response = await client.get_intent(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the response is the type that we expect.
assert isinstance(response, intent.Intent)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.priority == 898
assert response.is_fallback is True
def test_get_intent_field_headers():
client = IntentsClient(credentials=credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = intent.GetIntentRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client._transport.get_intent), "__call__") as call:
call.return_value = intent.Intent()
client.get_intent(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_intent_field_headers_async():
client = IntentsAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = intent.GetIntentRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client._client._transport.get_intent), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(intent.Intent())
await client.get_intent(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_intent_flattened():
client = IntentsClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client._transport.get_intent), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = intent.Intent()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_intent(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
def test_get_intent_flattened_error():
client = IntentsClient(credentials=credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_intent(
intent.GetIntentRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_intent_flattened_async():
client = IntentsAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client._client._transport.get_intent), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = intent.Intent()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(intent.Intent())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_intent(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_intent_flattened_error_async():
client = IntentsAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_intent(
intent.GetIntentRequest(), name="name_value",
)
def test_create_intent(
transport: str = "grpc", request_type=gcdc_intent.CreateIntentRequest
):
client = IntentsClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client._transport.create_intent), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gcdc_intent.Intent(
name="name_value",
display_name="display_name_value",
priority=898,
is_fallback=True,
)
response = client.create_intent(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
in rl["authors"] if int(a["id"]) in author_ids_set]
except:
continue
try:
for ai in author_ids:
citations = rl["n_citation"] + 1
fos = [f['name'] for f in rl['fos']]
fos_with_citations = [(f, citations) for f in fos]
all_tags_per_author[ai].extend(fos_with_citations)
except:
continue
del json_lines
del rel_lines
del lines
gc.collect()
print("Authors found up till now:", len(all_tags_per_author))
print("Done with 14.")
with open('15.json', 'rb') as fin:
lines = fin.readlines()
json_lines = [ll for ll in [load(l) for l in lines] if ll]
rel_lines = [l for l in json_lines if l["id"] in all_ids]
for rl in rel_lines:
try:
author_ids = [a["id"] for a in rl["authors"] if int(a["id"]) in author_ids_set]
except:
continue
try:
for ai in author_ids:
citations = rl["n_citation"] + 1
fos = [f['name'] for f in rl['fos']]
fos_with_citations = [(f, citations) for f in fos]
all_tags_per_author[ai].extend(fos_with_citations)
except:
continue
del json_lines
del rel_lines
del lines
gc.collect()
print("Authors found up till now:", len(all_tags_per_author))
print("Done with 15.")
with open('16.json', 'rb') as fin:
lines = fin.readlines()
json_lines = [ll for ll in [load(l) for l in lines] if ll]
rel_lines = [l for l in json_lines if l["id"] in all_ids]
for rl in rel_lines:
try:
author_ids = [a["id"] for a in rl["authors"] if int(a["id"]) in author_ids_set]
except:
continue
try:
for ai in author_ids:
citations = rl["n_citation"] + 1
fos = [f['name'] for f in rl['fos']]
fos_with_citations = [(f, citations) for f in fos]
all_tags_per_author[ai].extend(fos_with_citations)
except:
continue
del json_lines
del rel_lines
del lines
gc.collect()
print("Authors found up till now:", len(all_tags_per_author))
print("Done with 16.")
with open('17.json', 'rb') as fin:
lines = fin.readlines()
json_lines = [ll for ll in [load(l) for l in lines] if ll]
rel_lines = [l for l in json_lines if l["id"] in all_ids]
for rl in rel_lines:
try:
author_ids = [a["id"] for a in rl["authors"] if int(a["id"]) in author_ids_set]
except:
continue
try:
for ai in author_ids:
citations = rl["n_citation"] + 1
fos = [f['name'] for f in rl['fos']]
fos_with_citations = [(f, citations) for f in fos]
all_tags_per_author[ai].extend(fos_with_citations)
except:
continue
del json_lines
del rel_lines
del lines
gc.collect()
print("Authors found up till now:", len(all_tags_per_author))
print("Done with 17.")
with open('18.json', 'rb') as fin:
lines = fin.readlines()
json_lines = [ll for ll in [load(l) for l in lines] if ll]
rel_lines = [l for l in json_lines if l["id"] in all_ids]
for rl in rel_lines:
try:
author_ids = [a["id"] for a in rl["authors"] if int(a["id"]) in author_ids_set]
except:
continue
try:
for ai in author_ids:
citations = rl["n_citation"] + 1
fos = [f['name'] for f in rl['fos']]
fos_with_citations = [(f, citations) for f in fos]
all_tags_per_author[ai].extend(fos_with_citations)
except:
continue
del json_lines
del rel_lines
del lines
gc.collect()
print("Authors found up till now:", len(all_tags_per_author))
print("Done with 18.")
with open('19.json', 'rb') as fin:
lines = fin.readlines()
json_lines = [ll for ll in [load(l) for l in lines] if ll]
rel_lines = [l for l in json_lines if l["id"] in all_ids]
for rl in rel_lines:
try:
author_ids = [a["id"] for a in rl["authors"] if int(a["id"]) in author_ids_set]
except:
continue
try:
for ai in author_ids:
citations = rl["n_citation"] + 1
fos = [f['name'] for f in rl['fos']]
fos_with_citations = [(f, citations) for f in fos]
all_tags_per_author[ai].extend(fos_with_citations)
except:
continue
del json_lines
del rel_lines
del lines
gc.collect()
print("Authors found up till now:", len(all_tags_per_author))
print("Done with 19.")
with open('20.json', 'rb') as fin:
lines = fin.readlines()
json_lines = [ll for ll in [load(l) for l in lines] if ll]
rel_lines = [l for l in json_lines if l["id"] in all_ids]
for rl in rel_lines:
try:
author_ids = [a["id"] for a in rl["authors"] if int(a["id"]) in author_ids_set]
except:
continue
try:
for ai in author_ids:
citations = rl["n_citation"] + 1
fos = [f['name'] for f in rl['fos']]
fos_with_citations = [(f, citations) for f in fos]
all_tags_per_author[ai].extend(fos_with_citations)
except:
continue
del json_lines
del rel_lines
del lines
gc.collect()
print("Authors found up till now:", len(all_tags_per_author))
print("Done with 20.")
with open('21.json', 'rb') as fin:
lines = fin.readlines()
json_lines = [ll for ll in [load(l) for l in lines] if ll]
rel_lines = [l for l in json_lines if l["id"] in all_ids]
for rl in rel_lines:
try:
author_ids = [a["id"] for a in rl["authors"] if int(a["id"]) in author_ids_set]
except:
continue
try:
for ai in author_ids:
citations = rl["n_citation"] + 1
fos = [f['name'] for f in rl['fos']]
fos_with_citations = [(f, citations) for f in fos]
all_tags_per_author[ai].extend(fos_with_citations)
except:
continue
del json_lines
del rel_lines
del lines
gc.collect()
print("Authors found up till now:", len(all_tags_per_author))
print("Done with 21.")
with open('22.json', 'rb') as fin:
lines = fin.readlines()
json_lines = [ll for ll in [load(l) for l in lines] if ll]
rel_lines = [l for l in json_lines if l["id"] in all_ids]
for rl in rel_lines:
try:
author_ids = [a["id"] for a in rl["authors"] if int(a["id"]) in author_ids_set]
except:
continue
try:
for ai in author_ids:
citations = rl["n_citation"] + 1
fos = [f['name'] for f in rl['fos']]
fos_with_citations = [(f, citations) for f in fos]
all_tags_per_author[ai].extend(fos_with_citations)
except:
continue
del json_lines
del rel_lines
del lines
gc.collect()
print("Authors found up till now:", len(all_tags_per_author))
print("Done with 22.")
with open('23.json', 'rb') as fin:
lines = fin.readlines()
json_lines = [ll for ll in [load(l) for l in lines] if ll]
rel_lines = [l for l in json_lines if l["id"] in all_ids]
for rl in rel_lines:
try:
author_ids = [a["id"] for a in rl["authors"] if int(a["id"]) in author_ids_set]
except:
continue
try:
for ai in author_ids:
citations = rl["n_citation"] + 1
fos = [f['name'] for f in rl['fos']]
fos_with_citations = [(f, citations) for f in fos]
all_tags_per_author[ai].extend(fos_with_citations)
except:
continue
del json_lines
del rel_lines
del lines
gc.collect()
print("Authors found up till now:", len(all_tags_per_author))
print("Done with 23.")
with open('24.json', 'rb') as fin:
lines = fin.readlines()
json_lines = [ll for ll in [load(l) for l in lines] if ll]
rel_lines = [l for l in json_lines if l["id"] in all_ids]
for rl in rel_lines:
try:
author_ids = [a["id"] for a in rl["authors"] if int(a["id"]) in author_ids_set]
except:
continue
try:
for ai in author_ids:
citations = rl["n_citation"] + 1
fos = [f['name'] for f in rl['fos']]
fos_with_citations = [(f, citations) for f in fos]
all_tags_per_author[ai].extend(fos_with_citations)
except:
continue
del json_lines
del rel_lines
del lines
gc.collect()
print("Authors found up till now:", len(all_tags_per_author))
print("Done with 24.")
with open('25.json', 'rb') as fin:
lines = fin.readlines()
json_lines = [ll for ll in [load(l) for l in lines] if ll]
rel_lines = [l for l in json_lines if l["id"] in all_ids]
for rl in rel_lines:
try:
author_ids = [a["id"] for a in rl["authors"] if int(a["id"]) in author_ids_set]
except:
continue
try:
for ai in author_ids:
citations = rl["n_citation"] + 1
fos = [f['name'] for f in rl['fos']]
fos_with_citations = [(f, citations) for f in fos]
all_tags_per_author[ai].extend(fos_with_citations)
except:
continue
del json_lines
del rel_lines
del lines
gc.collect()
print("Authors found up till now:", len(all_tags_per_author))
print("Done with 25.")
with open('26.json', 'rb') as fin:
lines = fin.readlines()
json_lines = [ll for ll in [load(l) for l in lines] if ll]
rel_lines = [l for l in json_lines if l["id"] in all_ids]
for rl in rel_lines:
try:
author_ids = [a["id"] for a in rl["authors"] if int(a["id"]) in author_ids_set]
except:
continue
try:
for ai in author_ids:
citations = rl["n_citation"] + 1
fos = [f['name'] for f in rl['fos']]
fos_with_citations = [(f, citations) for f in fos]
all_tags_per_author[ai].extend(fos_with_citations)
except:
continue
del json_lines
del rel_lines
del lines
gc.collect()
print("Authors found up till now:", len(all_tags_per_author))
print("Done with 26.")
with open('27.json', 'rb') as fin:
lines = fin.readlines()
json_lines = [ll for ll in [load(l) for l in lines] if ll]
rel_lines = [l for l in json_lines if l["id"] in all_ids]
for rl in rel_lines:
try:
author_ids = [a["id"] for a in rl["authors"] if int(a["id"]) in author_ids_set]
except:
continue
try:
for ai in author_ids:
citations = rl["n_citation"] + 1
fos = [f['name'] for f in rl['fos']]
fos_with_citations = [(f, citations) for f in fos]
all_tags_per_author[ai].extend(fos_with_citations)
except:
continue
del json_lines
del rel_lines
del lines
gc.collect()
print("Authors found up till now:", len(all_tags_per_author))
print("Done with 27.")
with open('28.json', 'rb') as fin:
lines = fin.readlines()
json_lines = [ll for ll in [load(l) for l in lines] if ll]
rel_lines = [l for l in json_lines if l["id"] in all_ids]
for rl in rel_lines:
try:
author_ids = [a["id"] for a in rl["authors"] if int(a["id"]) in author_ids_set]
except:
continue
try:
for ai in author_ids:
citations = rl["n_citation"] + 1
fos = [f['name'] for f in rl['fos']]
fos_with_citations = [(f, citations) for f in fos]
all_tags_per_author[ai].extend(fos_with_citations)
except:
continue
del json_lines
del rel_lines
del lines
gc.collect()
print("Authors found up till now:", len(all_tags_per_author))
print("Done with 28.")
with open('29.json', 'rb') as fin:
lines = fin.readlines()
json_lines = [ll for ll in [load(l) for l in lines] if ll]
rel_lines = [l for l in json_lines if l["id"] in all_ids]
for rl in rel_lines:
try:
author_ids = [a["id"] for a in rl["authors"] if int(a["id"]) in author_ids_set]
except:
continue
try:
for ai in author_ids:
citations = rl["n_citation"] + 1
            fos = [f['name'] for f in rl['fos']]
            fos_with_citations = [(f, citations) for f in fos]
            all_tags_per_author[ai].extend(fos_with_citations)
    except:
        continue
del json_lines
del rel_lines
del lines
gc.collect()
print("Authors found up till now:", len(all_tags_per_author))
print("Done with 29.")
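# Note: the per-shard block above is repeated verbatim for each numbered file.
# A hedged refactoring sketch -- assuming the same `load`, `gc`, `all_ids`,
# `author_ids_set` and `all_tags_per_author` objects defined earlier in this
# script -- that handles all shards in one loop:
#
# for shard in ['%d.json' % n for n in range(18, 30)]:
#     with open(shard, 'rb') as fin:
#         lines = fin.readlines()
#     json_lines = [ll for ll in [load(l) for l in lines] if ll]
#     rel_lines = [l for l in json_lines if l["id"] in all_ids]
#     for rl in rel_lines:
#         try:
#             author_ids = [a["id"] for a in rl["authors"]
#                           if int(a["id"]) in author_ids_set]
#             for ai in author_ids:
#                 citations = rl["n_citation"] + 1
#                 fos_with_citations = [(f['name'], citations) for f in rl['fos']]
#                 all_tags_per_author[ai].extend(fos_with_citations)
#         except Exception:
#             continue
#     del json_lines, rel_lines, lines
#     gc.collect()
#     print("Authors found up till now:", len(all_tags_per_author))
#     print("Done with", shard)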
# Lakeshore 370, Lakeshore 370 temperature controller driver
# <NAME> <<EMAIL>>, 2014
# Based on Lakeshore 340 driver by <NAME> <<EMAIL>>, 2010.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from instrument import Instrument
import visa
import types
import logging
import re
import math
import time
import numpy as np
import qt
import os
import random
import hashlib
from lib.config import get_config
config = get_config()
class Lakeshore_370(Instrument):
def __init__(self, name, address, reset=False, **kwargs):
Instrument.__init__(self, name)
self._address = address
self._visainstrument = visa.ResourceManager().open_resource(self._address,
timeout=5000.) # milliseconds
try:
if address.lower().startswith('asrl'):
# These are for an RS-232 connection, values found in the LS manual
self._visainstrument.parity = visa.constants.Parity.odd
self._visainstrument.data_bits = 7
self._visainstrument.stop_bits = visa.constants.StopBits.one
self._visainstrument.read_termination = '\r\n'
self._visainstrument.write_termination = '\n'
self._channels = kwargs.get('channels', (1, 2, 5, 6))
self._logger = kwargs.get('logger', None)
self.add_parameter('identification',
flags=Instrument.FLAG_GET)
self.add_parameter('common_mode_reduction',
flags=Instrument.FLAG_GET,
type=types.BooleanType)
self.add_parameter('guard_drive',
flags=Instrument.FLAG_GET,
type=types.BooleanType)
self.add_parameter('scanner_auto',
flags=Instrument.FLAG_GETSET,
type=types.BooleanType)
self.add_parameter('scanner_channel',
flags=Instrument.FLAG_GETSET,
type=types.IntType,
format_map=dict(zip(self._channels,self._channels)))
self.add_parameter('kelvin',
flags=Instrument.FLAG_GET,
type=types.FloatType,
channels=self._channels,
units='K')
self.add_parameter('resistance',
flags=Instrument.FLAG_GET,
type=types.FloatType,
channels=self._channels,
units='Ohm')
self.add_parameter('resistance_range',
flags=Instrument.FLAG_GET,
type=types.StringType,
channels=self._channels,
format_map={
1: '2 mOhm',
2: '6.32 mOhm',
3: '20 mOhm',
4: '63.2 mOhm',
5: '200 mOhm',
6: '632 mOhm',
7: '2 Ohm',
8: '6.32 Ohm',
9: '20 Ohm',
10: '63.2 Ohm',
11: '200 Ohm',
12: '632 Ohm',
13: '2 kOhm',
14: '6.32 kOhm',
15: '20 kOhm',
16: '63.2 kOhm',
17: '200 kOhm',
18: '632 kOhm',
19: '2 MOhm',
20: '6.32 MOhm',
21: '20 MOhm',
22: '63.2 MOhm'
})
self.add_parameter('excitation_mode',
flags=Instrument.FLAG_GET,
type=types.IntType,
channels=self._channels,
format_map={
0: 'voltage',
1: 'current'
})
self.add_parameter('excitation_on',
flags=Instrument.FLAG_GET,
type=types.BooleanType,
channels=self._channels)
self.add_parameter('excitation_range',
flags=Instrument.FLAG_GETSET,
type=types.StringType,
channels=self._channels,
format_map={
1: '2 uV or 1 pA',
2: '6.32 uV or 3.16 pA',
3: '20 uV or 10 pA',
4: '63.2 uV or 31.6 pA',
5: '200 uV or 100 pA',
6: '632 uV or 316 pA',
7: '2 mV or 1 nA',
8: '6.32 mV or 3.16 nA',
9: '20 mV or 10 nA',
10: '63.2 mV or 31.6 nA',
11: '200 mV or 100 nA',
                12: '632 mV or 316 nA',
13: '1 uA',
14: '3.16 uA',
15: '10 uA',
16: '31.6 uA',
17: '100 uA',
18: '316 uA',
19: '1 mA',
                20: '3.16 mA',
21: '10 mA',
22: '31.6 mA'
})
self.add_parameter('autorange',
flags=Instrument.FLAG_GET,
type=types.BooleanType,
channels=self._channels)
self.add_parameter('scanner_dwell_time',
flags=Instrument.FLAG_GET,
type=types.FloatType,
units='s',
channels=self._channels)
self.add_parameter('scanner_pause_time',
flags=Instrument.FLAG_GET,
type=types.FloatType,
units='s',
channels=self._channels)
self.add_parameter('filter_on',
flags=Instrument.FLAG_GET,
type=types.BooleanType,
channels=self._channels)
self.add_parameter('filter_settle_time',
flags=Instrument.FLAG_GETSET,
type=types.FloatType,
units='s',
channels=self._channels)
self.add_parameter('filter_reset_threshold',
flags=Instrument.FLAG_GET,
type=types.FloatType,
units='%',
channels=self._channels)
self._heater_ranges = {
0: 'off',
1: '31.6 uA',
2: '100 uA',
3: '316 uA',
4: '1 mA',
5: '3.16 mA',
6: '10 mA',
7: '31.6 mA',
8: '100 mA' }
self.add_parameter('heater_range',
flags=Instrument.FLAG_GETSET,
type=types.IntType,
format_map=self._heater_ranges)
self.add_parameter('heater_power',
flags=Instrument.FLAG_GET,
type=types.FloatType,
units='W or %')
self.add_parameter('heater_status',
flags=Instrument.FLAG_GET,
type=types.IntType,
format_map={
0: 'OK',
1: 'heater open error'
})
self.add_parameter('mode',
flags=Instrument.FLAG_GETSET,
type=types.IntType,
format_map={0: 'Local', 1: 'Remote', 2: 'Remote, local lock'})
self.add_parameter('temperature_control_mode',
flags=Instrument.FLAG_GETSET,
type=types.IntType,
format_map={
1: 'closed loop PID',
2: 'Zone tuning',
3: 'open loop',
4: 'off'
})
self.add_parameter('temperature_control_pid',
flags=Instrument.FLAG_GETSET,
type=types.TupleType)
self.add_parameter('temperature_control_setpoint',
flags=Instrument.FLAG_GETSET,
type=types.FloatType)
self.add_parameter('temperature_control_channel',
flags=Instrument.FLAG_GET,
type=types.IntType,
format_map=dict(zip(self._channels,self._channels)))
self.add_parameter('temperature_control_use_filtered_reading',
flags=Instrument.FLAG_GET,
type=types.BooleanType)
self.add_parameter('temperature_control_setpoint_units',
flags=Instrument.FLAG_GET,
type=types.IntType,
format_map={1: 'K', 2: 'Ohm'})
self.add_parameter('temperature_control_heater_max_range',
flags=Instrument.FLAG_GET,
type=types.IntType,
format_map=self._heater_ranges)
self.add_parameter('temperature_control_heater_load_resistance',
flags=Instrument.FLAG_GET,
type=types.FloatType,
units='Ohm')
self.add_parameter('autoupdate_interval',
flags=Instrument.FLAG_GETSET,
type=types.IntType,
units='s')
self.add_parameter('still_heater',
flags=Instrument.FLAG_GETSET|Instrument.FLAG_GET_AFTER_SET,
type=types.FloatType,
minval=0, maxval=100,
units='%')
self.add_parameter('autoupdate_while_measuring',
flags=Instrument.FLAG_GETSET|Instrument.FLAG_PERSIST,
type=types.BooleanType)
if self.get_autoupdate_while_measuring() == None: self.update_value('autoupdate_while_measuring', False)
self.add_function('local')
self.add_function('remote')
### Auto-updating (useful mostly if you are also logging temperatures) ####
self._autoupdater_handle = "lakeshore_autoupdater_%s" % (hashlib.md5(address).hexdigest()[:8])
self.set_autoupdate_interval(kwargs.get('autoupdate_interval', 60. if self._logger != None else -1)) # seconds
if reset:
self.reset()
else:
self.get_all()
except:
self._visainstrument.close()
raise
def reset(self):
self.__write('*RST')
qt.msleep(.5)
def get_all(self):
self.get_identification()
self.get_mode()
self.get_scanner_auto()
self.get_scanner_channel()
self.get_temperature_control_mode()
self.get_temperature_control_pid()
self.get_temperature_control_setpoint()
self.get_temperature_control_setpoint_units()
self.get_temperature_control_channel()
self.get_temperature_control_use_filtered_reading()
self.get_temperature_control_heater_max_range()
self.get_temperature_control_heater_load_resistance()
self.get_still_heater()
self.get_heater_range()
self.get_heater_status()
self.get_heater_power()
self.get_common_mode_reduction()
self.get_guard_drive()
for ch in self._channels:
getattr(self, 'get_kelvin%s' % ch)()
getattr(self, 'get_resistance%s' % ch)()
getattr(self, 'get_resistance_range%s' % ch)()
getattr(self, 'get_excitation_on%s' % ch)()
getattr(self, 'get_excitation_mode%s' % ch)()
getattr(self, 'get_excitation_range%s' % ch)()
getattr(self, 'get_autorange%s' % ch)()
getattr(self, 'get_scanner_dwell_time%s' % ch)()
getattr(self, 'get_scanner_pause_time%s' % ch)()
getattr(self, 'get_filter_on%s' % ch)()
getattr(self, 'get_filter_settle_time%s' % ch)()
getattr(self, 'get_filter_reset_threshold%s' % ch)()
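    # Hedged usage sketch (qtlab-style instrument creation; the instrument name,
    # GPIB address and channel list below are illustrative, not taken from this file):
    #   ls = qt.instruments.create('lakeshore', 'Lakeshore_370',
    #                              address='GPIB::12::INSTR', channels=(1, 2, 5, 6))
    #   ls.get_kelvin1()         # channel 1 temperature in K
    #   ls.set_heater_range(4)   # select the '1 mA' heater range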
def __ask(self, msg):
max_attempts = 5
for attempt in range(max_attempts):
try:
m = self._visainstrument.ask("%s" % msg).replace('\r','')
qt.msleep(.01)
break
except Exception as e:
if attempt >= 0: logging.exception('Attempt #%d to communicate with LakeShore failed.', 1+attempt)
if attempt < max_attempts-1 and not e.message.strip().lower().startswith('human abort'):
qt.msleep((1+attempt)**2 * (0.1 + random.random()))
else:
raise
return m
def __write(self, msg):
max_attempts = 5
for attempt in range(max_attempts):
try:
self._visainstrument.write("%s" % msg)
qt.msleep(.5)
break
            except Exception as e:
if attempt > 0: logging.exception('Attempt #%d to communicate with LakeShore failed.', 1+attempt)
if attempt < max_attempts-1 and not e.message.strip().lower().startswith('human abort'):
qt.msleep((1+attempt)**2 * (0.1 + random.random()))
else:
raise
def __query_auto_updated_quantities(self):
if self not in qt.instruments.get_instruments().values():
logging.debug('Old timer for Lakeshore auto-update. Terminating thread...')
return False # stop the timer
if not (self._autoupdate_interval != None and self._autoupdate_interval > 0):
logging.debug('Auto-update interval not > 0. Terminating thread...')
return False # stop the timer
if (not self.get_autoupdate_while_measuring()) and qt.flow.is_measuring():
return True # don't interfere with the measurement
try:
ch = self.do_get_scanner_channel()
logging.debug('Auto-updating temperature reading (channel %s)...' % ch)
getattr(self, 'get_kelvin%s' % ch)()
getattr(self, 'get_resistance%s' % ch)()
except Exception as e:
logging.debug('Failed to auto-update temperature/resistance: %s' % (str(e)))
return True # keep calling back
def do_get_autoupdate_interval(self):
return self._autoupdate_interval
def do_set_autoupdate_interval(self, val):
self._autoupdate_interval = val
from qtflow import get_flowcontrol
get_flowcontrol().remove_callback(self._autoupdater_handle, warn_if_nonexistent=False)
if self._autoupdate_interval != None and self._autoupdate_interval > 0:
if self._logger == None:
logging.warn('You have enabled auto-updating, but not log file writing, which is a bit odd.')
get_flowcontrol().register_callback(int(np.ceil(1e3 * self._autoupdate_interval)),
self.__query_auto_updated_quantities,
handle=self._autoupdater_handle)
def do_get_autoupdate_while_measuring(self):
return self.get('autoupdate_while_measuring', query=False)
def do_set_autoupdate_while_measuring(self, v):
self.update_value('autoupdate_while_measuring', bool(v))
def do_get_identification(self):
return self.__ask('*IDN?')
def do_get_common_mode_reduction(self):
ans = self.__ask('CMR?')
return bool(int(ans))
def do_get_guard_drive(self):
ans = self.__ask('GUARD?')
return bool(int(ans))
def do_get_scanner_auto(self):
ans = self.__ask('SCAN?')
return bool(int(ans.split(',')[1]))
def do_set_scanner_auto(self, val):
ch = self.get_scanner_channel()
cmd = 'SCAN %d,%d' % (ch, 1 if val else 0)
self.__write(cmd)
self.get_scanner_auto()
self.get_scanner_channel()
def do_get_scanner_channel(self):
ans = self.__ask('SCAN?')
return int(ans.split(',')[0])
def do_set_scanner_channel(self, val):
auto = self.get_scanner_auto()
cmd = 'SCAN %d,%d' % (val, 1 if auto else 0)
self.__write(cmd)
self.get_scanner_auto()
self.get_scanner_channel()
def do_get_kelvin(self, channel):
ans = float(self.__ask('RDGK? %s' % channel))
if self._logger != None:
try: self._logger('kelvin', channel, ans)
except: logging.exception('Could not log kelvin%s', channel)
return ans
def do_get_resistance(self, channel):
ans = float(self.__ask('RDGR? %s' % channel))
if self._logger != None:
try: self._logger('resistance', channel, ans)
except: logging.exception('Could not log resistance%s', channel)
return ans
def do_get_resistance_range(self, channel):
ans = self.__ask('RDGRNG? %s' % channel)
return int(ans.split(',')[2])
def do_get_excitation_mode(self, channel):
ans = self.__ask('RDGRNG? %s' % channel)
return int(ans.split(',')[0])
def do_get_excitation_range(self, channel):
ans = self.__ask('RDGRNG? %s' % channel)
return int(ans.split(',')[1])
def do_set_excitation_range(self, val, channel):
s = self.__ask('RDGRNG? %s' % channel)
s = s.split(',')
s[1] = str(val)
s = np.append([ str(channel) ], s)
self.__write('RDGRNG %s' % (','.join(s)))
def do_get_autorange(self, channel):
ans = self.__ask('RDGRNG? %s' % channel)
return bool(int(ans.split(',')[3]))
def do_get_excitation_on(self, channel):
ans = self.__ask('RDGRNG? %s' % channel)
return (int(ans.split(',')[4]) == 0)
def do_get_scanner_dwell_time(self, channel):
ans = self.__ask('INSET? %s' % channel)
return float(ans.split(',')[1])
def do_get_scanner_pause_time(self, channel):
ans = self.__ask('INSET? %s' % channel)
return float(ans.split(',')[2])
def do_get_filter_on(self, channel):
ans = self.__ask('FILTER? %s' % channel)
return bool(int(ans.split(',')[0]))
def do_get_filter_settle_time(self, channel):
ans = self.__ask('FILTER? %s' % channel)
return float(ans.split(',')[1])
def do_set_filter_settle_time(self, val, channel):
cmd = 'FILTER %s,1,%d,80' % (channel,int(np.round(val)))
self.__write(cmd)
getattr(self, 'get_filter_settle_time%s' % channel)()
getattr(self, 'get_filter_on%s' % channel)()
getattr(self, 'get_filter_reset_threshold%s' % channel)()
def do_get_filter_reset_threshold(self, channel):
ans = self.__ask('FILTER? %s' % channel)
return float(ans.split(',')[2])
def do_get_heater_range(self):
ans = self.__ask('HTRRNG?')
return int(ans)
def do_get_heater_power(self):
ans = self.__ask('HTR?')
return float(ans)
def do_set_heater_range(self, val):
self.__write('HTRRNG %d' % val)
self.get_heater_range()
def do_get_heater_status(self):
        ans = self.__ask('HTRST?')
        return int(ans)
# Repository: SdgJlbl/vault-cli
import contextlib
import logging
import os
import pathlib
import sys
from typing import (
Any,
Dict,
Generator,
Mapping,
NoReturn,
Optional,
Sequence,
TextIO,
Tuple,
)
import click
import yaml
import vault_cli
from vault_cli import client, environment, exceptions, settings, types
logger = logging.getLogger(__name__)
CONTEXT_SETTINGS = {
"help_option_names": ["-h", "--help"],
"auto_envvar_prefix": settings.ENV_PREFIX,
}
def load_config(ctx: click.Context, param: click.Parameter, value: str) -> None:
if value == "no":
ctx.default_map = {}
return
if value is None:
config_files = settings.CONFIG_FILES
else:
config_files = [value]
config = settings.build_config_from_files(*config_files)
ctx.default_map = config
def set_verbosity(ctx: click.Context, param: click.Parameter, value: int) -> int:
level = settings.get_log_level(verbosity=value)
logging.basicConfig(level=level)
logger.info(f"Log level set to {logging.getLevelName(level)}")
return value
@contextlib.contextmanager
def handle_errors():
try:
yield
except exceptions.VaultException as exc:
raise click.ClickException(str(exc))
def print_version(ctx, __, value):
if not value or ctx.resilient_parsing:
return
click.echo(f"vault-cli {vault_cli.__version__}")
click.echo(f"License: {vault_cli.__license__}")
ctx.exit()
@click.group(context_settings=CONTEXT_SETTINGS)
@click.pass_context
@click.option(
"--url", "-U", help="URL of the vault instance", default=settings.DEFAULTS.url
)
@click.option(
"--verify/--no-verify",
default=settings.DEFAULTS.verify,
help="Verify HTTPS certificate",
)
@click.option(
"--ca-bundle",
type=click.Path(),
help="Location of the bundle containing the server certificate "
"to check against.",
)
@click.option(
"--login-cert",
type=click.Path(),
help="Path to a public client certificate to use for connecting to vault.",
)
@click.option(
"--login-cert-key",
type=click.Path(),
help="Path to a private client certificate to use for connecting to vault.",
)
@click.option(
"--token-file",
"-T",
type=click.Path(),
help="File which contains the token to connect to Vault. "
'Configuration file can also contain a "token" key.',
)
@click.option("--username", "-u", help="Username used for userpass authentication")
@click.option(
"--password-file",
"-w",
type=click.Path(),
help='Can read from stdin if "-" is used as parameter. '
'Configuration file can also contain a "password" key.',
)
@click.option("--base-path", "-b", help="Base path for requests")
@click.option(
"-s",
"--safe-write/--unsafe-write",
default=settings.DEFAULTS.safe_write,
help="When activated, you can't overwrite a secret without "
'passing "--force" (in commands "set", "mv", etc)',
)
@click.option(
"--render/--no-render",
default=settings.DEFAULTS.render,
help="Render templated values",
)
@click.option(
"-v",
"--verbose",
is_eager=True,
callback=set_verbosity,
count=True,
help="Use multiple times to increase verbosity",
)
@click.option(
"--config-file",
is_eager=True,
callback=load_config,
help="Config file to use. Use 'no' to disable config file. "
"Default value: first of " + ", ".join(settings.CONFIG_FILES),
type=click.Path(),
)
@click.option(
"-V",
"--version",
is_flag=True,
callback=print_version,
expose_value=False,
is_eager=True,
)
@handle_errors()
def cli(ctx: click.Context, **kwargs) -> None:
"""
Interact with a Vault. See subcommands for details.
All arguments can be passed by environment variables: VAULT_CLI_UPPERCASE_NAME
(including VAULT_CLI_PASSWORD and VAULT_CLI_TOKEN).
"""
kwargs.pop("config_file")
verbose = kwargs.pop("verbose")
assert ctx.default_map # make mypy happy
kwargs.update(extract_special_args(ctx.default_map, os.environ))
# There might still be files to read, so let's do it now
kwargs = settings.read_all_files(kwargs)
saved_settings = kwargs.copy()
saved_settings.update({"verbose": verbose})
ctx.obj = client.get_client_class()(**kwargs) # type: ignore
ctx.obj.auth()
ctx.obj.saved_settings = saved_settings
def extract_special_args(
config: Mapping[str, Any], environ: Mapping[str, str]
) -> Dict[str, Any]:
result = {}
for key in ["password", "token"]:
result[key] = config.get(key)
env_var_key = "VAULT_CLI_{}".format(key.upper())
if env_var_key in environ:
result[key] = environ.get(env_var_key)
return result
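# Illustrative behaviour of extract_special_args (hedged example values):
#   extract_special_args({"token": "abc"}, {"VAULT_CLI_PASSWORD": "hunter2"})
#   -> {"password": "hunter2", "token": "abc"}
# i.e. VAULT_CLI_* environment variables override the config-file values.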
@cli.command("list")
@click.argument("path", required=False, default="")
@click.pass_obj
@handle_errors()
def list_(client_obj: client.VaultClientBase, path: str):
"""
List all the secrets at the given path. Folders are listed too. If no path
is given, list the objects at the root.
"""
result = client_obj.list_secrets(path=path)
click.echo("\n".join(result))
@cli.command(name="get-all")
@click.option(
"--flat/--no-flat",
default=True,
show_default=True,
help=("Returns the full path as keys instead of merging paths into a tree"),
)
@click.argument("path", required=False, nargs=-1)
@click.pass_obj
@handle_errors()
def get_all(client_obj: client.VaultClientBase, path: Sequence[str], flat: bool):
"""
    Return multiple secrets as a single yaml document containing all the secrets
    located at the given paths. Folders are explored recursively. Without a path,
    the whole vault is explored.
"""
paths = list(path) or [""]
result = client_obj.get_all_secrets(*paths, flat=flat)
click.echo(
yaml.safe_dump(result, default_flow_style=False, explicit_start=True), nl=False
)
@cli.command()
@click.pass_obj
@click.option(
"--text/--yaml",
default=True,
help=(
"Returns the value in yaml format instead of plain text."
"If the secret is not a string, it will always be yaml."
),
)
@click.option(
"-o",
"--output",
type=click.File("w"),
help="File in which to write the secret. "
"If ommited (or -), write in standard output",
)
@click.argument("name")
@click.argument("key", required=False)
@handle_errors()
def get(
client_obj: client.VaultClientBase,
text: bool,
output: Optional[TextIO],
key: Optional[str],
name: str,
):
"""
Return a single secret value.
"""
secret = client_obj.get_secret(path=name, key=key)
force_yaml = isinstance(secret, list) or isinstance(secret, dict)
if text and not force_yaml:
if secret is None:
secret = "null"
click.echo(secret, file=output)
return
click.echo(
yaml.safe_dump(secret, default_flow_style=False, explicit_start=True),
nl=False,
file=output,
)
def build_kv(attributes: Sequence[str]) -> Generator[Tuple[str, str], None, None]:
"""
Converts a list of "key=value" to tuples (key, value).
If the value is "-" then reads the secret from stdin.
"""
for item in attributes:
try:
k, v = item.split("=", 1)
except ValueError:
raise click.UsageError(
f"Expecting 'key=value' arguments. '{ item }' provided."
)
if v == "-":
v = click.get_text_stream("stdin").read()
yield k, v
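# Illustrative behaviour of build_kv (hedged example values):
#   list(build_kv(["user=me", "note=a=b"])) -> [("user", "me"), ("note", "a=b")]
# Only the first "=" splits each pair; a value of "-" is replaced by whatever
# is read from standard input.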
@cli.command("set")
@click.pass_obj
@click.option(
"--update/--clear",
default=True,
help="Update the current kv mapping or replace the its content",
)
@click.option(
"-p",
"--prompt",
is_flag=True,
help="Prompt user for values using a hidden input. Keys name are passed as arguments",
)
@click.option(
"--file",
"yaml_file",
default=None,
help="Read key/value mapping from a file. A filename of '-' reads the standard input",
type=click.File(),
)
@click.option(
"--force/--no-force",
"-f",
is_flag=True,
default=None,
help="In case the path already holds a secret, allow overwriting it "
"(this is necessary only if --safe-write is set).",
)
@click.argument("path")
@click.argument("attributes", nargs=-1, metavar="[key=value...]")
@handle_errors()
def set_(
client_obj: client.VaultClientBase,
update: bool,
prompt: bool,
yaml_file: TextIO,
path: str,
attributes: Sequence[str],
force: Optional[bool],
):
"""
Set a secret.
\b
You can give secrets in 3 different ways:
- Usage: vault set [OPTIONS] PATH [key=value...]
directly in the arguments. A value of "-" means that value will be read from the standard input
- Usage: vault set [OPTIONS] PATH --prompt [key...]
    prompt the user for values using hidden input
- Usage: vault set [OPTIONS] PATH --file=/path/to/file
using a json/yaml file
"""
if bool(attributes) + bool(yaml_file) > 1:
raise click.UsageError(
"Conflicting input methods: you can't mix --file and positional argument"
)
json_value: types.JSONValue
if yaml_file:
json_value = yaml.safe_load(yaml_file)
elif prompt:
json_value = {}
for key in attributes:
json_value[key] = click.prompt(
f"Please enter a value for key `{key}` of `{path}`", hide_input=True
)
else:
json_value = dict(build_kv(attributes))
try:
client_obj.set_secret(path=path, value=json_value, force=force, update=update)
except exceptions.VaultOverwriteSecretError as exc:
raise click.ClickException(
f"Secret already exists at {exc.path}. Use -f to force overwriting."
)
except exceptions.VaultMixSecretAndFolder as exc:
raise click.ClickException(str(exc))
click.echo("Done")
@cli.command()
@click.pass_obj
@click.argument("name")
@click.argument("key", required=False)
@handle_errors()
def delete(client_obj: client.VaultClientBase, name: str, key: Optional[str]) -> None:
"""
Delete a single secret.
"""
client_obj.delete_secret(path=name, key=key)
click.echo("Done")
@cli.command("env")
@click.option(
"-p",
"--path",
multiple=True,
required=True,
help="Folder or single item. Pass several times to load multiple values. You can use --path mypath=prefix or --path mypath:key=prefix if you want to change the generated names of the environment variables",
)
@click.option(
"-o",
"--omit-single-key/--no-omit-single-key",
is_flag=True,
default=False,
help="When the secret has only one key, don't use that key to build the name of the environment variable",
)
@click.argument("command", nargs=-1)
@click.pass_obj
@handle_errors()
def env(
client_obj: client.VaultClientBase,
path: Sequence[str],
omit_single_key: bool,
command: Sequence[str],
) -> NoReturn:
"""
Launch a command, loading secrets in environment.
Strings are exported as-is, other types (including booleans, nulls, dicts, lists)
are exported JSON-encoded.
If the path ends with `:key` then only one key of the mapping is used and its name is the name of the key.
VARIABLE NAMES
    By default the name is built from the relative path to the parent of the given path (passed as a parameter) and the names of the keys in the value mapping.
Let's say that we have stored the mapping `{'username': 'me', 'password': '<PASSWORD>'}` at path `a/b/c`
Using `--path a/b` will inject the following environment variables: B_C_USERNAME and B_C_PASSWORD
Using `--path a/b/c` will inject the following environment variables: C_USERNAME and C_PASSWORD
Using `--path a/b/c:username` will only inject `USERNAME=me` in the environment.
You can customize the variable names generation by appending `=SOME_PREFIX` to the path.
    In this case the part corresponding to the base path is replaced by your prefix.
Using `--path a/b=FOO` will inject the following environment variables: FOO_C_USERNAME and FOO_C_PASSWORD
Using `--path a/b/c=FOO` will inject the following environment variables: FOO_USERNAME and FOO_PASSWORD
Using `--path a/b/c:username=FOO` will inject `FOO=me` in the environment.
"""
paths = list(path) or [""]
env_secrets = {}
for path in paths:
path_with_key, _, prefix = path.partition("=")
path, _, filter_key = path_with_key.partition(":")
if filter_key:
secret = client_obj.get_secret(path=path, key=filter_key)
env_updates = environment.get_envvars_for_secret(
key=filter_key, secret=secret, prefix=prefix
)
else:
secrets = client_obj.get_secrets(path=path, relative=True)
env_updates = environment.get_envvars_for_secrets(
path=path,
prefix=prefix,
secrets=secrets,
omit_single_key=omit_single_key,
)
env_secrets.update(env_updates)
environ = os.environ.copy()
environ.update(env_secrets)
environment.exec_command(command=command, environ=environ)
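# Hedged CLI illustration (assuming the console script is installed as `vault`):
# with the secret {'username': 'me', 'password': ...} stored at a/b/c,
#   vault env --path a/b/c=APP -- myprogram
# runs `myprogram` with APP_USERNAME and APP_PASSWORD set in its environment.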
@cli.command("dump-config")
@click.pass_obj
@handle_errors()
def dump_config(client_obj: client.VaultClientBase) -> None:
"""
Display settings in the format of a config file.
"""
assert client_obj.saved_settings
click.echo(
yaml.safe_dump(
client_obj.saved_settings, default_flow_style=False, explicit_start=True
),
nl=False,
)
@cli.command("delete-all")
@click.option(
"-f",
"--force",
is_flag=True,
help="If not force, prompt for confirmation before each deletion.",
)
@click.argument("path", required=False, nargs=-1)
@click.pass_obj
@handle_errors()
def delete_all(
client_obj: client.VaultClientBase, path: Sequence[str], force: bool
) -> None:
"""
Delete multiple secrets.
"""
paths = list(path) or [""]
for secret in client_obj.delete_all_secrets(*paths, generator=True):
| |
parser.add_argument('userName')
args = parser.parse_args()
jobId = args["jobId"]
userName = args["userName"]
result = JobRestAPIUtils.PauseJob(userName, jobId)
ret = {}
if result:
ret["result"] = "Success, the job is scheduled to be paused."
else:
ret["result"] = "Cannot pause the job. Job ID:" + jobId
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(PauseJob, '/PauseJob')
class ResumeJob(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('jobId')
parser.add_argument('userName')
args = parser.parse_args()
jobId = args["jobId"]
userName = args["userName"]
result = JobRestAPIUtils.ResumeJob(userName, jobId)
ret = {}
if result:
ret["result"] = "Success, the job is scheduled to be resumed."
else:
ret["result"] = "Cannot resume the job. Job ID:" + jobId
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(ResumeJob, '/ResumeJob')
class CloneJob(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('jobId')
parser.add_argument('userName')
args = parser.parse_args()
jobId = args["jobId"]
userName = args["userName"]
result = JobRestAPIUtils.CloneJob(userName, jobId)
ret = {}
if result:
ret["result"] = "Success, the job is scheduled to be cloned."
else:
ret["result"] = "Cannot clone the job. Job ID:" + jobId
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(CloneJob, '/CloneJob')
class ApproveJob(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('jobId')
parser.add_argument('userName')
args = parser.parse_args()
jobId = args["jobId"]
userName = args["userName"]
result = JobRestAPIUtils.ApproveJob(userName, jobId)
ret = {}
if result:
ret["result"] = "Success, the job has been approved."
else:
ret["result"] = "Cannot approve the job. Job ID:" + jobId
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(ApproveJob, '/ApproveJob')
class GetCommands(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('jobId')
parser.add_argument('userName')
args = parser.parse_args()
jobId = args["jobId"]
userName = args["userName"]
commands = JobRestAPIUtils.GetCommands(userName, jobId)
resp = jsonify(commands)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(GetCommands, '/GetCommands')
class GetJobDetail(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('jobId')
parser.add_argument('userName')
args = parser.parse_args()
jobId = args["jobId"]
userName = args["userName"]
job = JobRestAPIUtils.GetJobDetail(userName, jobId)
job["jobParams"] = json.loads(base64.b64decode(job["jobParams"]))
if "endpoints" in job and job["endpoints"] is not None and len(job["endpoints"].strip()) > 0:
job["endpoints"] = json.loads(job["endpoints"])
if "jobStatusDetail" in job and job["jobStatusDetail"] is not None and len(job["jobStatusDetail"].strip()) > 0:
try:
job["jobStatusDetail"] = Json.loads(base64.b64decode(job["jobStatusDetail"]))
except Exception as e:
pass
if "jobMeta" in job:
job.pop("jobMeta",None)
resp = jsonify(job)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(GetJobDetail, '/GetJobDetail')
class GetJobStatus(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('jobId')
args = parser.parse_args()
jobId = args["jobId"]
job = JobRestAPIUtils.GetJobStatus(jobId)
resp = jsonify(job)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(GetJobStatus, '/GetJobStatus')
class GetClusterStatus(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('userName')
args = parser.parse_args()
userName = args["userName"]
cluster_status, last_updated_time = JobRestAPIUtils.GetClusterStatus()
cluster_status["last_updated_time"] = last_updated_time
resp = jsonify(cluster_status)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(GetClusterStatus, '/GetClusterStatus')
class AddCommand(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('jobId')
parser.add_argument('command')
parser.add_argument('userName')
args = parser.parse_args()
userName = args["userName"]
jobId = args["jobId"]
command = args["command"]
ret = {}
if command is None or len(command) == 0:
ret["result"] = "Cannot Run empty Command. Job ID:" + jobId
else:
result = JobRestAPIUtils.AddCommand(userName, jobId, command)
if result:
ret["result"] = "Success, the command is scheduled to be run."
else:
ret["result"] = "Cannot Run the Command. Job ID:" + jobId
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(AddCommand, '/AddCommand')
class AddUser(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('userName')
parser.add_argument('uid')
parser.add_argument('gid')
parser.add_argument('groups')
args = parser.parse_args()
ret = {}
userName = args["userName"]
if args["uid"] is None or len(args["uid"].strip()) == 0:
uid = authorization.INVALID_ID
else:
uid = args["uid"]
if args["gid"] is None or len(args["gid"].strip()) == 0:
gid = authorization.INVALID_ID
else:
gid = args["gid"]
if args["groups"] is None or len(args["groups"].strip()) == 0:
groups = []
else:
groups = args["groups"]
ret["status"] = JobRestAPIUtils.AddUser(userName, uid, gid, groups)
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(AddUser, '/AddUser')
class UpdateAce(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('userName')
parser.add_argument('identityName')
parser.add_argument('resourceType')
parser.add_argument('resourceName')
parser.add_argument('permissions')
args = parser.parse_args()
username = args["userName"]
identityName = str(args["identityName"])
resourceType = int(args["resourceType"])
resourceName = str(args["resourceName"])
permissions = int(args["permissions"])
ret = {}
ret["result"] = JobRestAPIUtils.UpdateAce(username, identityName, resourceType, resourceName, permissions)
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(UpdateAce, '/UpdateAce')
class DeleteAce(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('userName')
parser.add_argument('identityName')
parser.add_argument('resourceType')
parser.add_argument('resourceName')
args = parser.parse_args()
username = args["userName"]
identityName = str(args["identityName"])
resourceType = int(args["resourceType"])
resourceName = str(args["resourceName"])
ret = {}
ret["result"] = JobRestAPIUtils.DeleteAce(username, identityName, resourceType, resourceName)
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(DeleteAce, '/DeleteAce')
class IsClusterAdmin(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('userName')
args = parser.parse_args()
username = args["userName"]
ret = {}
ret["result"] = AuthorizationManager.IsClusterAdmin(username)
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(IsClusterAdmin, '/IsClusterAdmin')
class GetACL(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('userName')
args = parser.parse_args()
username = args["userName"]
ret = {}
ret["result"] = AuthorizationManager.GetAcl(username)
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(GetACL, '/GetACL')
class ListVCs(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('userName')
args = parser.parse_args()
userName = args["userName"]
ret = {}
ret["result"] = JobRestAPIUtils.ListVCs(userName)
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(ListVCs, '/ListVCs')
class GetVC(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('userName')
parser.add_argument('vcName')
args = parser.parse_args()
userName = args["userName"]
vcName = args["vcName"]
ret = JobRestAPIUtils.GetVC(userName, vcName)
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(GetVC, '/GetVC')
class AddVC(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('vcName')
parser.add_argument('quota')
parser.add_argument('metadata')
parser.add_argument('userName')
args = parser.parse_args()
vcName = args["vcName"]
quota = args["quota"]
metadata = args["metadata"]
userName = args["userName"]
ret = {}
ret["result"] = JobRestAPIUtils.AddVC(userName, vcName, quota, metadata)
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(AddVC, '/AddVC')
class DeleteVC(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('vcName')
parser.add_argument('userName')
args = parser.parse_args()
vcName = args["vcName"]
userName = args["userName"]
ret = {}
ret["result"] = JobRestAPIUtils.DeleteVC(userName, vcName)
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(DeleteVC, '/DeleteVC')
class UpdateVC(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('vcName')
parser.add_argument('quota')
parser.add_argument('metadata')
parser.add_argument('userName')
args = parser.parse_args()
vcName = args["vcName"]
quota = args["quota"]
metadata = args["metadata"]
userName = args["userName"]
ret = {}
ret["result"] = JobRestAPIUtils.UpdateVC(userName, vcName, quota, metadata)
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(UpdateVC, '/UpdateVC')
class ListStorages(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('vcName')
parser.add_argument('userName')
args = parser.parse_args()
vcName = args["vcName"]
userName = args["userName"]
ret = {}
ret["result"] = JobRestAPIUtils.ListStorages(userName, vcName)
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(ListStorages, '/ListStorages')
class AddStorage(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('vcName')
parser.add_argument('storageType')
parser.add_argument('url')
parser.add_argument('metadata')
parser.add_argument('defaultMountPath')
parser.add_argument('userName')
args = parser.parse_args()
vcName = args["vcName"]
storageType = args["storageType"]
url = args["url"]
metadata = args["metadata"]
defaultMountPath = args["defaultMountPath"]
userName = args["userName"]
ret = {}
ret["result"] = JobRestAPIUtils.AddStorage(userName, vcName, url, storageType, metadata, defaultMountPath)
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(AddStorage, '/AddStorage')
class DeleteStorage(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('vcName')
parser.add_argument('userName')
parser.add_argument('url')
args = parser.parse_args()
vcName = args["vcName"]
userName = args["userName"]
url = args["url"]
ret = {}
ret["result"] = JobRestAPIUtils.DeleteStorage(userName, vcName, url)
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(DeleteStorage, '/DeleteStorage')
class UpdateStorage(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('vcName')
parser.add_argument('storageType')
parser.add_argument('url')
parser.add_argument('metadata')
parser.add_argument('defaultMountPath')
parser.add_argument('userName')
args = parser.parse_args()
vcName = args["vcName"]
storageType = args["storageType"]
url = args["url"]
metadata = args["metadata"]
defaultMountPath = args["defaultMountPath"]
userName = args["userName"]
ret = {}
ret["result"] = JobRestAPIUtils.UpdateStorage(userName, vcName, url, storageType, metadata, defaultMountPath)
resp = jsonify(ret)
resp.headers["Access-Control-Allow-Origin"] = "*"
resp.headers["dataType"] = "json"
return resp
##
## Actually setup the Api resource routing here
##
api.add_resource(UpdateStorage, '/UpdateStorage')
def getAlias(username):
if "@" in username:
return username.split("@")[0].strip()
if "/" in username:
return username.split("/")[1].strip()
return username
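# Illustrative behaviour of getAlias (hedged example values):
#   getAlias("jdoe@example.com") -> "jdoe"
#   getAlias("DOMAIN/jdoe") -> "jdoe"
#   getAlias("jdoe") -> "jdoe"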
# -*- coding: utf-8 -*-
"""
@file
@brief First approach for a edit distance between two graphs.
See :ref:`l-graph_distance`.
Code adapted from https://github.com/sdpython/mlstatpy
"""
import copy
import re
import json
import numpy as np
class Vertex:
"""
Defines a vertex of a graph.
"""
def __init__(self, nb, label, weight):
"""
constructor
@param nb (int) index of the vertex
@param label (str) label
        @param weight (float)
"""
self.nb = nb # kind of id
self.label = label # label
self.pair = (None, None)
self.edges = {}
self.predE = {}
self.succE = {}
self.weight = weight
def __str__(self):
"""
usual
"""
return '{}'.format(self.Label)
def __repr__(self):
"""
usual
"""
return "Vertex({}, {}, {})".format(repr(self.nb), repr(self.Label), self.weight)
def is_vertex(self):
"""
returns True
"""
return True
def is_edge(self):
"""
returns False
"""
return False
@property
def Label(self):
"""
returns the label
"""
return self.label
class Edge:
"""
Defines an edge of a graph.
"""
def __init__(self, from_, to, label, weight):
"""
constructor
@param from_ (int)
@param to (int)
@param label (str)
@param weight (float)
``'00'`` means the beginning of a graph, ``'11'`` the end.
"""
self.from_, self.to = from_, to
self.nb = from_, to
self.label = label
self.pair = (None, None)
self.weight = weight
if self.from_ == "00" and self.to == "00":
raise AssertionError("should not happen") # pragma: no cover
if self.from_ == "11" and self.to == "11":
raise AssertionError("should not happen") # pragma: no cover
def __str__(self):
"""
usual
"""
return "{} -> {} [{}] w: {}".format(self.nb[0], self.nb[1], self.Label, self.weight)
def __repr__(self):
"""
usual
"""
return "Edge({}, {}, {}, {})".format(repr(self.nb[0]), repr(self.nb[1]), repr(self.Label), self.weight)
def is_vertex(self):
"""
returns False
"""
return False
def is_edge(self):
"""
returns True
"""
return True
@property
def Label(self):
"""
returns the label
"""
return self.label
class GraphDistance:
"""
Defines a graph to compute a distance between two graphs.
.. exref::
:title: Compute a distance between two graphs.
See :ref:`l-graph_distance`.
.. runpython::
:showcode:
import copy
from mlstatpy.graph import GraphDistance
# We define two graphs as list of edges.
graph1 = [("a", "b"), ("b", "c"), ("b", "X"), ("X", "c"),
("c", "d"), ("d", "e"), ("0", "b")]
graph2 = [("a", "b"), ("b", "c"), ("b", "X"), ("X", "c"),
("c", "t"), ("t", "d"), ("d", "e"), ("d", "g")]
# We convert them into objects GraphDistance.
graph1 = GraphDistance(graph1)
graph2 = GraphDistance(graph2)
distance, graph = graph1.distance_matching_graphs_paths(graph2, use_min=False)
print("distance", distance)
print("common paths:", graph)
"""
# graph is a dictionary
@staticmethod
def get_list_of_vertices(graph):
edges = [edge[:2] for edge in graph]
unique = {}
for i, j in edges:
unique[i] = unique[j] = 1
vertices = list(unique.keys())
vertices.sort()
return vertices
def __init__(self, edge_list, vertex_label=None, add_loop=False,
weight_vertex=1., weight_edge=1.):
"""
constructor
@param edge_list list of edges
@param add_loop automatically add a loop on each vertex (an edge from a vertex to itself)
@param weight_vertex weight for every vertex
@param weight_edge weight for every edge
"""
if vertex_label is None:
vertex_label = dict()
if isinstance(edge_list, str):
self.load_from_file(edge_list, add_loop)
else:
self.vertices = {}
self.edges = {}
self.labelBegin = "00"
self.labelEnd = "11"
vid = GraphDistance.get_list_of_vertices(edge_list)
for u in vid:
self.vertices[u] = Vertex(
u, vertex_label.get(u, str(u)), weight_vertex)
for e in edge_list:
i, j = e[:2]
ls = "" if len(e) < 3 else e[2]
                w = weight_edge if len(e) < 4 else e[3]  # if a custom edge weight is used, a custom label must be given too
self.edges[i, j] = Edge(i, j, str(ls), w)
self._private__init__(add_loop, weight_vertex, weight_edge)
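        # Accepted edge formats in `edge_list` (from the parsing above):
        #   ("a", "b"), ("a", "b", "label") or ("a", "b", "label", 2.5);
        # when a custom weight is supplied, the label must be supplied as well.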
def __getitem__(self, index):
"""
        returns the vertex (for a string index) or the edge (for a tuple index)
@param index id (index) to look for
@return Vertex or Edge
"""
if isinstance(index, str):
return self.vertices[index]
if isinstance(index, tuple):
return self.edges[index]
raise KeyError( # pragma: no cover
"unable to get element " + str(index))
@staticmethod
def load_from_file(filename, add_loop):
"""
loads a graph from a file
@param filename file name
@param add_loop @see me __init__
"""
lines = open(filename, "r").readlines()
regV = re.compile("\\\"?([a-z0-9_]+)\\\"? *[[]label=\\\"(.*)\\\"[]]")
regE = re.compile("\\\"?([a-z0-9_]+)\\\"? *-> *\\\"?" +
"([a-z0-9_]+)\\\"? *[[]label=\\\"(.*)\\\"[]]")
edge_list = []
vertex_label = {}
for line in lines:
line = line.strip("\r\n ;")
ed = regE.search(line)
ve = regV.search(line)
if ed:
g = ed.groups()
edge_list.append((g[0], g[1], g[2]))
elif ve:
g = ve.groups()
vertex_label[g[0]] = g[1]
if len(vertex_label) == 0 or len(edge_list) == 0:
raise OSError( # pragma: no cover
"Unable to parse file %r." % filename)
return GraphDistance(edge_list, vertex_label, add_loop)
@staticmethod
def load_from_darts_genotype(genotype, op_stats_filepath="op_stats.json"):
with open(op_stats_filepath) as f:
op_stats = json.load(f)
edge_list = [('00', 'c0_0', "", 1), ('00', 'c0_1', "", 1)]
last_to = None
prev_last_to = None
for i, cell_genotype in enumerate(genotype.genes):
if last_to:
edge_list.append((f"c{i-1}_{last_to}", f"c{i}_1", "", 1))
c_prev_prev = ("00", f"c{i}_0", "", 1) if i == 1 else (f"c{i-2}_{prev_last_to}", f"c{i}_0", "", 1)
edge_list.append(c_prev_prev)
cell_edge_list = []
for _, to, from_ in cell_genotype:
if (from_, to) not in cell_edge_list:
cell_edge_list.append((from_, to))
for j, (from_, to) in enumerate(cell_edge_list):
edge_label = ""
edge_weight = 0
for op, t, f in cell_genotype:
if t == to and f == from_:
edge_label += f";{op}" if edge_label != "" else f"{op}"
edge_weight += op_stats[op]
edge_list.append((f"c{i}_{from_}", f"c{i}_{to}", edge_label, edge_weight))
prev_last_to = last_to
_, last_to = cell_edge_list[-1]
edge_list.append((f"c{len(genotype.genes)-1}_{last_to}", '11', "", 1))
return GraphDistance(edge_list)
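    # Hedged note on the inputs assumed above: `genotype.genes` is a list of
    # per-cell lists of (op_name, to_node, from_node) triples, and the JSON file
    # behind `op_stats_filepath` maps operation names to scalar weights,
    # e.g. {"sep_conv_3x3": 0.4, "skip_connect": 0.1} (example values).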
@staticmethod
def load_from_darts_cell(cell_genotype, op_stats_filepath="op_stats.json"):
with open(op_stats_filepath) as f:
op_stats = json.load(f)
edge_list = []
cell_edge_list = []
for _, to, from_ in cell_genotype:
if (from_, to) not in cell_edge_list:
cell_edge_list.append((from_, to))
for j, (from_, to) in enumerate(cell_edge_list):
edge_label = ""
edge_weight = 0
for op, t, f in cell_genotype:
if t == to and f == from_:
edge_label += f";{op}" if edge_label != "" else f"{op}"
edge_weight += op_stats[op]
edge_list.append((str(from_), str(to), edge_label, edge_weight))
return GraphDistance(edge_list)
def _private__init__(self, add_loop, weight_vertex, weight_edge):
if add_loop:
for k in self.vertices:
if k not in (self.labelBegin, self.labelEnd):
self.edges[k, k] = Edge(k, k, "", weight_edge)
self.connect_root_and_leave(weight_vertex, weight_edge)
self.compute_predecessor()
self.compute_successor()
def connect_root_and_leave(self, weight_vertex, weight_edge):
order = self.get_order_vertices()
roots = [v for v, k in order.items() if k == 0]
vert = {}
for o in order:
vert[o] = 0
for k in self.edges:
if k[0] != k[1]:
vert[k[0]] += 1
for r in roots:
if self.labelBegin not in self.vertices:
self.vertices[self.labelBegin] = Vertex(
self.labelBegin, self.labelBegin, weight_vertex)
if r != self.labelBegin:
self.edges[self.labelBegin, r] = Edge(
self.labelBegin, r, "", weight_edge)
leaves = [k for k, v in vert.items() if v == 0]
for r in leaves:
if self.labelEnd not in self.vertices:
self.vertices[self.labelEnd] = Vertex(
self.labelEnd, self.labelEnd, weight_vertex)
if r != self.labelEnd:
self.edges[r, self.labelEnd] = Edge(
r, self.labelEnd, "", weight_edge)
def get_order_vertices(self):
edges = self.edges
order = {}
for k in edges:
order[k[0]] = 0
order[k[1]] = 0
modif = 1
while modif > 0:
modif = 0
for k in edges:
i, j = k
if i != j and order[j] <= order[i]:
order[j] = order[i] + 1
modif += 1
return order
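    # Illustration (hedged): for edges [("a", "b"), ("b", "c")] the fixed point is
    # order == {"a": 0, "b": 1, "c": 2}; each vertex gets its longest-path depth,
    # and the roots are exactly the vertices with order 0.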
def __str__(self):
"""
usual
"""
li = []
for v in self.vertices.values():
li.append(str(v))
for k, e in self.edges.items():
li.append(str(e))
return "\n".join(li)
def __repr__(self):
"""
usual
"""
edges = ", ".join(repr(v) for _, v in sorted(self.edges.items()))
vertices = ", ".join("'{}': {}".format(k, repr(v))
for k, v in sorted(self.vertices.items()))
return "GraphDistance(\n [{0}],\n {{{1}}})".format(edges, vertices)
def compute_predecessor(self):
"""
usual
"""
pred = {}
for i, j in self.edges:
if j not in pred:
pred[j] = {}
pred[j][i, j] = 0
for k, v in pred.items():
for n in v:
self.vertices[k].predE[n] = self.edges[n]
def compute_successor(self):
succ = {}
for i, j in self.edges:
if i not in succ:
succ[i] = {}
succ[i][i, j] = i, j
for k, v in succ.items():
for n in v:
self.vertices[k].succE[n] = self.edges[n]
def get_matching_functions(self, function_mach_vertices, function_match_edges,
cost=False):
"""
returns default matching functions between two vertices and two edges
        @param function_mach_vertices if not None, this function is returned; otherwise, a new function is built.
            See below.
        @param function_match_edges if not None, this function is returned; otherwise, a new function is built.
            See below.
@param cost if True, the returned function should return a float, otherwise a boolean
@return a pair of functions
Example for * if cost is False:
::
| |
!pip install -qq shap==0.35.0
# !pip install -qq shap
import shap
# !pip install -qq torch==1.7.1
!pip install -qq transformers
!pip install -qq sentence-transformers
# !pip -qq install transformers==3.3.1
!pip install -qq torch==1.8.1
from tensorboard.plugins.hparams import api as hp
import tensorflow as tf
# !pip install -qq --upgrade wandb
# !pip install -qq torchviz
# !pip install -qq bertviz
# import sys
# !test -d bertviz_repo && echo "FYI: bertviz_repo directory already exists, to pull latest version uncomment this line: !rm -r bertviz_repo"
# # !rm -r bertviz_repo # Uncomment if you need a clean pull from repo
# !test -d bertviz_repo || git clone https://github.com/jessevig/bertviz bertviz_repo
# if not 'bertviz_repo' in sys.path:
# sys.path += ['bertviz_repo']
# !pip install -qq regex
# !pip install -qq transformers
# !pip install -qq boto3
# !wandb login 79c99cb8196ccfc85f75dd926f9e872da3ba85a8
# import wandb
# wandb.init(project="school",notes='Dec-26_BERT')
# %cd /content/drive/MyDrive/Colab Notebooks/0.NLP_Twitter_&_Complaints/
!pwd
# https://machinelearningmastery.com/feature-importance-and-feature-selection-with-xgboost-in-python/
# Commented out IPython magic to ensure Python compatibility.
RANDOM_SEED =47
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
from torch import nn, optim
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F
import warnings
warnings.filterwarnings('ignore')
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix,classification_report
from collections import defaultdict
from textwrap import wrap
import seaborn as sns
from joblib import load, dump
import pickle
from tqdm import tqdm
# import transformers
import datetime
PATH = '/content/drive/MyDrive/Colab Notebooks/0.NLP_Twitter_&_Complaints'
# %load_ext tensorboard
log_dir = PATH + "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
from sklearn.metrics import accuracy_score
torch.manual_seed(RANDOM_SEED)
device = torch.device ("cuda:0" if torch.cuda.is_available() else "cpu")
# from bertviz import head_view
import shap
device
# Commented out IPython magic to ensure Python compatibility.
!pip install -qq watermark
# %reload_ext watermark
# %watermark -v -p numpy,tensorflow,torch,pandas,sklearn,seaborn,transformers
# Commented out IPython magic to ensure Python compatibility.
plt.rcParams['figure.figsize'] =(8,8)
import matplotlib.pylab as pylab
params = {'legend.fontsize': 'x-large',
'figure.figsize': (8, 8),
'axes.labelsize': '8',
'axes.titlesize': '8',
'xtick.labelsize':'4',
'ytick.labelsize':'4',
'font.family': 'Times new roman'}
pylab.rcParams.update(params)
# %matplotlib inline
# %config InlineBackend.figure_format='retina'
sns.set(style='whitegrid', palette='muted', font_scale=1.2)
HAPPY_COLORS_PALETTE = ["#01BEFE", "#FFDD00", "#FF7D00", "#FF006D", "#93D30C", "#8F00FF"]
sns.set_palette(sns.color_palette(HAPPY_COLORS_PALETTE))
RANDOM_SEED =47
np.random.seed(RANDOM_SEED)
torch.manual_seed(RANDOM_SEED)
device = torch.device ("cuda:0" if torch.cuda.is_available() else "cpu")
# !git clone https://github.com/LiqunW/Long-document-dataset
# !pip install -qq pyunpack
# !pip install -qq patool
# PATH_arxiv = '/content/Long-document-dataset'
# from pyunpack import Archive
# Archive('cs.AI.rar').extractall(PATH_arxiv)
df_imdb = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/0.NLP_Twitter_&_Complaints/data/IMDB_Dataset.csv')
df_imdb
# # df = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/0.NLP_Twitter_&_Complaints/data/dec_5_hand.csv')
# # df
# # df_clean = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/0.NLP_Twitter_&_Complaints/data/ucf_clean.csv',
# # lineterminator='\n')
# # df_origin = df_clean
# # df_origin= pd.read_csv('/content/drive/MyDrive/Colab Notebooks/0.NLP_Twitter_&_Complaints/data/ucf_college_tweets_standord.csv')
# # df_cmu = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/0.NLP_Twitter_&_Complaints/data/ucf_college_tweets_CMU.csv',parse_dates=['created_at'])
# df_origin= pd.read_csv('/content/drive/MyDrive/Colab Notebooks/0.NLP_Twitter_&_Complaints/data/ucf_clean.csv',lineterminator='\n')
# # df_stanford = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/0.NLP_Twitter_&_Complaints/data/ucf_college_tweets.csv')
# df_origin.columns = ['created_at','school_name','user_name','text','school_handle','tweet_id']
# # col='user_name'
# NOTE: df_embeddings (per-sample prediction outputs with raw scores) is built
# in a later analysis step; these cells assume it is already available.
sns.countplot(df_embeddings.predicted_raw_difference)
df_embeddings
# sns.countplot(df_embeddings[df_embeddings.wrong==0].predicted_raw_difference)
# plt.show()
sns.countplot(df_embeddings[df_embeddings.wrong==1][:100]['predict_c_0'])
plt.show()
df_embeddings[df_embeddings.wrong==1].predicted_raw_difference
params = {'legend.fontsize': 'x-large',
'figure.figsize': (18, 18),
'axes.labelsize': '18',
'axes.titlesize': '18',
'xtick.labelsize':'18',
'ytick.labelsize':'18',
'font.family': 'Times new roman'}
pylab.rcParams.update(params)
sns.countplot(df_imdb.sentiment)
plt.ylabel('Samples')
plt.xlabel('IMDB Movie Sentiments')
plt.show()
df= df_imdb
df_profile = df_imdb
# df_profile
"""# Profile Classificaiton"""
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
df_profile['labels']= le.fit_transform(df_profile['sentiment'])
df_profile = df_profile.sample(len(df_profile),random_state=47)
df_profile.reset_index(drop=True,inplace=True)
mapping = dict(zip(le.classes_, range(len(le.classes_))))
# df_profile = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/0.NLP_Twitter_&_Complaints/data/profile_Feb_4.csv')
df_profile= df_imdb
# df_profile = df_profile.sample(5000,random_state=47)
df_profile.reset_index(drop=True,inplace=True)
news_groups = le.classes_.tolist()
import sklearn
X = df_profile.review
# X = df_profile[['text']]
y = df_profile.labels
# z = df_profile.user_name
X_train,X_test,y_train,y_test= train_test_split(X,y,stratify=y,test_size=0.2,
)
print('number of training samples:', len(X_train))
print('number of test samples:', len(X_test))
train_df = pd.DataFrame({'doc':X_train,
'labels':y_train})
test_df = pd.DataFrame({'doc':X_test,
'labels':y_test})
train_df.reset_index(drop=True,inplace=True)
test_df.reset_index(drop=True,inplace=True)
# test_df_og = test_df
# test_df = test_df[test_df.false_predicted == 0]
test_df.reset_index(drop=True,inplace=True)
# sns.countplot(test_df['labels'])
# plt.title('Test Profiles for UCF')
# plt.xlabel('Schools')
# plt.ylabel('Number of profiles')
# plt.show()
# !pip -qq install transformers==3.3.1
# !pip -qq install transformers==4.0.0
!pip -qq install pkbar
import os
import re
import collections
import timeit
import torch
import pandas as pd
import pkbar
import numpy
# import numpy.testing.decorators
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset,DataLoader
import torch.nn as nn
from sklearn.metrics import f1_score,classification_report
import transformers
# Uses GPU if available
from torch import cuda
device = 'cuda' if cuda.is_available() else 'cpu'
# device = torch.device ("cuda:0" if torch.cuda.is_available() else "cpu")
device
# @@ hy
MAX_LEN = 128
TRAIN_BATCH_SIZE = 8
VALID_BATCH_SIZE = 8
EPOCHS = 10
LEARNING_RATE = 1e-06
WEIGHT_DECAY = 1e-05
num_of_batches_per_epoch = len(X_train)//TRAIN_BATCH_SIZE
# Distil-bert model parameters
from transformers import DistilBertConfig,DistilBertTokenizer,DistilBertModel
from transformers import BertConfig,BertTokenizer,BertModel
from transformers import BigBirdConfig,BigBirdTokenizer,BigBirdModel
from transformers import LongformerConfig,LongformerTokenizer,LongformerModel
# from transformers import Big
num_classes = len(df_profile.labels.unique())
# tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-large')
# BigBirdTokenizer
# tokenizer = DistilBertTokenizer.from_pretrained()
class BertDataFormat(Dataset):
def __init__(self, dataframe, tokenizer, max_len):
self.len = len(dataframe)
self.data = dataframe
self.tokenizer = tokenizer
self.max_len = max_len
def __getitem__(self, index):
cur_doc = str(self.data.doc[index])
cur_doc = " ".join(cur_doc.split())
inputs = self.tokenizer.encode_plus(
cur_doc,
None,
add_special_tokens=True,
max_length=self.max_len,
padding='max_length',
return_token_type_ids=True,
truncation=True
)
ids = inputs['input_ids']
mask = inputs['attention_mask']
return {
'ids': torch.tensor(ids, dtype=torch.long),
'mask': torch.tensor(mask, dtype=torch.long),
'targets': torch.tensor(self.data.labels[index], dtype=torch.long)
}
def __len__(self):
return self.len
training_set = BertDataFormat(train_df, tokenizer, MAX_LEN)
testing_set = BertDataFormat(test_df, tokenizer, MAX_LEN)
train_params = {'batch_size': TRAIN_BATCH_SIZE,
'shuffle': True,
'num_workers': 0
}
test_params = {'batch_size': VALID_BATCH_SIZE,
'shuffle': False,
'num_workers': 0
}
training_loader = DataLoader(training_set, **train_params)
testing_loader = DataLoader(testing_set, **test_params)
# Create the customized model by adding a dropout and a dense layer on top of DistilBERT to get the final output for the model.
history = defaultdict(list)
class DistillBERTClass(torch.nn.Module):
def __init__(self,num_classes):
super(DistillBERTClass, self).__init__()
self.l1 = DistilBertModel.from_pretrained("distilbert-base-uncased")
self.classifier = torch.nn.Linear(768, 768)
self.dropout = torch.nn.Dropout(0.6)
self.classifier = torch.nn.Linear(768, num_classes)
def forward(self, input_ids, attention_mask):
output_1 = self.l1(input_ids=input_ids, attention_mask=attention_mask)
hidden_state = output_1[0]
bert_last = hidden_state[:, 0]
output = self.classifier(bert_last)
return output
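# Note: the DistillBERTClass/BERTClass/BibBirdClass definitions in this section
# overwrite `self.classifier` with a second Linear layer and never call
# `self.dropout` in forward(). A minimal sketch of a head that actually applies
# the dropout + dense layers described in the comment above -- an assumption
# about the intended design (the class name and the ReLU are my own choices,
# not taken from this notebook):
class DistilBertWithDropoutHead(torch.nn.Module):
    def __init__(self, num_classes):
        super().__init__()
        self.backbone = DistilBertModel.from_pretrained("distilbert-base-uncased")
        self.pre_classifier = torch.nn.Linear(768, 768)   # dense layer on the [CLS] state
        self.dropout = torch.nn.Dropout(0.6)              # dropout before the output layer
        self.classifier = torch.nn.Linear(768, num_classes)
    def forward(self, input_ids, attention_mask):
        hidden_state = self.backbone(input_ids=input_ids, attention_mask=attention_mask)[0]
        cls_token = hidden_state[:, 0]                     # first-token embedding
        x = torch.relu(self.pre_classifier(cls_token))
        x = self.dropout(x)
        return self.classifier(x)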
class BERTClass(torch.nn.Module):
def __init__(self,num_classes):
super(BERTClass, self).__init__()
self.l1 = BertModel.from_pretrained("bert-base-uncased",output_hidden_states=True)
self.classifier = torch.nn.Linear(768, 768)
self.dropout = torch.nn.Dropout(0.6)
self.classifier = torch.nn.Linear(768, num_classes)
def forward(self, input_ids, attention_mask):
output_1 = self.l1(input_ids=input_ids, attention_mask=attention_mask)
hidden_state = output_1[0]
bert_last = hidden_state[:, 0]
output = self.classifier(bert_last)
return output
class BibBirdClass(torch.nn.Module):
def __init__(self,num_classes):
super(BibBirdClass, self).__init__()
self.l1 = BigBirdModel.from_pretrained("google/bigbird-roberta-large",output_hidden_states=True)
self.classifier = torch.nn.Linear(4096, 1024)
self.dropout = torch.nn.Dropout(0.6)
self.classifier = torch.nn.Linear(1024, num_classes)
def forward(self, input_ids, attention_mask):
output_1 = self.l1(input_ids=input_ids, attention_mask=attention_mask)
hidden_state = output_1[0]
bert_last = hidden_state[:, 0]
output = self.classifier(bert_last)
return output
# Commented out IPython magic to ensure Python compatibility.
# %%capture
# Copy model to device (uncommented so baseline_model exists for the
# optimizer below; switch the class to compare backbones).
# baseline_model = DistillBERTClass(num_classes)
# baseline_model = BERTClass(num_classes)
baseline_model = BibBirdClass(num_classes)
baseline_model.to(device)
# Create the loss function and optimizer
loss_function = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(params=baseline_model.parameters(), lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY)
baseline_model.parameters
# Calculate accuracy of the model
def acc_cal(big_idx, targets):
n_correct = (big_idx==targets).sum().item()
return n_correct
# train model
def train(epoch,model):
tr_loss = 0
n_correct = 0
nb_tr_steps = 0
nb_tr_examples = 0
model.train()
# progress bar
train_per_epoch = num_of_batches_per_epoch
kbar = pkbar.Kbar(target=train_per_epoch, epoch=epoch,
num_epochs=EPOCHS, width=8,
always_stateful=False)
for idx,data in enumerate(training_loader, 0):
# copy tensors to gpu
ids = data['ids'].to(device, dtype = torch.long)
mask = data['mask'].to(device, dtype = torch.long)
targets = data['targets'].to(device, dtype = torch.long)
# get output and calculate loss.
outputs = model(ids, mask)
loss = loss_function(outputs, targets)
tr_loss += loss.item()
big_val, big_idx = torch.max(outputs.data, dim=1)
n_correct += acc_cal(big_idx, targets)
nb_tr_steps += 1
nb_tr_examples+=targets.size(0)
optimizer.zero_grad()
loss.backward()
# # When using GPU
optimizer.step()
kbar.update(idx, values=[("train_loss", tr_loss/(idx+1))])
epoch_loss = tr_loss/nb_tr_steps
epoch_accu = (n_correct*100)/nb_tr_examples
# Comment them out for faster training
test_acc,test_loss,predicted_labels,true_labels, predicted_raw= valid(model, testing_loader)
print(" - ")
print("test accuracy:",round(test_acc,2))
print("test loss:",round(test_loss,2))
history['train_acc'].append(epoch_accu)
history['train_loss'].append(epoch_loss)
history['test_acc_while_training'].append(test_acc)
history['test_loss_while_training'].append(test_loss)
# print(f"Training Loss Epoch: {epoch_loss}")
# print(f"Training Accuracy Epoch: {epoch_accu}")
return
# function to predict output.
def valid(model, testing_loader):
predicted_raw = []
predicted_labels = []
true_labels = []
nb_tr_steps = 0
tr_loss =0
nb_tr_examples=0
model.eval()
n_correct = 0; n_wrong = 0; total = 0
with torch.no_grad():
for _, data in enumerate(testing_loader, 0):
# copy tensors to gpu.
ids = data['ids'].to(device, dtype = torch.long)
mask = data['mask'].to(device, dtype = torch.long)
targets = data['targets'].to(device, dtype = torch.long)
outputs = model(ids, mask).squeeze()
# calculate loss
loss = loss_function(outputs, targets)
tr_loss += loss.item()
big_val, big_idx = torch.max(outputs.data, dim=1)
predicted_raw += outputs
predicted_labels += big_idx
true_labels += targets
n_correct += acc_cal(big_idx, targets)
nb_tr_steps += 1
nb_tr_examples+=targets.size(0)
epoch_loss = tr_loss/nb_tr_steps
epoch_accu = (n_correct*100)/nb_tr_examples
history['val_acc'].append(epoch_accu)
history['val_loss'].append(epoch_loss)
return epoch_accu,epoch_loss,predicted_labels,true_labels,predicted_raw
# with torch.no_grad():
# for _, data in enumerate(testing_loader, 0):
# ids = data['ids'].to(device, dtype = torch.long)
# mask = data['mask'].to(device, dtype = torch.long)
# targets = data['targets'].to(device, dtype = torch.long)
# outputs =baseline_model(ids, mask).squeeze()
# print(outputs)
# big_val, big_idx = torch.max(outputs.data, dim=1)
# function to predict output.
def test_model(model, testing_loader):
predicted_labels = []
true_labels = []
nb_tr_steps = 0
tr_loss =0
nb_tr_examples=0
model.eval()
n_correct = 0; n_wrong = 0; total = 0
with torch.no_grad():
for _, data in enumerate(testing_loader, 0):
# copy tensors to gpu.
ids = data['ids'].to(device, dtype = torch.long)
mask = data['mask'].to(device, dtype = torch.long)
targets = data['targets'].to(device, dtype = torch.long)
outputs = model(ids, mask).squeeze()
# calculate loss
loss = loss_function(outputs, targets)
tr_loss += loss.item()
big_val, big_idx = torch.max(outputs.data, dim=1)
predicted_labels += big_idx
true_labels += targets
n_correct += acc_cal(big_idx, targets)
nb_tr_steps += 1
nb_tr_examples+=targets.size(0)
epoch_loss = tr_loss/nb_tr_steps
epoch_accu = (n_correct*100)/nb_tr_examples
return epoch_accu,epoch_loss,predicted_labels,true_labels
# import wandb
# wandb.login()
# args = dict(
# epochs=5,
# classes=10,
# batch_size=32,
# learning_rate=0.005,
# dataset="Twitter_Clean",
# architecture="Baseline")
# wandb.init(config=args)
# wandb.watch(baseline_model)
hp_batch_train = hp.HParam('train_batch', hp.Discrete([16,32,64]))
hp_batch_valid = hp.HParam('valid_batch', hp.Discrete([16,32]))
hp_learning_rate = hp.HParam('learning_rate',hp.RealInterval(1e-06,1e-03))
hp_max_len = hp.HParam('length', hp.Discrete([128,256,512]))
METRIC_ACCURACY ='accuracy'
with tf.summary.create_file_writer('logs/hparam_tuning').as_default():
hp.hparams_config(
hparams=[hp_batch_train, hp_batch_valid, hp_learning_rate,hp_max_len],
metrics=[hp.Metric(METRIC_ACCURACY, display_name='Accuracy')],
)
def run(run_dir, hparams):
with tf.summary.create_file_writer(run_dir).as_default():
hp.hparams(hparams) # record the values used in this trial
# NOTE: `model(hparams)` is a placeholder here -- it should be a function that
# trains/evaluates the classifier for the given hparams and returns its accuracy.
accuracy = model(hparams)
tf.summary.scalar(METRIC_ACCURACY, accuracy, step=1)
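# A sketch of how run() is typically driven, following the standard TensorBoard
# HParams tutorial pattern. `hparam_grid_search` is a name introduced here; the
# loop only enumerates the search space defined above and still relies on the
# placeholder call inside run() to produce an accuracy value.
def hparam_grid_search():
    session_num = 0
    for train_batch in hp_batch_train.domain.values:
        for valid_batch in hp_batch_valid.domain.values:
            for max_len in hp_max_len.domain.values:
                for lr in (hp_learning_rate.domain.min_value, hp_learning_rate.domain.max_value):
                    hparams = {
                        hp_batch_train: train_batch,
                        hp_batch_valid: valid_batch,
                        hp_max_len: max_len,
                        hp_learning_rate: lr,
                    }
                    run('logs/hparam_tuning/run-%d' % session_num, hparams)
                    session_num += 1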
df_profile
from torch.utils.tensorboard import SummaryWriter
# from torchvision import datasets, transforms
writer = SummaryWriter()
for n_iter in range(100):
writer.add_scalar('Loss/train', np.random.random(), n_iter)
writer.add_scalar('Loss/test', np.random.random(), n_iter)
not userpass:
logger.error("Unable to get password from env variable: "
"TCF_PASSWORD" + aka)
continue
logger.info("%s: login in with user/pwd from environment "
"TCF_USER/PASSWORD", rtb._url)
else:
if args.userid == None:
userid = raw_input('Login for %s [%s]: ' \
% (rtb._url, getpass.getuser()))
if userid == "":
userid = getpass.getuser()
else:
userid = args.userid
userpass = getpass.getpass(
"Login to %s as %s\nPassword: " % (rtb._url, userid))
try:
if rtb.login(userid, userpass):
logged = True
else:
logger.error("%s (%s): cannot login: with given "
"credentials %s", rtb._url, rtb.aka, userid)
except Exception as e:
logger.error("%s (%s): cannot login: %s",
rtb._url, rtb.aka, e)
else:
logged = True
if not logged:
logger.error("Could not login to any server, "
"please check your config")
exit(1)
def rest_logout(args):
for rtb in rest_target_brokers.itervalues():
logger.info("%s: checking for a valid session", rtb._url)
if rtb.valid_session:
rtb.logout()
def rest_target_print(rt, verbosity = 0):
"""
Print information about a REST target taking into account the
verbosity level from the logging module
:param rt: object describing the REST target to print
:type rt: dict
"""
if verbosity == 0:
print "%(fullid)s" % rt
elif verbosity == 1:
# Simple list, just show owner and power state
if 'powered' in rt:
if rt['powered'] == True:
power = " ON"
else:
power = ""
else:
power = ""
if rt['owner'] != None:
owner = "[" + rt['owner'] + "]"
else:
owner = ""
print "%s %s%s" % (rt['fullid'], owner, power)
elif verbosity == 2:
print rt['url']
for key in sorted(rt.keys()):
val = rt[key]
if key == "url":
continue
elif key == "interfaces" or key == "consoles":
print " %s: %s" % (key, ' '.join(sorted(val)))
elif key == "bsp_models":
print " %s: %s" % (key, ' '.join(sorted(val.keys())))
elif isinstance(val, list) or isinstance(val, dict):
print " %s: %s" % (key, pprint.pformat(val))
else:
print " %s: %s" % (key, val)
elif verbosity == 3:
pprint.pprint(rt)
else:
rtb = rt.pop('rtb') # DIRTY: Can't get skipkeys to work that well
print json.dumps(rt, skipkeys = True, indent = 8)
rt['rtb'] = rtb    # restore the key popped above
def _rest_target_find_by_id(_target):
"""
Find a target by ID.
Ignores if the target is disabled or enabled.
:param str target: Target to locate; it can be a *name* or a full *url*.
"""
# Try to see if it is cached by that ID
rt = rest_target_broker.rts_cache.get(_target, None)
if rt != None:
return rt['rtb'], rt
# Dirty messy search
for rt in rest_target_broker.rts_cache.itervalues():
if rt['id'] == _target \
or rt['url'] == _target:
return rt['rtb'], rt
raise IndexError("target-id '%s': not found" % _target)
def _rest_target_broker_find_by_id_url(target):
# Note this function finds by ID and does not care if the target is
# disabled or enabled
if target in rest_target_brokers:
return rest_target_brokers[target]
rtb, _rt = _rest_target_find_by_id(target)
return rtb
def _target_select_by_spec( rt, spec, _kws = None):
if not _kws:
_kws = {}
origin = "cmdline"
# FIXME: merge with tcfl.tc.t_c._targets_select_by_spec()
# We are going to modify the _kws dict, so make a copy!
kws = dict(_kws)
# We don't consider BSP models, just iterate over all the BSPs
bsps = rt.get('bsps', {}).keys()
kws['bsp_count'] = len(bsps)
kws_bsp = dict()
commonl.kws_update_from_rt(kws, rt)
rt_full_id = rt['fullid']
rt_type = rt['type']
for bsp in bsps:
kws_bsp.clear()
kws_bsp.update(kws)
kws_bsp['bsp'] = bsp
commonl.kws_update_type_string(kws_bsp, rt['bsps'][bsp])
logger.info("%s/%s (type:%s): considering by spec",
rt_full_id, bsp, rt_type)
if commonl.conditional_eval("target selection", kws_bsp,
spec, origin, kind = "specification"):
# This remote target matches the specification for
# this target want
logger.info("%s/%s (type:%s): candidate by spec",
rt_full_id, bsp, rt_type)
return True
else:
logger.info(
"%s/%s (type:%s): ignoring by spec; didn't match '%s'",
rt_full_id, bsp, rt_type, spec)
if bsps == []:
# If there are no BSPs, just match on the core keywords
if commonl.conditional_eval("target selection", kws,
spec, origin, kind = "specification"):
# This remote target matches the specification for
# this target want
logger.info("%s (type:%s): candidate by spec w/o BSP",
rt_full_id, rt_type)
return True
else:
logger.info("%s (type:%s): ignoring by spec w/o BSP; "
"didn't match '%s'", rt_full_id, rt_type, spec)
return False
def rest_target_list_table(args, spec):
"""
List all the targets in a table format, appending ! if powered
up, @ if owned.
"""
# Collect the targets into a list of tuples (FULLID, SUFFIX),
# where suffix will be @! (@ if owned, ! if powered)
l = []
for rt_fullid, rt in sorted(rest_target_broker.rts_cache.iteritems(),
key = lambda x: x[0]):
try:
if spec and not _target_select_by_spec(rt, spec):
continue
suffix = ""
if rt['owner']:
suffix += "@"
if rt.get('powered', False) == True:
suffix += "!"
l.append((rt_fullid, suffix))
except requests.exceptions.ConnectionError as e:
logger.error("%s: can't use: %s", rt_fullid, e)
if not l:
return
# Figure out the max target name length, so from there we can see
# how many entries we can fit per column. Note that the suffix is
# max two characters, separated from the target name with a
# space and we must leave another space for the next column (hence
# +4).
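# For example, with an 80-column terminal and maxlen = 16, each entry takes
# 20 columns, so columns = floor(80 / 20) = 4 and a 17-target list needs 5 rows.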
_h, display_w, _hp, _wp = struct.unpack(
'HHHH', fcntl.ioctl(0, termios.TIOCGWINSZ,
struct.pack('HHHH', 0, 0, 0, 0)))
maxlen = max([len(i[0]) for i in l])
columns = int(math.floor(display_w / (maxlen + 4)))
if columns < 1:
columns = 1
rows = int((len(l) + columns - 1) / columns)
# Print'em sorted; filling out columns first -- there might be a
# more elegant way to do it, but this one is quite simple and I am
# running on fumes sleep-wise...
l = sorted(l)
for row in range(rows):
for column in range(columns):
index = rows * column + row
if index >= len(l):
break
i = l[index]
sys.stdout.write(u"{fullid:{column_width}} {suffix:2} ".format(
fullid = i[0], suffix = i[1], column_width = maxlen))
sys.stdout.write("\n")
def rest_target_list(args):
specs = []
# Bring in disabled targets? (note the field is a text, not a bool)
if args.all == False:
specs.append("( disabled != 'True' )")
# Bring in target specification from the command line (if any)
if args.target:
specs.append("(" + ") or (".join(args.target) + ")")
spec = " and ".join(specs)
if args.verbosity < 1 and sys.stderr.isatty() and sys.stdout.isatty():
rest_target_list_table(args, spec)
return
else:
for rt_fullid, rt in sorted(rest_target_broker.rts_cache.iteritems(),
key = lambda x: x[0]):
try:
if spec and not _target_select_by_spec(rt, spec):
continue
rest_target_print(rt, args.verbosity)
except requests.exceptions.ConnectionError as e:
logger.error("%s: can't use: %s", rt_fullid, e)
def rest_target_find_all(all_targets = False):
"""
Return descriptors for all the known remote targets
:param bool all_targets: Include or not disabled targets
:returns: list of remote target descriptors (each being a dictionary).
"""
if all_targets == True:
return list(rest_target_broker.rts_cache.values())
targets = []
for rt in rest_target_broker.rts_cache.values():
if rt.get('disabled', 'False') in ('True', True):
continue
targets.append(rt)
return targets
def rest_target_acquire(args):
"""
:param argparse.Namespace args: object containing the processed
command line arguments; need args.target
:returns: dictionary of tags
:raises: IndexError if target not found
"""
for target in args.target:
rtb, rt = _rest_target_find_by_id(target)
rtb.rest_tb_target_acquire(rt, ticket = args.ticket)
def rest_target_enable(args):
"""
:param argparse.Namespace args: object containing the processed
command line arguments; need args.target
:raises: IndexError if target not found
"""
for target in args.target:
rtb, rt = _rest_target_find_by_id(target)
rtb.rest_tb_target_enable(rt, ticket = args.ticket)
def rest_target_disable(args):
"""
:param argparse.Namespace args: object containing the processed
command line arguments; need args.target
:raises: IndexError if target not found
"""
for target in args.target:
rtb, rt = _rest_target_find_by_id(target)
rtb.rest_tb_target_disable(rt, ticket = args.ticket)
def rest_target_property_set(args):
"""
:param argparse.Namespace args: object containing the processed
command line arguments; need args.target
:raises: IndexError if target not found
"""
rtb, rt = _rest_target_find_by_id(args.target)
rtb.rest_tb_property_set(rt, args.property, args.value,
ticket = args.ticket)
def rest_target_property_get(args):
"""
:param argparse.Namespace args: object containing the processed
command line arguments; need args.target
:raises: IndexError if target not found
"""
rtb, rt = _rest_target_find_by_id(args.target)
value = rtb.rest_tb_property_get(rt, args.property, ticket = args.ticket)
if value != None:
print value
def rest_target_release(args):
"""
:param argparse.Namespace args: object containing the processed
command line arguments; need args.target
:raises: IndexError if target not found
"""
for target in args.target:
rtb, rt = _rest_target_find_by_id(target)
rtb.rest_tb_target_release(rt, force = args.force,
ticket = args.ticket)
def rest_target_power_on(args):
"""
:param argparse.Namespace args: object containing the processed
command line arguments; need args.target
:raises: IndexError if target not found
"""
for target in args.target:
rtb, rt = _rest_target_find_by_id(target)
rtb.rest_tb_target_power_on(rt, | |
from os.path import join, dirname, isfile
from PySide2.QtWidgets import QDialog, QMessageBox, QLayout
from PySide2.QtCore import Qt, Signal
from logging import getLogger
from numpy import pi, array, array_equal
from .....GUI.Dialog.DMatLib.DMatSetup.Gen_DMatSetup import Gen_DMatSetup
from .....Classes.Material import Material
from .....Classes.ImportMatrixVal import ImportMatrixVal
from .....Classes.ImportMatrix import ImportMatrix
from .....Classes.ImportMatrixXls import ImportMatrixXls
from .....Functions.path_tools import rel_file_path
from .....loggers import GUI_LOG_NAME
class DMatSetup(Gen_DMatSetup, QDialog):
# Signal to DMatLib to update material treeview
saveNeededChanged = Signal() # Modified / Saved / Canceled (add/remove *)
materialToDelete = Signal() # Material will be deleted in DMatLib
materialToRename = Signal() # Material name/path has changed => rename in DMatLib
materialToRevert = Signal() # Revert reference from DMatLib
materialToSave = Signal() # Material to save (update reference/file/machine)
def __init__(self, parent=None, material=None):
"""Dialog for edit/show material properties
Parameters
----------
parent : Widget
Parent Widget (DMatLib)
material : Material
Material object to show/edit
"""
# Build the interface according to the .ui file
QDialog.__init__(self)
self.setupUi(self)
self.is_save_needed = False
self.init_name = None # Initial name of current Material (to revert rename)
self.init_path = None # Initial path of current Material (for rename)
self.mat = None # Current material being edited
# Set initial material
if material is not None:
self.set_material(material=material)
# === setup signals ===
# General
self.le_name.editingFinished.connect(self.set_name)
self.cb_material_type.currentIndexChanged.connect(self.set_is_isotropic)
# Elec
self.lf_rho_elec.editingFinished.connect(self.set_rho_elec)
# Magnetics
self.lf_mur_lin.editingFinished.connect(self.set_mur_lin)
self.lf_Brm20.editingFinished.connect(self.set_Brm20)
self.lf_alpha_Br.editingFinished.connect(self.set_alpha_Br)
self.lf_Wlam.editingFinished.connect(self.set_Wlam)
# Economical
self.lf_cost_unit.editingFinished.connect(self.set_cost_unit)
# Thermics
self.lf_Cp.editingFinished.connect(self.set_Cp)
self.lf_alpha.editingFinished.connect(self.set_alpha)
self.lf_L.editingFinished.connect(self.set_lambda)
self.lf_Lx.editingFinished.connect(self.set_lambda_x)
self.lf_Ly.editingFinished.connect(self.set_lambda_y)
self.lf_Lz.editingFinished.connect(self.set_lambda_z)
# Mechanics
self.lf_rho_meca.editingFinished.connect(self.set_rho_meca)
self.lf_E.editingFinished.connect(self.set_E)
self.lf_Ex.editingFinished.connect(self.set_Ex)
self.lf_Ey.editingFinished.connect(self.set_Ey)
self.lf_Ez.editingFinished.connect(self.set_Ez)
self.lf_G.editingFinished.connect(self.set_G)
self.lf_Gxy.editingFinished.connect(self.set_Gxy)
self.lf_Gxz.editingFinished.connect(self.set_Gxz)
self.lf_Gyz.editingFinished.connect(self.set_Gyz)
self.lf_nu.editingFinished.connect(self.set_nu)
self.lf_nu_xy.editingFinished.connect(self.set_nu_xy)
self.lf_nu_xz.editingFinished.connect(self.set_nu_xz)
self.lf_nu_yz.editingFinished.connect(self.set_nu_yz)
self.tab_values.saveNeeded.connect(self.set_table_values)
self.c_type_material.currentIndexChanged.connect(self.change_type_material)
# Connect buttons
self.b_delete.clicked.connect(lambda: self.materialToDelete.emit())
self.b_save.clicked.connect(lambda: self.materialToSave.emit())
self.b_cancel.clicked.connect(lambda: self.materialToRevert.emit())
def set_save_needed(self, is_save_needed=True):
"""Set if there are unsaved modifications within the object
Parameters
----------
self : DMatSetup
A DMatSetup object
is_save_needed : bool
New value for is_save_needed
"""
old = self.is_save_needed # Keep old values
self.is_save_needed = is_save_needed
self.b_save.setEnabled(is_save_needed)
self.b_cancel.setEnabled(is_save_needed)
if is_save_needed != old:
# Raise signal only if value is different
getLogger(GUI_LOG_NAME).debug("DMatSetup: Sending saveNeededChanged")
self.saveNeededChanged.emit()
def set_material(self, material, is_save_needed=False):
"""Update the current material and setup all the widgets
Parameters
----------
self : DMatSetup
A DMatSetup object
material : Material
The material to edit/show
is_save_needed : bool
True if the material is different from the reference
"""
old_mat = self.mat
self.mat = material
self.init_name = self.mat.name # Keep to revert rename
self.init_path = self.mat.path
getLogger(GUI_LOG_NAME).debug("DMatSetup: Setting material " + self.mat.name)
self.le_name.setText(self.mat.name)
if self.mat.is_isotropic:
self.cb_material_type.setCurrentIndex(1)
else:
self.cb_material_type.setCurrentIndex(0)
# === check material attribute and set values ===
# Elec
if self.mat.elec is None:
self.set_default("elec")
self.lf_rho_elec.setValue(self.mat.elec.rho)
# Economical
if self.mat.eco is None:
self.set_default("eco")
self.lf_cost_unit.setValue(self.mat.eco.cost_unit)
# Thermics
if self.mat.HT is None:
self.set_default("HT")
self.lf_Cp.setValue(self.mat.HT.Cp)
self.lf_alpha.setValue(self.mat.HT.alpha)
self.lf_L.setValue(self.mat.HT.lambda_x)
self.lf_Lx.setValue(self.mat.HT.lambda_x)
self.lf_Ly.setValue(self.mat.HT.lambda_y)
self.lf_Lz.setValue(self.mat.HT.lambda_z)
# Structural
if self.mat.struct is None:
self.set_default("struct")
self.lf_rho_meca.setValue(self.mat.struct.rho)
if self.mat.struct.Ex not in [0, None]:
self.lf_E.setValue(self.mat.struct.Ex / 1e9)
self.lf_Ex.setValue(self.mat.struct.Ex / 1e9)
else:
self.lf_E.setValue(self.mat.struct.Ex)
self.lf_Ex.setValue(self.mat.struct.Ex)
if self.mat.struct.Ey not in [0, None]:
self.lf_Ey.setValue(self.mat.struct.Ey / 1e9)
else:
self.lf_Ey.setValue(self.mat.struct.Ey)
if self.mat.struct.Ez not in [0, None]:
self.lf_Ez.setValue(self.mat.struct.Ez / 1e9)
else:
self.lf_Ez.setValue(self.mat.struct.Ez)
if self.mat.struct.Gxy not in [0, None]:
self.lf_G.setValue(self.mat.struct.Gxy / 1e9)
self.lf_Gxy.setValue(self.mat.struct.Gxy / 1e9)
else:
self.lf_G.setValue(self.mat.struct.Gxy)
self.lf_Gxy.setValue(self.mat.struct.Gxy)
if self.mat.struct.Gxz not in [0, None]:
self.lf_Gxz.setValue(self.mat.struct.Gxz / 1e9)
else:
self.lf_Gxz.setValue(self.mat.struct.Gxz)
if self.mat.struct.Gyz not in [0, None]:
self.lf_Gyz.setValue(self.mat.struct.Gyz / 1e9)
else:
self.lf_Gyz.setValue(self.mat.struct.Gyz)
self.lf_nu.setValue(self.mat.struct.nu_xy)
self.lf_nu_xy.setValue(self.mat.struct.nu_xy)
self.lf_nu_xz.setValue(self.mat.struct.nu_xz)
self.lf_nu_yz.setValue(self.mat.struct.nu_yz)
# Magnetical
if self.mat.mag is None:
self.set_default("mag")
self.lf_mur_lin.setValue(self.mat.mag.mur_lin)
self.lf_Brm20.setValue(self.mat.mag.Brm20)
self.lf_alpha_Br.setValue(self.mat.mag.alpha_Br)
self.lf_Wlam.setValue(self.mat.mag.Wlam)
# Setup tab values
if not isinstance(self.mat.mag.BH_curve, ImportMatrixVal):
self.g_BH_import.setChecked(False)
elif array_equal(self.mat.mag.BH_curve.value, array([[0, 0]])):
self.g_BH_import.setChecked(False)
else:
self.g_BH_import.setChecked(True)
self.tab_values.setWindowFlags(self.tab_values.windowFlags() & ~Qt.Dialog)
self.tab_values.title = self.g_BH_import.title()
self.tab_values.N_row_txt = "Nb of Points"
self.tab_values.shape_max = (None, 2)
self.tab_values.shape_min = (None, 2)
self.tab_values.col_header = ["H-curve(A/m)", "B-curve(T)"]
self.tab_values.unit_order = ["First column H", "First column B"]
self.tab_values.button_plot_title = "B(H)"
self.tab_values.si_col.hide()
self.tab_values.in_col.hide()
self.tab_values.b_close.hide()
self.tab_values.b_import.setHidden(False)
self.tab_values.b_export.setHidden(False)
if isinstance(self.mat.mag.BH_curve, ImportMatrixXls):
self.mat.mag.BH_curve = ImportMatrixVal(self.mat.mag.BH_curve.get_data())
self.tab_values.data = self.mat.mag.BH_curve.get_data()
elif not isinstance(self.mat.mag.BH_curve, ImportMatrixVal):
self.tab_values.data = array([[0, 0]])
elif self.mat.mag.BH_curve.get_data() is not None:
self.tab_values.data = self.mat.mag.BH_curve.get_data()
else:
self.tab_values.data = array([[0, 0]])
self.tab_values.update()
if isinstance(self.mat.mag.BH_curve, ImportMatrixVal) and not array_equal(
self.mat.mag.BH_curve.value, array([[0, 0]])
):
self.c_type_material.setCurrentIndex(2)
elif self.mat.mag.Brm20 != 0 and self.mat.mag.alpha_Br != 0:
self.c_type_material.setCurrentIndex(1)
else:
self.c_type_material.setCurrentIndex(0)
self.change_type_material()
# Hide useless widget
self.in_epsr.hide()
self.lf_epsr.hide()
self.unit_epsr.hide()
# Enable/Disable buttons
self.blockSignals(True)
self.set_save_needed(is_save_needed=is_save_needed)
self.blockSignals(False)
def set_default(self, attr):
"""When mat.elec or mat.mag are None, initialize with default values
Parameters
----------
self : DMatSetup
A DMatSetup widget
attr : str
name of the property to set
"""
setattr(self.mat, attr, type(getattr(Material(), attr))())
def set_name(self):
"""Signal to update the value of name according to the line edit
Parameters
----------
self : DMatSetup
A DMatSetup object
"""
file_name = str(self.le_name.text())
if file_name == self.init_name:
return # New name is the same as the previous one
# Check that the user wants to rename the materials
msg = self.tr(
"Do you want to rename your material to "
+ file_name
+ " ?\nAll current modifications (if any) on the material will be saved."
)
reply = QMessageBox.question(
self,
self.tr("Renaming material"),
msg,
QMessageBox.Yes,
QMessageBox.No,
)
self.qmessagebox_question = reply
if reply == QMessageBox.No:
# Revert name
self.le_name.blockSignals(True)
self.le_name.setText(self.init_name)
self.le_name.blockSignals(False)
return
# Check that new name is correct (doesn't exist)
filepath = rel_file_path(
join(dirname(self.mat.path), file_name + ".json"), "MATLIB_DIR"
)
if isfile(filepath):
QMessageBox().critical(
self,
self.tr("Error"),
self.tr(
"A material with the name "
+ file_name
+ " already exist!\nPlease enter another name."
),
)
# Revert name
self.le_name.blockSignals(True)
self.le_name.setText(self.init_name)
self.le_name.blockSignals(False)
return
# Update name and path
self.mat.name = file_name
self.le_name.setText(self.mat.name)
self.mat.path = rel_file_path(
join(dirname(self.mat.path), file_name + ".json"), "MATLIB_DIR"
)
self.set_save_needed(is_save_needed=False)
self.materialToRename.emit() # Update reference and treeview
def set_is_isotropic(self):
"""Signal to update the value of is_isotropic according to the checkbox
Parameters
----------
self :
A DMatSetup object
Returns
-------
None
"""
if self.cb_material_type.currentText() == "Isotropic":
self.mat.is_isotropic = True
self.nav_meca.setCurrentIndex(1)
self.nav_ther.setCurrentIndex(1)
elif self.cb_material_type.currentText() == "Orthotropic":
self.mat.is_isotropic = False
self.nav_meca.setCurrentIndex(0)
self.nav_ther.setCurrentIndex(0)
self.set_save_needed(is_save_needed=True)
def set_rho_elec(self):
"""Signal to update the value of rho_elec according to the line edit
Parameters
----------
self :
A DMatSetup object
Returns
-------
None
"""
if self.mat.elec.rho != self.lf_rho_elec.value():
self.mat.elec.rho = self.lf_rho_elec.value()
self.set_save_needed(is_save_needed=True)
def set_mur_lin(self):
"""Signal to update the value of mur_lin according to the line edit
Parameters
----------
self :
A DMatSetup object
Returns
-------
None
"""
if self.mat.mag.mur_lin != self.lf_mur_lin.value():
self.mat.mag.mur_lin = self.lf_mur_lin.value()
self.set_save_needed(is_save_needed=True)
def set_Brm20(self):
"""Signal to update the value of Brm20 according to the line edit
Parameters
----------
self :
A DMatSetup object
Returns
-------
None
"""
if self.mat.mag.Brm20 != self.lf_Brm20.value():
self.mat.mag.Brm20 = self.lf_Brm20.value()
self.set_save_needed(is_save_needed=True)
def set_alpha_Br(self):
"""Signal to update the value of alpha_Br according to the line edit
Parameters
----------
self :
A DMatSetup object
Returns
-------
None
"""
if self.mat.mag.alpha_Br != self.lf_alpha_Br.value():
self.mat.mag.alpha_Br = self.lf_alpha_Br.value()
self.set_save_needed(is_save_needed=True)
def set_Wlam(self):
"""Signal to update the value of Wlam according to the line edit
Parameters
----------
self :
A DMatSetup object
Returns
-------
None
"""
if self.mat.mag.Wlam != self.lf_Wlam.value():
self.mat.mag.Wlam = self.lf_Wlam.value()
self.set_save_needed(is_save_needed=True)
def set_cost_unit(self):
"""Signal to update the value of cost_unit according to the line edit
Parameters
----------
self :
A DMatSetup object
Returns
-------
None
"""
if self.mat.eco.cost_unit != self.lf_cost_unit.value():
self.mat.eco.cost_unit = self.lf_cost_unit.value()
self.set_save_needed(is_save_needed=True)
def set_Cp(self):
"""Signal to update the value of Cp according to the line edit
Parameters
----------
self :
A DMatSetup object
Returns
-------
None
"""
if self.mat.HT.Cp != self.lf_Cp.value():
self.mat.HT.Cp = self.lf_Cp.value()
self.set_save_needed(is_save_needed=True)
def set_alpha(self):
"""Signal to update the value of alpha according to the line edit
Parameters
----------
self :
A DMatSetup object
Returns
-------
None
"""
if self.mat.HT.alpha != self.lf_alpha.value():
self.mat.HT.alpha = self.lf_alpha.value()
self.set_save_needed(is_save_needed=True)
def set_lambda(self):
"""Signal to update the value of lambda according to the line edit
Parameters
----------
self :
A DMatSetup object
Returns
-------
None
"""
if self.mat.HT.lambda_x != self.lf_L.value():
self.mat.HT.lambda_x = self.lf_L.value()
self.mat.HT.lambda_y = self.lf_L.value()
self.mat.HT.lambda_z = self.lf_L.value()
self.set_save_needed(is_save_needed=True)
def set_lambda_x(self):
"""Signal to update the value of lambda_x according to the line edit
Parameters
----------
self :
A DMatSetup object
Returns
-------
None
"""
if self.mat.HT.lambda_x != self.lf_Lx.value():
self.mat.HT.lambda_x = self.lf_Lx.value()
self.set_save_needed(is_save_needed=True)
def set_lambda_y(self):
"""Signal to update the value of lambda_y according to the line edit
Parameters
----------
self :
A DMatSetup object
Returns
-------
None
"""
if self.mat.HT.lambda_y != self.lf_Ly.value():
self.mat.HT.lambda_y = self.lf_Ly.value()
self.set_save_needed(is_save_needed=True)
def set_lambda_z(self):
"""Signal to update the value of lambda_z according to the line edit
Parameters
----------
self :
A DMatSetup object
Returns
-------
| |
= xgb.XGBClassifier(max_depth=9, n_estimators=450, learning_rate=0.01)
xgclass.fit(x_train,y_train)
#print(xgclass.best_params_)
print("In-sample accuracy: " + str(train_acc_score(xgclass)))
print("Test accuracy: " + str(test_acc_score(xgclass)))
print ("In-sample Precision Score: " + str(train_prec_score(xgclass)))
print ("Test Precision Score: " + str(test_prec_score(xgclass)))
print ("In-sample F1 Score: " + str(train_f1(xgclass)))
print ("Test F1 Score: " + str(test_f1(xgclass)))
confusion_matrix_model_train(xgclass)
# In[ ]:
importance_of_features(xgclass)
# ## **3.3 Evaluation for models predicting results**
# In[ ]:
Classifiers=["Logistic Regression (Lasso)","Logistic Regression (Ridge)","Support Vector Machine (RBF)","Support Vector Machine(Linear)","K-Nearest Neighbours","Decision Tree","Random Forest","XGBoost"]
in_sample_acc=[round(train_acc_score(x),2) for x in [log_reg1,log_reg2,svc_rbf,svc_lin,KNN,Dec_tree,ranfor,xgclass]]
test_acc=[round(test_acc_score(x),2) for x in [log_reg1,log_reg2,svc_rbf,svc_lin,KNN,Dec_tree,ranfor,xgclass]]
train_prec = [round(train_prec_score(x),2) for x in [log_reg1,log_reg2,svc_rbf,svc_lin,KNN,Dec_tree,ranfor,xgclass]]
test_prec = [round(test_prec_score(x),2) for x in [log_reg1,log_reg2,svc_rbf,svc_lin,KNN,Dec_tree,ranfor,xgclass]]
trainf1 = [train_f1(x) for x in [log_reg1,log_reg2,svc_rbf,svc_lin,KNN,Dec_tree,ranfor,xgclass]]
testf1 = [test_f1(x) for x in [log_reg1,log_reg2,svc_rbf,svc_lin,KNN,Dec_tree,ranfor,xgclass]]
cols=["Classifier","Training Accuracy","Test Accuracy","Training Precision","Test Precision","Training F1 Score","Test F1 Score"]
pred_results = pd.DataFrame(columns=cols)
pred_results["Classifier"]=Classifiers
pred_results["Training Accuracy"]=in_sample_acc
pred_results["Test Accuracy"]=test_acc
pred_results["Training Precision"]=train_prec
pred_results["Test Precision"]=test_prec
pred_results["Training F1 Score"]=trainf1
pred_results["Test F1 Score"]=testf1
pred_results
# **Selected model to predict W/D/L result: **
#
# **XGBoost** --> Highest Test set F1 score, along with highest test set accuracy and precision
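# Since xgclass is the model we will reuse, here is a small sketch of persisting
# it with joblib so it can be reloaded later without retraining (the file name
# is arbitrary):
# In[ ]:
from joblib import dump, load
dump(xgclass, "wdl_xgboost.joblib")
wdl_model = load("wdl_xgboost.joblib")
print("Reloaded test accuracy:", test_acc_score(wdl_model))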
# # **4. Classification Models to predict exact goals scored by Home and Away Sides**
# ## **4.1 Splitting into training and test set**
# Again, we will use an 80-20 split for the training and test sets, with 5-fold cross validation.
# In[ ]:
x = results.loc[:,["country","impt","home_rank_change","away_rank_change","diff_in_ranking","diff_in_mean_weighted_over_years"]]
y_home=results.loc[:,"home_score"]
y_away=results.loc[:,"away_score"]
# In[ ]:
x_home_train,x_home_test,y_home_train,y_home_test=train_test_split(x,y_home,test_size=0.2,random_state=0)
x_away_train,x_away_test,y_away_train,y_away_test=train_test_split(x,y_away,test_size=0.2,random_state=0)
k_fold = KFold(n_splits=5, shuffle=True, random_state=0)
# Functions to evaluate the models for goals scored for home and away:
# In[ ]:
#home goals
def home_train_acc_score(model):
return round(np.mean(cross_val_score(model,x_home_train,y_home_train,cv=k_fold,scoring="accuracy")),2)
def home_test_acc_score(model):
return round(accuracy_score(y_home_test, model.predict(x_home_test)),2)
def home_train_prec_score(model):
return round(precision_score(y_home_train,model.predict(x_home_train),average='macro'),2)
def home_test_prec_score(model):
return round(precision_score(y_home_test,model.predict(x_home_test),average='macro'),2)
def home_train_f1(model):
return round(f1_score(y_home_train,model.predict(x_home_train),average='macro'),2)
def home_test_f1(model):
return round(f1_score(y_home_test,model.predict(x_home_test),average='macro'),2)
def home_confusion_matrix_model_train(model_used):
cm=confusion_matrix(y_home_train,model_used.predict(x_home_train))
col=["Predicted Home Goals: 0","Predicted Home Goals: 1","Predicted Home Goals: 2","Predicted Home Goals: 3","Predicted Home Goals: 4","Predicted Home Goals: 5","Predicted Home Goals: 6"]
cm=pd.DataFrame(cm)
cm.columns=["Predicted Home Goals: 0","Predicted Home Goals: 1","Predicted Home Goals: 2","Predicted Home Goals: 3","Predicted Home Goals: 4","Predicted Home Goals: 5","Predicted Home Goals: 6"]
cm.index=["Actual Home Goals: 0","Actual Home Goals: 1","Actual Home Goals: 2","Actual Home Goals: 3","Actual Home Goals: 4","Actual Home Goals: 5","Actual Home Goals: 6"]
#cm[col]=np(cm[col])
return cm.T
def home_confusion_matrix_model_test(model_used):
cm=confusion_matrix(y_home_test,model_used.predict(x_home_test))
col=["Predicted Home Goals: 0","Predicted Home Goals: 1","Predicted Home Goals: 2","Predicted Home Goals: 3","Predicted Home Goals: 4","Predicted Home Goals: 5","Predicted Home Goals: 6"]
cm=pd.DataFrame(cm)
cm.columns=["Predicted Home Goals: 0","Predicted Home Goals: 1","Predicted Home Goals: 2","Predicted Home Goals: 3","Predicted Home Goals: 4","Predicted Home Goals: 5","Predicted Home Goals: 6"]
cm.index=["Actual Home Goals: 0","Actual Home Goals: 1","Actual Home Goals: 2","Actual Home Goals: 3","Actual Home Goals: 4","Actual Home Goals: 5","Actual Home Goals: 6"]
#cm[col]=np(cm[col])
return cm.T
def home_importance_of_features(model):
features = pd.DataFrame()
features['feature'] = x_home_train.columns
features['importance'] = model.feature_importances_
features.sort_values(by=['importance'], ascending=True, inplace=True)
features.set_index('feature', inplace=True)
return features.plot(kind='barh', figsize=(10,10))
#away goals
def away_train_acc_score(model):
return round(np.mean(cross_val_score(model,x_away_train,y_away_train,cv=k_fold,scoring="accuracy")),2)
def away_test_acc_score(model):
return round(accuracy_score(y_away_test, model.predict(x_away_test)),2)
def away_train_prec_score(model):
return round(precision_score(y_away_train,model.predict(x_away_train),average='macro'),2)
def away_test_prec_score(model):
return round(precision_score(y_away_test,model.predict(x_away_test),average='macro'),2)
def away_train_f1(model):
return round(f1_score(y_away_train,model.predict(x_away_train),average='macro'),2)
def away_test_f1(model):
return round(f1_score(y_away_test,model.predict(x_away_test),average='macro'),2)
def away_confusion_matrix_model_train(model_used):
cm=confusion_matrix(y_away_train,model_used.predict(x_away_train))
col=["Predicted Away Goals: 0","Predicted Away Goals: 1","Predicted Away Goals: 2","Predicted Away Goals: 3","Predicted Away Goals: 4","Predicted Away Goals: 5","Predicted Away Goals: 6"]
cm=pd.DataFrame(cm)
cm.columns=["Predicted Away Goals: 0","Predicted Away Goals: 1","Predicted Away Goals: 2","Predicted Away Goals: 3","Predicted Away Goals: 4","Predicted Away Goals: 5","Predicted Away Goals: 6"]
cm.index=["Actual Away Goals: 0","Actual Away Goals: 1","Actual Away Goals: 2","Actual Away Goals: 3","Actual Away Goals: 4","Actual Away Goals: 5","Actual Away Goals: 6"]
#cm[col]=np(cm[col])
return cm.T
def away_confusion_matrix_model_test(model_used):
cm=confusion_matrix(y_away_test,model_used.predict(x_away_test))
col=["Predicted Away Goals: 0","Predicted Away Goals: 1","Predicted Away Goals: 2","Predicted Away Goals: 3","Predicted Away Goals: 4","Predicted Away Goals: 5","Predicted Away Goals: 6"]
cm=pd.DataFrame(cm)
cm.columns=["Predicted Away Goals: 0","Predicted Away Goals: 1","Predicted Away Goals: 2","Predicted Away Goals: 3","Predicted Away Goals: 4","Predicted Away Goals: 5","Predicted Away Goals: 6"]
cm.index=["Actual Away Goals: 0","Actual Away Goals: 1","Actual Away Goals: 2","Actual Away Goals: 3","Actual Away Goals: 4","Actual Away Goals: 5","Actual Away Goals: 6"]
#cm[col]=np(cm[col])
return cm.T
def away_importance_of_features(model):
features = pd.DataFrame()
features['feature'] = x_away_train.columns
features['importance'] = model.feature_importances_
features.sort_values(by=['importance'], ascending=True, inplace=True)
features.set_index('feature', inplace=True)
return features.plot(kind='barh', figsize=(10,10))
# ## **4.2 Classification models for goals scored by Home Side**
# Just like in (3.2), the models will be optimised using GridSearchCV based on F1 score.
#
# I have typed in the optimised parameters based on the GridSearchCV output, then commented out the GridSearchCV code so the notebook runs faster without re-optimising (a sketch of this search-then-hard-code pattern is shown right below).
#
# Confusion matrix tables and details will only be shown for the final selected models in order to save space. There will be a summary of each model in the evaluation section below.
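# A minimal sketch of the search-then-hard-code pattern described above, using
# the home-goals split. The parameter grid is illustrative, and the explicit
# solver="liblinear" is only needed on newer scikit-learn versions where the
# default solver does not support the l1 penalty.
# In[ ]:
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
search = GridSearchCV(LogisticRegression(penalty="l1", solver="liblinear"),
                      param_grid=dict(C=(0.001, 0.01, 0.1, 1)),
                      scoring="f1_macro", cv=k_fold)
search.fit(x_home_train, y_home_train)
print(search.best_params_)  # copy these values into the hard-coded estimator below
tuned_model = LogisticRegression(penalty="l1", solver="liblinear",
                                 C=search.best_params_["C"])
tuned_model.fit(x_home_train, y_home_train)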
# **4.2.1. Logistic Regression (Lasso)**
# In[ ]:
param_grid = dict(C=(0.0001,0.001,0.005,0.01,0.1,0.5,1))
homelog_reg1 = GridSearchCV(LogisticRegression(penalty="l1"),param_grid=param_grid,scoring="f1_macro")
#homelog_reg1=LogisticRegression(penalty="l1")
homelog_reg1.fit(x_home_train,y_home_train)
#predicted=log_reg1.predict(x_test)
print(homelog_reg1.best_params_)
print("In-sample accuracy: " + str(home_train_acc_score(homelog_reg1)))
print("Test accuracy: " + str(home_test_acc_score(homelog_reg1)))
print ("In-sample Precision Score: " + str(home_train_prec_score(homelog_reg1)))
print ("Test Precision Score: " + str(home_test_prec_score(homelog_reg1)))
print ("In-sample F1 Score: " + str(home_train_f1(homelog_reg1)))
print ("Test F1 Score: " + str(home_test_f1(homelog_reg1)))
home_confusion_matrix_model_train(homelog_reg1)
# **4.2.2. Logistic Regression (Ridge)**
# In[ ]:
param_grid = dict(C=(0.0001,0.001,0.005,0.01,0.1,0.5,1))
homelog_reg2 = GridSearchCV(LogisticRegression(penalty="l2"),param_grid=param_grid,scoring="f1_macro")
#homelog_reg1=LogisticRegression(penalty="l1")
homelog_reg2.fit(x_home_train,y_home_train)
#predicted=log_reg1.predict(x_test)
print(homelog_reg2.best_params_)
print("In-sample accuracy: " + str(home_train_acc_score(homelog_reg2)))
print("Test accuracy: " + str(home_test_acc_score(homelog_reg2)))
print ("In-sample Precision Score: " + str(home_train_prec_score(homelog_reg2)))
print ("Test Precision Score: " + str(home_test_prec_score(homelog_reg2)))
print ("In-sample F1 Score: " + str(home_train_f1(homelog_reg2)))
print ("Test F1 Score: " + str(home_test_f1(homelog_reg2)))
#home_confusion_matrix_model_train(homelog_reg2)
# **4.2.3. SVM (RBF Kernel)**
# In[ ]:
#param_grid = dict(C=(0.001,0.01,0.1,0.5,1,2),gamma=(0.001,0.01,0.1,0.5,1,2))
#homesvc_rbf = GridSearchCV(SVC(kernel="rbf",random_state=0),param_grid=param_grid,scoring="f1_macro")
homesvc_rbf = SVC(kernel='rbf', gamma=0.001, C=1,random_state=0)
homesvc_rbf.fit(x_home_train, y_home_train)
#print(homesvc_rbf.best_params_)
print("In-sample accuracy: " + str(home_train_acc_score(homesvc_rbf)))
print("Test accuracy: " + str(home_test_acc_score(homesvc_rbf)))
print ("In-sample Precision Score: " + str(home_train_prec_score(homesvc_rbf)))
print ("Test Precision Score: " + str(home_test_prec_score(homesvc_rbf)))
print ("In-sample F1 Score: " + str(home_train_f1(homesvc_rbf)))
print ("Test F1 Score: " + str(home_test_f1(homesvc_rbf)))
#home_confusion_matrix_model_train(homesvc_rbf)
# **4.2.4. KNN**
# In[ ]:
#param_grid = dict(n_neighbors=np.arange(10,70),weights=("uniform","distance"),p=(1,2))
#homeKNN = GridSearchCV(KNeighborsClassifier(),param_grid=param_grid,scoring="f1_macro")
homeKNN=KNeighborsClassifier(n_neighbors=10,p=1,weights='uniform')
homeKNN.fit(x_home_train,y_home_train)
predicted=homeKNN.predict(x_home_test)
#print(homeKNN.best_params_)
print("In-sample accuracy: " + str(home_train_acc_score(homeKNN)))
print("Test accuracy: " + str(home_test_acc_score(homeKNN)))
print ("In-sample Precision Score: " + str(home_train_prec_score(homeKNN)))
print ("Test Precision Score: " + str(home_test_prec_score(homeKNN)))
print ("In-sample F1 Score: " + str(home_train_f1(homeKNN)))
print ("Test F1 Score: " + str(home_test_f1(homeKNN)))
#home_confusion_matrix_model_train(homeKNN)
# **4.2.5. Decision Tree**
# In[ ]:
#param_grid = dict(max_depth=np.arange(4,10),min_samples_leaf=np.arange(1,8),min_samples_split=np.arange(2,8),max_leaf_nodes=np.arange(30,100,10))
#homeDec_tree = GridSearchCV(DecisionTreeClassifier(),param_grid=param_grid,scoring="f1_macro")
homeDec_tree=DecisionTreeClassifier(max_depth= 8, max_leaf_nodes= 50, min_samples_leaf= 1, min_samples_split= 6,random_state=0)
homeDec_tree.fit(x_home_train,y_home_train)
predicted=homeDec_tree.predict(x_home_test)
#print(homeDec_tree.best_params_)
print("In-sample accuracy: " + str(home_train_acc_score(homeDec_tree)))
print("Test accuracy: " + str(home_test_acc_score(homeDec_tree)))
print ("In-sample Precision Score: " + str(home_train_prec_score(homeDec_tree)))
print ("Test Precision Score: " + str(home_test_prec_score(homeDec_tree)))
print ("In-sample F1 Score: " + str(home_train_f1(homeDec_tree)))
print ("Test F1 Score: " + str(home_test_f1(homeDec_tree)))
#home_confusion_matrix_model_train(homeDec_tree)
# **4.2.6. Random Forest**
# In[ ]:
#param_grid = dict(max_depth=np.arange(3,10),min_samples_leaf=np.arange(1,10),min_samples_split=np.arange(2,6),max_leaf_nodes=np.arange(50,120,10))
#param_grid = dict(n_estimators = np.arange(50,500,50))
#homeranfor = GridSearchCV(RandomForestClassifier(max_depth= 4, max_leaf_nodes=50, min_samples_leaf= 1, min_samples_split= 3,random_state=0),param_grid=param_grid,scoring="f1_macro")
homeranfor = RandomForestClassifier(n_estimators=250,max_depth= 4, max_leaf_nodes=50, min_samples_leaf= 1, min_samples_split= 3,random_state=0)
homeranfor.fit(x_home_train,y_home_train)
predicted=homeranfor.predict(x_home_test)
#print(ranfor.best_params_)
print("In-sample accuracy: " + str(home_train_acc_score(homeranfor)))
print("Test accuracy: " + str(home_test_acc_score(homeranfor)))
print ("In-sample Precision Score: " + str(home_train_prec_score(homeranfor)))
print ("Test Precision Score: " + str(home_test_prec_score(homeranfor)))
print ("In-sample F1 Score: " + str(home_train_f1(homeranfor)))
print ("Test F1 Score: " + str(home_test_f1(homeranfor)))
#home_confusion_matrix_model_train(homeranfor)
# **4.2.7. XGBooost**
# In[ ]:
#param_grid = dict(n_estimators=np.arange(50,500,50),max_depth=np.arange(6,12),learning_rate=(0.0001,0.001,0.01,0.1))
#homexgclass = GridSearchCV(xgb.XGBClassifier(random_state=0),param_grid=param_grid,scoring="f1_macro")
homexgclass = xgb.XGBClassifier(max_depth=11, n_estimators=350, learning_rate=0.01)
homexgclass.fit(x_home_train,y_home_train)
predicted=homexgclass.predict(x_home_test)
#print(homexgclass.best_params_)
print("In-sample accuracy: " + str(home_train_acc_score(homexgclass)))
print("Test accuracy: " + str(home_test_acc_score(homexgclass)))
print ("In-sample Precision Score: " + str(home_train_prec_score(homexgclass)))
print ("Test Precision Score: " + str(home_test_prec_score(homexgclass)))
print ("In-sample F1 Score: " + str(home_train_f1(homexgclass)))
print ("Test F1 Score: " + str(home_test_f1(homexgclass)))
#home_confusion_matrix_model_train(homexgclass)
# ## **4.3 Evaluation for Models predicting Home Goals**
# In[ ]:
Classifiers=["Logistic Regression (Lasso)","Logistic Regression (Ridge)","Support Vector Machine (RBF)","K-Nearest Neighbours","Decision Tree","Random Forest","XGBoost"]
in_sample_acc=[round(home_train_acc_score(x),2) for x in [homelog_reg1,homelog_reg2,homesvc_rbf,homeKNN,homeDec_tree,homeranfor,homexgclass]]
test_acc=[round(home_test_acc_score(x),2) for x in [homelog_reg1,homelog_reg2,homesvc_rbf,homeKNN,homeDec_tree,homeranfor,homexgclass]]
train_prec = [round(home_train_prec_score(x),2) for x in [homelog_reg1,homelog_reg2,homesvc_rbf,homeKNN,homeDec_tree,homeranfor,homexgclass]]
test_prec = [round(home_test_prec_score(x),2) for x in [homelog_reg1,homelog_reg2,homesvc_rbf,homeKNN,homeDec_tree,homeranfor,homexgclass]]
trainf1 = [home_train_f1(x) for x in [homelog_reg1,homelog_reg2,homesvc_rbf,homeKNN,homeDec_tree,homeranfor,homexgclass]]
testf1 = [home_test_f1(x) for x in [homelog_reg1,homelog_reg2,homesvc_rbf,homeKNN,homeDec_tree,homeranfor,homexgclass]]
cols=["Classifier","Training Accuracy","Test Accuracy","Training Precision","Test Precision","Training F1 Score","Test F1 Score"]
Home_goals_pred = pd.DataFrame(columns=cols)
Home_goals_pred["Classifier"]=Classifiers
Home_goals_pred["Training Accuracy"]=in_sample_acc
Home_goals_pred["Test Accuracy"]=test_acc
Home_goals_pred["Training Precision"]=train_prec
Home_goals_pred["Test Precision"]=test_prec
Home_goals_pred["Training F1 Score"]=trainf1
Home_goals_pred["Test F1 Score"]=testf1
Home_goals_pred
# We will use **Logistic Regression (Lasso)** to predict goals scored by the home side. Although XGBoost has the highest F1 score, logistic regression's accuracy and precision are higher on the test set, and XGBoost's precision differs a lot between training and test, which suggests overfitting. Logistic regression is also a simpler model to use.
#
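# A usage sketch for the selected home-goals model: score a single hypothetical
# fixture. The column names come from the feature list defined in 4.1, but the
# values below are made up for illustration -- in the real data "country" and
# "impt" are numeric encodings produced earlier in the notebook.
# In[ ]:
new_fixture = pd.DataFrame([{
    "country": 0,
    "impt": 1,
    "home_rank_change": -2,
    "away_rank_change": 3,
    "diff_in_ranking": -10,
    "diff_in_mean_weighted_over_years": 4.5,
}])
print("Predicted home goals:", homelog_reg1.predict(new_fixture)[0])
print("Class probabilities:", homelog_reg1.predict_proba(new_fixture)[0])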
# ## **4.4 Classification models for goals scored by Away Side**
# Just like in (4.2), the models will be optimised using GridSearchCV based on F1 score.
#
# I have typed in the optimised parameters based on the GridSearchCV output, then commented out the GridSearchCV code so the notebook runs faster without re-optimising.
#
# Confusion matrix tables and details will only be shown for the final selected models in order to save space. There will be a summary of each model in the evaluation section below.
# **4.4.1. Logistic Regression (Lasso)**
# In[ ]:
param_grid = dict(C=(0.0001,0.001,0.005,0.01,0.1,0.5,1))
awaylog_reg1 = GridSearchCV(LogisticRegression(penalty="l1"),param_grid=param_grid,scoring="f1_macro")
#awaylog_reg1=LogisticRegression(penalty="l1")
awaylog_reg1.fit(x_away_train,y_away_train)
#predicted=awaylog_reg1.predict(x_test)
print(awaylog_reg1.best_params_)
print("In-sample accuracy: " + str(away_train_acc_score(awaylog_reg1)))
print("Test accuracy: " + str(away_test_acc_score(awaylog_reg1)))
print ("In-sample Precision Score: " + str(away_train_prec_score(awaylog_reg1)))
print ("Test Precision Score: " + str(away_test_prec_score(awaylog_reg1)))
print ("In-sample F1 Score: " + str(away_train_f1(awaylog_reg1)))
print ("Test F1 Score: " + str(away_test_f1(awaylog_reg1)))
away_confusion_matrix_model_train(awaylog_reg1)
# **4.4.2. Logistic Regression (Ridge)**
# In[ ]:
param_grid = dict(C=(0.0001,0.001,0.005,0.01,0.1,0.5,1))
awaylog_reg2 = GridSearchCV(LogisticRegression(penalty="l2"),param_grid=param_grid,scoring="f1_macro")
#awaylog_reg1=LogisticRegression(penalty="l1")
awaylog_reg2.fit(x_away_train,y_away_train)
#predicted=awaylog_reg1.predict(x_test)
print(awaylog_reg2.best_params_)
print("In-sample accuracy: " + str(away_train_acc_score(awaylog_reg2)))
print("Test accuracy: " + str(away_test_acc_score(awaylog_reg2)))
print ("In-sample Precision Score: " + str(away_train_prec_score(awaylog_reg2)))
print ("Test Precision Score: " + str(away_test_prec_score(awaylog_reg2)))
print ("In-sample F1 Score: " + str(away_train_f1(awaylog_reg2)))
print ("Test F1 Score: " + str(away_test_f1(awaylog_reg2)))
#away_confusion_matrix_model_train(awaylog_reg2)
# **4.4.3. SVM (RBF | |
# nexxT/services/gui/MainWindow.py (repo: ifm/nexxT)
# SPDX-License-Identifier: Apache-2.0
# Copyright (C) 2020 ifm electronic gmbh
#
# THE PROGRAM IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND.
#
"""
This module provides a MainWindow GUI service for the nexxT framework.
"""
import logging
import re
import subprocess
import sys
import shiboken2
from PySide2.QtWidgets import (QMainWindow, QMdiArea, QMdiSubWindow, QDockWidget, QAction, QWidget, QGridLayout,
QMenuBar, QMessageBox, QScrollArea, QLabel)
from PySide2.QtCore import (QObject, Signal, Slot, Qt, QByteArray, QDataStream, QIODevice, QRect, QPoint, QSettings,
QTimer, QUrl)
from PySide2.QtGui import QDesktopServices
import nexxT
from nexxT.interface import Filter
from nexxT.core.Application import Application
logger = logging.getLogger(__name__)
class NexxTMdiSubWindow(QMdiSubWindow):
"""
Need subclassing for getting close / show events and saving / restoring state.
"""
visibleChanged = Signal(bool)
def closeEvent(self, closeEvent):
"""
override from QMdiSubWindow
:param closeEvent: a QCloseEvent instance
:return:
"""
logger.internal("closeEvent widget=%s", self.widget())
self.visibleChanged.emit(False)
return super().closeEvent(closeEvent)
def showEvent(self, showEvent):
"""
override from QMdiSubWindow
:param showEvent: a QShowEvent instance
:return:
"""
logger.internal("showEvent widget=%s", self.widget())
self.visibleChanged.emit(True)
res = super().showEvent(showEvent)
# no idea why this is necessary, but otherwise the child window is not shown
if self.widget() is not None:
self.widget().show()
return res
def saveGeometry(self):
"""
Saves the geometry of this subwindow (see https://bugreports.qt.io/browse/QTBUG-18648)
:return: a ByteArray instance
"""
array = QByteArray()
stream = QDataStream(array, QIODevice.WriteOnly)
stream.writeUInt32(0x1D9D0CB)
stream.writeUInt16(1)
stream.writeUInt16(0)
frameGeom = self.frameGeometry()
stream.writeInt64(frameGeom.x())
stream.writeInt64(frameGeom.y())
stream.writeInt64(frameGeom.width())
stream.writeInt64(frameGeom.height())
normalGeom = self.normalGeometry()
stream.writeInt64(normalGeom.x())
stream.writeInt64(normalGeom.y())
stream.writeInt64(normalGeom.width())
stream.writeInt64(normalGeom.height())
stream.writeUInt32(self.windowState() & Qt.WindowMaximized)
stream.writeUInt32(self.windowState() & Qt.WindowFullScreen)
return array
def restoreGeometry(self, geometry):
"""
Restores the geometry of this subwindow
:param geometry: the saved state as a QByteArray instance
:return:
"""
if geometry.size() < 4:
return False
stream = QDataStream(geometry)
if stream.readUInt32() != 0x1D9D0CB:
return False
if stream.readUInt16() != 1:
return False
stream.readUInt16() # minorVersion is ignored.
x = stream.readInt64()
y = stream.readInt64()
width = stream.readInt64()
height = stream.readInt64()
restoredFrameGeometry = QRect(x, y, width, height)
x = stream.readInt64()
y = stream.readInt64()
width = stream.readInt64()
height = stream.readInt64()
restoredNormalGeometry = QRect(x, y, width, height)
maximized = stream.readUInt32()
fullScreen = stream.readUInt32()
frameHeight = 20
if not restoredFrameGeometry.isValid():
restoredFrameGeometry = QRect(QPoint(0, 0), self.sizeHint())
if not restoredNormalGeometry.isValid():
restoredNormalGeometry = QRect(QPoint(0, frameHeight), self.sizeHint())
restoredFrameGeometry.moveTop(max(restoredFrameGeometry.top(), 0))
restoredNormalGeometry.moveTop(max(restoredNormalGeometry.top(), 0 + frameHeight))
if maximized or fullScreen:
self.setGeometry(restoredNormalGeometry)
ws = self.windowState()
if maximized:
ws |= Qt.WindowMaximized
if fullScreen:
ws |= Qt.WindowFullScreen
self.setWindowState(ws)
else:
offset = QPoint()
self.setWindowState(self.windowState() & ~(Qt.WindowMaximized|Qt.WindowFullScreen))
self.move(restoredFrameGeometry.topLeft() + offset)
self.resize(restoredNormalGeometry.size())
return True
class NexxTDockWidget(QDockWidget):
"""
Need subclassing for getting close / show events
"""
visibleChanged = Signal(bool)
def closeEvent(self, closeEvent):
"""
override from QDockWidget
:param closeEvent: a QCloseEvent instance
:return:
"""
self.visibleChanged.emit(False)
return super().closeEvent(closeEvent)
def showEvent(self, showEvent):
"""
override from QDockWidget
:param showEvent: a QShowEvent instance
:return:
"""
self.visibleChanged.emit(True)
return super().showEvent(showEvent)
class MainWindow(QMainWindow):
"""
Main Window service for the nexxT framework. Other services usually create dock windows, while filters use the
subplot functionality to create grid-layout views.
"""
mdiSubWindowCreated = Signal(QMdiSubWindow) # TODO: deprecated, can be removed in later versions
aboutToClose = Signal(object)
userSelectionChanged = Signal(str, QPoint)
def __init__(self, config):
super().__init__()
self.config = config
self.config.appActivated.connect(self._appActivated)
self.mdi = QMdiArea(self)
self.mdi.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)
self.mdi.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
self.setCentralWidget(self.mdi)
self.menu = self.menuBar().addMenu("&Windows")
self.aboutMenu = QMenuBar(self.menuBar())
self.menuBar().setCornerWidget(self.aboutMenu)
m = self.aboutMenu.addMenu("&Help")
self.helpNexxT = QAction("Help ...")
self.aboutNexxT = QAction("About nexxT ...")
self.aboutQt = QAction("About Qt ...")
self.aboutPython = QAction("About Python ...")
m.addActions([self.helpNexxT])
m.addSeparator()
m.addActions([self.aboutNexxT, self.aboutQt, self.aboutPython])
self.helpNexxT.triggered.connect(lambda: QDesktopServices.openUrl(QUrl("https://nexxT.readthedocs.org")))
self.aboutNexxT.triggered.connect(lambda: QMessageBox.about(self, "About nexxT", """\
This program uses <b>nexxT</b> %(version)s, a generic hybrid python/c++ framework for developing computer vision
algorithms.<br><br>
nexxT is available under the <a href='https://github.com/ifm/nexxT/blob/master/LICENSE'>Apache 2.0 License</a> together
with the <a href='https://github.com/ifm/nexxT/blob/master/NOTICE'>notice</a>.
""" % dict(version=nexxT.__version__)))
self.aboutQt.triggered.connect(lambda: QMessageBox.aboutQt(self))
self.aboutPython.triggered.connect(self._aboutPython)
self.toolbar = None
self.managedMdiWindows = []
self.managedSubplots = {}
self.windows = {}
self.activeApp = None
self._ignoreCloseEvent = False
def closeEvent(self, closeEvent):
"""
Override from QMainWindow, saves the state.
:param closeEvent: a QCloseEvent instance
:return:
"""
self._ignoreCloseEvent = False
self.aboutToClose.emit(self)
if self._ignoreCloseEvent:
logger.info("Ignoring event")
closeEvent.ignore()
return
closeEvent.accept()
self.saveState()
self.saveMdiState()
super().closeEvent(closeEvent)
def ignoreCloseEvent(self):
"""
Can be called in slots connected to aboutToClose for requesting to ignore the event.
Use case is the "There are unsaved changes" dialog.
:return:
"""
self._ignoreCloseEvent = True
def restoreState(self):
"""
restores the state of the main window including the dock windows of Services
:return:
"""
logger.info("restoring main window's state")
settings = QSettings()
v = settings.value("MainWindowState")
if v is not None:
super().restoreState(v)
v = settings.value("MainWindowGeometry")
if v is not None:
self.restoreGeometry(v)
if self.toolbar is not None:
self.toolbar.show()
def saveState(self):
"""
saves the state of the main window including the dock windows of Services
:return:
"""
logger.info("saving main window's state")
settings = QSettings()
settings.setValue("MainWindowState", super().saveState())
settings.setValue("MainWindowGeometry", self.saveGeometry())
def saveMdiState(self):
"""
saves the state of the individual MDI windows
:return:
"""
for i in self.managedMdiWindows:
window = i["window"]
propColl = i["propColl"]
prefix = i["prefix"]
logger.debug("save window geometry %s: %s", prefix, window.geometry())
geom = str(window.saveGeometry().toBase64(), "ascii")
visible = self.windows[shiboken2.getCppPointer(window)[0]].isChecked() # pylint: disable=no-member
propColl.setProperty(prefix + "_geom", geom)
logger.debug("%s is visible: %d", prefix, int(visible))
propColl.setProperty(prefix + "_visible", int(visible))
self.managedMdiWindows = []
def __del__(self):
logging.getLogger(__name__).debug("deleting MainWindow")
@Slot(str, QPoint)
def updateSelection(self, group, point):
"""
QT Meta-function which can be called to update the 2D selection.
:param group: the group name given as str/QString
:param point: the new selection point given as QPoint
"""
self.userSelectionChanged.emit(group, point)
@Slot()
def getToolBar(self):
"""
Get the main toolbar (adds separators as appropriate).
:return:
"""
if self.toolbar is None:
self.toolbar = self.addToolBar("NexxT")
self.toolbar.setObjectName("NexxT_main_toolbar")
else:
self.toolbar.addSeparator()
return self.toolbar
@Slot(str, QObject, int, int)
def newDockWidget(self, name, parent, defaultArea, allowedArea=Qt.LeftDockWidgetArea|Qt.BottomDockWidgetArea,
defaultLoc=None):
"""
This function is supposed to be called by services
:param name: the name of the dock window
:param parent: the parent (usually None)
:param defaultArea: the default dock area
:param allowedArea: the allowed dock areas
:param defaultLoc: object name of an existing dock widget to tabify the new one with (optional)
:return: a new NexxTDockWidget instance
"""
res = NexxTDockWidget(name, parent if parent is not None else self)
res.setAllowedAreas(allowedArea)
res.setAttribute(Qt.WA_DeleteOnClose, False)
self.addDockWidget(defaultArea, res)
self._registerWindow(res, res.objectNameChanged)
res.setObjectName(name)
if defaultLoc is not None:
dl = self.findChild(QDockWidget, defaultLoc)
if dl is not None:
self.tabifyDockWidget(dl, res)
return res
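# Illustrative call from a service (the widget and object names below are hypothetical, not from this module):
#   dock = mainWindow.newDockWidget("Profiling", None, Qt.LeftDockWidgetArea)
#   dock.setWidget(profilingWidget)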
@staticmethod
def parseWindowId(windowId):
"""
converts a subplot window id into windowTitle, row and column
:param windowId: the window id
:return: title, row, column
"""
regexp = re.compile(r"([^\[]+)\[(\d+),\s*(\d+)\]")
match = regexp.match(windowId)
if not match is None:
return match.group(1), int(match.group(2)), int(match.group(3))
return windowId, 0, 0
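# For example, parseWindowId("camera[0, 1]") returns ("camera", 0, 1), while an id
# without coordinates such as parseWindowId("camera") falls back to ("camera", 0, 0).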
@Slot(str, QObject, QWidget)
def subplot(self, windowId, theFilter, widget):
"""
Adds widget to the GridLayout specified by windowId.
:param windowId: a string with the format "<windowTitle>[<row>,<col>]" where <windowTitle> is the caption
of the MDI window (and it is used as identifier for saving/restoring window state) and
<row>, <col> are the coordinates of the addressed subplots (starting at 0)
:param theFilter: a Filter instance which is requesting the subplot
:param widget: a QWidget which shall be placed into the grid layout. Note that this widget is reparented
as a result of this operation and the parents can be used to get access to the MDI sub window.
Use releaseSubplot to remove the window
:return: None
"""
logger.internal("subplot '%s'", windowId)
title, row, col = self.parseWindowId(windowId)
if title == "":
title = "(view)"
if title in self.managedSubplots and (row, col) in self.managedSubplots[title]["plots"]:
logger.warning("subplot %s[%d,%d] is already registered. Creating a new window for the plot.",
title, row, col)
i = 2
while "%s(%d)" % (title, i) in self.managedSubplots:
i += 1
title = "%s(%d)" % (title, i)
row = 0
col = 0
if title not in self.managedSubplots:
subWindow = self._newMdiSubWindow(theFilter, title)
swwidget = QWidget()
subWindow.setWidget(swwidget)
layout = QGridLayout(swwidget)
swwidget.setLayout(layout)
self.managedSubplots[title] = dict(mdiSubWindow=subWindow, layout=layout, swwidget=swwidget, plots={})
self.managedSubplots[title]["layout"].addWidget(widget, row, col)
self.managedSubplots[title]["mdiSubWindow"].updateGeometry()
widget.setParent(self.managedSubplots[title]["swwidget"])
# note: there seems to be a race condition when decreasing the single-shot timeout to 0;
# sometimes the window size is then not correctly adjusted.
# With the 100 ms timeout this couldn't be reproduced.
QTimer.singleShot(100, lambda: (
self.managedSubplots[title]["mdiSubWindow"].adjustSize() if
shiboken2.isValid(widget) and ( # pylint: disable=no-member
widget.parent().size().height() < widget.minimumSizeHint().height() or
widget.parent().size().height() < widget.minimumSize().height()) else None
))
self.managedSubplots[title]["plots"][row, col] = widget
@Slot(QWidget)
@Slot(str)
def releaseSubplot(self, arg):
"""
This needs to be called to release the previously allocated subplot called windowId.
The managed widget is deleted as a consequence of this function.
:param arg: the widget as passed to subplot. Passing the windowId is also supported, but deprecated.
:return:
"""
if isinstance(arg, str):
windowId = arg
logger.warning("Using deprecated API to release a subplot. Please | |
# Repository: victor-estrade/SystGradDescent | GitHub stars: 1-10
#!/usr/bin/env python
# coding: utf-8
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import itertools
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
from scipy.special import softmax
from scipy import stats
from tqdm import tqdm
from utils.plot import set_plot_config
from problem.synthetic3D import S3D2
from problem.synthetic3D import S3D2Config
from problem.synthetic3D import get_minimizer
from utils.misc import estimate
from utils.misc import register_params
# from utils.log import print_params
SEED = None
DIRECTORY = "/home/estrade/Bureau/PhD/SystML/SystGradDescent/savings/S3D2/Likelihood"
def expectancy(values, probabilities, axis=None, keepdims=False):
return np.sum(values * probabilities, axis=axis, keepdims=keepdims)
def variance(values, probabilities, axis=None):
return np.sum(probabilities * np.square(values - expectancy(values, probabilities, axis=axis, keepdims=True)), axis=axis)
def variance_bis(values, probabilities, axis=None):
return np.sum(values * values * probabilities, axis=axis) - np.square(expectancy(values, probabilities, axis=axis, keepdims=True))
def stat_uncertainty(values, posterior, marginal):
return sum([variance(values, posterior[i, j, :]) * marginal[i, j]
for i, j in itertools.product(range(marginal.shape[0]), range(marginal.shape[1]))])
def stat_uncertainty2(values, posterior, marginal):
v = np.array([variance(values, posterior[i, j, :])
for i, j in itertools.product(range(posterior.shape[0]), range(posterior.shape[1]))])
return expectancy(v.ravel(), marginal.ravel())
def stat_uncertainty3(values, posterior, marginal):
v = variance(values.reshape(1, 1, -1), posterior, axis=2)
return expectancy(v.ravel(), marginal.ravel())
def syst_uncertainty(values, posterior, marginal, marginal_posterior):
E_y_x = expectancy(values, marginal_posterior)
return sum([np.square(expectancy(values, posterior[i, j, :]) - E_y_x) * marginal[i, j]
for i, j in itertools.product(range(marginal.shape[0]), range(marginal.shape[1]))])
def syst_uncertainty2(values, posterior, marginal):
v = np.array([expectancy(values, posterior[i, j, :])
for i, j in itertools.product(range(posterior.shape[0]), range(posterior.shape[1]))])
return variance(v.ravel(), marginal.ravel())
def syst_uncertainty3(values, posterior, marginal):
v = expectancy(values.reshape(1, 1, -1), posterior, axis=2)
return variance(v.ravel(), marginal.ravel())
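# Illustrative sketch (not part of the original script): the stat/syst split above is
# the law of total variance, Var[mu] = E_{r,lam}[Var[mu|r,lam]] + Var_{r,lam}[E[mu|r,lam]].
# The toy arrays below are made up; paste them into a REPL after importing this module to check.
# toy_mu = np.array([0.0, 0.5, 1.0])                       # mu grid
# toy_posterior = np.full((2, 2, 3), 1.0 / 3.0)            # p(mu | r, lam), normalised over mu
# toy_marginal = np.full((2, 2), 0.25)                     # p(r, lam), sums to one
# toy_marginal_mu = (toy_posterior * toy_marginal[..., None]).sum(axis=(0, 1))
# stat = stat_uncertainty3(toy_mu, toy_posterior, toy_marginal)   # E[Var] = 1/6
# syst = syst_uncertainty3(toy_mu, toy_posterior, toy_marginal)   # Var[E] = 0 here (posterior independent of r, lam)
# assert np.isclose(stat + syst, variance(toy_mu, toy_marginal_mu))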
def explore():
print("Hello master !")
set_plot_config()
config = S3D2Config()
N_SAMPLES = 10_000
R_MIN = -0.3
R_MAX = 0.3
LAM_MIN = 2
LAM_MAX = 4
MU_MIN = 0.0
MU_MAX = 1.0
generator = S3D2(SEED)
X, label = generator.sample_event(config.TRUE.r, config.TRUE.lam, config.TRUE.mu, size=N_SAMPLES)
n_sig = np.sum(label==1)
n_bkg = np.sum(label==0)
print(f"nb of signal = {n_sig}")
print(f"nb of backgrounds = {n_bkg}")
df = pd.DataFrame(X, columns=["x1","x2","x3"])
df['label'] = label
g = sns.PairGrid(df, vars=["x1","x2","x3"], hue='label')
g = g.map_upper(sns.scatterplot)
g = g.map_diag(sns.kdeplot)
g = g.map_lower(sns.kdeplot, n_levels=6)
g = g.add_legend()
# g = g.map_offdiag(sns.kdeplot, n_levels=6)
g.savefig(os.path.join(DIRECTORY, 'pairgrid.png'))
plt.clf()
nll = generator.nll(X, config.TRUE.r, config.TRUE.lam, config.TRUE.mu)
print(f"NLL = {nll}")
R_RANGE = np.linspace(R_MIN, R_MAX, 30)
nll = [generator.nll(X, r, config.TRUE.lam, config.TRUE.mu) for r in R_RANGE]
min_nll = R_RANGE[np.argmin(nll)]
plt.plot(R_RANGE, nll, label="nll(r)")
plt.axvline(config.TRUE.r, c="orange", label="true r")
plt.axvline(min_nll, c="red", label="min nll")
plt.xlabel("r")
plt.ylabel("NLL")
plt.title("NLL according to r param")
plt.legend()
plt.savefig(os.path.join(DIRECTORY, 'NLL_r.png'))
plt.clf()
LAM_RANGE = np.linspace(LAM_MIN, LAM_MAX, 30)
nll = [generator.nll(X, config.TRUE.r, lam, config.TRUE.mu) for lam in LAM_RANGE]
min_nll = LAM_RANGE[np.argmin(nll)]
plt.plot(LAM_RANGE, nll, label="nll(lam)")
plt.axvline(config.TRUE.lam, c="orange", label="true lam")
plt.axvline(min_nll, c="red", label="min nll")
plt.xlabel("$\lambda$")
plt.ylabel("NLL")
plt.title("NLL according to $\lambda$ param")
plt.legend()
plt.savefig(os.path.join(DIRECTORY, 'NLL_lambda.png'))
plt.clf()
MU_RANGE = np.linspace(MU_MIN, MU_MAX, 30)
nll = [generator.nll(X, config.TRUE.r, config.TRUE.lam, mu) for mu in MU_RANGE]
min_nll = MU_RANGE[np.argmin(nll)]
plt.plot(MU_RANGE, nll, label="nll(mu)")
plt.axvline(config.TRUE.mu, c="orange", label="true mu")
plt.axvline(min_nll, c="red", label="min nll")
plt.xlabel("$\mu$")
plt.ylabel("NLL")
plt.title("NLL according to $\mu$ param")
plt.legend()
plt.savefig(os.path.join(DIRECTORY, 'NLL_mu.png'))
plt.clf()
def main():
print("Hello world !")
set_plot_config()
config = S3D2Config()
DATA_N_SAMPLES = 8_000
R_MIN = -0.3
R_MAX = 0.3
LAM_MIN = 2
LAM_MAX = 4
MU_MIN = 0.1
MU_MAX = 0.3
R_N_SAMPLES = 101
LAM_N_SAMPLES = 102
MU_N_SAMPLES = 103
prior_r = stats.uniform(loc=R_MIN, scale=R_MAX-R_MIN)
prior_lam = stats.uniform(loc=LAM_MIN, scale=LAM_MAX-LAM_MIN)
prior_mu = stats.uniform(loc=MU_MIN, scale=MU_MAX-MU_MIN)
r_grid = np.linspace(R_MIN, R_MAX, R_N_SAMPLES)
lam_grid = np.linspace(LAM_MIN, LAM_MAX, LAM_N_SAMPLES)
mu_grid = np.linspace(MU_MIN, MU_MAX, MU_N_SAMPLES)
data_generator = S3D2(SEED)
data, label = data_generator.sample_event(config.TRUE.r, config.TRUE.lam, config.TRUE.mu, size=DATA_N_SAMPLES)
n_sig = np.sum(label==1)
n_bkg = np.sum(label==0)
print(f"nb of signal = {n_sig}")
print(f"nb of backgrounds = {n_bkg}")
shape = (R_N_SAMPLES, LAM_N_SAMPLES, MU_N_SAMPLES)
n_elements = np.prod(shape)
print(f"3D grid has {n_elements} elements")
log_likelihood = np.zeros(shape)
log_prior_proba = np.zeros(shape)
for i, j, k in tqdm(itertools.product(range(R_N_SAMPLES), range(LAM_N_SAMPLES), range(MU_N_SAMPLES)), total=n_elements):
log_likelihood[i, j, k] = data_generator.log_proba_density(data, r_grid[i], lam_grid[j], mu_grid[k]).sum()
log_prior_proba[i, j, k] = prior_r.logpdf(r_grid[i]) \
+ prior_lam.logpdf(lam_grid[j]) \
+ prior_mu.logpdf(mu_grid[k])
element_min = (log_likelihood + log_prior_proba).min()
print("min element = ", element_min)
posterior_r_lam_mu = softmax(log_likelihood + log_prior_proba)
n_zeros = (posterior_r_lam_mu == 0).sum()
n_elements = np.prod(posterior_r_lam_mu.shape)
print()
print(f"number of zeros in posterior = {n_zeros}/{n_elements} ({n_zeros/n_elements*100:2.3f} %)")
marginal_r = posterior_r_lam_mu.sum(axis=2).sum(axis=1)
marginal_lam = posterior_r_lam_mu.sum(axis=2).sum(axis=0)
marginal_mu = posterior_r_lam_mu.sum(axis=1).sum(axis=0)
marginal_r_lam = posterior_r_lam_mu.sum(axis=2)
assert marginal_r.shape == r_grid.shape, "sum along the wrong axis for marginal r"
assert marginal_lam.shape == lam_grid.shape, "sum along the wrong axis for marginal lam"
assert marginal_mu.shape == mu_grid.shape, "sum along the wrong axis for marginal mu"
assert marginal_r_lam.shape == (R_N_SAMPLES, LAM_N_SAMPLES), "sum along the wrong axis for marginal (r, lam)"
n_zeros = (marginal_r == 0).sum()
n_elements = np.prod(marginal_r.shape)
print(f"number of zeros in marginal r = {n_zeros}/{n_elements} ({n_zeros/n_elements*100:2.3f} %)")
n_zeros = (marginal_lam == 0).sum()
n_elements = np.prod(marginal_lam.shape)
print(f"number of zeros in marginal lam = {n_zeros}/{n_elements} ({n_zeros/n_elements*100:2.3f} %)")
n_zeros = (marginal_mu == 0).sum()
n_elements = np.prod(marginal_mu.shape)
print(f"number of zeros in marginal mu = {n_zeros}/{n_elements} ({n_zeros/n_elements*100:2.3f} %)")
n_zeros = (marginal_r_lam == 0).sum()
n_elements = np.prod(marginal_r_lam.shape)
print(f"number of zeros in marginal r_lam = {n_zeros}/{n_elements} ({n_zeros/n_elements*100:2.3f} %)")
posterior_mu = np.divide(posterior_r_lam_mu, marginal_r_lam.reshape(R_N_SAMPLES, LAM_N_SAMPLES, 1),
out=np.zeros_like(posterior_r_lam_mu), where=(posterior_r_lam_mu!=0))
print("probability densities should sum to one")
# TODO : posterior_mu sum to SOME_N_SAMPLES. is it ok ?
# TODO : with new division policy posterior_mu/ALPHA_N sums to 1-zero_ration in marginal_y
# ... It does not look good
print(np.sum(posterior_mu)/n_elements, np.sum(posterior_r_lam_mu), np.sum(marginal_r), np.sum(marginal_lam))
print(np.sum(marginal_r_lam))
print()
print("True mu value =", config.TRUE.mu)
sig_ratio = n_sig/DATA_N_SAMPLES
print("Sig ratio =", sig_ratio)
expect_mu = expectancy(mu_grid, marginal_mu)
print("E[mu|x] =", expect_mu)
full_var = variance(mu_grid, marginal_mu)
print("Var[mu|x] =", full_var)
std_mu = np.sqrt(full_var)
print("sqrt(Var[mu|x]) =", std_mu)
print("argmax_mu p(mu|x) =", mu_grid[np.argmax(marginal_mu)])
i_max, j_max, k_max = np.unravel_index(np.argmax(log_likelihood), log_likelihood.shape)
assert np.max(log_likelihood) == log_likelihood[i_max, j_max, k_max], "max and argmax should point to the same value"
print("argmax_r_lam_mu logp(x|r, lam, mu) =", r_grid[i_max], lam_grid[j_max], mu_grid[k_max])
stat_err = stat_uncertainty(mu_grid, posterior_mu, marginal_r_lam)
print("stat_uncertainty=", stat_err)
stat_err = stat_uncertainty2(mu_grid, posterior_mu, marginal_r_lam)
print("stat_uncertainty=", stat_err)
stat_err = stat_uncertainty3(mu_grid, posterior_mu, marginal_r_lam)
print("stat_uncertainty=", stat_err)
print("syst_uncertainty=", full_var - stat_err)
syst_err = syst_uncertainty(mu_grid, posterior_mu, marginal_r_lam, marginal_mu)
print("syst_uncertainty=", syst_err)
syst_err = syst_uncertainty2(mu_grid, posterior_mu, marginal_r_lam)
print("syst_uncertainty=", syst_err)
syst_err = syst_uncertainty3(mu_grid, posterior_mu, marginal_r_lam)
print("syst_uncertainty=", syst_err)
print()
print("check marginals")
print("mu ", marginal_mu.min(), marginal_mu.max())
print("lam ", marginal_lam.min(), marginal_lam.max())
print("r ", marginal_r.min(), marginal_r.max())
print("check posterior")
print("p(y|x) ", posterior_mu.min(), posterior_mu.max())
print("p(y|x,a)", posterior_r_lam_mu.min(), posterior_r_lam_mu.max())
# return None
plt.axvline(config.TRUE.mu, c="orange", label="true mu")
plt.axvline(config.TRUE.mu-std_mu, c="orange", label="true mu - std(mu)")
plt.axvline(config.TRUE.mu+std_mu, c="orange", label="true mu + std(mu)")
plt.axvline(sig_ratio, c="red", label="signal ratio")
plt.axvline(expect_mu, c="green", label="E[mu|x]")
plt.plot(mu_grid, marginal_mu, label="posterior")
plt.xlabel("mu")
plt.ylabel("proba density")
plt.title("posterior marginal proba of mu vs mu values")
plt.legend()
plt.savefig(os.path.join(DIRECTORY, 'marginal_mu.png'))
plt.clf()
plt.plot(lam_grid, marginal_lam, label="posterior")
plt.axvline(config.TRUE.lam, c="orange", label="true lambda")
plt.xlabel("lambda")
plt.ylabel("proba density")
plt.title("posterior marginal proba of lam vs lam values")
plt.legend()
plt.savefig(os.path.join(DIRECTORY, 'marginal_lam.png'))
plt.clf()
plt.plot(r_grid, marginal_r, label="posterior")
plt.axvline(config.TRUE.r, c="orange", label="true r")
plt.xlabel("r")
plt.ylabel("proba density")
plt.title("posterior marginal proba of r vs r values")
plt.legend()
plt.savefig(os.path.join(DIRECTORY, 'marginal_r.png'))
plt.clf()
# sns.distplot(data, label="data hist")
# x_range = np.linspace(np.min(data), np.max(data), 1000)
# p = data_generator.proba_density(x_range, Y_TRUE, ALPHA_TRUE)
# plt.plot(x_range, p, label="true proba")
# plt.legend()
# plt.savefig(os.path.join(DIRECTORY, 'data_dstrib.png'))
# plt.clf()
def likelihood_fit():
print("Hello world !")
set_plot_config()
config = S3D2Config()
DATA_N_SAMPLES = 80_000
result_table = []
for mu in config.TRUE_MU_RANGE[1:]:
result_row = {}
config.TRUE_MU = mu
generator = S3D2(SEED)
data, label = generator.sample_event(config.TRUE.r, config.TRUE.lam, config.TRUE_MU, size=DATA_N_SAMPLES)
n_sig = np.sum(label==1)
n_bkg = np.sum(label==0)
print(f"nb of signal = {n_sig}")
print(f"nb of backgrounds = {n_bkg}")
compute_nll = lambda r, lam, mu : generator.nll(data, r, lam, mu)
print('Prepare minuit minimizer')
minimizer = get_minimizer(compute_nll, config)
fmin, params = estimate(minimizer)
params_truth = [config.TRUE_R, config.TRUE_LAMBDA, config.TRUE_MU]
my_print_params(params, params_truth)
register_params(params, params_truth, result_row)
result_row['is_mingrad_valid'] = minimizer.migrad_ok()
result_row.update(fmin)
result_table.append(result_row.copy())
result_table = pd.DataFrame(result_table)
result_table.to_csv(os.path.join(DIRECTORY, 'results.csv'))
print('Plot params')
param_names = config.PARAM_NAMES
for name in param_names:
my_plot_params(name, result_table)
def my_print_params(param, params_truth):
for p, truth in zip(param, params_truth):
name = p['name']
value = p['value']
error = p['error']
print('{name:4} = {truth} vs {value} +/- {error}'.format(**locals()))
def my_plot_params(param_name, result_table, directory=DIRECTORY):
from utils.misc import _ERROR
from utils.misc import _TRUTH
values = result_table[param_name]
errors = result_table[param_name+_ERROR]
truths = result_table[param_name+_TRUTH]
xx = np.arange(len(values))
if 'is_valid' in result_table:
valid_values = values[result_table['is_valid']]
valid_errors = errors[result_table['is_valid']]
valid_x = xx[result_table['is_valid']]
print("Plot_params valid lenght = {}, {}, {}".format(len(valid_x), len(valid_values), len(valid_errors)))
values = values[result_table['is_valid'] == False]
errors = errors[result_table['is_valid'] == False]
x = xx[result_table['is_valid'] == False]
print('Plot_params invalid length = {}, {}, {}'.format(len(x), len(values), len(errors)))
try:
if 'is_valid' in result_table:
plt.errorbar(valid_x, valid_values, yerr=valid_errors, fmt='o', capsize=20, capthick=2, label='valid_infer')
plt.errorbar(x, values, yerr=errors, fmt='o', capsize=20, capthick=2, label='invalid_infer')
else:
plt.errorbar(xx, values, yerr=errors, fmt='o', capsize=20, capthick=2, label='infer')
plt.scatter(xx, truths, c='red', label='truth')
plt.xticks(xx, map(lambda x: round(x, 3), truths))
plt.xlabel('truth value')
plt.ylabel(param_name)
plt.title("Likelihood | |
, '53366' : 'servo'
, '53411' : 'set' , '53412' : 'seth' , '53413' : 'seton' , '53414' : 'setup' , '53415' : 'seven'
, '53416' : 'sever' , '53421' : 'severe' , '53422' : 'sew' , '53423' : 'sewn' , '53424' : 'sex'
, '53425' : 'sexy' , '53426' : 'sf' , '53431' : 'sg' , '53432' : 'sh' , '53433' : 'shack'
, '53434' : 'shad' , '53435' : 'shade' , '53436' : 'shady' , '53441' : 'shafer' , '53442' : 'shaft'
, '53443' : 'shag' , '53444' : 'shah' , '53445' : 'shake' , '53446' : 'shaken' , '53451' : 'shako'
, '53452' : 'shaky' , '53453' : 'shale' , '53454' : 'shall' , '53455' : 'sham' , '53456' : 'shame'
, '53461' : 'shank' , '53462' : 'shape' , '53463' : 'shard' , '53464' : 'share' , '53465' : 'shari'
, '53466' : 'shark' , '53511' : 'sharp' , '53512' : 'shave' , '53513' : 'shaw' , '53514' : 'shawl'
, '53515' : 'shay' , '53516' : 'she' , '53521' : "she'd" , '53522' : 'shea' , '53523' : 'sheaf'
, '53524' : 'shear' , '53525' : 'sheath' , '53526' : 'shed' , '53531' : 'sheen' , '53532' : 'sheep'
, '53533' : 'sheer' , '53534' : 'sheet' , '53535' : 'sheik' , '53536' : 'shelf' , '53541' : 'shell'
, '53542' : 'shied' , '53543' : 'shift' , '53544' : 'shill' , '53545' : 'shim' , '53546' : 'shin'
, '53551' : 'shine' , '53552' : 'shinto' , '53553' : 'shiny' , '53554' : 'ship' , '53555' : 'shire'
, '53556' : 'shirk' , '53561' : 'shirt' , '53562' : 'shish' , '53563' : 'shiv' , '53564' : 'shoal'
, '53565' : 'shock' , '53566' : 'shod' , '53611' : 'shoe' , '53612' : 'shoji' , '53613' : 'shone'
, '53614' : 'shoo' , '53615' : 'shook' , '53616' : 'shoot' , '53621' : 'shop' , '53622' : 'shore'
, '53623' : 'short' , '53624' : 'shot' , '53625' : 'shout' , '53626' : 'shove' , '53631' : 'show'
, '53632' : 'shown' , '53633' : 'showy' , '53634' : 'shrank' , '53635' : 'shred' , '53636' : 'shrew'
, '53641' : 'shrike' , '53642' : 'shrub' , '53643' : 'shrug' , '53644' : 'shu' , '53645' : 'shuck'
, '53646' : 'shun' , '53651' : 'shunt' , '53652' : 'shut' , '53653' : 'shy' , '53654' : 'si'
, '53655' : 'sial' , '53656' : 'siam' , '53661' : 'sian' , '53662' : 'sib' , '53663' : 'sibley'
, '53664' : 'sibyl' , '53665' : 'sic' , '53666' : 'sick' , '54111' : 'side' , '54112' : 'sidle'
, '54113' : 'siege' , '54114' : 'siena' , '54115' : 'sieve' , '54116' : 'sift' , '54121' : 'sigh'
, '54122' : 'sight' , '54123' : 'sigma' , '54124' : 'sign' , '54125' : 'signal' , '54126' : 'signor'
, '54131' : 'silas' , '54132' : 'silk' , '54133' : 'silky' , '54134' : 'sill' , '54135' : 'silly'
, '54136' : 'silo' , '54141' : 'silt' , '54142' : 'silty' , '54143' : 'sima' , '54144' : 'simon'
, '54145' : 'simons' , '54146' : 'sims' , '54151' : 'sin' , '54152' : 'sinai' , '54153' : 'since'
, '54154' : 'sine' , '54155' : 'sinew' , '54156' : 'sing' , '54161' : 'singe' , '54162' : 'sinh'
, '54163' : 'sink' , '54164' : 'sinus' , '54165' : 'sioux' , '54166' : 'sip' , '54211' : 'sir'
, '54212' : 'sire' , '54213' : 'siren' , '54214' : 'sis' , '54215' : 'sisal' , '54216' : 'sit'
, '54221' : 'site' , '54222' : 'situ' , '54223' : 'situs' , '54224' : 'siva' , '54225' : 'six'
, '54226' : 'sixgun' , '54231' : 'sixth' , '54232' : 'sixty' , '54233' : 'size' , '54234' : 'sj'
, '54235' : 'sk' , '54236' : 'skat' , '54241' : 'skate' , '54242' : 'skeet' , '54243' : 'skew'
, '54244' : 'ski' , '54245' : 'skid' , '54246' : 'skied' , '54251' : 'skiff' , '54252' : 'skill'
, '54253' : 'skim' , '54254' : 'skimp' , '54255' : 'skimpy' , '54256' : 'skin' , '54261' : 'skip'
, '54262' : 'skirt' , '54263' : 'skit' , '54264' : 'skulk' , '54265' : 'skull' , '54266' : 'skunk'
, '54311' : 'sky' , '54312' : 'skye' , '54313' : 'sl' , '54314' : 'slab' , '54315' : 'slack'
, '54316' : 'slag' , '54321' : 'slain' , '54322' : 'slake' , '54323' : 'slam' , '54324' : 'slang'
, '54325' : 'slant' , '54326' : 'slap' , '54331' : 'slash' , '54332' : 'slat' , '54333' : 'slate'
, '54334' : 'slater' , '54335' : 'slav' , '54336' : 'slave' , '54341' : 'slay' , '54342' : 'sled'
, '54343' : 'sleek' , '54344' : 'sleep' , '54345' : 'sleet' , '54346' : 'slept' , '54351' : 'slew'
, '54352' : 'slice' , '54353' : 'slick' , '54354' : 'slid' , '54355' : 'slide' , '54356' : 'slim'
, '54361' : 'slime' , '54362' : 'slimy' , '54363' : 'sling' , '54364' : 'slip' , '54365' : 'slit'
, '54366' : 'sliver' , '54411' : 'sloan' , '54412' : 'slob' , '54413' : 'sloe' , '54414' : 'slog'
, '54415' : 'sloop' , '54416' : 'slop' , '54421' : 'slope' , '54422' : 'slosh' , '54423' : 'slot'
, '54424' : 'sloth' , '54425' : 'slow' , '54426' : 'slug' , '54431' : 'sluice' , '54432' : 'slum'
, '54433' : 'slump' , '54434' : 'slung' , '54435' : 'slur' , '54436' : 'slurp' , '54441' : 'sly'
, '54442' : 'sm' , '54443' : 'smack' , '54444' : 'small' , '54445' : 'smart' , '54446' : 'smash'
, '54451' : 'smear' , '54452' : 'smell' , '54453' : 'smelt' , '54454' : 'smile' , '54455' : 'smirk'
, '54456' : 'smith' , '54461' : 'smithy' , '54462' : 'smog' , '54463' : 'smoke' , '54464' : 'smoky'
, '54465' : 'smug' , '54466' : 'smut' , '54511' : 'sn' , '54512' : 'snack' , '54513' : 'snafu'
, '54514' : 'snag' , '54515' : 'snail' , '54516' : 'snake' , '54521' : 'snap' , '54522' : 'snare'
, '54523' : 'snark' , '54524' : 'snarl' , '54525' : 'snatch' , '54526' : 'sneak' , '54531' : 'sneer'
, '54532' : 'snell' , '54533' : 'snick' , '54534' : 'sniff' , '54535' : 'snip' , '54536' : 'snipe'
, '54541' : 'snob' , '54542' : 'snook' , '54543' : 'snoop' , '54544' : 'snore' , '54545' : 'snort'
, '54546' : 'snout' , '54551' : 'snow' , '54552' : 'snowy' , '54553' : 'snub' , '54554' : 'snuff'
, '54555' : 'snug' , '54556' : 'so' , '54561' : 'soak' , '54562' : 'soap' , '54563' : 'soapy'
, '54564' : 'soar' , '54565' : 'sob' , '54566' : 'sober' , '54611' : 'social' , '54612' : 'sock'
, '54613' : 'sod' , '54614' : 'soda' , '54615' : 'sofa' , '54616' : 'sofia' , '54621' : 'soft'
, '54622' : 'soften' , '54623' : 'soggy' , '54624' : 'soil' , '54625' : 'sol' , '54626' : 'solar'
, '54631' : 'sold' , '54632' : 'sole' , '54633' : 'solemn' , '54634' : 'solid' , '54635' : 'solo'
, '54636' : 'solon' , '54641' : 'solve' , '54642' : 'soma' , '54643' : 'somal' , '54644' : 'some'
, '54645' : 'son' , '54646' : 'sonar' , '54651' : 'song' , '54652' : 'sonic' , '54653' : 'sonny'
, '54654' : 'sonora' , '54655' : 'sony' , '54656' : 'soon' , '54661' : 'soot' , '54662' : 'sooth'
, '54663' : 'sop' , '54664' : 'sora' , '54665' : 'sorb' , '54666' : 'sore' , '55111' : 'sorry'
, '55112' : 'sort' , '55113' : 'sos' , '55114' : 'sou' , '55115' : 'sough' , '55116' : 'soul'
, '55121' : 'sound' , '55122' : 'soup' , '55123' : 'sour' , '55124' : 'source' , '55125' : 'sousa'
, '55126' : 'south' | |
= params['page']
if 'per_page' in params:
query_params['per_page'] = params['per_page']
if 'sort_by' in params:
query_params['sort_by'] = params['sort_by']
if 'sort_direction' in params:
query_params['sort_direction'] = params['sort_direction']
if 'ip' in params:
query_params['ip'] = params['ip']
if 'features' in params:
query_params['features'] = params['features']
if 'filters' in params:
query_params['filters'] = params['filters']
if 'customer_id' in params:
query_params['customer_id'] = params['customer_id']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProductListResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_products_from_product(self, product_id, **kwargs):
"""
Get products linked to another product
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_products_from_product(product_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int product_id: (required)
:param int page:
:param int per_page:
:param str features: ``` features[*][value]=string&features[*][operator]=strict&features[1][value]=string&features[1][operator]=strict _______________ { \"*\": { \"value\": \"string\", \"operator\": \"strict\" }, \"1\": { \"value\": \"string\", \"operator\": \"contains\" } } ``` Operator can be: strict, contains, between, in, gt (greater than), lt (lower than). To search on all features, you can pass * as featureId.
:param str filters: ``` name[value]=string&name[operator]=contains&date_add[value]=string&date_add[operator]=lt _______________ { \"name\": { \"value\": \"string\", \"operator\": \"contains\" }, \"date_add\": { \"value\": \"string\", \"operator\": \"lt\" } } ``` Operator can be: strict, contains, between, in, gt (greater than), lt (lower than).
:param str sort_by: Sort by this attribute (id by default)
:param str sort_direction: Sorting direction (asc by default)
:param str ip: Filter by user IP
:return: ProductListResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_products_from_product_with_http_info(product_id, **kwargs)
else:
(data) = self.get_products_from_product_with_http_info(product_id, **kwargs)
return data
def get_products_from_product_with_http_info(self, product_id, **kwargs):
"""
Get products linked to another product
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_products_from_product_with_http_info(product_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int product_id: (required)
:param int page:
:param int per_page:
:param str features: ``` features[*][value]=string&features[*][operator]=strict&features[1][value]=string&features[1][operator]=strict _______________ { \"*\": { \"value\": \"string\", \"operator\": \"strict\" }, \"1\": { \"value\": \"string\", \"operator\": \"contains\" } } ``` Operator can be: strict, contains, between, in, gt (greater than), lt (lower than). To search on all features, you can pass * as featureId.
:param str filters: ``` name[value]=string&name[operator]=contains&date_add[value]=string&date_add[operator]=lt _______________ { \"name\": { \"value\": \"string\", \"operator\": \"contains\" }, \"date_add\": { \"value\": \"string\", \"operator\": \"lt\" } } ``` Operator can be: strict, contains, between, in, gt (greater than), lt (lower than).
:param str sort_by: Sort by this attribute (id by default)
:param str sort_direction: Sorting direction (asc by default)
:param str ip: Filter by user IP
:return: ProductListResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['product_id', 'page', 'per_page', 'features', 'filters', 'sort_by', 'sort_direction', 'ip']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_products_from_product" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'product_id' is set
if ('product_id' not in params) or (params['product_id'] is None):
raise ValueError("Missing the required parameter `product_id` when calling `get_products_from_product`")
collection_formats = {}
resource_path = '/products/{product_id}/products'.replace('{format}', 'json')
path_params = {}
if 'product_id' in params:
path_params['product_id'] = params['product_id']
query_params = {}
if 'page' in params:
query_params['page'] = params['page']
if 'per_page' in params:
query_params['per_page'] = params['per_page']
if 'features' in params:
query_params['features'] = params['features']
if 'filters' in params:
query_params['filters'] = params['filters']
if 'sort_by' in params:
query_params['sort_by'] = params['sort_by']
if 'sort_direction' in params:
query_params['sort_direction'] = params['sort_direction']
if 'ip' in params:
query_params['ip'] = params['ip']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProductListResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
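# Illustrative usage (not part of the generated client; the client variable name and the
# values below are assumptions): the `filters` argument is passed in the query-string form
# described in the docstring above, e.g.
#   products = products_api.get_products_from_product(
#       42,
#       page=1,
#       per_page=20,
#       filters="name[value]=shirt&name[operator]=contains",
#       sort_by="id",
#       sort_direction="asc")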
def get_video_groups_from_product(self, product_id, **kwargs):
"""
Get Video Groups attached to product
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_video_groups_from_product(product_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int product_id: Product ID to fetch (required)
:param int page:
:param int per_page:
:param str sort_by: Sort by this attribute (id by default)
:param str sort_direction: Sorting direction (asc by default)
:param str filters: ``` name[value]=string&name[operator]=contains&date_add[value]=string&date_add[operator]=lt _______________ { \"name\": { \"value\": \"string\", \"operator\": \"contains\" }, \"date_add\": { \"value\": \"string\", \"operator\": \"lt\" } } ``` Operator can be: strict, contains, between, in, gt (greater than), lt (lower than).
:return: VideoGroupListResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_video_groups_from_product_with_http_info(product_id, **kwargs)
else:
(data) = self.get_video_groups_from_product_with_http_info(product_id, **kwargs)
return data
def get_video_groups_from_product_with_http_info(self, product_id, **kwargs):
"""
Get Video Groups attached to product
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_video_groups_from_product_with_http_info(product_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int product_id: Product ID to fetch (required)
:param int page:
:param int per_page:
:param str sort_by: Sort by this attribute (id by default)
:param str sort_direction: Sorting direction (asc by default)
:param str filters: ``` name[value]=string&name[operator]=contains&date_add[value]=string&date_add[operator]=lt _______________ { \"name\": { \"value\": \"string\", \"operator\": \"contains\" }, \"date_add\": { \"value\": \"string\", \"operator\": \"lt\" } } ``` Operator can be: strict, contains, between, in, gt (greater than), lt (lower than).
:return: VideoGroupListResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['product_id', 'page', 'per_page', 'sort_by', 'sort_direction', 'filters']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_video_groups_from_product" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'product_id' is set
if ('product_id' not in params) or (params['product_id'] is None):
raise ValueError("Missing the required parameter `product_id` when calling `get_video_groups_from_product`")
collection_formats = {}
resource_path = '/products/{product_id}/video-groups'.replace('{format}', 'json')
path_params = {}
if 'product_id' in params:
path_params['product_id'] = params['product_id']
query_params = {}
if 'page' in params:
query_params['page'] = params['page']
if 'per_page' in params:
query_params['per_page'] = params['per_page']
if 'sort_by' in params:
query_params['sort_by'] = params['sort_by']
if 'sort_direction' in params:
query_params['sort_direction'] = params['sort_direction']
if 'filters' in params:
query_params['filters'] = params['filters']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='VideoGroupListResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_videos_from_product(self, product_id, **kwargs):
"""
Get videos attached to product
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_videos_from_product(product_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int product_id: Product ID to fetch (required)
:param int page:
:param int per_page:
:param str sort_by: Sort by this attribute (id by default)
:param str sort_direction: Sorting direction (asc by default)
:param str ip: Filter by user IP
:param str filters: ``` name[value]=string&name[operator]=contains&date_add[value]=string&date_add[operator]=lt _______________ { \"name\": { \"value\": \"string\", \"operator\": \"contains\" }, \"date_add\": { \"value\": \"string\", \"operator\": \"lt\" } } ``` Operator can be: strict, contains, between, in, gt (greater than), lt (lower than).
:return: ProductVideoListResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_videos_from_product_with_http_info(product_id, **kwargs)
else:
(data) = self.get_videos_from_product_with_http_info(product_id, **kwargs)
return data
def get_videos_from_product_with_http_info(self, | |
# -*- coding: utf-8 -*-
from datetime import datetime
import json
import os
import socket
from django import forms
from django.conf import settings
from django.forms.models import formset_factory, modelformset_factory
from django.template.defaultfilters import filesizeformat
import commonware
import happyforms
import waffle
from quieter_formset.formset import BaseFormSet, BaseModelFormSet
from tower import ugettext as _, ugettext_lazy as _lazy, ungettext as ngettext
import amo
import addons.forms
from access import acl
from addons.forms import clean_name, icons, IconWidgetRenderer, slug_validator
from addons.models import (Addon, AddonCategory, AddonUser, BlacklistedSlug,
Category, Preview)
from addons.widgets import CategoriesSelectMultiple
from amo import get_user
from amo.utils import raise_required, remove_icons
from files.models import FileUpload
from lib.video import tasks as vtasks
from translations.fields import TransField
from translations.forms import TranslationFormMixin
from translations.models import Translation
from translations.widgets import TransInput, TransTextarea
import mkt
from mkt.constants import APP_IMAGE_SIZES, MAX_PACKAGED_APP_SIZE
from mkt.constants.ratingsbodies import (RATINGS_BY_NAME, ALL_RATINGS,
RATINGS_BODIES)
from mkt.webapps.models import (AddonExcludedRegion, ContentRating, ImageAsset,
Webapp)
from .forms_payments import * # Payment-related forms
from . import tasks
log = commonware.log.getLogger('mkt.developers')
class AuthorForm(happyforms.ModelForm):
# TODO: Remove this whole __init__ when the 'allow-refund' flag goes away.
def __init__(self, *args, **kwargs):
super(AuthorForm, self).__init__(*args, **kwargs)
self.fields['role'].choices = (
(c, s) for c, s in amo.AUTHOR_CHOICES
if c != amo.AUTHOR_ROLE_SUPPORT or
waffle.switch_is_active('allow-refund'))
def clean_user(self):
user = self.cleaned_data['user']
if not user.read_dev_agreement:
raise forms.ValidationError(
_('All authors must have read and agreed to the developer '
'agreement.'))
return user
class Meta:
model = AddonUser
exclude = ('addon',)
class BaseModelFormSet(BaseModelFormSet):
"""
Override the parent's is_valid to prevent deleting all forms.
"""
def is_valid(self):
# clean() won't get called in is_valid() if all the rows are getting
# deleted. We can't allow deleting everything.
rv = super(BaseModelFormSet, self).is_valid()
return rv and not any(self.errors) and not bool(self.non_form_errors())
class BaseAuthorFormSet(BaseModelFormSet):
def clean(self):
if any(self.errors):
return
# cleaned_data could be None if it's the empty extra form.
data = filter(None, [f.cleaned_data for f in self.forms
if not f.cleaned_data.get('DELETE', False)])
if not any(d['role'] == amo.AUTHOR_ROLE_OWNER for d in data):
raise forms.ValidationError(_('Must have at least one owner.'))
if not any(d['listed'] for d in data):
raise forms.ValidationError(
_('At least one author must be listed.'))
users = [d['user'] for d in data]
if sorted(users) != sorted(set(users)):
raise forms.ValidationError(
_('An author can only be listed once.'))
AuthorFormSet = modelformset_factory(AddonUser, formset=BaseAuthorFormSet,
form=AuthorForm, can_delete=True, extra=0)
class DeleteForm(happyforms.Form):
password = forms.CharField()
def __init__(self, request):
self.user = request.amo_user
super(DeleteForm, self).__init__(request.POST)
def clean_password(self):
data = self.cleaned_data
if not self.user.check_password(data['password']):
raise forms.ValidationError(_('Password incorrect.'))
def ProfileForm(*args, **kw):
# If the add-on takes contributions, then both fields are required.
addon = kw['instance']
fields_required = (kw.pop('required', False) or
bool(addon.takes_contributions))
if addon.is_webapp():
the_reason_label = _('Why did you make this app?')
the_future_label = _("What's next for this app?")
else:
the_reason_label = _('Why did you make this add-on?')
the_future_label = _("What's next for this add-on?")
class _Form(TranslationFormMixin, happyforms.ModelForm):
the_reason = TransField(widget=TransTextarea(),
required=fields_required,
label=the_reason_label)
the_future = TransField(widget=TransTextarea(),
required=fields_required,
label=the_future_label)
class Meta:
model = Addon
fields = ('the_reason', 'the_future')
return _Form(*args, **kw)
def trap_duplicate(request, manifest_url):
# See if this user has any other apps with the same manifest.
owned = (request.user.get_profile().addonuser_set
.filter(addon__manifest_url=manifest_url))
if not owned:
return
try:
app = owned[0].addon
except Addon.DoesNotExist:
return
error_url = app.get_dev_url()
msg = None
if app.status == amo.STATUS_PUBLIC:
msg = _(u'Oops, looks like you already submitted that manifest '
'for %s, which is currently public. '
'<a href="%s">Edit app</a>')
elif app.status == amo.STATUS_PENDING:
msg = _(u'Oops, looks like you already submitted that manifest '
'for %s, which is currently pending. '
'<a href="%s">Edit app</a>')
elif app.status == amo.STATUS_NULL:
msg = _(u'Oops, looks like you already submitted that manifest '
'for %s, which is currently incomplete. '
'<a href="%s">Resume app</a>')
elif app.status == amo.STATUS_REJECTED:
msg = _(u'Oops, looks like you already submitted that manifest '
'for %s, which is currently rejected. '
'<a href="%s">Edit app</a>')
elif app.status == amo.STATUS_DISABLED:
msg = _(u'Oops, looks like you already submitted that manifest '
'for %s, which is currently disabled by Mozilla. '
'<a href="%s">Edit app</a>')
elif app.disabled_by_user:
msg = _(u'Oops, looks like you already submitted that manifest '
'for %s, which is currently disabled. '
'<a href="%s">Edit app</a>')
if msg:
return msg % (app.name, error_url)
def verify_app_domain(manifest_url, exclude=None):
if waffle.switch_is_active('webapps-unique-by-domain'):
domain = Webapp.domain_from_url(manifest_url)
qs = Webapp.objects.filter(app_domain=domain)
if exclude:
qs = qs.exclude(pk=exclude.pk)
if qs.exists():
raise forms.ValidationError(
_('An app already exists on this domain; '
'only one app per domain is allowed.'))
class PreviewForm(happyforms.ModelForm):
caption = TransField(widget=TransTextarea, required=False)
file_upload = forms.FileField(required=False)
upload_hash = forms.CharField(required=False)
# This lets us POST the data URIs of the unsaved previews so we can still
# show them if there were form errors.
unsaved_image_data = forms.CharField(required=False,
widget=forms.HiddenInput)
unsaved_image_type = forms.CharField(required=False,
widget=forms.HiddenInput)
def save(self, addon, commit=True):
if self.cleaned_data:
self.instance.addon = addon
if self.cleaned_data.get('DELETE'):
# Existing preview.
if self.instance.id:
self.instance.delete()
# User has no desire to save this preview.
return
super(PreviewForm, self).save(commit=commit)
if self.cleaned_data['upload_hash']:
upload_hash = self.cleaned_data['upload_hash']
upload_path = os.path.join(settings.TMP_PATH, 'preview',
upload_hash)
filetype = (os.path.splitext(upload_hash)[1][1:]
.replace('-', '/'))
if filetype in amo.VIDEO_TYPES:
self.instance.update(filetype=filetype)
vtasks.resize_video.delay(upload_path, self.instance,
user=amo.get_user(),
set_modified_on=[self.instance])
else:
self.instance.update(filetype='image/png')
tasks.resize_preview.delay(upload_path, self.instance,
set_modified_on=[self.instance])
class Meta:
model = Preview
fields = ('caption', 'file_upload', 'upload_hash', 'id', 'position')
class ImageAssetForm(happyforms.Form):
file_upload = forms.FileField(required=False)
upload_hash = forms.CharField(required=False)
# This lets us POST the data URIs of the unsaved previews so we can still
# show them if there were form errors.
unsaved_image_data = forms.CharField(required=False,
widget=forms.HiddenInput)
def setup(self, data):
self.size = data.get('size')
self.required = data.get('required')
self.slug = data.get('slug')
self.name = data.get('name')
self.description = data.get('description')
def get_id(self):
return '_'.join(map(str, self.size))
def save(self, addon):
if self.cleaned_data:
if self.cleaned_data['upload_hash']:
if not self.instance:
self.instance, c = ImageAsset.objects.get_or_create(
addon=addon, slug=self.slug)
upload_hash = self.cleaned_data['upload_hash']
upload_path = os.path.join(settings.TMP_PATH, 'image',
upload_hash)
self.instance.update(filetype='image/png')
tasks.resize_imageasset.delay(
upload_path, self.instance.image_path, self.size,
set_modified_on=[self.instance])
def clean(self):
self.cleaned_data = super(ImageAssetForm, self).clean()
if self.required and not self.cleaned_data['upload_hash']:
raise forms.ValidationError(
# L10n: {0} is the name of the image asset type.
_('The {0} image asset is required.').format(self.name))
return self.cleaned_data
class AdminSettingsForm(PreviewForm):
DELETE = forms.BooleanField(required=False)
mozilla_contact = forms.EmailField(required=False)
app_ratings = forms.MultipleChoiceField(
required=False,
choices=RATINGS_BY_NAME)
class Meta:
model = Preview
fields = ('caption', 'file_upload', 'upload_hash', 'position')
def __init__(self, *args, **kw):
# Get the object for the app's promo `Preview` and pass it to the form.
if kw.get('instance'):
addon = kw.pop('instance')
self.instance = addon
self.promo = addon.get_promo()
# Just consume the request - we don't care.
kw.pop('request', None)
super(AdminSettingsForm, self).__init__(*args, **kw)
if self.instance:
self.initial['mozilla_contact'] = addon.mozilla_contact
rs = []
for r in addon.content_ratings.all():
rating = RATINGS_BODIES[r.ratings_body].ratings[r.rating]
rs.append(ALL_RATINGS.index(rating))
self.initial['app_ratings'] = rs
def clean_caption(self):
return '__promo__'
def clean_position(self):
return -1
def clean_app_ratings(self):
ratings_ids = self.cleaned_data.get('app_ratings')
ratings = [ALL_RATINGS[int(i)] for i in ratings_ids]
ratingsbodies = set([r.ratingsbody for r in ratings])
if len(ratingsbodies) != len(ratings):
raise forms.ValidationError(_('Only one rating from each ratings '
'body may be selected.'))
return ratings_ids
def save(self, addon, commit=True):
if (self.cleaned_data.get('DELETE') and
'upload_hash' not in self.changed_data and self.promo.id):
self.promo.delete()
elif self.promo and 'upload_hash' in self.changed_data:
self.promo.delete()
elif self.cleaned_data.get('upload_hash'):
super(AdminSettingsForm, self).save(addon, True)
contact = self.cleaned_data.get('mozilla_contact')
if contact:
addon.update(mozilla_contact=contact)
ratings = self.cleaned_data.get('app_ratings')
if ratings:
before = set(addon.content_ratings.filter(rating__in=ratings)
.values_list('rating', flat=True))
after = set(int(r) for r in ratings)
addon.content_ratings.exclude(rating__in=after).delete()
for i in after - before:
r = ALL_RATINGS[i]
ContentRating.objects.create(addon=addon, rating=r.id,
ratings_body=r.ratingsbody.id)
else:
addon.content_ratings.all().delete()
uses_flash = self.cleaned_data.get('flash')
af = addon.get_latest_file()
if af is not None:
af.update(uses_flash=bool(uses_flash))
return addon
class BasePreviewFormSet(BaseModelFormSet):
def clean(self):
if any(self.errors):
return
at_least_one = False
for form in self.forms:
if (not form.cleaned_data.get('DELETE') and
form.cleaned_data.get('upload_hash') is not None):
at_least_one = True
if not at_least_one:
if waffle.switch_is_active('video-upload'):
raise forms.ValidationError(_('You must upload at least one '
'screenshot or video.'))
else:
raise forms.ValidationError(_('You must upload at least one '
'screenshot.'))
PreviewFormSet = modelformset_factory(Preview, formset=BasePreviewFormSet,
form=PreviewForm, can_delete=True,
extra=1)
class BaseImageAssetFormSet(BaseFormSet):
def __init__(self, *args, **kw):
self.app = kw.pop('app')
super(BaseImageAssetFormSet, self).__init__(*args, **kw)
self.initial = APP_IMAGE_SIZES
# Reconstruct the forms according to the initial data.
self._construct_forms()
for data, form in zip(APP_IMAGE_SIZES, self.forms):
form.setup(data)
form.app = self.app
try:
form.instance = ImageAsset.objects.get(addon=self.app,
slug=form.slug)
except ImageAsset.DoesNotExist:
form.instance = None
def save(self):
for f in self.forms:
f.save(self.app)
ImageAssetFormSet = formset_factory(form=ImageAssetForm,
formset=BaseImageAssetFormSet,
can_delete=False, extra=0)
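# Usage sketch (illustrative only): the formset above must be given the app as the
# `app` keyword, which BaseImageAssetFormSet pops in __init__ before rebuilding its
# forms from APP_IMAGE_SIZES. `request` and `app` are assumed names.
#
#     formset = ImageAssetFormSet(request.POST or None, app=app)
#     if request.method == 'POST' and formset.is_valid():
#         formset.save()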
class NewManifestForm(happyforms.Form):
manifest = forms.URLField(verify_exists=False)
def __init__(self, *args, **kwargs):
self.is_standalone = kwargs.pop('is_standalone', False)
super(NewManifestForm, self).__init__(*args, **kwargs)
def clean_manifest(self):
manifest = self.cleaned_data['manifest']
# Skip checking the domain for the standalone validator.
if not self.is_standalone:
verify_app_domain(manifest)
return manifest
class NewPackagedAppForm(happyforms.Form):
upload = forms.FileField()
def __init__(self, *args, **kwargs):
self.max_size = kwargs.pop('max_size', MAX_PACKAGED_APP_SIZE)
self.user = kwargs.pop('user', get_user())
self.file_upload = None
super(NewPackagedAppForm, self).__init__(*args, **kwargs)
def clean_upload(self):
upload = self.cleaned_data['upload']
if upload.size > self.max_size:
msg = 'Packaged app too large for submission.'
big = | |
- make_all - Has the required: foo'),
('dynamake', 'DEBUG', '#1 - make_all - Has the output: all time: 1'),
('dynamake', 'DEBUG',
'#1 - make_all - Write the persistent actions: .dynamake/make_all.actions.yaml'),
('dynamake', 'TRACE', '#1 - make_all - Done'),
('dynamake', 'DEBUG', '#0 - make - Synced'),
('dynamake', 'DEBUG', '#0 - make - Has the required: all'),
('dynamake', 'TRACE', '#0 - make - Done'),
])
sleep(2)
write_file('foo', '1\n')
self.check(_register, log=[
('dynamake', 'TRACE', '#0 - make - Targets: all'),
('dynamake', 'DEBUG', '#0 - make - Build the required: all'),
('dynamake', 'DEBUG',
'#0 - make - The required: all will be produced by the spawned: #1 - make_all'),
('dynamake', 'TRACE', '#1 - make_all - Call'),
('dynamake', 'DEBUG',
'#1 - make_all - Read the persistent actions: .dynamake/make_all.actions.yaml'),
('dynamake', 'DEBUG', '#1 - make_all - Existing output: all'),
('dynamake', 'DEBUG', '#1 - make_all - Oldest output: all time: 1'),
('dynamake', 'DEBUG', '#1 - make_all - Build the required: foo'),
('dynamake', 'DEBUG', '#1 - make_all - The required: foo is a source file'),
('dynamake', 'DEBUG', '#0 - make - Sync'),
('dynamake', 'DEBUG', '#1 - make_all - Synced'),
('dynamake', 'DEBUG', '#1 - make_all - Has the required: foo'),
('dynamake', 'DEBUG', '#1 - make_all - Newest input: foo time: 2'),
('dynamake', 'WHY',
'#1 - make_all - Must run actions because the modification time of '
'the required: foo has changed from: 0 into: 2'),
('dynamake', 'FILE', '#1 - make_all - Remove the stale output: all'),
('dynamake', 'INFO', '#1 - make_all - Run: touch all'),
('dynamake', 'TRACE', '#1 - make_all - Success: touch all'),
('dynamake', 'DEBUG', '#1 - make_all - Synced'),
('dynamake', 'DEBUG', '#1 - make_all - Has the required: foo'),
('dynamake', 'DEBUG', '#1 - make_all - Has the output: all time: 3'),
('dynamake', 'DEBUG',
'#1 - make_all - Write the persistent actions: .dynamake/make_all.actions.yaml'),
('dynamake', 'TRACE', '#1 - make_all - Done'),
('dynamake', 'DEBUG', '#0 - make - Synced'),
('dynamake', 'DEBUG', '#0 - make - Has the required: all'),
('dynamake', 'TRACE', '#0 - make - Done'),
])
def test_change_required_producer(self) -> None:
def _register_without() -> None:
@step(output='all')
async def make_all() -> None: # pylint: disable=unused-variable
require('foo')
await shell('touch all')
write_file('foo', '!\n')
sys.argv += ['--jobs', '0']
self.check(_register_without, log=[
('dynamake', 'TRACE', '#0 - make - Targets: all'),
('dynamake', 'DEBUG', '#0 - make - Build the required: all'),
('dynamake', 'DEBUG',
'#0 - make - The required: all will be produced by the spawned: #1 - make_all'),
('dynamake', 'TRACE', '#1 - make_all - Call'),
('dynamake', 'WHY',
'#1 - make_all - Must run actions because missing '
'the persistent actions: .dynamake/make_all.actions.yaml'),
('dynamake', 'DEBUG', '#1 - make_all - Nonexistent required output(s): all'),
('dynamake', 'DEBUG', '#1 - make_all - Build the required: foo'),
('dynamake', 'DEBUG', '#1 - make_all - The required: foo is a source file'),
('dynamake', 'DEBUG', '#1 - make_all - Synced'),
('dynamake', 'DEBUG', '#1 - make_all - Has the required: foo'),
('dynamake', 'INFO', '#1 - make_all - Run: touch all'),
('dynamake', 'DEBUG', '#0 - make - Sync'),
('dynamake', 'TRACE', '#1 - make_all - Success: touch all'),
('dynamake', 'DEBUG', '#1 - make_all - Synced'),
('dynamake', 'DEBUG', '#1 - make_all - Has the required: foo'),
('dynamake', 'DEBUG', '#1 - make_all - Has the output: all time: 1'),
('dynamake', 'DEBUG',
'#1 - make_all - Write the persistent actions: .dynamake/make_all.actions.yaml'),
('dynamake', 'TRACE', '#1 - make_all - Done'),
('dynamake', 'DEBUG', '#0 - make - Synced'),
('dynamake', 'DEBUG', '#0 - make - Has the required: all'),
('dynamake', 'TRACE', '#0 - make - Done'),
])
def _register_with() -> None:
@step(output='all')
async def make_all() -> None: # pylint: disable=unused-variable
require('foo')
await shell('touch all')
@step(output='foo')
async def make_foo() -> None: # pylint: disable=unused-variable
await shell('touch foo')
self.check(_register_with, log=[
('dynamake', 'TRACE', '#0 - make - Targets: all'),
('dynamake', 'DEBUG', '#0 - make - Build the required: all'),
('dynamake', 'DEBUG',
'#0 - make - The required: all will be produced by the spawned: #1 - make_all'),
('dynamake', 'TRACE', '#1 - make_all - Call'),
('dynamake', 'DEBUG',
'#1 - make_all - Read the persistent actions: .dynamake/make_all.actions.yaml'),
('dynamake', 'DEBUG', '#1 - make_all - Existing output: all'),
('dynamake', 'DEBUG', '#1 - make_all - Oldest output: all time: 1'),
('dynamake', 'DEBUG', '#1 - make_all - Build the required: foo'),
('dynamake', 'DEBUG',
'#1 - make_all - The required: foo will be produced by the spawned: #1.1 - make_foo'),
('dynamake', 'DEBUG', '#1 - make_all - Sync'),
('dynamake', 'DEBUG', '#0 - make - Sync'),
('dynamake', 'TRACE', '#1.1 - make_foo - Call'),
('dynamake', 'WHY',
'#1.1 - make_foo - Must run actions because missing '
'the persistent actions: .dynamake/make_foo.actions.yaml'),
('dynamake', 'DEBUG', '#1.1 - make_foo - Existing output: foo'),
('dynamake', 'DEBUG', '#1.1 - make_foo - Synced'),
('dynamake', 'FILE', '#1.1 - make_foo - Remove the stale output: foo'),
('dynamake', 'INFO', '#1.1 - make_foo - Run: touch foo'),
('dynamake', 'TRACE', '#1.1 - make_foo - Success: touch foo'),
('dynamake', 'DEBUG', '#1.1 - make_foo - Synced'),
('dynamake', 'DEBUG', '#1.1 - make_foo - Has the output: foo time: 2'),
('dynamake', 'DEBUG',
'#1.1 - make_foo - Write the persistent actions: .dynamake/make_foo.actions.yaml'),
('dynamake', 'TRACE', '#1.1 - make_foo - Done'),
('dynamake', 'DEBUG', '#1 - make_all - Synced'),
('dynamake', 'DEBUG', '#1 - make_all - Has the required: foo'),
('dynamake', 'DEBUG', '#1 - make_all - Newest input: foo time: 2'),
('dynamake', 'WHY',
'#1 - make_all - Must run actions because the producer of '
'the required: foo has changed from: source file into: make_foo'),
('dynamake', 'FILE', '#1 - make_all - Remove the stale output: all'),
('dynamake', 'INFO', '#1 - make_all - Run: touch all'),
('dynamake', 'TRACE', '#1 - make_all - Success: touch all'),
('dynamake', 'DEBUG', '#1 - make_all - Synced'),
('dynamake', 'DEBUG', '#1 - make_all - Has the required: foo'),
('dynamake', 'DEBUG', '#1 - make_all - Has the output: all time: 3'),
('dynamake', 'DEBUG',
'#1 - make_all - Write the persistent actions: .dynamake/make_all.actions.yaml'),
('dynamake', 'TRACE', '#1 - make_all - Done'),
('dynamake', 'DEBUG', '#0 - make - Synced'),
('dynamake', 'DEBUG', '#0 - make - Has the required: all'),
('dynamake', 'TRACE', '#0 - make - Done'),
])
def test_resources(self) -> None:
def _register() -> None:
@step(output=phony('all'))
async def make_all() -> None: # pylint: disable=unused-variable
require('foo')
require('bar')
@step(output='foo')
async def make_foo() -> None: # pylint: disable=unused-variable
await shell('sleep 1; touch foo')
@step(output='bar')
async def make_bar() -> None: # pylint: disable=unused-variable
await shell('sleep 2; touch bar')
sys.argv += ['--jobs', '1', '--rebuild_changed_actions', 'false']
self.check(_register, log=[
('dynamake', 'TRACE', '#0 - make - Targets: all'),
('dynamake', 'DEBUG', '#0 - make - Available resources: jobs=1'),
('dynamake', 'DEBUG', '#0 - make - Build the required: all'),
('dynamake', 'DEBUG',
'#0 - make - The required: all will be produced by the spawned: #1 - make_all'),
('dynamake', 'TRACE', '#1 - make_all - Call'),
('dynamake', 'DEBUG', '#1 - make_all - Build the required: foo'),
('dynamake', 'DEBUG',
'#1 - make_all - The required: foo will be produced by the spawned: #1.1 - make_foo'),
('dynamake', 'DEBUG', '#1 - make_all - Build the required: bar'),
('dynamake', 'DEBUG',
'#1 - make_all - The required: bar will be produced by the spawned: #1.2 - make_bar'),
('dynamake', 'DEBUG', '#1 - make_all - Sync'),
('dynamake', 'DEBUG', '#0 - make - Sync'),
('dynamake', 'TRACE', '#1.1 - make_foo - Call'),
('dynamake', 'DEBUG', '#1.1 - make_foo - Nonexistent required output(s): foo'),
('dynamake', 'DEBUG', '#1.1 - make_foo - Synced'),
('dynamake', 'WHY',
'#1.1 - make_foo - Must run actions to create the missing output(s): foo'),
('dynamake', 'DEBUG', '#1.1 - make_foo - Grab resources: jobs=1'),
('dynamake', 'DEBUG', '#1.1 - make_foo - Available resources: jobs=0'),
('dynamake', 'INFO', '#1.1 - make_foo - Run: sleep 1; touch foo'),
('dynamake', 'TRACE', '#1.2 - make_bar - Call'),
('dynamake', 'DEBUG', '#1.2 - make_bar - Nonexistent required output(s): bar'),
('dynamake', 'DEBUG', '#1.2 - make_bar - Synced'),
('dynamake', 'WHY',
'#1.2 - make_bar - Must run actions to create the missing output(s): bar'),
('dynamake', 'DEBUG', '#1.2 - make_bar - Available resources: jobs=0'),
('dynamake', 'DEBUG', '#1.2 - make_bar - Paused by waiting for resources: jobs=1'),
('dynamake', 'TRACE', '#1.1 - make_foo - Success: sleep 1; touch foo'),
| |
import sys
from pypy.rlib.debug import check_nonneg
from pypy.rlib.unroll import unrolling_iterable
from pypy.rlib.rsre import rsre_char
from pypy.tool.sourcetools import func_with_new_name
from pypy.rlib.objectmodel import we_are_translated
from pypy.rlib import jit
from pypy.rlib.rsre.rsre_jit import install_jitdriver, install_jitdriver_spec
OPCODE_FAILURE = 0
OPCODE_SUCCESS = 1
OPCODE_ANY = 2
OPCODE_ANY_ALL = 3
OPCODE_ASSERT = 4
OPCODE_ASSERT_NOT = 5
OPCODE_AT = 6
OPCODE_BRANCH = 7
#OPCODE_CALL = 8
OPCODE_CATEGORY = 9
#OPCODE_CHARSET = 10
#OPCODE_BIGCHARSET = 11
OPCODE_GROUPREF = 12
OPCODE_GROUPREF_EXISTS = 13
OPCODE_GROUPREF_IGNORE = 14
OPCODE_IN = 15
OPCODE_IN_IGNORE = 16
OPCODE_INFO = 17
OPCODE_JUMP = 18
OPCODE_LITERAL = 19
OPCODE_LITERAL_IGNORE = 20
OPCODE_MARK = 21
OPCODE_MAX_UNTIL = 22
OPCODE_MIN_UNTIL = 23
OPCODE_NOT_LITERAL = 24
OPCODE_NOT_LITERAL_IGNORE = 25
#OPCODE_NEGATE = 26
#OPCODE_RANGE = 27
OPCODE_REPEAT = 28
OPCODE_REPEAT_ONE = 29
#OPCODE_SUBPATTERN = 30
OPCODE_MIN_REPEAT_ONE = 31
# ____________________________________________________________
_seen_specname = {}
def specializectx(func):
"""A decorator that specializes 'func(ctx,...)' for each concrete subclass
of AbstractMatchContext. During annotation, if 'ctx' is known to be a
specific subclass, calling 'func' is a direct call; if 'ctx' is only known
to be of class AbstractMatchContext, calling 'func' is an indirect call.
"""
assert func.func_code.co_varnames[0] == 'ctx'
specname = '_spec_' + func.func_name
while specname in _seen_specname:
specname += '_'
_seen_specname[specname] = True
# Install a copy of the function under the name '_spec_funcname' in each
# concrete subclass
specialized_methods = []
for prefix, concreteclass in [('str', StrMatchContext),
('uni', UnicodeMatchContext)]:
newfunc = func_with_new_name(func, prefix + specname)
assert not hasattr(concreteclass, specname)
setattr(concreteclass, specname, newfunc)
specialized_methods.append(newfunc)
# Return a dispatcher function, specialized on the exact type of 'ctx'
def dispatch(ctx, *args):
return getattr(ctx, specname)(*args)
dispatch._annspecialcase_ = 'specialize:argtype(0)'
dispatch._specialized_methods_ = specialized_methods
return func_with_new_name(dispatch, specname)
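# Usage sketch (illustrative only): a helper whose first argument is 'ctx' can be
# decorated with @specializectx to get a per-context-class copy. In the real module
# such helpers are defined only after StrMatchContext and UnicodeMatchContext exist,
# because the decorator installs the specialized copies on those classes at
# decoration time.
#
#     @specializectx
#     def _char_at(ctx, ptr):
#         return ctx.str(ptr)
#
#     _char_at(ctx, 0)   # becomes a direct call when ctx's exact class is known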
# ____________________________________________________________
class Error(Exception):
def __init__(self, msg):
self.msg = msg
class AbstractMatchContext(object):
"""Abstract base class"""
_immutable_fields_ = ['pattern[*]', 'flags', 'end']
match_start = 0
match_end = 0
match_marks = None
match_marks_flat = None
def __init__(self, pattern, match_start, end, flags):
# 'match_start' and 'end' must be known to be non-negative
# and they must not be more than len(string).
check_nonneg(match_start)
check_nonneg(end)
self.pattern = pattern
self.match_start = match_start
self.end = end
self.flags = flags
def reset(self, start):
self.match_start = start
self.match_marks = None
self.match_marks_flat = None
def pat(self, index):
check_nonneg(index)
result = self.pattern[index]
# Check that we only return non-negative integers from this helper.
# It is possible that self.pattern contains negative integers
# (see set_charset() and set_bigcharset() in rsre_char.py)
# but they should not be fetched via this helper here.
assert result >= 0
return result
def str(self, index):
"""NOT_RPYTHON: Must be overridden in a concrete subclass.
The tag ^^^ here is used to generate a translation-time crash
if there is a call to str() that is indirect. All calls must
be direct for performance reasons; you need to specialize the
caller with @specializectx."""
raise NotImplementedError
def lowstr(self, index):
"""NOT_RPYTHON: Similar to str()."""
raise NotImplementedError
def get_mark(self, gid):
return find_mark(self.match_marks, gid)
def flatten_marks(self):
# for testing
if self.match_marks_flat is None:
self.match_marks_flat = [self.match_start, self.match_end]
mark = self.match_marks
if mark is not None:
self.match_lastindex = mark.gid
else:
self.match_lastindex = -1
while mark is not None:
index = mark.gid + 2
while index >= len(self.match_marks_flat):
self.match_marks_flat.append(-1)
if self.match_marks_flat[index] == -1:
self.match_marks_flat[index] = mark.position
mark = mark.prev
self.match_marks = None # clear
return self.match_marks_flat
def span(self, groupnum=0):
# compatibility
fmarks = self.flatten_marks()
groupnum *= 2
if groupnum >= len(fmarks):
return (-1, -1)
return (fmarks[groupnum], fmarks[groupnum+1])
def group(self, groupnum=0):
frm, to = self.span(groupnum)
if 0 <= frm <= to:
return self._string[frm:to]
else:
return None
def fresh_copy(self, start):
raise NotImplementedError
class StrMatchContext(AbstractMatchContext):
"""Concrete subclass for matching in a plain string."""
def __init__(self, pattern, string, match_start, end, flags):
AbstractMatchContext.__init__(self, pattern, match_start, end, flags)
self._string = string
if not we_are_translated() and isinstance(string, unicode):
self.flags |= rsre_char.SRE_FLAG_UNICODE # for rsre_re.py
def str(self, index):
check_nonneg(index)
return ord(self._string[index])
def lowstr(self, index):
c = self.str(index)
return rsre_char.getlower(c, self.flags)
def fresh_copy(self, start):
return StrMatchContext(self.pattern, self._string, start,
self.end, self.flags)
class UnicodeMatchContext(AbstractMatchContext):
"""Concrete subclass for matching in a unicode string."""
def __init__(self, pattern, unicodestr, match_start, end, flags):
AbstractMatchContext.__init__(self, pattern, match_start, end, flags)
self._unicodestr = unicodestr
def str(self, index):
check_nonneg(index)
return ord(self._unicodestr[index])
def lowstr(self, index):
c = self.str(index)
return rsre_char.getlower(c, self.flags)
def fresh_copy(self, start):
return UnicodeMatchContext(self.pattern, self._unicodestr, start,
self.end, self.flags)
# ____________________________________________________________
class Mark(object):
_immutable_ = True
def __init__(self, gid, position, prev):
self.gid = gid
self.position = position
self.prev = prev # chained list
def find_mark(mark, gid):
while mark is not None:
if mark.gid == gid:
return mark.position
mark = mark.prev
return -1
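# Usage sketch (illustrative only): marks form a singly chained list, and find_mark()
# walks it to return the most recently recorded position for a given mark index.
#
#     m = Mark(0, 5, None)      # mark 0 recorded at position 5
#     m = Mark(1, 7, m)         # mark 1 recorded at position 7, chained to the previous
#     find_mark(m, 0)           # -> 5
#     find_mark(m, 2)           # -> -1 (never recorded)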
# ____________________________________________________________
class MatchResult(object):
subresult = None
def move_to_next_result(self, ctx):
# returns either 'self' or None
result = self.subresult
if result is None:
return
if result.move_to_next_result(ctx):
return self
return self.find_next_result(ctx)
def find_next_result(self, ctx):
raise NotImplementedError
MATCHED_OK = MatchResult()
class BranchMatchResult(MatchResult):
def __init__(self, ppos, ptr, marks):
self.ppos = ppos
self.start_ptr = ptr
self.start_marks = marks
@jit.unroll_safe
def find_first_result(self, ctx):
ppos = jit.hint(self.ppos, promote=True)
while ctx.pat(ppos):
result = sre_match(ctx, ppos + 1, self.start_ptr, self.start_marks)
ppos += ctx.pat(ppos)
if result is not None:
self.subresult = result
self.ppos = ppos
return self
find_next_result = find_first_result
class RepeatOneMatchResult(MatchResult):
install_jitdriver('RepeatOne',
greens=['nextppos', 'ctx.pattern'],
reds=['ptr', 'self', 'ctx'],
debugprint=(1, 0)) # indices in 'greens'
def __init__(self, nextppos, minptr, ptr, marks):
self.nextppos = nextppos
self.minptr = minptr
self.start_ptr = ptr
self.start_marks = marks
def find_first_result(self, ctx):
ptr = self.start_ptr
nextppos = self.nextppos
while ptr >= self.minptr:
ctx.jitdriver_RepeatOne.jit_merge_point(
self=self, ptr=ptr, ctx=ctx, nextppos=nextppos)
result = sre_match(ctx, nextppos, ptr, self.start_marks)
ptr -= 1
if result is not None:
self.subresult = result
self.start_ptr = ptr
return self
find_next_result = find_first_result
class MinRepeatOneMatchResult(MatchResult):
install_jitdriver('MinRepeatOne',
greens=['nextppos', 'ppos3', 'ctx.pattern'],
reds=['ptr', 'self', 'ctx'],
debugprint=(2, 0)) # indices in 'greens'
def __init__(self, nextppos, ppos3, maxptr, ptr, marks):
self.nextppos = nextppos
self.ppos3 = ppos3
self.maxptr = maxptr
self.start_ptr = ptr
self.start_marks = marks
def find_first_result(self, ctx):
ptr = self.start_ptr
nextppos = self.nextppos
ppos3 = self.ppos3
while ptr <= self.maxptr:
ctx.jitdriver_MinRepeatOne.jit_merge_point(
self=self, ptr=ptr, ctx=ctx, nextppos=nextppos, ppos3=ppos3)
result = sre_match(ctx, nextppos, ptr, self.start_marks)
if result is not None:
self.subresult = result
self.start_ptr = ptr
return self
if not self.next_char_ok(ctx, ptr, ppos3):
break
ptr += 1
def find_next_result(self, ctx):
ptr = self.start_ptr
if not self.next_char_ok(ctx, ptr, self.ppos3):
return
self.start_ptr = ptr + 1
return self.find_first_result(ctx)
def next_char_ok(self, ctx, ptr, ppos):
if ptr == ctx.end:
return False
op = ctx.pat(ppos)
for op1, checkerfn in unroll_char_checker:
if op1 == op:
return checkerfn(ctx, ptr, ppos)
raise Error("next_char_ok[%d]" % op)
class AbstractUntilMatchResult(MatchResult):
def __init__(self, ppos, tailppos, ptr, marks):
self.ppos = ppos
self.tailppos = tailppos
self.cur_ptr = ptr
self.cur_marks = marks
self.pending = None
self.num_pending = 0
class Pending(object):
def __init__(self, ptr, marks, enum, next):
self.ptr = ptr
self.marks = marks
self.enum = enum
self.next = next # chained list
class MaxUntilMatchResult(AbstractUntilMatchResult):
install_jitdriver('MaxUntil',
greens=['ppos', 'tailppos', 'match_more', 'ctx.pattern'],
reds=['ptr', 'marks', 'self', 'ctx'],
debugprint=(3, 0, 2))
def find_first_result(self, ctx):
return self.search_next(ctx, match_more=True)
def find_next_result(self, ctx):
return self.search_next(ctx, match_more=False)
def search_next(self, ctx, match_more):
ppos = self.ppos
tailppos = self.tailppos
ptr = self.cur_ptr
marks = self.cur_marks
while True:
ctx.jitdriver_MaxUntil.jit_merge_point(
ppos=ppos, tailppos=tailppos, match_more=match_more,
ptr=ptr, marks=marks, self=self, ctx=ctx)
if match_more:
max = ctx.pat(ppos+2)
if max == 65535 or self.num_pending < max:
# try to match one more 'item'
enum = sre_match(ctx, ppos + 3, ptr, marks)
else:
enum = None # 'max' reached, no more matches
else:
p = self.pending
if p is None:
return
self.pending = p.next
self.num_pending -= 1
ptr = p.ptr
marks = p.marks
enum = p.enum.move_to_next_result(ctx)
#
# zero-width match protection
min = ctx.pat(ppos+1)
if self.num_pending >= min:
while enum is not None and ptr == ctx.match_end:
enum = enum.move_to_next_result(ctx)
# matched marks for zero-width assertions
marks = ctx.match_marks
#
if enum is not None:
# matched one more 'item'. record it and continue.
self.pending = Pending(ptr, marks, enum, self.pending)
self.num_pending += 1
ptr = ctx.match_end
marks = ctx.match_marks
match_more = True
else:
# 'item' no longer matches.
if self.num_pending >= min:
# try to match 'tail' if we have enough 'item'
result = sre_match(ctx, tailppos, ptr, marks)
if result is not None:
self.subresult = result
self.cur_ptr = ptr
self.cur_marks = marks
return self
match_more = False
class MinUntilMatchResult(AbstractUntilMatchResult):
def find_first_result(self, ctx):
return self.search_next(ctx, resume=False)
def find_next_result(self, ctx):
return self.search_next(ctx, resume=True)
def search_next(self, ctx, resume):
# XXX missing jit support here
ppos = self.ppos
min = ctx.pat(ppos+1)
max = ctx.pat(ppos+2)
ptr = self.cur_ptr
marks = self.cur_marks
while True:
| |
:param pulumi.Input[str] description: Network security rule description.
:param pulumi.Input[Sequence[pulumi.Input[str]]] destination_address_prefixes: The destination address prefixes. CIDR or destination IP ranges.
:param pulumi.Input[Sequence[pulumi.Input[str]]] destination_port_ranges: The destination port ranges.
:param pulumi.Input[Sequence[pulumi.Input[str]]] source_address_prefixes: The CIDR or source IP ranges.
:param pulumi.Input[Sequence[pulumi.Input[str]]] source_port_ranges: The source port ranges.
"""
pulumi.set(__self__, "access", access)
pulumi.set(__self__, "direction", direction)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "priority", priority)
pulumi.set(__self__, "protocol", protocol)
if description is not None:
pulumi.set(__self__, "description", description)
if destination_address_prefixes is not None:
pulumi.set(__self__, "destination_address_prefixes", destination_address_prefixes)
if destination_port_ranges is not None:
pulumi.set(__self__, "destination_port_ranges", destination_port_ranges)
if source_address_prefixes is not None:
pulumi.set(__self__, "source_address_prefixes", source_address_prefixes)
if source_port_ranges is not None:
pulumi.set(__self__, "source_port_ranges", source_port_ranges)
@property
@pulumi.getter
def access(self) -> pulumi.Input[Union[str, 'Access']]:
"""
The network traffic is allowed or denied.
"""
return pulumi.get(self, "access")
@access.setter
def access(self, value: pulumi.Input[Union[str, 'Access']]):
pulumi.set(self, "access", value)
@property
@pulumi.getter
def direction(self) -> pulumi.Input[Union[str, 'Direction']]:
"""
Network security rule direction.
"""
return pulumi.get(self, "direction")
@direction.setter
def direction(self, value: pulumi.Input[Union[str, 'Direction']]):
pulumi.set(self, "direction", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Network security rule name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def priority(self) -> pulumi.Input[int]:
"""
        The priority of the rule. The value can be in the range 1000 to 3000. Values outside this range are reserved for the Service Fabric ManagedCluster Resource Provider. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule.
"""
return pulumi.get(self, "priority")
@priority.setter
def priority(self, value: pulumi.Input[int]):
pulumi.set(self, "priority", value)
@property
@pulumi.getter
def protocol(self) -> pulumi.Input[Union[str, 'NsgProtocol']]:
"""
Network protocol this rule applies to.
"""
return pulumi.get(self, "protocol")
@protocol.setter
def protocol(self, value: pulumi.Input[Union[str, 'NsgProtocol']]):
pulumi.set(self, "protocol", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Network security rule description.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="destinationAddressPrefixes")
def destination_address_prefixes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The destination address prefixes. CIDR or destination IP ranges.
"""
return pulumi.get(self, "destination_address_prefixes")
@destination_address_prefixes.setter
def destination_address_prefixes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "destination_address_prefixes", value)
@property
@pulumi.getter(name="destinationPortRanges")
def destination_port_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The destination port ranges.
"""
return pulumi.get(self, "destination_port_ranges")
@destination_port_ranges.setter
def destination_port_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "destination_port_ranges", value)
@property
@pulumi.getter(name="sourceAddressPrefixes")
def source_address_prefixes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The CIDR or source IP ranges.
"""
return pulumi.get(self, "source_address_prefixes")
@source_address_prefixes.setter
def source_address_prefixes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "source_address_prefixes", value)
@property
@pulumi.getter(name="sourcePortRanges")
def source_port_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The source port ranges.
"""
return pulumi.get(self, "source_port_ranges")
@source_port_ranges.setter
def source_port_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "source_port_ranges", value)
@pulumi.input_type
class NodeTypeSkuArgs:
def __init__(__self__, *,
capacity: pulumi.Input[int],
name: Optional[pulumi.Input[str]] = None,
tier: Optional[pulumi.Input[str]] = None):
"""
Describes a node type sku.
:param pulumi.Input[int] capacity: The number of nodes in the node type.<br /><br />If present in request it will override properties.vmInstanceCount.
        :param pulumi.Input[str] name: The sku name. <br /><br />Name is internally generated and is used in auto-scale scenarios.<br /> The property cannot be changed to a value other than the generated one.<br /> To avoid deployment errors, please omit the property.
:param pulumi.Input[str] tier: Specifies the tier of the node type. <br /><br /> Possible Values:<br /> **Standard**
"""
pulumi.set(__self__, "capacity", capacity)
if name is not None:
pulumi.set(__self__, "name", name)
if tier is not None:
pulumi.set(__self__, "tier", tier)
@property
@pulumi.getter
def capacity(self) -> pulumi.Input[int]:
"""
The number of nodes in the node type.<br /><br />If present in request it will override properties.vmInstanceCount.
"""
return pulumi.get(self, "capacity")
@capacity.setter
def capacity(self, value: pulumi.Input[int]):
pulumi.set(self, "capacity", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
        The sku name. <br /><br />Name is internally generated and is used in auto-scale scenarios.<br /> The property cannot be changed to a value other than the generated one.<br /> To avoid deployment errors, please omit the property.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def tier(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the tier of the node type. <br /><br /> Possible Values:<br /> **Standard**
"""
return pulumi.get(self, "tier")
@tier.setter
def tier(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tier", value)
@pulumi.input_type
class PartitionInstanceCountScaleMechanismArgs:
def __init__(__self__, *,
kind: pulumi.Input[str],
max_instance_count: pulumi.Input[int],
min_instance_count: pulumi.Input[int],
scale_increment: pulumi.Input[int]):
"""
Represents a scaling mechanism for adding or removing instances of stateless service partition.
:param pulumi.Input[str] kind: Enumerates the ways that a service can be partitioned.
Expected value is 'ScalePartitionInstanceCount'.
:param pulumi.Input[int] max_instance_count: Maximum number of instances of the partition.
:param pulumi.Input[int] min_instance_count: Minimum number of instances of the partition.
:param pulumi.Input[int] scale_increment: The number of instances to add or remove during a scaling operation.
"""
pulumi.set(__self__, "kind", 'ScalePartitionInstanceCount')
pulumi.set(__self__, "max_instance_count", max_instance_count)
pulumi.set(__self__, "min_instance_count", min_instance_count)
pulumi.set(__self__, "scale_increment", scale_increment)
@property
@pulumi.getter
def kind(self) -> pulumi.Input[str]:
"""
Enumerates the ways that a service can be partitioned.
Expected value is 'ScalePartitionInstanceCount'.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: pulumi.Input[str]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter(name="maxInstanceCount")
def max_instance_count(self) -> pulumi.Input[int]:
"""
Maximum number of instances of the partition.
"""
return pulumi.get(self, "max_instance_count")
@max_instance_count.setter
def max_instance_count(self, value: pulumi.Input[int]):
pulumi.set(self, "max_instance_count", value)
@property
@pulumi.getter(name="minInstanceCount")
def min_instance_count(self) -> pulumi.Input[int]:
"""
Minimum number of instances of the partition.
"""
return pulumi.get(self, "min_instance_count")
@min_instance_count.setter
def min_instance_count(self, value: pulumi.Input[int]):
pulumi.set(self, "min_instance_count", value)
@property
@pulumi.getter(name="scaleIncrement")
def scale_increment(self) -> pulumi.Input[int]:
"""
The number of instances to add or remove during a scaling operation.
"""
return pulumi.get(self, "scale_increment")
@scale_increment.setter
def scale_increment(self, value: pulumi.Input[int]):
pulumi.set(self, "scale_increment", value)
@pulumi.input_type
class RollingUpgradeMonitoringPolicyArgs:
def __init__(__self__, *,
failure_action: pulumi.Input[Union[str, 'FailureAction']],
health_check_retry_timeout: pulumi.Input[str],
health_check_stable_duration: pulumi.Input[str],
health_check_wait_duration: pulumi.Input[str],
upgrade_domain_timeout: pulumi.Input[str],
upgrade_timeout: pulumi.Input[str]):
"""
The policy used for monitoring the application upgrade
:param pulumi.Input[Union[str, 'FailureAction']] failure_action: The compensating action to perform when a Monitored upgrade encounters monitoring policy or health policy violations. Invalid indicates the failure action is invalid. Rollback specifies that the upgrade will start rolling back automatically. Manual indicates that the upgrade will switch to UnmonitoredManual upgrade mode.
:param pulumi.Input[str] health_check_retry_timeout: The amount of time to retry health evaluation when the application or cluster is unhealthy before FailureAction is executed. It is interpreted as a string representing an ISO 8601 duration with following format "hh:mm:ss.fff".
:param pulumi.Input[str] health_check_stable_duration: The amount of time that the application or cluster must remain healthy before the upgrade proceeds to the next upgrade domain. It is interpreted as a string representing an ISO 8601 duration with following format "hh:mm:ss.fff".
:param pulumi.Input[str] health_check_wait_duration: The amount of time to wait after completing an upgrade domain before applying health policies. It is interpreted as a string representing an ISO 8601 duration with following format "hh:mm:ss.fff".
:param pulumi.Input[str] upgrade_domain_timeout: The amount of time each upgrade domain has to complete before FailureAction is executed. Cannot be larger than 12 hours. It is interpreted as a string representing an ISO 8601 duration with following format "hh:mm:ss.fff".
:param pulumi.Input[str] upgrade_timeout: The amount of time the overall upgrade has to complete before FailureAction is executed. Cannot be larger than 12 hours. It is interpreted as a string representing an ISO 8601 duration with following format "hh:mm:ss.fff".
"""
pulumi.set(__self__, "failure_action", failure_action)
pulumi.set(__self__, "health_check_retry_timeout", health_check_retry_timeout)
pulumi.set(__self__, "health_check_stable_duration", health_check_stable_duration)
pulumi.set(__self__, "health_check_wait_duration", health_check_wait_duration)
pulumi.set(__self__, "upgrade_domain_timeout", upgrade_domain_timeout)
pulumi.set(__self__, "upgrade_timeout", upgrade_timeout)
@property
@pulumi.getter(name="failureAction")
def failure_action(self) -> pulumi.Input[Union[str, 'FailureAction']]:
"""
The compensating action to perform when a Monitored upgrade encounters monitoring policy or health policy violations. Invalid indicates the failure action is invalid. Rollback specifies that the upgrade will start rolling back automatically. Manual indicates that the upgrade will switch to UnmonitoredManual upgrade mode.
"""
return pulumi.get(self, "failure_action")
@failure_action.setter
def failure_action(self, value: pulumi.Input[Union[str, 'FailureAction']]):
pulumi.set(self, "failure_action", value)
@property
@pulumi.getter(name="healthCheckRetryTimeout")
def health_check_retry_timeout(self) -> pulumi.Input[str]:
"""
The amount of time to retry health evaluation when the application or cluster is unhealthy before FailureAction is executed. It is interpreted as a string representing an ISO 8601 duration with following format "hh:mm:ss.fff".
"""
return pulumi.get(self, "health_check_retry_timeout")
@health_check_retry_timeout.setter
def health_check_retry_timeout(self, value: pulumi.Input[str]):
pulumi.set(self, "health_check_retry_timeout", value)
@property
@pulumi.getter(name="healthCheckStableDuration")
def health_check_stable_duration(self) -> pulumi.Input[str]:
"""
The amount of time that the application or cluster must remain healthy before the upgrade proceeds to the next upgrade domain. It is interpreted as a string representing an ISO 8601 duration with following format "hh:mm:ss.fff".
"""
| |
cms.vdouble(2.56363, 0.0),
SMB_12 = cms.vdouble(2.128, -0.956, 0.0, 0.199, 0.0,
0.0),
SMB_12_0_scale = cms.vdouble(2.283221, 0.0),
SMB_20 = cms.vdouble(1.011, -0.052, 0.0, 0.188, 0.0,
0.0),
SMB_20_0_scale = cms.vdouble(1.486168, 0.0),
SMB_21 = cms.vdouble(1.043, -0.124, 0.0, 0.183, 0.0,
0.0),
SMB_21_0_scale = cms.vdouble(1.58384, 0.0),
SMB_22 = cms.vdouble(1.474, -0.758, 0.0, 0.185, 0.0,
0.0),
SMB_22_0_scale = cms.vdouble(1.346681, 0.0),
SMB_30 = cms.vdouble(0.505, -0.022, 0.0, 0.215, 0.0,
0.0),
SMB_30_0_scale = cms.vdouble(-3.629838, 0.0),
SMB_31 = cms.vdouble(0.549, -0.145, 0.0, 0.207, 0.0,
0.0),
SMB_31_0_scale = cms.vdouble(-3.323768, 0.0),
SMB_32 = cms.vdouble(0.67, -0.327, 0.0, 0.22, 0.0,
0.0),
SMB_32_0_scale = cms.vdouble(-3.054156, 0.0),
SME_11 = cms.vdouble(3.295, -1.527, 0.112, 0.378, 0.02,
0.0),
SME_11_0_scale = cms.vdouble(1.325085, 0.0),
SME_12 = cms.vdouble(0.102, 0.599, 0.0, 0.38, 0.0,
0.0),
SME_12_0_scale = cms.vdouble(2.279181, 0.0),
SME_13 = cms.vdouble(-1.286, 1.711, 0.0, 0.356, 0.0,
0.0),
SME_13_0_scale = cms.vdouble(0.104905, 0.0),
SME_21 = cms.vdouble(-0.529, 1.194, -0.358, 0.472, 0.086,
0.0),
SME_21_0_scale = cms.vdouble(-0.040862, 0.0),
SME_22 = cms.vdouble(-1.207, 1.491, -0.251, 0.189, 0.243,
0.0),
SME_22_0_scale = cms.vdouble(-3.457901, 0.0),
SME_31 = cms.vdouble(-1.594, 1.482, -0.317, 0.487, 0.097,
0.0),
SME_32 = cms.vdouble(-0.901, 1.333, -0.47, 0.41, 0.073,
0.0),
SME_41 = cms.vdouble(-0.003, 0.005, 0.005, 0.608, 0.076,
0.0),
SME_42 = cms.vdouble(-0.003, 0.005, 0.005, 0.608, 0.076,
0.0),
beamSpotTag = cms.InputTag("hltOnlineBeamSpot"),
crackEtas = cms.vdouble(0.2, 1.6, 1.7),
crackWindow = cms.double(0.04),
deltaEtaCrackSearchWindow = cms.double(0.25),
deltaEtaSearchWindow = cms.double(0.2),
deltaPhiSearchWindow = cms.double(0.25),
scaleDT = cms.bool(True)
)
process.hltL3MuonCandidates = cms.EDProducer("L3MuonCandidateProducer",
InputLinksObjects = cms.InputTag("hltL3MuonsLinksCombination"),
InputObjects = cms.InputTag("hltL3Muons"),
MuonPtOption = cms.string('Tracker')
)
process.hltL3Muons = cms.EDProducer("L3TrackCombiner",
labels = cms.VInputTag("hltL3MuonsOIState", "hltL3MuonsOIHit", "hltL3MuonsIOHit")
)
process.hltL3MuonsIOHit = cms.EDProducer("L3MuonProducer",
L3TrajBuilderParameters = cms.PSet(
GlbRefitterParameters = cms.PSet(
CSCRecSegmentLabel = cms.InputTag("hltCscSegments"),
Chi2CutCSC = cms.double(150.0),
Chi2CutDT = cms.double(10.0),
Chi2CutRPC = cms.double(1.0),
DTRecSegmentLabel = cms.InputTag("hltDt4DSegments"),
DYTthrs = cms.vint32(30, 15),
DoPredictionsOnly = cms.bool(False),
Fitter = cms.string('hltESPL3MuKFTrajectoryFitter'),
HitThreshold = cms.int32(1),
MuonHitsOption = cms.int32(1),
MuonRecHitBuilder = cms.string('hltESPMuonTransientTrackingRecHitBuilder'),
PropDirForCosmics = cms.bool(False),
Propagator = cms.string('hltESPSmartPropagatorAny'),
RefitDirection = cms.string('insideOut'),
RefitRPCHits = cms.bool(True),
SkipStation = cms.int32(-1),
TrackerRecHitBuilder = cms.string('hltESPTTRHBWithTrackAngle'),
TrackerSkipSection = cms.int32(-1),
TrackerSkipSystem = cms.int32(-1)
),
GlobalMuonTrackMatcher = cms.PSet(
Chi2Cut_1 = cms.double(50.0),
Chi2Cut_2 = cms.double(50.0),
Chi2Cut_3 = cms.double(200.0),
DeltaDCut_1 = cms.double(40.0),
DeltaDCut_2 = cms.double(10.0),
DeltaDCut_3 = cms.double(15.0),
DeltaRCut_1 = cms.double(0.1),
DeltaRCut_2 = cms.double(0.2),
DeltaRCut_3 = cms.double(1.0),
Eta_threshold = cms.double(1.2),
LocChi2Cut = cms.double(0.001),
MinP = cms.double(2.5),
MinPt = cms.double(1.0),
Propagator = cms.string('hltESPSmartPropagator'),
Pt_threshold1 = cms.double(0.0),
Pt_threshold2 = cms.double(999999999.0),
Quality_1 = cms.double(20.0),
Quality_2 = cms.double(15.0),
Quality_3 = cms.double(7.0)
),
MuonRecHitBuilder = cms.string('hltESPMuonTransientTrackingRecHitBuilder'),
MuonTrackingRegionBuilder = cms.PSet(
refToPSet_ = cms.string('HLTPSetMuonTrackingRegionBuilder8356')
),
PCut = cms.double(2.5),
PtCut = cms.double(1.0),
RefitRPCHits = cms.bool(True),
ScaleTECxFactor = cms.double(-1.0),
ScaleTECyFactor = cms.double(-1.0),
TrackTransformer = cms.PSet(
DoPredictionsOnly = cms.bool(False),
Fitter = cms.string('hltESPL3MuKFTrajectoryFitter'),
MuonRecHitBuilder = cms.string('hltESPMuonTransientTrackingRecHitBuilder'),
Propagator = cms.string('hltESPSmartPropagatorAny'),
RefitDirection = cms.string('insideOut'),
RefitRPCHits = cms.bool(True),
Smoother = cms.string('hltESPKFTrajectorySmootherForMuonTrackLoader'),
TrackerRecHitBuilder = cms.string('hltESPTTRHBWithTrackAngle')
),
TrackerPropagator = cms.string('SteppingHelixPropagatorAny'),
TrackerRecHitBuilder = cms.string('hltESPTTRHBWithTrackAngle'),
tkTrajBeamSpot = cms.InputTag("hltOnlineBeamSpot"),
tkTrajLabel = cms.InputTag("hltL3TkTracksFromL2IOHit"),
tkTrajMaxChi2 = cms.double(9999.0),
tkTrajMaxDXYBeamSpot = cms.double(0.2),
tkTrajUseVertex = cms.bool(False),
tkTrajVertex = cms.InputTag("pixelVertices")
),
MuonCollectionLabel = cms.InputTag("hltL2Muons","UpdatedAtVtx"),
ServiceParameters = cms.PSet(
Propagators = cms.untracked.vstring('hltESPSmartPropagatorAny',
'SteppingHelixPropagatorAny',
'hltESPSmartPropagator',
'hltESPSteppingHelixPropagatorOpposite'),
RPCLayers = cms.bool(True),
UseMuonNavigation = cms.untracked.bool(True)
),
TrackLoaderParameters = cms.PSet(
DoSmoothing = cms.bool(True),
MuonSeededTracksInstance = cms.untracked.string('L2Seeded'),
MuonUpdatorAtVertexParameters = cms.PSet(
BeamSpotPositionErrors = cms.vdouble(0.1, 0.1, 5.3),
MaxChi2 = cms.double(1000000.0),
Propagator = cms.string('hltESPSteppingHelixPropagatorOpposite')
),
PutTkTrackIntoEvent = cms.untracked.bool(False),
SmoothTkTrack = cms.untracked.bool(False),
Smoother = cms.string('hltESPKFTrajectorySmootherForMuonTrackLoader'),
TTRHBuilder = cms.string('hltESPTTRHBWithTrackAngle'),
VertexConstraint = cms.bool(False),
beamSpot = cms.InputTag("hltOnlineBeamSpot")
)
)
process.hltL3MuonsLinksCombination = cms.EDProducer("L3TrackLinksCombiner",
labels = cms.VInputTag("hltL3MuonsOIState", "hltL3MuonsOIHit", "hltL3MuonsIOHit")
)
process.hltL3MuonsOIHit = cms.EDProducer("L3MuonProducer",
L3TrajBuilderParameters = cms.PSet(
GlbRefitterParameters = cms.PSet(
CSCRecSegmentLabel = cms.InputTag("hltCscSegments"),
Chi2CutCSC = cms.double(150.0),
Chi2CutDT = cms.double(10.0),
Chi2CutRPC = cms.double(1.0),
DTRecSegmentLabel = cms.InputTag("hltDt4DSegments"),
DYTthrs = cms.vint32(30, 15),
DoPredictionsOnly = cms.bool(False),
Fitter = cms.string('hltESPL3MuKFTrajectoryFitter'),
HitThreshold = cms.int32(1),
MuonHitsOption = cms.int32(1),
MuonRecHitBuilder = cms.string('hltESPMuonTransientTrackingRecHitBuilder'),
PropDirForCosmics = cms.bool(False),
Propagator = cms.string('hltESPSmartPropagatorAny'),
RefitDirection = cms.string('insideOut'),
RefitRPCHits = cms.bool(True),
SkipStation = cms.int32(-1),
TrackerRecHitBuilder = cms.string('hltESPTTRHBWithTrackAngle'),
TrackerSkipSection = cms.int32(-1),
TrackerSkipSystem = cms.int32(-1)
),
GlobalMuonTrackMatcher = cms.PSet(
Chi2Cut_1 = cms.double(50.0),
Chi2Cut_2 = cms.double(50.0),
Chi2Cut_3 = cms.double(200.0),
DeltaDCut_1 = cms.double(40.0),
DeltaDCut_2 = cms.double(10.0),
DeltaDCut_3 = cms.double(15.0),
DeltaRCut_1 = cms.double(0.1),
DeltaRCut_2 = cms.double(0.2),
DeltaRCut_3 = cms.double(1.0),
Eta_threshold = cms.double(1.2),
LocChi2Cut = cms.double(0.001),
MinP = cms.double(2.5),
MinPt = cms.double(1.0),
Propagator = cms.string('hltESPSmartPropagator'),
Pt_threshold1 = cms.double(0.0),
Pt_threshold2 = cms.double(999999999.0),
Quality_1 = cms.double(20.0),
Quality_2 = cms.double(15.0),
Quality_3 = cms.double(7.0)
),
MuonRecHitBuilder = cms.string('hltESPMuonTransientTrackingRecHitBuilder'),
MuonTrackingRegionBuilder = cms.PSet(
refToPSet_ = cms.string('HLTPSetMuonTrackingRegionBuilder8356')
),
PCut = cms.double(2.5),
PtCut = cms.double(1.0),
RefitRPCHits = cms.bool(True),
ScaleTECxFactor = cms.double(-1.0),
ScaleTECyFactor = cms.double(-1.0),
TrackTransformer = cms.PSet(
DoPredictionsOnly = cms.bool(False),
Fitter = cms.string('hltESPL3MuKFTrajectoryFitter'),
MuonRecHitBuilder = cms.string('hltESPMuonTransientTrackingRecHitBuilder'),
Propagator = cms.string('hltESPSmartPropagatorAny'),
RefitDirection = cms.string('insideOut'),
RefitRPCHits = cms.bool(True),
Smoother = cms.string('hltESPKFTrajectorySmootherForMuonTrackLoader'),
TrackerRecHitBuilder = cms.string('hltESPTTRHBWithTrackAngle')
),
TrackerPropagator = cms.string('SteppingHelixPropagatorAny'),
TrackerRecHitBuilder = cms.string('hltESPTTRHBWithTrackAngle'),
tkTrajBeamSpot = cms.InputTag("hltOnlineBeamSpot"),
tkTrajLabel = cms.InputTag("hltL3TkTracksFromL2OIHit"),
tkTrajMaxChi2 = cms.double(9999.0),
tkTrajMaxDXYBeamSpot = cms.double(0.2),
tkTrajUseVertex = cms.bool(False),
tkTrajVertex = cms.InputTag("pixelVertices")
),
MuonCollectionLabel = cms.InputTag("hltL2Muons","UpdatedAtVtx"),
ServiceParameters = cms.PSet(
Propagators = cms.untracked.vstring('hltESPSmartPropagatorAny',
'SteppingHelixPropagatorAny',
'hltESPSmartPropagator',
'hltESPSteppingHelixPropagatorOpposite'),
RPCLayers = cms.bool(True),
UseMuonNavigation = cms.untracked.bool(True)
),
TrackLoaderParameters = cms.PSet(
DoSmoothing = cms.bool(True),
MuonSeededTracksInstance = cms.untracked.string('L2Seeded'),
MuonUpdatorAtVertexParameters = cms.PSet(
BeamSpotPositionErrors = cms.vdouble(0.1, 0.1, 5.3),
MaxChi2 = cms.double(1000000.0),
Propagator = cms.string('hltESPSteppingHelixPropagatorOpposite')
),
PutTkTrackIntoEvent = cms.untracked.bool(False),
SmoothTkTrack = cms.untracked.bool(False),
Smoother = cms.string('hltESPKFTrajectorySmootherForMuonTrackLoader'),
TTRHBuilder = cms.string('hltESPTTRHBWithTrackAngle'),
VertexConstraint = cms.bool(False),
beamSpot = cms.InputTag("hltOnlineBeamSpot")
)
)
process.hltL3MuonsOIState = cms.EDProducer("L3MuonProducer",
L3TrajBuilderParameters = cms.PSet(
GlbRefitterParameters = cms.PSet(
CSCRecSegmentLabel = cms.InputTag("hltCscSegments"),
Chi2CutCSC = cms.double(150.0),
Chi2CutDT = cms.double(10.0),
Chi2CutRPC = cms.double(1.0),
DTRecSegmentLabel = cms.InputTag("hltDt4DSegments"),
DYTthrs = cms.vint32(30, 15),
DoPredictionsOnly = cms.bool(False),
Fitter = cms.string('hltESPL3MuKFTrajectoryFitter'),
HitThreshold = cms.int32(1),
MuonHitsOption = cms.int32(1),
MuonRecHitBuilder = cms.string('hltESPMuonTransientTrackingRecHitBuilder'),
PropDirForCosmics = cms.bool(False),
Propagator = cms.string('hltESPSmartPropagatorAny'),
RefitDirection = cms.string('insideOut'),
RefitRPCHits = cms.bool(True),
SkipStation = cms.int32(-1),
TrackerRecHitBuilder = cms.string('hltESPTTRHBWithTrackAngle'),
TrackerSkipSection = cms.int32(-1),
TrackerSkipSystem = cms.int32(-1)
),
GlobalMuonTrackMatcher = cms.PSet(
Chi2Cut_1 = cms.double(50.0),
Chi2Cut_2 = cms.double(50.0),
Chi2Cut_3 = cms.double(200.0),
DeltaDCut_1 = cms.double(40.0),
DeltaDCut_2 = cms.double(10.0),
DeltaDCut_3 = cms.double(15.0),
DeltaRCut_1 = cms.double(0.1),
DeltaRCut_2 = cms.double(0.2),
DeltaRCut_3 = cms.double(1.0),
Eta_threshold = cms.double(1.2),
LocChi2Cut = cms.double(0.001),
MinP = cms.double(2.5),
MinPt = cms.double(1.0),
Propagator = cms.string('hltESPSmartPropagator'),
Pt_threshold1 = cms.double(0.0),
Pt_threshold2 = cms.double(999999999.0),
Quality_1 = cms.double(20.0),
Quality_2 = cms.double(15.0),
Quality_3 = cms.double(7.0)
),
MuonRecHitBuilder = cms.string('hltESPMuonTransientTrackingRecHitBuilder'),
MuonTrackingRegionBuilder = cms.PSet(
refToPSet_ = cms.string('HLTPSetMuonTrackingRegionBuilder8356')
),
PCut = cms.double(2.5),
PtCut = cms.double(1.0),
RefitRPCHits = cms.bool(True),
ScaleTECxFactor = cms.double(-1.0),
ScaleTECyFactor = cms.double(-1.0),
TrackTransformer = cms.PSet(
DoPredictionsOnly = cms.bool(False),
Fitter = cms.string('hltESPL3MuKFTrajectoryFitter'),
MuonRecHitBuilder = cms.string('hltESPMuonTransientTrackingRecHitBuilder'),
Propagator = cms.string('hltESPSmartPropagatorAny'),
RefitDirection = cms.string('insideOut'),
RefitRPCHits = cms.bool(True),
Smoother = cms.string('hltESPKFTrajectorySmootherForMuonTrackLoader'),
TrackerRecHitBuilder = cms.string('hltESPTTRHBWithTrackAngle')
),
TrackerPropagator = cms.string('SteppingHelixPropagatorAny'),
TrackerRecHitBuilder = cms.string('hltESPTTRHBWithTrackAngle'),
tkTrajBeamSpot = cms.InputTag("hltOnlineBeamSpot"),
tkTrajLabel = cms.InputTag("hltL3TkTracksFromL2OIState"),
tkTrajMaxChi2 = cms.double(9999.0),
tkTrajMaxDXYBeamSpot = cms.double(0.2),
tkTrajUseVertex = cms.bool(False),
tkTrajVertex = cms.InputTag("pixelVertices")
),
MuonCollectionLabel = cms.InputTag("hltL2Muons","UpdatedAtVtx"),
ServiceParameters = cms.PSet(
Propagators = cms.untracked.vstring('hltESPSmartPropagatorAny',
'SteppingHelixPropagatorAny',
'hltESPSmartPropagator',
'hltESPSteppingHelixPropagatorOpposite'),
RPCLayers = cms.bool(True),
UseMuonNavigation = cms.untracked.bool(True)
),
TrackLoaderParameters = cms.PSet(
DoSmoothing = cms.bool(True),
MuonSeededTracksInstance = cms.untracked.string('L2Seeded'),
MuonUpdatorAtVertexParameters = cms.PSet(
BeamSpotPositionErrors = cms.vdouble(0.1, 0.1, 5.3),
MaxChi2 = cms.double(1000000.0),
Propagator = cms.string('hltESPSteppingHelixPropagatorOpposite')
),
PutTkTrackIntoEvent = cms.untracked.bool(False),
SmoothTkTrack = cms.untracked.bool(False),
Smoother = cms.string('hltESPKFTrajectorySmootherForMuonTrackLoader'),
TTRHBuilder = cms.string('hltESPTTRHBWithTrackAngle'),
VertexConstraint = cms.bool(False),
beamSpot = cms.InputTag("hltOnlineBeamSpot")
)
)
process.hltL3TkFromL2OICombination = cms.EDProducer("L3TrackCombiner",
labels = cms.VInputTag("hltL3MuonsOIState", "hltL3MuonsOIHit")
)
process.hltL3TkTracksFromL2 = cms.EDProducer("TrackListMerger",
Epsilon = cms.double(-0.001),
FoundHitBonus = cms.double(100.0),
LostHitPenalty = cms.double(0.0),
MaxNormalizedChisq = cms.double(1000.0),
MinFound = cms.int32(3),
MinPT = cms.double(0.05),
ShareFrac = cms.double(0.19),
TrackProducers = cms.VInputTag("hltL3TkTracksMergeStep1", "hltL3TkTracksFromL2IOHit"),
allowFirstHitShare = cms.bool(True),
copyExtras = cms.untracked.bool(True),
copyMVA = cms.bool(False),
hasSelector = cms.vint32(0, 0),
indivShareFrac = cms.vdouble(1.0, 1.0),
newQuality = cms.string('confirmed'),
selectedTrackQuals = cms.VInputTag("hltL3TkTracksMergeStep1", "hltL3TkTracksFromL2IOHit"),
setsToMerge = cms.VPSet(cms.PSet(
pQual = cms.bool(False),
tLists = cms.vint32(0, 1)
)),
writeOnlyTrkQuals = cms.bool(False)
)
process.hltL3TkTracksFromL2IOHit = cms.EDProducer("TrackProducer",
AlgorithmName = cms.string('hltIterX'),
Fitter = cms.string('hltESPKFFittingSmoother'),
GeometricInnerState = cms.bool(True),
MeasurementTracker = cms.string(''),
MeasurementTrackerEvent = cms.InputTag("hltSiStripClusters"),
NavigationSchool = cms.string(''),
Propagator = cms.string('PropagatorWithMaterial'),
SimpleMagneticField = cms.string(''),
TTRHBuilder = cms.string('hltESPTTRHBWithTrackAngle'),
TrajectoryInEvent = cms.bool(True),
alias = cms.untracked.string(''),
beamSpot = cms.InputTag("hltOnlineBeamSpot"),
clusterRemovalInfo = cms.InputTag(""),
src = cms.InputTag("hltL3TrackCandidateFromL2IOHit"),
useHitsSplitting = cms.bool(False),
useSimpleMF = cms.bool(False)
)
process.hltL3TkTracksFromL2OIHit = cms.EDProducer("TrackProducer",
AlgorithmName = cms.string('hltIterX'),
Fitter = cms.string('hltESPKFFittingSmoother'),
GeometricInnerState = cms.bool(True),
MeasurementTracker = cms.string(''),
MeasurementTrackerEvent = cms.InputTag("hltSiStripClusters"),
NavigationSchool = cms.string(''),
Propagator = cms.string('PropagatorWithMaterial'),
SimpleMagneticField = cms.string(''),
TTRHBuilder = cms.string('hltESPTTRHBWithTrackAngle'),
TrajectoryInEvent = cms.bool(True),
alias = cms.untracked.string(''),
beamSpot = cms.InputTag("hltOnlineBeamSpot"),
clusterRemovalInfo = cms.InputTag(""),
src = cms.InputTag("hltL3TrackCandidateFromL2OIHit"),
useHitsSplitting = cms.bool(False),
useSimpleMF = cms.bool(False)
)
process.hltL3TkTracksFromL2OIState = cms.EDProducer("TrackProducer",
AlgorithmName = cms.string('hltIterX'),
Fitter = cms.string('hltESPKFFittingSmoother'),
GeometricInnerState = cms.bool(True),
MeasurementTracker = cms.string(''),
MeasurementTrackerEvent = cms.InputTag("hltSiStripClusters"),
NavigationSchool = cms.string(''),
Propagator = cms.string('PropagatorWithMaterial'),
SimpleMagneticField = cms.string(''),
TTRHBuilder = cms.string('hltESPTTRHBWithTrackAngle'),
TrajectoryInEvent = cms.bool(True),
alias = cms.untracked.string(''),
beamSpot = cms.InputTag("hltOnlineBeamSpot"),
clusterRemovalInfo = cms.InputTag(""),
src = cms.InputTag("hltL3TrackCandidateFromL2OIState"),
useHitsSplitting = cms.bool(False),
useSimpleMF = cms.bool(False)
)
process.hltL3TkTracksMergeStep1 = cms.EDProducer("TrackListMerger",
Epsilon = | |
= root_folder_path
        # TODO: if os.name == 'nt' and len(root_folder_path) == 2 and root_folder_path[1] == ':': self.macos_root_folder += '\\'
if self.is_linux:
log.warning('Since this is a linux (mounted) system, there is no way for python to extract created_date timestamps. '\
                        'This is a limitation of Python. Created timestamps shown/seen will actually be the same as the Last_Modified timestamps.')
def BuildFullPath(self, path_in_image):
'''
Takes path inside image as input and returns the full path on current volume
Eg: Image mounted at D:\Images\mac_osx\ Path=\etc\hosts Return= D:\Images\mac_osx\etc\hosts
'''
full_path = ''
path = path_in_image
# remove leading / for os.path.join()
if path != '/' and path.startswith('/'):
path = path[1:]
if self.is_windows:
path = path.replace('/', '\\')
try:
full_path = os.path.join(self.macos_root_folder, path)
except Exception:
log.error("Exception in BuildFullPath(), path was " + path_in_image)
log.exception("Exception details")
#log.debug("req={} final={}".format(path_in_image, full_path))
return full_path
def _get_creation_time(self, local_path):
if self.is_windows:
return CommonFunctions.ReadUnixTime(os.path.getctime(local_path))
elif self.is_linux:
try:
t = statx(local_path).get_btime() # New Linux kernel 4+ has this ability
except (OSError, ValueError) as ex:
t = 0 # Old linux kernel that does not support statx
if t != 0:
return CommonFunctions.ReadUnixTime(t)
            else: # Either an old Linux kernel, or a version of FUSE that does not populate btime (the current one does not)!
return CommonFunctions.ReadUnixTime(os.path.getmtime(local_path)) # Since this is not possible to fetch in Linux (using python)!
else:
return CommonFunctions.ReadUnixTime(os.stat(local_path).st_birthtime)
def GetFileMACTimes(self, file_path):
file_path = self.BuildFullPath(file_path)
times = { 'c_time':None, 'm_time':None, 'cr_time':None, 'a_time':None }
try:
times['c_time'] = None if self.is_windows else CommonFunctions.ReadUnixTime(os.path.getctime(file_path))
times['m_time'] = CommonFunctions.ReadUnixTime(os.path.getmtime(file_path))
times['cr_time'] = self._get_creation_time(file_path)
times['a_time'] = CommonFunctions.ReadUnixTime(os.path.getatime(file_path))
except OSError as ex:
log.exception('Error trying to get MAC times')
return times
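    # Usage sketch (illustrative only): given an instance of this mounted-image class
    # (called `mac_info` here for illustration), the returned dict always carries the
    # four keys, with None for values that could not be read on the current host OS.
    #
    #     times = mac_info.GetFileMACTimes('/private/etc/hosts')
    #     print(times['m_time'], times['cr_time'])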
def IsSymbolicLink(self, path):
try:
return os.path.islink(self.BuildFullPath(path))
except OSError as ex:
log.exception("Exception in IsSymbolicLink() for path : {} " + path)
return False
def IsValidFilePath(self, path):
try:
return os.path.lexists(self.BuildFullPath(path))
except OSError as ex:
log.exception("Exception in IsValidFilePath() for path : {} " + path)
return False
def IsValidFolderPath(self, path):
return self.IsValidFilePath(path)
def _GetFileSizeNoPathMod(self, full_path, error=None):
'''Simply calls os.path.getsize(), BEWARE-does not build full path!'''
try:
return os.path.getsize(full_path)
except OSError as ex:
log.error("Exception in _GetFileSizeNoPathMod() : " + str(ex))
return error
def GetFileSize(self, full_path, error=None):
'''Builds full path, then gets size'''
try:
return os.path.getsize(self.BuildFullPath(full_path))
except OSError as ex:
log.debug("Exception in GetFileSize() : " + str(ex) + " Perhaps file does not exist: " + full_path)
return error
def GetUserAndGroupIDForFile(self, path):
return self._GetUserAndGroupID(self.BuildFullPath(path))
def GetUserAndGroupIDForFolder(self, path):
return self._GetUserAndGroupID(self.BuildFullPath(path))
def ListItemsInFolder(self, path='/', types_to_fetch=EntryType.FILES_AND_FOLDERS, include_dates=False):
'''
Returns a list of files and/or folders in a list
Format of list = [ {'name':'got.txt', 'type':EntryType.FILES, 'size':10}, .. ]
'path' should be linux style using forward-slash like '/var/db/xxyy/file.tdc'
and starting at root /
'''
items = [] # List of dictionaries
try:
mounted_path = self.BuildFullPath(path)
dir = os.listdir(mounted_path)
for entry in dir:
# Exclude the mounted encase <file>.Stream which is uncompressed stream of file,
# not needed as we have the actual file
if entry.find('\xB7Stream') >= 0 or entry.find('\xB7Resource') >= 0:
log.debug(f'Excluding {entry} as it is raw stream not FILE. If you think this should be included, let the developers know!')
continue
newpath = os.path.join(mounted_path, entry)
entry_type = EntryType.FOLDERS if os.path.isdir(newpath) else EntryType.FILES
item = { 'name':entry, 'type':entry_type, 'size':self._GetFileSizeNoPathMod(newpath, 0)}
if include_dates:
item['dates'] = self.GetFileMACTimes(path + '/' + entry)
if types_to_fetch == EntryType.FILES_AND_FOLDERS:
items.append( item )
elif types_to_fetch == EntryType.FILES and entry_type == EntryType.FILES:
items.append( item )
elif types_to_fetch == EntryType.FOLDERS and entry_type == EntryType.FOLDERS:
items.append( item )
except FileNotFoundError as ex:
if str(ex).find('There are no more files') >= 0: # known windows issue on some empty folders!! '[WinError 18] There are no more files:...'
pass
else:
log.debug("Path not found : " + mounted_path)
except Exception as ex:
log.exception('')
            if str(ex).find('cannot find the path specified') >= 0:
log.debug("Path not found : " + mounted_path)
else:
log.debug("Problem accessing path : " + mounted_path)
log.debug("Exception details:\n", exc_info=True) #traceback.print_exc()
log.error("Failed to get dir info!")
return items
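    # Usage sketch (illustrative only): `mac_info` is an assumed instance of this class.
    #
    #     for item in mac_info.ListItemsInFolder('/Users', EntryType.FOLDERS, include_dates=True):
    #         print(item['name'], item['size'], item['dates']['m_time'])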
def ReadSymLinkTargetPath(self, path):
'''Returns the target file/folder's path from the sym link path provided'''
target_path = ''
try:
if not self.is_windows:
target_path = os.readlink(self.BuildFullPath(path))
else:
target_path = super().ReadSymLinkTargetPath(path)
except:
log.exception("Error resolving symlink : " + path)
return target_path
def Open(self, path):
try:
mounted_path = self.BuildFullPath(path)
log.debug("Trying to open file : " + mounted_path)
file = MountedFile().open(mounted_path, 'rb')
return file
except (OSError) as ex:
log.exception("Error opening file : " + mounted_path)
return None
def ExtractFile(self, path_in_image, destination_path):
source_file = self.Open(path_in_image)
if source_file:
size = self.GetFileSize(path_in_image)
BUFF_SIZE = 20 * 1024 * 1024
offset = 0
try:
with open(destination_path, 'wb') as f:
while offset < size:
available_to_read = min(BUFF_SIZE, size - offset)
data = source_file.read(available_to_read)
if not data: break
offset += len(data)
f.write(data)
f.flush()
except (OSError) as ex:
log.exception ("Failed to create file for writing at " + destination_path)
source_file.close()
return False
source_file.close()
return True
return False
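    # The loop above is a plain chunked copy capped at 20 MB reads. A standalone sketch of the
    # same technique (hypothetical helper, not referenced elsewhere in this class):
    #   def copy_in_chunks(src, dst, total_size, buff_size=20 * 1024 * 1024):
    #       copied = 0
    #       while copied < total_size:
    #           data = src.read(min(buff_size, total_size - copied))
    #           if not data: break
    #           dst.write(data)
    #           copied += len(data)
    #       return copied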
def _GetUserAndGroupID(self, path):
'''
Returns tuple (success, UID, GID) for object identified by path.
UID & GID are returned as strings.
If failed to get values, success=False
'''
success, uid, gid = False, 0, 0
try:
stat = os.stat(path)
uid = str(stat.st_uid)
gid = str(stat.st_gid)
success = True
except OSError as ex:
log.error("Exception trying to get uid & gid for file " + path + ' Exception details: ' + str(ex))
return success, uid, gid
def _GetDarwinFoldersInfo(self):
'''Gets DARWIN_*_DIR paths '''
if not self.is_windows:
# Unix/Linux or Mac mounted disks should preserve UID/GID, so we can read it normally from the files.
super()._GetDarwinFoldersInfo()
return
for user in self.users:
if user.UUID != '' and user.UID not in ('', '-2', '1', '201'): # Users nobody, daemon, guest don't have one
darwin_path = '/private/var/folders/' + GetDarwinPath2(user.UUID, user.UID)
if not self.IsValidFolderPath(darwin_path):
darwin_path = '/private/var/folders/' + GetDarwinPath(user.UUID, user.UID)
if not self.IsValidFolderPath(darwin_path):
if user.user_name.startswith('_') and user.UUID.upper().startswith('FFFFEEEE'):
pass
else:
log.warning(f'Could not find DARWIN_PATH for user {user.user_name}, uid={user.UID}, uuid={user.UUID}')
continue
user.DARWIN_USER_DIR = darwin_path + '/0'
user.DARWIN_USER_CACHE_DIR = darwin_path + '/C'
user.DARWIN_USER_TEMP_DIR = darwin_path + '/T'
def _GetDomainUserInfo(self):
if not self.is_windows:
# Unix/Linux or Mac mounted disks should preserve UID/GID, so we can read it normally from the files.
super()._GetDomainUserInfo()
return
log.debug('Trying to get domain profiles from /Users/')
domain_users = []
users_folder = self.ListItemsInFolder('/Users/', EntryType.FOLDERS)
for folder in users_folder:
folder_name = folder['name']
if folder_name in ('Shared', 'root'):
continue
found_user = False
for user in self.users:
if user.user_name == folder_name:
found_user = True # Existing local user
break
if found_user: continue
else:
log.info(f'Found a domain user {folder_name} or deleted user?')
target_user = UserInfo()
domain_users.append(target_user)
target_user.home_dir = '/Users/' + folder_name
target_user.user_name = folder_name
target_user.real_name = folder_name
target_user._source = '/Users/' + folder_name
if domain_users:
known_darwin_paths = set()
for user in self.users:
if user.UID and user.UUID and not user.UID.startswith('-'):
known_darwin_paths.add('/private/var/folders/' + GetDarwinPath(user.UUID, user.UID)) # They haven't been populated yet in user!
known_darwin_paths.add('/private/var/folders/' + GetDarwinPath2(user.UUID, user.UID))
# try to get darwin_cache folders
var_folders = self.ListItemsInFolder('/private/var/folders', EntryType.FOLDERS)
for level_1 in var_folders:
name_1 = level_1['name']
var_folders_level_2 = self.ListItemsInFolder(f'/private/var/folders/{name_1}', EntryType.FOLDERS)
for level_2 in var_folders_level_2:
darwin_path = f'/private/var/folders/{name_1}/' + level_2['name']
if darwin_path in known_darwin_paths:
continue
else:
matched_darwin_path_to_user = False
font_reg_db = darwin_path + '/C/com.apple.FontRegistry/fontregistry.user'
if self.IsValidFilePath(font_reg_db):
try:
sqlite_wrapper = SqliteWrapper(self)
db = sqlite_wrapper.connect(font_reg_db)
if db:
cursor = db.cursor()
cursor.execute('SELECT path_column from dir_table WHERE domain_column=1')
user_path = ''
for row in cursor:
user_path = row[0]
break
cursor.close()
db.close()
if user_path:
if user_path.startswith('/Users/'):
username = user_path.split('/')[2]
for dom_user in domain_users:
if dom_user.user_name == username:
dom_user.DARWIN_USER_DIR = darwin_path + '/0'
dom_user.DARWIN_USER_TEMP_DIR = darwin_path + '/T'
dom_user.DARWIN_USER_CACHE_DIR = darwin_path + '/C'
log.debug(f'Found darwin path for user {username}')
matched_darwin_path_to_user = True
# Try to get uid now.
if self.IsValidFolderPath(dom_user.DARWIN_USER_DIR + '/com.apple.LaunchServices.dv'):
for item in self.ListItemsInFolder(dom_user.DARWIN_USER_DIR + '/com.apple.LaunchServices.dv', EntryType.FILES):
name = item['name']
if name.startswith('com.apple.LaunchServices.trustedsignatures-') and name.endswith('.db'):
dom_user.UID = name[43:-3]
break
break
else:
log.error(f'user profile path was non-standard - {user_path}')
else:
log.error('Query did not yield any output!')
if not matched_darwin_path_to_user:
log.error(f'Could not find mapping for darwin folder {darwin_path} to user')
except sqlite3.Error:
log.exception(f'Error reading {font_reg_db}, Cannot map darwin folder to user profile!')
else:
log.error(f'Could not find {font_reg_db}, Cannot map darwin folder to user profile!')
self.users.extend(domain_users)
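# A minimal standalone sketch of the darwin-folder-to-user mapping used in _GetDomainUserInfo()
# above. Assumptions: the helper name is hypothetical and the fontregistry.user database has
# already been exported to a locally readable path; the query is the same one issued above.
def map_darwin_folder_to_username(fontregistry_db_path):
    '''Return the user name a darwin folder belongs to, or None if it cannot be derived.'''
    import sqlite3
    username = None
    try:
        db = sqlite3.connect(fontregistry_db_path)
        cursor = db.cursor()
        cursor.execute('SELECT path_column from dir_table WHERE domain_column=1')
        row = cursor.fetchone()
        cursor.close()
        db.close()
        if row and row[0].startswith('/Users/'):
            username = row[0].split('/')[2]   # '/Users/<name>/...' -> '<name>'
    except sqlite3.Error:
        pass
    return username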
class MountedMacInfoSeperateSysData(MountedMacInfo):
    '''Same as MountedMacInfo, but for images where the System and Data volumes are mounted separately'''
# tencentcloud/bmeip/v20180625/bmeip_client.py
# -*- coding: utf8 -*-
# Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.common.abstract_client import AbstractClient
from tencentcloud.bmeip.v20180625 import models
class BmeipClient(AbstractClient):
_apiVersion = '2018-06-25'
_endpoint = 'bmeip.tencentcloudapi.com'
def BindEipAcls(self, request):
"""此接口用于为某个 EIP 关联 ACL。
:param request: Request instance for BindEipAcls.
:type request: :class:`tencentcloud.bmeip.v20180625.models.BindEipAclsRequest`
:rtype: :class:`tencentcloud.bmeip.v20180625.models.BindEipAclsResponse`
"""
try:
params = request._serialize()
body = self.call("BindEipAcls", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.BindEipAclsResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def BindHosted(self, request):
"""BindHosted接口用于绑定黑石弹性公网IP到黑石托管机器上
:param request: Request instance for BindHosted.
:type request: :class:`tencentcloud.bmeip.v20180625.models.BindHostedRequest`
:rtype: :class:`tencentcloud.bmeip.v20180625.models.BindHostedResponse`
"""
try:
params = request._serialize()
body = self.call("BindHosted", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.BindHostedResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def BindRs(self, request):
"""绑定黑石EIP
:param request: Request instance for BindRs.
:type request: :class:`tencentcloud.bmeip.v20180625.models.BindRsRequest`
:rtype: :class:`tencentcloud.bmeip.v20180625.models.BindRsResponse`
"""
try:
params = request._serialize()
body = self.call("BindRs", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.BindRsResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def BindVpcIp(self, request):
"""黑石EIP绑定VPCIP
:param request: Request instance for BindVpcIp.
:type request: :class:`tencentcloud.bmeip.v20180625.models.BindVpcIpRequest`
:rtype: :class:`tencentcloud.bmeip.v20180625.models.BindVpcIpResponse`
"""
try:
params = request._serialize()
body = self.call("BindVpcIp", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.BindVpcIpResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def CreateEip(self, request):
"""创建黑石弹性公网IP
:param request: Request instance for CreateEip.
:type request: :class:`tencentcloud.bmeip.v20180625.models.CreateEipRequest`
:rtype: :class:`tencentcloud.bmeip.v20180625.models.CreateEipResponse`
"""
try:
params = request._serialize()
body = self.call("CreateEip", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.CreateEipResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def CreateEipAcl(self, request):
"""创建黑石弹性公网 EIPACL
:param request: Request instance for CreateEipAcl.
:type request: :class:`tencentcloud.bmeip.v20180625.models.CreateEipAclRequest`
:rtype: :class:`tencentcloud.bmeip.v20180625.models.CreateEipAclResponse`
"""
try:
params = request._serialize()
body = self.call("CreateEipAcl", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.CreateEipAclResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DeleteEip(self, request):
"""释放黑石弹性公网IP
:param request: Request instance for DeleteEip.
:type request: :class:`tencentcloud.bmeip.v20180625.models.DeleteEipRequest`
:rtype: :class:`tencentcloud.bmeip.v20180625.models.DeleteEipResponse`
"""
try:
params = request._serialize()
body = self.call("DeleteEip", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DeleteEipResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DeleteEipAcl(self, request):
"""删除弹性公网IP ACL
:param request: Request instance for DeleteEipAcl.
:type request: :class:`tencentcloud.bmeip.v20180625.models.DeleteEipAclRequest`
:rtype: :class:`tencentcloud.bmeip.v20180625.models.DeleteEipAclResponse`
"""
try:
params = request._serialize()
body = self.call("DeleteEipAcl", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DeleteEipAclResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeEipAcls(self, request):
"""查询弹性公网IP ACL
:param request: Request instance for DescribeEipAcls.
:type request: :class:`tencentcloud.bmeip.v20180625.models.DescribeEipAclsRequest`
:rtype: :class:`tencentcloud.bmeip.v20180625.models.DescribeEipAclsResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeEipAcls", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeEipAclsResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeEipQuota(self, request):
"""查询黑石EIP 限额
:param request: Request instance for DescribeEipQuota.
:type request: :class:`tencentcloud.bmeip.v20180625.models.DescribeEipQuotaRequest`
:rtype: :class:`tencentcloud.bmeip.v20180625.models.DescribeEipQuotaResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeEipQuota", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeEipQuotaResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeEipTask(self, request):
"""黑石EIP查询任务状态
:param request: Request instance for DescribeEipTask.
:type request: :class:`tencentcloud.bmeip.v20180625.models.DescribeEipTaskRequest`
:rtype: :class:`tencentcloud.bmeip.v20180625.models.DescribeEipTaskResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeEipTask", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeEipTaskResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeEips(self, request):
"""黑石EIP查询接口
:param request: Request instance for DescribeEips.
:type request: :class:`tencentcloud.bmeip.v20180625.models.DescribeEipsRequest`
:rtype: :class:`tencentcloud.bmeip.v20180625.models.DescribeEipsResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeEips", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeEipsResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def ModifyEipAcl(self, request):
"""修改弹性公网IP ACL
:param request: Request instance for ModifyEipAcl.
:type request: :class:`tencentcloud.bmeip.v20180625.models.ModifyEipAclRequest`
:rtype: :class:`tencentcloud.bmeip.v20180625.models.ModifyEipAclResponse`
"""
try:
params = request._serialize()
body = self.call("ModifyEipAcl", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.ModifyEipAclResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def ModifyEipCharge(self, request):
"""黑石EIP修改计费方式
:param request: Request instance for ModifyEipCharge.
:type request: :class:`tencentcloud.bmeip.v20180625.models.ModifyEipChargeRequest`
:rtype: :class:`tencentcloud.bmeip.v20180625.models.ModifyEipChargeResponse`
"""
try:
params = request._serialize()
body = self.call("ModifyEipCharge", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.ModifyEipChargeResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def ModifyEipName(self, request):
"""更新黑石EIP名称
:param request: Request instance for ModifyEipName.
:type request: :class:`tencentcloud.bmeip.v20180625.models.ModifyEipNameRequest`
:rtype: :class:`tencentcloud.bmeip.v20180625.models.ModifyEipNameResponse`
"""
try:
params = request._serialize()
body = self.call("ModifyEipName", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.ModifyEipNameResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def UnbindEipAcls(self, request):
"""解绑弹性公网IP ACL
:param request: Request instance for UnbindEipAcls.
:type request: :class:`tencentcloud.bmeip.v20180625.models.UnbindEipAclsRequest`
:rtype: :class:`tencentcloud.bmeip.v20180625.models.UnbindEipAclsResponse`
"""
try:
params = request._serialize()
body = self.call("UnbindEipAcls", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.UnbindEipAclsResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def UnbindHosted(self, request):
"""UnbindHosted接口用于解绑托管机器上的EIP
:param request: Request instance for UnbindHosted.
:type request: :class:`tencentcloud.bmeip.v20180625.models.UnbindHostedRequest`
:rtype: :class:`tencentcloud.bmeip.v20180625.models.UnbindHostedResponse`
"""
try:
params = request._serialize()
body = self.call("UnbindHosted", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.UnbindHostedResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def UnbindRs(self, request):
"""解绑黑石EIP
:param request: Request instance for UnbindRs.
:type request: :class:`tencentcloud.bmeip.v20180625.models.UnbindRsRequest`
:rtype: :class:`tencentcloud.bmeip.v20180625.models.UnbindRsResponse`
"""
try:
params = request._serialize()
body = self.call("UnbindRs", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.UnbindRsResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
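    # Hedged usage sketch (not part of the generated SDK): client construction follows the
    # common tencentcloud-sdk-python pattern; the region and credential values are placeholders.
    #   from tencentcloud.common import credential
    #   from tencentcloud.bmeip.v20180625 import bmeip_client, models
    #   cred = credential.Credential("YOUR_SECRET_ID", "YOUR_SECRET_KEY")
    #   client = bmeip_client.BmeipClient(cred, "ap-guangzhou")
    #   req = models.DescribeEipsRequest()
    #   resp = client.DescribeEips(req)        # returns a models.DescribeEipsResponse
    #   print(resp.to_json_string())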
def UnbindVpcIp(self, request):
"""黑石EIP解绑VPCIP
:param request: Request | |
asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_device_datasource_list_with_http_info(device_id, **kwargs) # noqa: E501
else:
(data) = self.get_device_datasource_list_with_http_info(device_id, **kwargs) # noqa: E501
return data
def get_device_datasource_list_with_http_info(self, device_id, **kwargs): # noqa: E501
"""get device datasource list # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_device_datasource_list_with_http_info(device_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int device_id: (required)
:param str fields:
:param int size:
:param int offset:
:param str filter:
:return: DeviceDatasourcePaginationResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['device_id', 'fields', 'size', 'offset', 'filter'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_device_datasource_list" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'device_id' is set
if ('device_id' not in params or
params['device_id'] is None):
raise ValueError("Missing the required parameter `device_id` when calling `get_device_datasource_list`") # noqa: E501
        if 'device_id' in params and not re.search(r'\d+', params['device_id'] if type(params['device_id']) is str else str(params['device_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `device_id` when calling `get_device_datasource_list`, must conform to the pattern `/\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'device_id' in params:
path_params['deviceId'] = params['device_id'] # noqa: E501
query_params = []
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
if 'size' in params:
query_params.append(('size', params['size'])) # noqa: E501
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'filter' in params:
query_params.append(('filter', params['filter'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['LMv1'] # noqa: E501
return self.api_client.call_api(
'/device/devices/{deviceId}/devicedatasources', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DeviceDatasourcePaginationResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_device_group_by_id(self, id, **kwargs): # noqa: E501
"""get device group # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_device_group_by_id(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: (required)
:param str fields:
:return: DeviceGroup
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_device_group_by_id_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_device_group_by_id_with_http_info(id, **kwargs) # noqa: E501
return data
def get_device_group_by_id_with_http_info(self, id, **kwargs): # noqa: E501
"""get device group # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_device_group_by_id_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: (required)
:param str fields:
:return: DeviceGroup
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'fields'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_device_group_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_device_group_by_id`") # noqa: E501
        if 'id' in params and not re.search(r'\d+', params['id'] if type(params['id']) is str else str(params['id'])): # noqa: E501
raise ValueError("Invalid value for parameter `id` when calling `get_device_group_by_id`, must conform to the pattern `/\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['LMv1'] # noqa: E501
return self.api_client.call_api(
'/device/groups/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DeviceGroup', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
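    # Hedged usage sketch: the Configuration / LMApi names below follow the usual
    # swagger-generated logicmonitor_sdk package layout and are assumptions, not
    # definitions taken from this file.
    #   import logicmonitor_sdk
    #   conf = logicmonitor_sdk.Configuration()
    #   conf.company = 'yourcompany'        # LogicMonitor portal name
    #   conf.auth_id = 'API_ACCESS_ID'      # LMv1 token id
    #   conf.auth_key = 'API_ACCESS_KEY'    # LMv1 token key
    #   api = logicmonitor_sdk.LMApi(logicmonitor_sdk.ApiClient(conf))
    #   group = api.get_device_group_by_id(1, fields='id,name,fullPath')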
def get_device_group_cluster_alert_conf_by_id(self, device_group_id, id, **kwargs): # noqa: E501
"""Get cluster alert configuration by id # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_device_group_cluster_alert_conf_by_id(device_group_id, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int device_group_id: (required)
:param int id: (required)
:return: DeviceClusterAlertConfig
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_device_group_cluster_alert_conf_by_id_with_http_info(device_group_id, id, **kwargs) # noqa: E501
else:
(data) = self.get_device_group_cluster_alert_conf_by_id_with_http_info(device_group_id, id, **kwargs) # noqa: E501
return data
def get_device_group_cluster_alert_conf_by_id_with_http_info(self, device_group_id, id, **kwargs): # noqa: E501
"""Get cluster alert configuration by id # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_device_group_cluster_alert_conf_by_id_with_http_info(device_group_id, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int device_group_id: (required)
:param int id: (required)
:return: DeviceClusterAlertConfig
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['device_group_id', 'id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_device_group_cluster_alert_conf_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'device_group_id' is set
if ('device_group_id' not in params or
params['device_group_id'] is None):
raise ValueError("Missing the required parameter `device_group_id` when calling `get_device_group_cluster_alert_conf_by_id`") # noqa: E501
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_device_group_cluster_alert_conf_by_id`") # noqa: E501
        if 'device_group_id' in params and not re.search(r'\d+', params['device_group_id'] if type(params['device_group_id']) is str else str(params['device_group_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `device_group_id` when calling `get_device_group_cluster_alert_conf_by_id`, must conform to the pattern `/\d+/`") # noqa: E501
        if 'id' in params and not re.search(r'\d+', params['id'] if type(params['id']) is str else str(params['id'])): # noqa: E501
raise ValueError("Invalid value for parameter `id` when calling `get_device_group_cluster_alert_conf_by_id`, must conform to the pattern `/\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'device_group_id' in params:
path_params['deviceGroupId'] = params['device_group_id'] # noqa: E501
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['LMv1'] # noqa: E501
return self.api_client.call_api(
'/device/groups/{deviceGroupId}/clusterAlertConf/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DeviceClusterAlertConfig', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_device_group_cluster_alert_conf_list(self, device_group_id, **kwargs): # noqa: E501
"""get a list of cluster alert configurations for a device group # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_device_group_cluster_alert_conf_list(device_group_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int device_group_id: (required)
:param str fields:
:param int size:
:param int offset:
:param str filter:
:return: DeviceClusterAlertConfigPaginationResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_device_group_cluster_alert_conf_list_with_http_info(device_group_id, **kwargs) # noqa: E501
else:
(data) = self.get_device_group_cluster_alert_conf_list_with_http_info(device_group_id, **kwargs) # noqa: E501
return data
def get_device_group_cluster_alert_conf_list_with_http_info(self, device_group_id, **kwargs): # noqa: E501
"""get a list of cluster alert configurations for a device group # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_device_group_cluster_alert_conf_list_with_http_info(device_group_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int device_group_id: (required)
:param str fields:
:param int size:
:param int offset:
:param str filter:
:return: DeviceClusterAlertConfigPaginationResponse
If the method is called asynchronously,
returns the request thread.
"""
        all_params = ['device_group_id', 'fields', 'size', 'offset', 'filter'] # noqa: E501
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import numpy as np
import math
from modules.encoder import EncoderCNN
from modules.ff_decoder import FFDecoder
from modules.transformer_decoder import DecoderTransformer
from modules.rnn_decoder import DecoderRNN
from utils.metrics import softIoU, softIoULoss, DCLoss, DC, targetDistLoss
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def label2_k_hots(labels, pad_value, remove_eos=False):
# labels is a list of (possibly variable length) lists.
# labels are numpy array
if type(labels) == list:
tmp = np.array([i + [pad_value]*(len(max(labels, key=len))-len(i)) for i in labels])
labels = torch.from_numpy(tmp).to(device)
# input labels to one hot vector
inp_ = torch.unsqueeze(labels, 2)
k_hots = torch.FloatTensor(labels.size(0), labels.size(1), pad_value + 1).zero_().to(device)
k_hots.scatter_(2, inp_, 1)
k_hots, _ = k_hots.max(dim=1)
# remove pad position
k_hots = k_hots[:, :-1]
# handle eos
if remove_eos:
# this is used by tfset/lstmset when computing losses and
# by all auto-regressive models when computing f1 metrics
k_hots = k_hots[:, 1:]
return k_hots
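# Worked example: with pad_value=4 (vocabulary {0: eos, 1..3: labels, 4: pad}),
#   label2_k_hots([[1, 3, 4], [2, 4, 4]], 4)
#   -> [[0., 1., 0., 1.],     # columns are classes 0..3; the pad column is dropped
#       [0., 0., 1., 0.]]
#   label2_k_hots([[1, 3, 4], [2, 4, 4]], 4, remove_eos=True)
#   -> [[1., 0., 1.],         # the eos column (class 0) is dropped as well
#       [0., 1., 0.]]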
def mask_from_eos(prediction, eos_value, mult_before=True):
mask = torch.ones(prediction.size()).to(device).byte()
mask_aux = torch.ones(prediction.size(0)).to(device).byte()
# find eos in label prediction
for idx in range(prediction.size(1)):
# force mask to have 1s in the first position to avoid division
# by 0 when predictions start with eos
if idx == 0:
continue
if mult_before:
mask[:, idx] = mask[:, idx] * mask_aux
mask_aux = mask_aux * (prediction[:, idx] != eos_value)
else:
mask_aux = mask_aux * (prediction[:, idx] != eos_value)
mask[:, idx] = mask[:, idx] * mask_aux
return mask
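# Worked example: for prediction [[5, 2, 0, 7]] with eos_value=0,
#   mask_from_eos(..., mult_before=True)  -> [[1, 1, 1, 0]]  # the eos position itself is kept
#   mask_from_eos(..., mult_before=False) -> [[1, 1, 0, 0]]  # zeroed from the eos position on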
def predictions_to_idxs(label_logits,
maxnumlabels,
pad_value,
th=1,
cardinality_prediction=None,
which_loss='bce',
accumulate_probs=False,
use_empty_set=False):
assert th > 0 and th <= 1
card_offset = 0 if use_empty_set else 1
# select topk elements
probs, idxs = torch.topk(label_logits, k=maxnumlabels, dim=1, largest=True, sorted=True)
idxs_clone = idxs.clone()
# mask to identify elements within the top-maxnumlabel ones which satisfy the threshold th
if which_loss == 'td':
# cumulative threshold
mask = torch.ones(probs.size()).to(device).byte()
for idx in range(probs.size(1)):
mask_step = torch.sum(probs[:, 0:idx], dim=-1) < th
mask[:, idx] = mask[:, idx] * mask_step
else:
        # probability threshold
mask = (probs > th).byte()
# if the model has cardinality prediction
if cardinality_prediction is not None:
# get the argmax for each element in the batch to get the cardinality
# (note that the output is N - 1, e.g. argmax = 0 means that there's 1 element)
        # unless we are in the empty set case, e.g. argmax = 0 means there are 0 elements
if accumulate_probs:
for c in range(cardinality_prediction.size(-1)):
value = torch.sum(torch.log(probs[:, 0:c + 1]), dim=-1)
cardinality_prediction[:, c] += value
# select cardinality
_, card_idx = torch.max(cardinality_prediction, dim=-1)
mask = torch.ones(probs.size()).to(device).byte()
aux_mask = torch.ones(mask.size(0)).to(device).byte()
for i in range(mask.size(-1)):
            # aux_mask stays 1 while i is below the predicted cardinality and drops to 0
            # from that point on. Predicting cardinality class 0 means 0 objects when
            # use_empty_set=True and 1 object when use_empty_set=False, so the real
            # cardinality is card_idx + card_offset.
above_cardinality = (i < card_idx + card_offset)
# multiply the auxiliar mask with this condition
# (once you multiply by 0, the following entries will also be 0)
aux_mask = aux_mask * above_cardinality
mask[:, i] = aux_mask
else:
if not use_empty_set:
mask[:, 0] = 1
idxs_clone[mask == 0] = pad_value
return idxs_clone
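# Worked example (ff decoder, no cardinality head, use_empty_set=False): with maxnumlabels=3,
# pad_value=4, th=0.5 and sigmoid probabilities [[0.9, 0.1, 0.7, 0.4]], topk selects classes
# [0, 2, 3] with probs [0.9, 0.7, 0.4]; only the first two pass the threshold, so the rejected
# slot is overwritten with pad and the returned idxs are [[0, 2, 4]].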
def get_model(args, vocab_size):
# build image encoder
encoder_image = EncoderCNN(args.embed_size, args.dropout_encoder,
args.image_model)
use_empty_set = (True if args.dataset in ['coco', 'nuswide'] else False)
# build set predictor
if args.decoder == 'ff':
print(
'Building feed-forward decoder. Embed size {} / Dropout {} / '
'Cardinality Prediction {} / Max. Num. Labels {} / Num. Layers {}'.format(
args.embed_size, args.dropout_decoder, args.pred_cardinality, args.maxnumlabels,
args.ff_layers),
flush=True)
decoder = FFDecoder(
args.embed_size,
vocab_size,
args.embed_size,
dropout=args.dropout_decoder,
pred_cardinality=args.pred_cardinality,
nobjects=args.maxnumlabels,
n_layers=args.ff_layers,
use_empty_set=use_empty_set)
elif args.decoder == 'lstm':
print(
'Building LSTM decoder. Embed size {} / Dropout {} / Max. Num. Labels {}. '.format(
args.embed_size, args.dropout_decoder, args.maxnumlabels),
flush=True)
decoder = DecoderRNN(
args.embed_size,
args.embed_size,
vocab_size,
dropout=args.dropout_decoder,
seq_length=args.maxnumlabels,
num_instrs=1)
elif args.decoder == 'tf':
print(
'Building Transformer decoder. Embed size {} / Dropout {} / Max. Num. Labels {} / '
'Num. Attention Heads {} / Num. Layers {}.'.format(
args.embed_size, args.dropout_decoder, args.maxnumlabels, args.n_att,
args.tf_layers),
flush=True)
decoder = DecoderTransformer(
args.embed_size,
vocab_size,
dropout=args.dropout_decoder,
seq_length=args.maxnumlabels,
num_instrs=1,
attention_nheads=args.n_att,
pos_embeddings=False,
num_layers=args.tf_layers,
learned=False,
normalize_before=True)
# label and eos loss
label_losses = {
'bce': nn.BCEWithLogitsLoss(reduction='mean') if args.decoder == 'ff' else nn.BCELoss(reduction='mean'),
'iou': softIoULoss(reduction='mean'),
'td': targetDistLoss(reduction='mean'),
}
pad_value = vocab_size - 1
print('Using {} loss.'.format(args.label_loss), flush=True)
if args.decoder == 'ff':
label_loss = label_losses[args.label_loss]
eos_loss = None
elif args.decoder in ['tf', 'lstm'] and args.perminv:
label_loss = label_losses[args.label_loss]
eos_loss = nn.BCELoss(reduction='mean')
else:
label_loss = nn.CrossEntropyLoss(ignore_index=pad_value, reduction='mean')
eos_loss = None
# cardinality loss
if args.pred_cardinality == 'dc':
print('Using Dirichlet-Categorical cardinality loss.', flush=True)
cardinality_loss = DCLoss(U=args.U, dataset=args.dataset, reduction='mean')
elif args.pred_cardinality == 'cat':
print('Using categorical cardinality loss.', flush=True)
cardinality_loss = nn.CrossEntropyLoss(reduction='mean')
else:
print('Using no cardinality loss.', flush=True)
cardinality_loss = None
model = SetPred(
decoder,
encoder_image,
args.maxnumlabels,
crit=label_loss,
crit_eos=eos_loss,
crit_cardinality=cardinality_loss,
pad_value=pad_value,
perminv=args.perminv,
decoder_ff=True if args.decoder == 'ff' else False,
th=args.th,
loss_label=args.label_loss,
replacement=args.replacement,
card_type=args.pred_cardinality,
dataset=args.dataset,
U=args.U,
use_empty_set=use_empty_set)
return model
class SetPred(nn.Module):
def __init__(self,
decoder,
image_encoder,
maxnumlabels,
crit=None,
crit_eos=None,
crit_cardinality=None,
pad_value=0,
perminv=True,
decoder_ff=False,
th=0.5,
loss_label='bce',
replacement=False,
card_type='none',
dataset='voc',
U=2.36,
use_empty_set=False,
eps=1e-8):
super(SetPred, self).__init__()
self.image_encoder = image_encoder
self.decoder = decoder
self.decoder_ff = decoder_ff
self.maxnumlabels = maxnumlabels
self.crit = crit
self.th = th
self.perminv = perminv
self.pad_value = pad_value
self.crit_eos = crit_eos
self.crit_cardinality = crit_cardinality
self.loss_label = loss_label
self.replacement = replacement
self.card_type = card_type
self.dataset = dataset
self.u_term = math.log(U)
self.eps = eps
self.use_empty_set = use_empty_set
def forward(self, img_inputs, label_target=None, maxnumlabels=0, keep_cnn_gradients=False, compute_losses=False, compute_predictions=False):
losses = {}
predictions = None
assert (label_target is not None and compute_losses) or (label_target is None and not compute_losses)
if not compute_losses and not compute_predictions:
return losses, predictions
# encode image
img_features = self.image_encoder(img_inputs, keep_cnn_gradients)
if self.decoder_ff:
# use ff decoder to predict set of labels and cardinality
label_logits, cardinality_logits = self.decoder(img_features)
if compute_losses:
# label target to k_hot
target_k_hot = label2_k_hots(label_target, self.pad_value)
# cardinality target
cardinality_target = target_k_hot.sum(dim=-1).unsqueeze(1)
# compute labels loss
losses['label_loss'] = self.crit(label_logits, target_k_hot)
# compute cardinality loss if needed
if self.crit_cardinality is not None:
                    # subtract the offset from the target count to match class idxs:
                    # class 0 corresponds to 1 element when use_empty_set is False,
                    # and to 0 elements when use_empty_set is True
offset = 0 if self.use_empty_set else 1
losses['cardinality_loss'] = self.crit_cardinality(
cardinality_logits, (cardinality_target.squeeze() - offset).long())
if compute_predictions:
# consider cardinality
if self.card_type == 'dc' and self.loss_label == 'bce':
offset = 0 if self.use_empty_set else 1
cardinality = torch.log(DC(cardinality_logits, dataset=self.dataset))
u_term = np.array(list(range(cardinality.size(-1)))) + offset
u_term = u_term * self.u_term
u_term = torch.from_numpy(u_term).to(device).unsqueeze(0).float()
cardinality = cardinality + u_term
elif self.card_type == 'cat':
cardinality = torch.nn.functional.log_softmax(cardinality_logits + self.eps, dim=-1)
else:
cardinality = None
# apply nonlinearity to label logits
if self.loss_label == 'td':
label_probs = nn.functional.softmax(label_logits, dim=-1)
else:
label_probs = torch.sigmoid(label_logits)
# get label ids
predictions = predictions_to_idxs(
label_probs,
maxnumlabels,
self.pad_value,
th=self.th,
cardinality_prediction=cardinality,
which_loss=self.loss_label,
accumulate_probs=self.card_type == 'dc' and self.loss_label == 'bce',
use_empty_set=self.use_empty_set)
else: # auto-regressive models
# use auto-regressive decoder to predict labels (sample function)
# output label_logits is only used to compute losses in case of self.perminv (no teacher forcing)
# predictions output is used for all auto-regressive models
predictions, label_logits = self.decoder.sample(
img_features,
None,
first_token_value=0,
replacement=self.replacement)
if compute_predictions:
# mask labels after finding eos (cardinality)
sample_mask = mask_from_eos(predictions, eos_value=0, mult_before=False)
predictions[sample_mask == 0] = self.pad_value
else:
predictions = None
if compute_losses:
# add dummy first word to sequence and remove last
first_word = torch.zeros(label_target.size(0))
shift_target = torch.cat([first_word.unsqueeze(-1).to(device).long(), label_target],
-1)[:, :-1]
if self.perminv:
# autoregressive mode for decoder when training with permutation invariant objective
# e.g. lstmset and tfset
# apply softmax nonlinearity before pooling across timesteps
label_probs = torch.nn.functional.softmax(label_logits, dim=-1)
# find idxs for eos label
# eos probability is the one assigned to the first position of the softmax
# this is used with bce loss only
eos = label_probs[:, :, 0]
                    eos_pos = (label_target == 0)  # all zeros except the position where eos is
"name": 'Makuhita',
"value": "makuhita",
"image": "img/makuhita.png",
"exclude": ['swsh']
},
{
"name": 'Hariyama',
"value": "hariyama",
"image": "img/hariyama.png",
"exclude": ['swsh']
},
{
"name": 'Azurill',
"value": "azurill",
"image": "img/azurill.png"
},
{
"name": 'Nosepass',
"value": "nosepass",
"image": "img/nosepass.png",
"exclude": ['swsh']
},
{
"name": 'Skitty',
"value": "skitty",
"image": "img/skitty.png",
"exclude": ['swsh']
},
{
"name": 'Delcatty',
"value": "delcatty",
"image": "img/delcatty.png",
"exclude": ['swsh']
},
{
"name": 'Sableye',
"value": "sableye",
"image": "img/sableye.png"
},
{
"name": 'Mawile',
"value": "mawile",
"image": "img/mawile.png"
},
{
"name": 'Aron',
"value": "aron",
"image": "img/aron.png"
},
{
"name": 'Lairon',
"value": "lairon",
"image": "img/lairon.png"
},
{
"name": 'Aggron',
"value": "aggron",
"image": "img/aggron.png"
},
{
"name": 'Meditite',
"value": "meditite",
"image": "img/meditite.png",
"exclude": ['swsh']
},
{
"name": 'Medicham',
"value": "medicham",
"image": "img/medicham.png",
"exclude": ['swsh']
},
{
"name": 'Electrike',
"value": "electrike",
"image": "img/electrike.png"
},
{
"name": 'Manectric',
"value": "manectric",
"image": "img/manectric.png"
},
{
"name": 'Plusle',
"value": "plusle",
"image": "img/plusle.png",
"exclude": ['swsh']
},
{
"name": 'Minun',
"value": "minun",
"image": "img/minun.png",
"exclude": ['swsh']
},
{
"name": 'Volbeat',
"value": "volbeat",
"image": "img/volbeat.png",
"exclude": ['swsh']
},
{
"name": 'Illumise',
"value": "illumise",
"image": "img/illumise.png",
"exclude": ['swsh']
},
{
"name": 'Roselia',
"value": "roselia",
"image": "img/roselia.png"
},
{
"name": 'Gulpin',
"value": "gulpin",
"image": "img/gulpin.png",
"exclude": ['swsh']
},
{
"name": 'Swalot',
"value": "swalot",
"image": "img/swalot.png",
"exclude": ['swsh']
},
{
"name": 'Carvanha',
"value": "carvanha",
"image": "img/carvanha.png"
},
{
"name": 'Sharpedo',
"value": "sharpedo",
"image": "img/sharpedo.png"
},
{
"name": 'Wailmer',
"value": "wailmer",
"image": "img/wailmer.png"
},
{
"name": 'Wailord',
"value": "wailord",
"image": "img/wailord.png"
},
{
"name": 'Numel',
"value": "numel",
"image": "img/numel.png",
"exclude": ['swsh']
},
{
"name": 'Camerupt',
"value": "camerupt",
"image": "img/camerupt.png",
"exclude": ['swsh']
},
{
"name": 'Torkoal',
"value": "torkoal",
"image": "img/torkoal.png"
},
{
"name": 'Spoink',
"value": "spoink",
"image": "img/spoink.png",
"exclude": ['swsh']
},
{
"name": 'Grumpig',
"value": "grumpig",
"image": "img/grumpig.png",
"exclude": ['swsh']
},
{
"name": 'Spinda',
"value": "spinda",
"image": "img/spinda.png",
"exclude": ['swsh']
},
{
"name": 'Trapinch',
"value": "trapinch",
"image": "img/trapinch.png"
},
{
"name": 'Vibrava',
"value": "vibrava",
"image": "img/vibrava.png"
},
{
"name": 'Flygon',
"value": "flygon",
"image": "img/flygon.png"
},
{
"name": 'Cacnea',
"value": "cacnea",
"image": "img/cacnea.png",
"exclude": ['swsh']
},
{
"name": 'Cacturne',
"value": "cacturne",
"image": "img/cacturne.png",
"exclude": ['swsh']
},
{
"name": 'Swablu',
"value": "swablu",
"image": "img/swablu.png"
},
{
"name": 'Altaria',
"value": "altaria",
"image": "img/altaria.png"
},
{
"name": 'Zangoose',
"value": "zangoose",
"image": "img/zangoose.png",
"exclude": ['swsh']
},
{
"name": 'Seviper',
"value": "seviper",
"image": "img/seviper.png",
"exclude": ['swsh']
},
{
"name": 'Lunatone',
"value": "lunatone",
"image": "img/lunatone.png"
},
{
"name": 'Solrock',
"value": "solrock",
"image": "img/solrock.png"
},
{
"name": 'Barboach',
"value": "barboach",
"image": "img/barboach.png"
},
{
"name": 'Whiscash',
"value": "whiscash",
"image": "img/whiscash.png"
},
{
"name": 'Corphish',
"value": "corphish",
"image": "img/corphish.png"
},
{
"name": 'Crawdaunt',
"value": "crawdaunt",
"image": "img/crawdaunt.png"
},
{
"name": 'Baltoy',
"value": "baltoy",
"image": "img/baltoy.png"
},
{
"name": 'Claydol',
"value": "claydol",
"image": "img/claydol.png"
},
{
"name": 'Lileep',
"value": "lileep",
"image": "img/lileep.png"
},
{
"name": 'Cradily',
"value": "cradily",
"image": "img/cradily.png"
},
{
"name": 'Anorith',
"value": "anorith",
"image": "img/anorith.png"
},
{
"name": 'Armaldo',
"value": "armaldo",
"image": "img/armaldo.png"
},
{
"name": 'Feebas',
"value": "feebas",
"image": "img/feebas.png"
},
{
"name": 'Milotic',
"value": "milotic",
"image": "img/milotic.png"
},
{
"name": 'Castform',
"value": "castform",
"image": "img/castform.png",
"exclude": ['swsh']
},
{
"name": 'Kecleon',
"value": "kecleon",
"image": "img/kecleon.png",
"exclude": ['swsh']
},
{
"name": 'Shuppet',
"value": "shuppet",
"image": "img/shuppet.png",
"exclude": ['swsh']
},
{
"name": 'Banette',
"value": "banette",
"image": "img/banette.png",
"exclude": ['swsh']
},
{
"name": 'Duskull',
"value": "duskull",
"image": "img/duskull.png"
},
{
"name": 'Dusclops',
"value": "dusclops",
"image": "img/dusclops.png"
},
{
"name": 'Tropius',
"value": "tropius",
"image": "img/tropius.png",
"exclude": ['swsh']
},
{
"name": 'Chimecho',
"value": "chimecho",
"image": "img/chimecho.png",
"exclude": ['swsh']
},
{
"name": 'Absol',
"value": "absol",
"image": "img/absol.png"
},
{
"name": 'Wynaut',
"value": "wynaut",
"image": "img/wynaut.png"
},
{
"name": 'Snorunt',
"value": "snorunt",
"image": "img/snorunt.png"
},
{
"name": 'Glalie',
"value": "glalie",
"image": "img/glalie.png"
},
{
"name": 'Spheal',
"value": "spheal",
"image": "img/spheal.png"
},
{
"name": 'Sealeo',
"value": "sealeo",
"image": "img/sealeo.png"
},
{
"name": 'Walrein',
"value": "walrein",
"image": "img/walrein.png"
},
{
"name": 'Clamperl',
"value": "clamperl",
"image": "img/clamperl.png",
"exclude": ['swsh']
},
{
"name": 'Huntail',
"value": "huntail",
"image": "img/huntail.png",
"exclude": ['swsh']
},
{
"name": 'Gorebyss',
"value": "gorebyss",
"image": "img/gorebyss.png",
"exclude": ['swsh']
},
{
"name": 'Relicanth',
"value": "relicanth",
"image": "img/relicanth.png"
},
{
"name": 'Luvdisc',
"value": "luvdisc",
"image": "img/luvdisc.png",
"exclude": ['swsh']
},
{
"name": 'Bagon',
"value": "bagon",
"image": "img/bagon.png"
},
{
"name": 'Shelgon',
"value": "shelgon",
"image": "img/shelgon.png"
},
{
"name": 'Salamence',
"value": "salamence",
"image": "img/salamence.png"
},
{
"name": 'Beldum',
"value": "beldum",
"image": "img/beldum.png"
},
{
"name": 'Metang',
"value": "metang",
"image": "img/metang.png"
},
{
"name": 'Metagross',
"value": "metagross",
"image": "img/metagross.png"
},
{
"name": 'Regirock',
"value": "regirock",
"image": "img/regirock.png"
},
{
"name": 'Regice',
"value": "regice",
"image": "img/regice.png"
},
{
"name": 'Registeel',
"value": "registeel",
"image": "img/registeel.png"
},
{
"name": 'Latias',
"value": "latias",
"image": "img/latias.png"
},
{
"name": 'Latios',
"value": "latios",
"image": "img/latios.png"
},
{
"name": 'Kyogre',
"value": "kyogre",
"image": "img/kyogre.png"
},
{
"name": 'Groudon',
"value": "groudon",
"image": "img/groudon.png"
},
{
"name": 'Rayquaza',
"value": "rayquaza",
"image": "img/rayquaza.png"
},
{
"name": 'Jirachi',
"value": "jirachi",
"image": "img/jirachi.png"
},
{
"name": 'Deoxys',
"value": "deoxys",
"image": "img/deoxys.png",
"exclude": ['swsh']
},
{
"name": 'Turtwig',
"value": "turtwig",
"image": "img/turtwig.png",
"exclude": ['swsh']
},
{
"name": 'Grotle',
"value": "grotle",
"image": "img/grotle.png",
"exclude": ['swsh']
},
{
"name": 'Torterra',
"value": "torterra",
"image": "img/torterra.png",
"exclude": ['swsh']
},
{
"name": 'Chimchar',
"value": "chimchar",
"image": "img/chimchar.png",
"exclude": ['swsh']
},
{
"name": 'Monferno',
"value": "monferno",
"image": "img/monferno.png",
"exclude": ['swsh']
},
{
"name": 'Infernape',
"value": "infernape",
"image": "img/infernape.png",
"exclude": ['swsh']
},
{
"name": 'Piplup',
"value": "piplup",
"image": "img/piplup.png",
"exclude": ['swsh']
},
{
"name": 'Prinplup',
"value": "prinplup",
"image": "img/prinplup.png",
"exclude": ['swsh']
},
{
"name": 'Empoleon',
"value": "empoleon",
"image": "img/empoleon.png",
"exclude": ['swsh']
},
{
"name": 'Starly',
"value": "starly",
"image": "img/starly.png",
"exclude": ['swsh']
},
{
"name": 'Staravia',
"value": "staravia",
"image": "img/staravia.png",
"exclude": ['swsh']
},
{
"name": 'Staraptor',
"value": "staraptor",
"image": "img/staraptor.png",
"exclude": ['swsh']
},
{
"name": 'Bidoof',
"value": "bidoof",
"image": "img/bidoof.png",
"exclude": ['swsh']
},
{
"name": 'Bibarel',
"value": "bibarel",
"image": "img/bibarel.png",
"exclude": ['swsh']
},
{
"name": 'Kricketot',
"value": "kricketot",
"image": "img/kricketot.png",
"exclude": ['swsh']
},
{
"name": 'Kricketune',
"value": "kricketune",
"image": "img/kricketune.png",
"exclude": ['swsh']
},
{
"name": 'Shinx',
"value": "shinx",
"image": "img/shinx.png"
},
{
"name": 'Luxio',
"value": "luxio",
"image": "img/luxio.png"
},
{
"name": 'Luxray',
"value": "luxray",
"image": "img/luxray.png"
},
{
"name": 'Budew',
"value": "budew",
"image": "img/budew.png"
},
{
"name": 'Roserade',
"value": "roserade",
"image": "img/roserade.png"
},
{
"name": 'Cranidos',
"value": "cranidos",
"image": "img/cranidos.png",
"exclude": ['swsh']
},
{
"name": 'Rampardos',
"value": "rampardos",
"image": "img/rampardos.png",
"exclude": ['swsh']
},
{
"name": 'Shieldon',
"value": "shieldon",
"image": "img/shieldon.png",
"exclude": ['swsh']
},
{
"name": 'Bastiodon',
"value": "bastiodon",
"image": "img/bastiodon.png",
"exclude": ['swsh']
},
{
"name": 'Burmy',
"value": "burmy",
"image": "img/burmy.png",
"exclude": ['swsh']
},
{
"name": 'Wormadam',
"value": "wormadam",
"image": "img/wormadam.png",
"exclude": ['swsh']
},
{
"name": 'Mothim',
"value": "mothim",
"image": "img/mothim.png",
"exclude": ['swsh']
},
{
"name": 'Combee',
"value": "combee",
"image": "img/combee.png"
},
{
"name": 'Vespiquen',
"value": "vespiquen",
"image": "img/vespiquen.png"
},
{
"name": 'Pachirisu',
"value": "pachirisu",
"image": "img/pachirisu.png",
"exclude": ['swsh']
},
{
"name": 'Buizel',
"value": "buizel",
"image": "img/buizel.png",
"exclude": ['swsh']
},
{
"name": 'Floatzel',
"value": "floatzel",
"image": "img/floatzel.png",
"exclude": ['swsh']
},
{
"name": 'Cherubi',
"value": "cherubi",
"image": "img/cherubi.png"
},
{
"name": 'Cherrim',
"value": "cherrim",
"image": "img/cherrim.png"
},
{
"name": 'Shellos',
"value": "shellos",
"image": "img/shellos.png"
},
{
"name": 'Gastrodon',
"value": "gastrodon",
"image": "img/gastrodon.png"
},
{
"name": 'Ambipom',
"value": "ambipom",
"image": "img/ambipom.png",
"exclude": ['swsh']
},
{
"name": 'Drifloon',
"value": "drifloon",
"image": "img/drifloon.png"
},
{
"name": 'Drifblim',
"value": "drifblim",
"image": "img/drifblim.png"
},
{
"name": 'Buneary',
"value": "buneary",
"image": "img/buneary.png"
},
{
"name": 'Lopunny',
"value": "lopunny",
"image": "img/lopunny.png"
},
{
"name": 'Mismagius',
"value": "mismagius",
"image": "img/mismagius.png",
"exclude": ['swsh']
},
{
"name": 'Honchkrow',
"value": "honchkrow",
"image": "img/honchkrow.png",
"exclude": ['swsh']
},
{
"name": 'Glameow',
"value": "glameow",
"image": "img/glameow.png",
"exclude": ['swsh']
},
{
"name": 'Purugly',
"value": "purugly",
"image": "img/purugly.png",
"exclude": ['swsh']
},
{
"name": 'Chingling',
"value": "chingling",
"image": "img/chingling.png",
"exclude": ['swsh']
},
{
"name": 'Stunky',
"value": "stunky",
"image": "img/stunky.png"
},
{
"name": 'Skuntank',
"value": "skuntank",
"image": "img/skuntank.png"
},
{
"name": 'Bronzor',
"value": "bronzor",
"image": "img/bronzor.png"
},
{
"name": 'Bronzong',
"value": "bronzong",
"image": "img/bronzong.png"
},
{
"name": 'Bonsly',
"value": "bonsly",
"image": "img/bonsly.png"
},
{
"name": '<NAME>.',
"value": "mime-jr",
"image": "img/mime-jr.png"
lisp . lisp_ipc_write_xtr_parameters ( lisp . lisp_debug_logging ,
lisp . lisp_data_plane_logging )
return
ooOOOo = {
"lisp xtr-parameters" : [ iIIi1iI1I1IIi , {
"rloc-probing" : [ True , "yes" , "no" ] ,
"nonce-echoing" : [ True , "yes" , "no" ] ,
"data-plane-security" : [ True , "yes" , "no" ] ,
"data-plane-logging" : [ True , "yes" , "no" ] ,
"frame-logging" : [ True , "yes" , "no" ] ,
"flow-logging" : [ True , "yes" , "no" ] ,
"nat-traversal" : [ True , "yes" , "no" ] ,
"checkpoint-map-cache" : [ True , "yes" , "no" ] ,
"ipc-data-plane" : [ True , "yes" , "no" ] ,
"decentralized-push-xtr" : [ True , "yes" , "no" ] ,
"decentralized-pull-xtr-modulus" : [ True , 1 , 0xff ] ,
"decentralized-pull-xtr-dns-suffix" : [ True ] ,
"register-reachable-rtrs" : [ True , "yes" , "no" ] ,
"program-hardware" : [ True , "yes" , "no" ] } ] ,
"lisp map-resolver" : [ o00oo0000 , {
"mr-name" : [ True ] ,
"ms-name" : [ True ] ,
"dns-name" : [ True ] ,
"address" : [ True ] } ] ,
"lisp map-cache" : [ lispconfig . lisp_map_cache_command , {
"prefix" : [ ] ,
"mr-name" : [ True ] ,
"ms-name" : [ True ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"eid-prefix" : [ True ] ,
"group-prefix" : [ True ] ,
"send-map-request" : [ True , "yes" , "no" ] ,
"rloc" : [ ] ,
"rloc-record-name" : [ True ] ,
"rle-name" : [ True ] ,
"elp-name" : [ True ] ,
"address" : [ True ] ,
"priority" : [ True , 0 , 255 ] ,
"weight" : [ True , 0 , 100 ] } ] ,
"lisp rtr-map-cache" : [ lispconfig . lisp_map_cache_command , {
"prefix" : [ ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"eid-prefix" : [ True ] ,
"group-prefix" : [ True ] ,
"rloc" : [ ] ,
"rloc-record-name" : [ True ] ,
"rle-name" : [ True ] ,
"elp-name" : [ True ] ,
"address" : [ True ] ,
"priority" : [ True , 0 , 255 ] ,
"weight" : [ True , 0 , 100 ] } ] ,
"lisp explicit-locator-path" : [ lispconfig . lisp_elp_command , {
"elp-name" : [ False ] ,
"elp-node" : [ ] ,
"address" : [ True ] ,
"probe" : [ True , "yes" , "no" ] ,
"strict" : [ True , "yes" , "no" ] ,
"eid" : [ True , "yes" , "no" ] } ] ,
"lisp replication-list-entry" : [ lispconfig . lisp_rle_command , {
"rle-name" : [ False ] ,
"rle-node" : [ ] ,
"address" : [ True ] ,
"level" : [ True , 0 , 255 ] } ] ,
"lisp json" : [ lispconfig . lisp_json_command , {
"json-name" : [ False ] ,
"json-string" : [ False ] } ] ,
"lisp database-mapping" : [ oO00 , {
"prefix" : [ ] ,
"mr-name" : [ True ] ,
"ms-name" : [ True ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"secondary-instance-id" : [ True , 0 , 0xffffffff ] ,
"eid-prefix" : [ True ] ,
"group-prefix" : [ True ] ,
"dynamic-eid" : [ True , "yes" , "no" ] ,
"signature-eid" : [ True , "yes" , "no" ] ,
"rloc" : [ ] ,
"rloc-record-name" : [ True ] ,
"elp-name" : [ True ] ,
"geo-name" : [ True ] ,
"rle-name" : [ True ] ,
"json-name" : [ True ] ,
"address" : [ True ] ,
"interface" : [ True ] ,
"priority" : [ True , 0 , 255 ] ,
"weight" : [ True , 0 , 100 ] } ] ,
"show rtr-rloc-probing" : [ o00O , { } ] ,
"show rtr-keys" : [ o0OOOOO00o0O0 , { } ] ,
"show rtr-map-cache" : [ iIiiI1 , { } ] ,
"show rtr-map-cache-dns" : [ i1iIIIiI1I , { } ]
}
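# Illustrative sketch, not part of the original lispers.net source: each entry in the
# command-clause table above maps a clause keyword to [handler, {parameter: spec}],
# where a spec of [True, "yes", "no"] is a single-valued yes/no flag, [True, lo, hi]
# is a single integer bounded to lo..hi, [True] is a free-form single value, and []
# allows multiple values. A hypothetical checker for one parameter spec:
def _example_check_param_spec(spec, value):
    if len(spec) == 3 and isinstance(spec[1], int):
        return spec[1] <= int(value) <= spec[2]
    if len(spec) == 3:
        return value in (spec[1], spec[2])
    return True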
def O00Iii1111III111 ( lisp_socket ) :
oO0OO , OO , IIIIii , oO0OOOO0 = lisp . lisp_receive ( lisp_socket , False )
OO0o0OO0 = lisp . lisp_trace ( )
if ( OO0o0OO0 . decode ( oO0OOOO0 ) == False ) : return
OO0o0OO0 . rtr_cache_nat_trace ( OO , IIIIii )
if ( II1II1 ( ) == False ) :
lisp . lprint ( "lisp_rtr_startup() failed" )
lisp . lisp_print_banner ( "RTR abnormal exit" )
exit ( 1 )
OOoO00ooO = [ II1Ii1iI1i , Oo0oO0oo0oO00 ,
i111I , oO0oIIII ]
I1IIIIiii1i = [ II1Ii1iI1i ] * 3
while ( True ) :
try : Ooi11III1II1 , Ii1iIi111I1i , I1III111i = select . select ( OOoO00ooO , [ ] , [ ] )
except : break
if ( lisp . lisp_ipc_data_plane and i111I in Ooi11III1II1 ) :
lisp . lisp_process_punt ( i111I , II1iII1i ,
iiI1iIiI )
if ( oO0oIIII in Ooi11III1II1 ) :
O00Iii1111III111 ( oO0oIIII )
# File: src/cool/Visitors/cil_visitor.py
from cool.AST.ast_hierarchy import * # remove the 'Cool.' prefix to run it
from cool.AST.ast_cil import *
import cool.Visitors.visitor as visitor
import cool.Context.context2 as enviroment
## Missing: CASE, and for STRING not sure what to do with the .DATA section
## The entry function is also missing
class CIL_Tree(object):
def __init__(self,context):
self.context = context
self.code_count = 0
self.local_count = 0
self.label_count = 0
self.self_instance = 0
self.void = 0
self.empty_string = 0
self.data = []
def __builtin(self):
f00 = FunctionCIL("initialize_Object", [ArgCIL(LocalCIL("instance"))],[],[])
m0 = MethodCIL("initialize_Object", "initialize_Object", f00)
##Object copy
l = LocalCIL("to_copy")
arg = ArgCIL(l)
local = LocalCIL("save_copy")
f0 = FunctionCIL("Object_copy",[arg],[self.void, local],[AssignCIL(local, CopyCIL(l))])
m = MethodCIL("copy", "Object_copy", f0)
##Object abort
arg2 = ArgCIL(LocalCIL("self_Object"))
f1 = FunctionCIL("Object_abort",[arg2],[self.void],[AbortCIL()])
m1 = MethodCIL("abort","Object_abort",f1)
##
##type_name
l = LocalCIL("self_Object")
arg2 = ArgCIL(l)
name = "name"
local = LocalCIL(name)
name2 = "type_name_ref"
local2 = LocalCIL(name2)
# m = MessageCIL("s_object","Object")
# self.data.append(m)
typ = TypeofCIL(l)
assign = AssignCIL(local, typ)
assign2 = AssignCIL(local2, TypeNameCIL(local))
f2 = FunctionCIL("Object_type_name",[arg2],[local, local2, self.void],[assign,assign2])
m2 = MethodCIL("type_name","Object_type_name",f2)
##
Obj = TypeCIL("Object", [], [m,m1,m2,m0])
##Int
f01 = FunctionCIL("initialize_Int", [ArgCIL(LocalCIL("instance"))],[],[])
m0 = MethodCIL("initialize_Int", "initialize_Int", f01)
Int = TypeCIL("Int",[AttributeCIL("value", 0)],[m,m1,m2,m0])
##Bool
f02 = FunctionCIL("initialize_Bool", [ArgCIL(LocalCIL("instance"))],[],[])
m0 = MethodCIL("initialize_Bool", "initialize_Bool", f02)
Bool = TypeCIL("Bool",[AttributeCIL("value", 0)],[m,m1,m2,m0])
##IO out_string out_int
f03 = FunctionCIL("initialize_IO", [ArgCIL(LocalCIL("instance"))],[],[])
m0 = MethodCIL("initialize_IO", "initialize_IO", f03)
l = LocalCIL("out_string")
arg = ArgCIL(l)
l2 = LocalCIL("self_IO")
arg2 = ArgCIL(l2)
p = PrintStringCIL(l)
f4 = FunctionCIL("IO_out_string",[arg2, arg],[self.void],[p,AssignCIL(l, l2)])
m4 = MethodCIL("out_string","IO_out_string",f4)
l = LocalCIL("out_int")
arg = ArgCIL(l)
l2 = LocalCIL("self_IO")
arg2 = ArgCIL(l2)
p = PrintIntegerCIL(l)
f5 = FunctionCIL("IO_out_int",[arg2, arg],[self.void],[p,AssignCIL(l, l2)])
m5 = MethodCIL("out_int","IO_out_int",f5)
l = LocalCIL("x_in_string")
arg2 = ArgCIL(LocalCIL("self_IO"))
asign = AssignCIL(l,ReadStringCIL())
f6 = FunctionCIL("IO_in_string",[arg2],[self.void,l],[asign])
m6 = MethodCIL("in_string","IO_in_string",f6)
l = LocalCIL("x_in_int")
arg2 = ArgCIL(LocalCIL("self_IO"))
asign = AssignCIL(l,ReadIntegerCIL())
f7 = FunctionCIL("IO_in_int",[arg2],[l,self.void],[asign])
m7 = MethodCIL("in_int","IO_in_int",f7)
IO = TypeCIL("IO",[],[m,m1,m2,m4,m5,m6,m7,m0])
##String
f04 = FunctionCIL("initialize_String", [ArgCIL(LocalCIL("instance"))],[],[])
m0 = MethodCIL("initialize_String", "initialize_String", f04)
l = LocalCIL("x_str")
l1 = LocalCIL("x_str_length")
arg = ArgCIL(l)
# arg2 = ArgCIL(LocalCIL("self_String"))
assign = AssignCIL(l1, LengthCIL(l))
f8 = FunctionCIL("String_length",[arg],[l1,self.void],[assign])
m8 = MethodCIL("length","String_length",f8)
l = LocalCIL("x_str1")
l2 = LocalCIL("self_String")
arg = ArgCIL(l)
arg2 = ArgCIL(l2)
l1 = LocalCIL("x_str_concat")
assign = AssignCIL(l1, ConcatCIL(l,l2))
f9 = FunctionCIL("String_concat",[arg,arg2],[l1,self.void],[assign])
m9 = MethodCIL("concat","String_concat",f9)
l = LocalCIL("x_i")
l2 = LocalCIL("x_j")
l3 = LocalCIL("self_String")
arg = ArgCIL(l)
arg2 = ArgCIL(l2)
arg3 = ArgCIL(l3)
l1 = LocalCIL("x_sub_str")
assign = AssignCIL(l1, SubStrCIL(l3,l2,l))
f10 = FunctionCIL("String_substring",[arg,arg2,arg3],[l1,self.void],[assign])
m10 = MethodCIL("substr","String_substring",f10)
String = TypeCIL("String",[AttributeCIL("value", 0)],[m,m1,m2,m8,m9,m10,m0])
##
return [Obj, Int, Bool, IO, String], [f00,f01,f02,f03,f04,f0,f1,f2,f4,f5,f6,f7,f8,f9,f10]
def entry(self):
# self.void = LocalCIL("isvoid")
_locals = []
_body = []
name_initial = "x_"+str(self.local_count)
self.local_count += 1
local_initial = LocalCIL(name_initial)
name_instance = "x_"+str(self.local_count)
self.local_count += 1
name_result = "x_"+str(self.local_count)
self.local_count += 1
local_result = LocalCIL(name_result)
local_instance = LocalCIL(name_instance)
allocate = AllocateCIL("Main")
assign_main = AssignCIL(local_instance, allocate)
param = ParamCIL(local_instance)
call = CallCIL("Main_main")
assign_result = AssignCIL(local_result, call)
initial = CallCIL("initialize_Main")
assign = AssignCIL(local_initial, initial)
_locals.append(local_instance)
_locals.append(local_result)
_locals.append(local_initial)
_body.append(assign_main)
_body.append(ParamCIL(local_instance))
_body.append(assign)
_body.append(param)
_body.append(assign_result)
_body.append(AbortCIL())
return FunctionCIL("main",[], _locals, _body)
def __boxing(self, x_type, local_value):
name_local = "x_"+str(self.local_count)
self.local_count += 1
local = LocalCIL(name_local)
allocate = AllocateCIL(x_type)
assign = AssignCIL(local, allocate)
setAttr = SetAttrCIL(local, 0, local_value)
return [local], [assign, setAttr], local
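# Illustration (hypothetical temporaries): boxing an Int value held in x_1 emits
#   x_2 = ALLOCATE Int
#   SETATTR x_2 0 x_1
# and returns x_2 as the boxed object reference, so a value type can be passed
# where an Object is expected.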
def __search(self, parent, list_type):
for _type in list_type:
if _type.name == parent:
return _type
# return TypeCIL("Object", [], [m,m1,m2,m0])
def __method_union(self, methods_parent, methods):
union = []
for m in methods_parent:
from_parent = False
for m_new in methods:
if m.old_name == m_new.old_name:
from_parent = True
m1 = MethodCIL(m.old_name, m_new.name, m_new.function)
union.append(m1)
if not from_parent:
union.append(m)
news = []
for m_new in methods:
is_in_union = False
for u in union:
if m_new.old_name == u.old_name:
is_in_union = True
break
if not is_in_union:
news.append(m_new)
return union + news
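# Illustration (hypothetical names): if the parent dispatch list is
#   [copy -> Object_copy, type_name -> Object_type_name]
# and the child defines [copy -> A_copy, bar -> A_bar], the union keeps the parent's
# slot order with the override applied and appends the genuinely new method:
#   [copy -> A_copy, type_name -> Object_type_name, bar -> A_bar]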
@visitor.on('node')
def visit(self, node, current_class):
pass
@visitor.when(ProgramNode)
def visit(self, node, current_class):
type_list = []
function_list = []
m = MessageCIL("_vacio","")
self.data.append(m)
self.empty_string = m
self.void = LocalCIL("isvoid")
t,f = self.__builtin()
type_list += t
function_list += f
order = self.context.lca.preorden()
for _class in order:
self.self_instance = LocalCIL("self_{}".format(_class))
n = node.class_list[_class]
parent = self.__search(n.parent, type_list)
_type,_function = self.visit(n, parent, _class, enviroment.Scope(_class))
type_list.append(_type)
function_list += _function
function_list.append(self.entry())
code = CodeCIL(function_list)
return ProgramCIL(type_list,[DataCIL(self.data)], code)
@visitor.when(ClassNode)
def visit(self, node, parent, current_class, scope):
attr_list = []
method_list = []
code_list = []
name = node.name
O = self.context.get_O_by_class(current_class)
# list_id = self.context.get_list_id(current_class)
# print(current_class + "->" + ",".join(list_id))
scope.new_scope(O)
init_local = [self.void]
init_code = []
for feature in node.feature_list:
attr, method, code = self.visit(feature, name, scope)
if attr != None:
l, c = code
init_local += l
init_code += c
attr_list.append(attr)
if method != None:
method_list.append(method)
code_list += code
local_result = LocalCIL("return")
assign = AssignCIL(local_result, self.self_instance)
init_local.append(local_result)
init_code.append(assign)
if len(parent.attr_list) != 0:
name_l = LocalCIL("x_"+str(self.local_count))
self.local_count += 1
p = ParamCIL(self.self_instance)
assign_call = AssignCIL(name_l, CallCIL("initialize_"+parent.name))
init_local.append(name_l)
init_code = [p, assign_call] + init_code
f = FunctionCIL("initialize_"+current_class,[ArgCIL(self.self_instance)],init_local,init_code)
m = MethodCIL("initialize_"+current_class,"initialize_"+current_class,f)
method_list += [m]
code_list.append(f)
union_m = self.__method_union(parent.method_list, method_list)
new_type = TypeCIL(name, parent.attr_list + attr_list, union_m)
scope.end_scope()
return (new_type, code_list)
@visitor.when(Atribute_Definition)
def visit(self, node, current_class, scope):
name = node.att_id
attr = AttributeCIL(name, 0)
_locals = []
_code = []
if node.expr != None:
_locals, _code, local = self.visit(node.expr, current_class, scope)
_, left_type = scope.is_defined(node.att_id)
if left_type == "Object" and node.expr.static_type in ["String","Int","Bool"]:
l, b, v = self.__boxing(node.expr.static_type, local)
local = v
_locals += l
_code += b
l = self.context.get_list_id(current_class)
i = l.index(name)
setAttr = SetAttrCIL(self.self_instance, i, local)
_code.append(setAttr)
else:
if not node.att_type in ["Int", "Bool", "String"]:
l = self.context.get_list_id(current_class)
i = l.index(name)
setAttr = SetAttrCIL(self.self_instance, i, self.void)
_code.append(setAttr)
elif node.att_type == "String":
l = self.context.get_list_id(current_class)
i = l.index(name)
name_local = "x_"+str(self.local_count)
self.local_count += 1
local = LocalCIL(name_local)
assign = AssignCIL(local, LoadCIL(self.empty_string))
setAttr = SetAttrCIL(self.self_instance, i, local)
_code.append(assign)
_code.append(setAttr)
_locals.append(local)
else:
name_local = "x_"+str(self.local_count)
self.local_count += 1
local = LocalCIL(name_local)
l = self.context.get_list_id(current_class)
i = l.index(name)
setAttr = SetAttrCIL(self.self_instance, i, local)
_code.append(setAttr)
_locals.append(local)
return attr, None, (_locals,_code)
@visitor.when(Method_Definition)
def visit(self, node, current_class, scope):
new_name = "{}_{}".format(current_class,node.meth_id)
_locals_args = []
# if node.meth_id == "main":
_locals_args.append(self.void)
# save_name = self.self_instance.name
# self.self_instance.name += "_{}".format(node.meth_id)
param_list = [ArgCIL(self.self_instance)]
O = enviroment.ObjectEnviroment(current_class)
for param in node.param_list[::-1]:
p,t = self.visit(param, current_class, node.meth_id, scope)
# _locals_args.append(p.arg)
O.add(p,t, self.local_count)
self.local_count += 1
temp_local = O.get_local(p)
param_list.append(ArgCIL(temp_local))
scope.new_scope(O)
_locals, _code, _ = self.visit(node.exp, current_class, scope)
scope.end_scope()
f = FunctionCIL(new_name,param_list, _locals_args+_locals, _code)
meth = MethodCIL(node.meth_id, new_name, f)
return None, meth, [f]
@visitor.when(ParamNode)
def visit(self, node, current_class, function, scope):
return node.par_id, node.par_type
@visitor.when(AssignNode)
def visit(self, node, current_class, scope):
_locals = []
_body = []
local_expr, body_expr, value_expr = self.visit(node.expr, current_class, scope)
_locals += local_expr
_body += body_expr
_, left_type = scope.is_defined(node.idx_token)
if left_type == "Object" and node.expr.static_type in ["String","Int","Bool"]:
l, b, v = self.__boxing(node.expr.static_type, value_expr)
value_expr = v
_locals += l
_body += b
name = "x_"+str(self.local_count)
self.local_count += 1
name_id = node.idx_token
l = self.context.get_list_id(current_class)
is_d, local_id, is_attr= scope.is_defined_local(node.idx_token)
if name_id in l and is_attr:
index = l.index(name_id)
setAttr = SetAttrCIL(self.self_instance, index, value_expr)
local = LocalCIL(name)
assign = AssignCIL(local, GetAttrCIL(self.self_instance, index))
_body.append(setAttr)
elif is_d:
# local = LocalCIL(name)
local = local_id
assign = AssignCIL(local, value_expr)
else:
local = LocalCIL(name)
assign = AssignCIL(local, value_expr)
_locals.append(local)
_body.append(assign)
return _locals ,_body, local
@visitor.when(PlusNode)
def visit(self, node, current_class, scope):
_locals = []
_body = []
local_left, body_left, value_left = self.visit(node.left, current_class, scope)
local_right, body_right, value_right = self.visit(node.right, current_class, scope)
_locals += local_left + local_right
_body += body_left + body_right
name = "x_"+str(self.local_count)
self.local_count += 1
local = LocalCIL(name)
expr = PlusCIL(value_left,value_right)
assign = AssignCIL(local, expr)
_locals.append(local)
# _body.append(expr)
_body.append(assign)
return _locals, _body, local
@visitor.when(MinusNode)
def visit(self, node, current_class, scope):
_locals = []
_body = []
local_left, body_left, value_left = self.visit(node.left, current_class, scope)
local_right, body_right, value_right = self.visit(node.right, current_class, scope)
_locals += local_left + local_right
_body += body_left + body_right
name = "x_"+str(self.local_count)
self.local_count += 1
local = LocalCIL(name)
expr = MinusCIL(value_left,value_right)
assign = AssignCIL(local, expr)
_locals.append(local)
# _body.append(expr)
_body.append(assign)
return _locals, _body, local
@visitor.when(StarNode)
def visit(self, node, current_class, scope):
_locals = []
_body = []
local_left, body_left, value_left = self.visit(node.left, current_class, scope)
local_right, body_right, value_right = self.visit(node.right, current_class, scope)
_locals += local_left + local_right
_body += body_left + body_right
name = "x_"+str(self.local_count)
self.local_count += 1
# The original source breaks off here; the rest of this method is completed by
# analogy with PlusNode/MinusNode above (StarCIL is assumed to exist in ast_cil).
local = LocalCIL(name)
expr = StarCIL(value_left, value_right)
assign = AssignCIL(local, expr)
_locals.append(local)
_body.append(assign)
return _locals, _body, local
format checking against ontology using equivalentClass.""")
@pytest.mark.cwl_conformance
@pytest.mark.cwl_conformance_v1_1
@pytest.mark.docker
@pytest.mark.command_line_tool
@pytest.mark.green
def test_conformance_v1_1_output_secondaryfile_optional(self):
"""Test optional output file and optional secondaryFile on output.
Generated from::
id: 67
job: tests/cat-job.json
label: output_secondaryfile_optional
output:
optional_file: null
output_file:
checksum: sha1$47a013e660d408619d894b20806b1d5086aab03b
class: File
location: output.txt
size: 13
tags:
- docker
- command_line_tool
tool: tests/optional-output.cwl
""" # noqa: W293
self.cwl_populator.run_conformance_test("""v1.1""", """Test optional output file and optional secondaryFile on output.""")
@pytest.mark.cwl_conformance
@pytest.mark.cwl_conformance_v1_1
@pytest.mark.inline_javascript
@pytest.mark.command_line_tool
@pytest.mark.green
def test_conformance_v1_1_valuefrom_ignored_null(self):
"""Test that valueFrom is ignored when the parameter is null
Generated from::
id: 68
job: tests/empty.json
label: valuefrom_ignored_null
output:
out: '
'
tags:
- inline_javascript
- command_line_tool
tool: tests/vf-concat.cwl
""" # noqa: W293
self.cwl_populator.run_conformance_test("""v1.1""", """Test that valueFrom is ignored when the parameter is null""")
@pytest.mark.cwl_conformance
@pytest.mark.cwl_conformance_v1_1
@pytest.mark.inline_javascript
@pytest.mark.command_line_tool
@pytest.mark.green
def test_conformance_v1_1_valuefrom_secondexpr_ignored(self):
"""Test that second expression in concatenated valueFrom is not ignored
Generated from::
id: 69
job: tests/cat-job.json
label: valuefrom_secondexpr_ignored
output:
out: 'a string
'
tags:
- inline_javascript
- command_line_tool
tool: tests/vf-concat.cwl
""" # noqa: W293
self.cwl_populator.run_conformance_test("""v1.1""", """Test that second expression in concatenated valueFrom is not ignored""")
@pytest.mark.cwl_conformance
@pytest.mark.cwl_conformance_v1_1
@pytest.mark.step_input
@pytest.mark.inline_javascript
@pytest.mark.workflow
@pytest.mark.red
def test_conformance_v1_1_valuefrom_wf_step(self):
"""Test valueFrom on workflow step.
Generated from::
id: 70
job: tests/step-valuefrom-wf.json
label: valuefrom_wf_step
output:
count_output: 16
tags:
- step_input
- inline_javascript
- workflow
tool: tests/step-valuefrom-wf.cwl
""" # noqa: W293
self.cwl_populator.run_conformance_test("""v1.1""", """Test valueFrom on workflow step.""")
@pytest.mark.cwl_conformance
@pytest.mark.cwl_conformance_v1_1
@pytest.mark.step_input
@pytest.mark.inline_javascript
@pytest.mark.multiple_input
@pytest.mark.workflow
@pytest.mark.red
def test_conformance_v1_1_valuefrom_wf_step_multiple(self):
"""Test valueFrom on workflow step with multiple sources
Generated from::
id: 71
job: tests/step-valuefrom-job.json
label: valuefrom_wf_step_multiple
output:
val: '3
'
tags:
- step_input
- inline_javascript
- multiple_input
- workflow
tool: tests/step-valuefrom2-wf.cwl
""" # noqa: W293
self.cwl_populator.run_conformance_test("""v1.1""", """Test valueFrom on workflow step with multiple sources""")
@pytest.mark.cwl_conformance
@pytest.mark.cwl_conformance_v1_1
@pytest.mark.step_input
@pytest.mark.inline_javascript
@pytest.mark.workflow
@pytest.mark.red
def test_conformance_v1_1_valuefrom_wf_step_other(self):
"""Test valueFrom on workflow step referencing other inputs
Generated from::
id: 72
job: tests/step-valuefrom-job.json
label: valuefrom_wf_step_other
output:
val: '3
'
tags:
- step_input
- inline_javascript
- workflow
tool: tests/step-valuefrom3-wf.cwl
""" # noqa: W293
self.cwl_populator.run_conformance_test("""v1.1""", """Test valueFrom on workflow step referencing other inputs""")
@pytest.mark.cwl_conformance
@pytest.mark.cwl_conformance_v1_1
@pytest.mark.shell_command
@pytest.mark.command_line_tool
@pytest.mark.red
def test_conformance_v1_1_record_output_binding(self):
"""Test record type output binding.
Generated from::
id: 73
job: tests/record-output-job.json
label: record_output_binding
output:
orec:
obar:
checksum: sha1$aeb3d11bdf536511649129f4077d5cda6a324118
class: File
location: bar
size: 12010
ofoo:
checksum: sha1$327fc7aedf4f6b69a42a7c8b808dc5a7aff61376
class: File
location: foo
size: 1111
tags:
- shell_command
- command_line_tool
tool: tests/record-output.cwl
""" # noqa: W293
self.cwl_populator.run_conformance_test("""v1.1""", """Test record type output binding.""")
@pytest.mark.cwl_conformance
@pytest.mark.cwl_conformance_v1_1
@pytest.mark.shell_command
@pytest.mark.command_line_tool
@pytest.mark.green
def test_conformance_v1_1_docker_json_output_path(self):
"""Test support for reading cwl.output.json when running in a Docker container and just 'path' is provided.
Generated from::
id: 74
job: tests/empty.json
label: docker_json_output_path
output:
foo:
checksum: sha1$f1d2d2f924e986ac86fdf7b36c94bcdf32beec15
class: File
location: foo
size: 4
tags:
- shell_command
- command_line_tool
tool: tests/test-cwl-out.cwl
""" # noqa: W293
self.cwl_populator.run_conformance_test("""v1.1""", """Test support for reading cwl.output.json when running in a Docker container and just 'path' is provided.""")
@pytest.mark.cwl_conformance
@pytest.mark.cwl_conformance_v1_1
@pytest.mark.shell_command
@pytest.mark.command_line_tool
@pytest.mark.green
def test_conformance_v1_1_docker_json_output_location(self):
"""Test support for reading cwl.output.json when running in a Docker container and just 'location' is provided.
Generated from::
id: 75
job: tests/empty.json
label: docker_json_output_location
output:
foo:
checksum: sha1$f1d2d2f924e986ac86fdf7b36c94bcdf32beec15
class: File
location: foo
size: 4
tags:
- shell_command
- command_line_tool
tool: tests/test-cwl-out2.cwl
""" # noqa: W293
self.cwl_populator.run_conformance_test("""v1.1""", """Test support for reading cwl.output.json when running in a Docker container and just 'location' is provided.""")
@pytest.mark.cwl_conformance
@pytest.mark.cwl_conformance_v1_1
@pytest.mark.required
@pytest.mark.command_line_tool
@pytest.mark.green
def test_conformance_v1_1_multiple_glob_expr_list(self):
"""Test support for returning multiple glob patterns from expression
Generated from::
id: 76
job: tests/abc.json
label: multiple_glob_expr_list
output:
files:
- checksum: sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709
class: File
location: a
size: 0
- checksum: sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709
class: File
location: b
size: 0
- checksum: sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709
class: File
location: c
size: 0
tags:
- required
- command_line_tool
tool: tests/glob-expr-list.cwl
""" # noqa: W293
self.cwl_populator.run_conformance_test("""v1.1""", """Test support for returning multiple glob patterns from expression""")
@pytest.mark.cwl_conformance
@pytest.mark.cwl_conformance_v1_1
@pytest.mark.scatter
@pytest.mark.step_input
@pytest.mark.workflow
@pytest.mark.red
def test_conformance_v1_1_wf_scatter_oneparam_valuefrom(self):
"""Test workflow scatter with single scatter parameter and two valueFrom on step input (first and current el)
Generated from::
id: 77
job: tests/scatter-valuefrom-job1.json
label: wf_scatter_oneparam_valuefrom
output:
out:
- foo one one
- foo one two
- foo one three
- foo one four
tags:
- scatter
- step_input
- workflow
tool: tests/scatter-valuefrom-wf1.cwl
""" # noqa: W293
self.cwl_populator.run_conformance_test("""v1.1""", """Test workflow scatter with single scatter parameter and two valueFrom on step input (first and current el)""")
@pytest.mark.cwl_conformance
@pytest.mark.cwl_conformance_v1_1
@pytest.mark.scatter
@pytest.mark.step_input
@pytest.mark.workflow
@pytest.mark.red
def test_conformance_v1_1_wf_scatter_twoparam_nested_crossproduct_valuefrom(self):
"""Test workflow scatter with two scatter parameters and nested_crossproduct join method and valueFrom on step input
Generated from::
id: 78
job: tests/scatter-valuefrom-job2.json
label: wf_scatter_twoparam_nested_crossproduct_valuefrom
output:
out:
- - foo one one three
- foo one one four
- - foo one two three
- foo one two four
tags:
- scatter
- step_input
- workflow
tool: tests/scatter-valuefrom-wf2.cwl
""" # noqa: W293
self.cwl_populator.run_conformance_test("""v1.1""", """Test workflow scatter with two scatter parameters and nested_crossproduct join method and valueFrom on step input""")
@pytest.mark.cwl_conformance
@pytest.mark.cwl_conformance_v1_1
@pytest.mark.scatter
@pytest.mark.step_input
@pytest.mark.workflow
@pytest.mark.red
def test_conformance_v1_1_wf_scatter_twoparam_flat_crossproduct_valuefrom(self):
"""Test workflow scatter with two scatter parameters and flat_crossproduct join method and valueFrom on step input
Generated from::
id: 79
job: tests/scatter-valuefrom-job2.json
label: wf_scatter_twoparam_flat_crossproduct_valuefrom
output:
out:
- foo one one three
- foo one one four
- foo one two three
- foo one two four
tags:
- scatter
- step_input
- workflow
tool: tests/scatter-valuefrom-wf3.cwl#main
""" # noqa: W293
self.cwl_populator.run_conformance_test("""v1.1""", """Test workflow scatter with two scatter parameters and flat_crossproduct join method and valueFrom on step input""")
@pytest.mark.cwl_conformance
@pytest.mark.cwl_conformance_v1_1
@pytest.mark.scatter
@pytest.mark.step_input
@pytest.mark.workflow
@pytest.mark.red
def test_conformance_v1_1_wf_scatter_twoparam_dotproduct_valuefrom(self):
"""Test workflow scatter with two scatter parameters and dotproduct join method and valueFrom on step input
Generated from::
id: 80
job: tests/scatter-valuefrom-job2.json
label: wf_scatter_twoparam_dotproduct_valuefrom
output:
out:
- foo one one three
- foo one two four
tags:
- scatter
- step_input
- workflow
tool: tests/scatter-valuefrom-wf4.cwl#main
""" # noqa: W293
self.cwl_populator.run_conformance_test("""v1.1""", """Test workflow scatter with two scatter parameters and dotproduct join method and valueFrom on step input""")
@pytest.mark.cwl_conformance
@pytest.mark.cwl_conformance_v1_1
@pytest.mark.scatter
@pytest.mark.step_input
@pytest.mark.workflow
@pytest.mark.red
def test_conformance_v1_1_wf_scatter_oneparam_valuefrom_twice_current_el(self):
"""Test workflow scatter with single scatter parameter and two valueFrom on step input (current el twice)
Generated from::
id: 81
job: tests/scatter-valuefrom-job1.json
label: wf_scatter_oneparam_valuefrom_twice_current_el
output:
out:
- foo one one
- foo two two
- foo three three
- foo four four
tags:
- scatter
- step_input
- workflow
tool: tests/scatter-valuefrom-wf5.cwl
""" # noqa: W293
self.cwl_populator.run_conformance_test("""v1.1""", """Test workflow scatter with single scatter parameter and two valueFrom on step input (current el twice)""")
@pytest.mark.cwl_conformance
@pytest.mark.cwl_conformance_v1_1
@pytest.mark.scatter
@pytest.mark.step_input
@pytest.mark.workflow
@pytest.mark.red
def test_conformance_v1_1_wf_scatter_oneparam_valueFrom(self):
"""Test valueFrom eval on scattered input parameter
Generated from::
id: 82
job: tests/scatter-valuefrom-job3.json
label: wf_scatter_oneparam_valueFrom
output:
out_message:
- checksum: sha1$98030575f6fc40e5021be5a8803a6bef94aee11f
class: File
location: Any
size: 16
- checksum: sha1$edcacd50778d98ae113015406b3195c165059dd8
class: File
location: Any
size: 16
tags:
- scatter
- step_input
- workflow
tool: tests/scatter-valuefrom-wf6.cwl
""" # noqa: W293
self.cwl_populator.run_conformance_test("""v1.1""", """Test valueFrom eval on scattered input parameter""")
@pytest.mark.cwl_conformance
@pytest.mark.cwl_conformance_v1_1
@pytest.mark.required
@pytest.mark.workflow
@pytest.mark.green
def test_conformance_v1_1_wf_two_inputfiles_namecollision(self):
"""Test workflow two input files with same name.
Generated from::
id: 83
job: tests/conflict-job.json
label: wf_two_inputfiles_namecollision
output:
fileout:
checksum: sha1$a2d8d6e7b28295dc9977dc3bdb652ddd480995f0
class: File
location: out.txt
size: 25
tags:
- required
- workflow
tool: tests/conflict-wf.cwl#collision
""" # noqa: W293
self.cwl_populator.run_conformance_test("""v1.1""", """Test workflow two input files with same name.""")
@pytest.mark.cwl_conformance
@pytest.mark.cwl_conformance_v1_1
@pytest.mark.shell_command
@pytest.mark.command_line_tool
@pytest.mark.green
def test_conformance_v1_1_directory_input_param_ref(self):
"""Test directory input with parameter reference
Generated from::
id: 84
job: tests/dir-job.yml
label: directory_input_param_ref
output:
outlist:
checksum: sha1$13cda8661796ae241da3a18668fb552161a72592
class: File
location: output.txt
size: 20
tags:
- shell_command
- command_line_tool
tool: tests/dir.cwl
""" # noqa: W293
self.cwl_populator.run_conformance_test("""v1.1""", """Test directory input with parameter reference""")
@pytest.mark.cwl_conformance
@pytest.mark.cwl_conformance_v1_1
@pytest.mark.required
@pytest.mark.command_line_tool
@pytest.mark.shell_command
@pytest.mark.green
def test_conformance_v1_1_directory_input_docker(self):
"""Test directory input in Docker
Generated from::
id: 85
job: tests/dir-job.yml
label: directory_input_docker
output:
outlist:
checksum: sha1$13cda8661796ae241da3a18668fb552161a72592
class: File
location: output.txt
size: 20
tags:
- required
- command_line_tool
- shell_command
tool: tests/dir2.cwl
""" # noqa: W293
self.cwl_populator.run_conformance_test("""v1.1""", """Test directory input in Docker""")
@pytest.mark.cwl_conformance
@pytest.mark.cwl_conformance_v1_1
@pytest.mark.required
@pytest.mark.command_line_tool
@pytest.mark.green
def test_conformance_v1_1_directory_output(self):
"""Test directory output
Generated from::
id: 86
job: tests/dir3-job.yml
label: directory_output
output:
outdir:
class: Directory
listing:
- checksum: sha1$dd0a4c4c49ba43004d6611771972b6cf969c1c01
class: File
location: goodbye.txt
size: 24
- checksum: sha1$47a013e660d408619d894b20806b1d5086aab03b
class: File
location: hello.txt
size: 13
tags:
- required
- command_line_tool
tool: tests/dir3.cwl
""" # noqa: W293
self.cwl_populator.run_conformance_test("""v1.1""", """Test directory output""")
@pytest.mark.cwl_conformance
@pytest.mark.cwl_conformance_v1_1
@pytest.mark.shell_command
@pytest.mark.command_line_tool
@pytest.mark.red
def test_conformance_v1_1_directory_secondaryfiles(self):
"""Test directories in secondaryFiles
Generated from::
id: 87
job: tests/dir4-job.yml
# File: neo/Prompt/Commands/Tokens.py
from neo.Prompt.Commands.Invoke import InvokeContract, InvokeWithTokenVerificationScript
from neo.Wallets.NEP5Token import NEP5Token
from neo.Core.Fixed8 import Fixed8
from neo.Core.UInt160 import UInt160
from prompt_toolkit import prompt
from decimal import Decimal
from neo.Core.TX.TransactionAttribute import TransactionAttribute
import binascii
from neo.Prompt.CommandBase import CommandBase, CommandDesc, ParameterDesc
from neo.Prompt.PromptData import PromptData
from neo.Prompt import Utils as PromptUtils
from neo.Implementations.Wallets.peewee.Models import NEP5Token as ModelNEP5Token
from neo.Implementations.Notifications.LevelDB.NotificationDB import NotificationDB
from neo.Core.TX.TransactionAttribute import TransactionAttributeUsage
from neo.Core.Utils import isValidPublicAddress
import peewee
import traceback
from neo.Prompt.PromptPrinter import prompt_print as print
from neo.logging import log_manager
logger = log_manager.getLogger()
class CommandWalletToken(CommandBase):
def __init__(self):
super().__init__()
self.register_sub_command(CommandTokenDelete())
self.register_sub_command(CommandTokenSend())
self.register_sub_command(CommandTokenSendFrom())
self.register_sub_command(CommandTokenHistory())
self.register_sub_command(CommandTokenApprove())
self.register_sub_command(CommandTokenAllowance())
self.register_sub_command(CommandTokenMint())
self.register_sub_command(CommandTokenRegister())
def command_desc(self):
return CommandDesc('token', 'various token operations')
def execute(self, arguments):
item = PromptUtils.get_arg(arguments)
if not item:
print(f"run `{self.command_desc().command} help` to see supported queries")
return False
try:
return self.execute_sub_command(item, arguments[1:])
except KeyError:
print(f"{item} is an invalid parameter")
return False
class CommandTokenDelete(CommandBase):
def __init__(self):
super().__init__()
def execute(self, arguments):
wallet = PromptData.Wallet
if len(arguments) != 1:
print("Please specify the required parameter")
return False
hash_string = arguments[0]
try:
script_hash = UInt160.ParseString(hash_string)
except Exception:
# because UInt160 throws a generic exception. Should be fixed in the future
print("Invalid script hash")
return False
# try to find token and collect some data
try:
token = ModelNEP5Token.get(ContractHash=script_hash)
except peewee.DoesNotExist:
print(f"Could not find a token with script_hash {arguments[0]}")
return False
success = wallet.DeleteNEP5Token(script_hash)
if success:
print(f"Token {token.Symbol} with script_hash {arguments[0]} deleted")
else:
# probably unreachable due to the token check earlier. Better safe than sorry
print(f"Could not find a token with script_hash {arguments[0]}")
return success
def command_desc(self):
p1 = ParameterDesc('contract', 'token contract hash (script hash)')
return CommandDesc('delete', 'remove a token from the wallet', [p1])
class CommandTokenSend(CommandBase):
def __init__(self):
super().__init__()
def execute(self, arguments):
wallet = PromptData.Wallet
if len(arguments) < 4:
print("Please specify the required parameters")
return False
if len(arguments) > 6:
# the 5th and 6th arguments are optional
print("Too many parameters supplied. Please check your command")
return False
arguments, priority_fee = PromptUtils.get_fee(arguments)
arguments, user_tx_attributes = PromptUtils.get_tx_attr_from_args(arguments)
token = arguments[0]
from_addr = arguments[1]
to_addr = arguments[2]
try:
amount = float(arguments[3])
except ValueError:
print(f"{arguments[3]} is not a valid amount")
return False
fee = Fixed8.Zero()
if priority_fee is not None:
fee = priority_fee
if fee is False:
logger.debug("invalid fee")
return False
try:
success = token_send(wallet, token, from_addr, to_addr, amount, fee=fee, user_tx_attributes=user_tx_attributes)
except ValueError as e:
# occurs if arguments are invalid
print(str(e))
success = False
return success
def command_desc(self):
p1 = ParameterDesc('token', 'token symbol or script hash')
p2 = ParameterDesc('from_addr', 'address to send token from')
p3 = ParameterDesc('to_addr', 'address to send token to')
p4 = ParameterDesc('amount', 'number of tokens to send')
p5 = ParameterDesc('--fee', 'Attach GAS amount to give your transaction priority (> 0.001) e.g. --fee=0.01', optional=True)
p6 = ParameterDesc('--tx-attr', f"a list of transaction attributes to attach to the transaction\n\n"
f"{' ':>17} See: http://docs.neo.org/en-us/network/network-protocol.html section 4 for a description of possible attributes\n\n" # noqa: E128 ignore indentation
f"{' ':>17} Example:\n"
f"{' ':>20} --tx-attr=[{{\"usage\": <value>,\"data\":\"<remark>\"}}, ...]\n"
f"{' ':>20} --tx-attr=[{{\"usage\": 0x90,\"data\":\"my brief description\"}}]\n", optional=True)
return CommandDesc('send', 'send a token from the wallet', [p1, p2, p3, p4, p5, p6])
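# Example invocation from the np-prompt (token symbol, addresses and amount are made
# up; the 'wallet' prefix assumes this group is registered under the wallet command):
#   wallet token send MYTOKEN <from_address> <to_address> 13 --fee=0.001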
class CommandTokenSendFrom(CommandBase):
"""
This command is for old style NEP-5 tokens before the proposal got amended to remove this optional command.
"""
def __init__(self):
super().__init__()
def execute(self, arguments):
wallet = PromptData.Wallet
if len(arguments) < 4:
print("Please specify the required parameters")
return False
arguments, priority_fee = PromptUtils.get_fee(arguments)
token_str = arguments[0]
from_addr = arguments[1]
to_addr = arguments[2]
try:
amount = float(arguments[3])
except ValueError:
print(f"{arguments[3]} is not a valid amount")
return False
p_fee = Fixed8.Zero()
if priority_fee is not None:
p_fee = priority_fee
if p_fee is False:
logger.debug("invalid fee")
return False
try:
token, tx, fee, results = test_token_send_from(wallet, token_str, from_addr, to_addr, amount)
except ValueError as e:
# invalid arguments or bad allowance
print(str(e))
return False
except Exception as e:
# we act as the final capturing place
print("Something really unexpected happened")
logger.error(traceback.format_exc())
return False
if tx and results:
vm_result = results[0].GetBigInteger()
if vm_result == 1:
print("\n-----------------------------------------------------------")
print("Transfer of %s %s from %s to %s" % (
string_from_amount(token, amount), token.symbol, from_addr, to_addr))
print("Transfer fee: %s " % (fee.value / Fixed8.D))
print("-------------------------------------------------------------\n")
comb_fee = p_fee + fee
if comb_fee != fee:
print(f"Priority Fee ({p_fee.value / Fixed8.D}) + Transfer Fee ({fee.value / Fixed8.D}) = {comb_fee.value / Fixed8.D}\n")
print("Enter your password to send to the network")
try:
passwd = prompt("[Password]> ", is_password=True)
except KeyboardInterrupt:
print("Transaction cancelled")
return False
if not wallet.ValidatePassword(passwd):
print("incorrect password")
return False
return InvokeContract(wallet, tx, comb_fee)
print(f"Could not transfer tokens. Virtual machine returned: {vm_result}")
return False
print(f"Could not transfer tokens. An unknown error occurred resulting in no Transaction object or VM output.")
return False
def command_desc(self):
p1 = ParameterDesc('token', 'token symbol or script hash')
p2 = ParameterDesc('from_addr', 'address to send token from')
p3 = ParameterDesc('to_addr', 'address to send token to')
p4 = ParameterDesc('amount', 'number of tokens to send')
p5 = ParameterDesc('--fee', 'Attach GAS amount to give your transaction priority (> 0.001) e.g. --fee=0.01', optional=True)
return CommandDesc('sendfrom', 'send a token on behalf of another account (requires approval)', [p1, p2, p3, p4, p5])
class CommandTokenHistory(CommandBase):
def __init__(self):
super().__init__()
def execute(self, arguments):
wallet = PromptData.Wallet
if len(arguments) != 1:
print("Please specify the required parameter")
return False
try:
token, events = token_history(wallet, arguments[0])
except ValueError as e:
print(str(e))
return False
if events:
addresses = wallet.Addresses
print("-----------------------------------------------------------")
print("Recent transaction history (last = more recent):")
for event in events:
if event.Type != 'transfer':
continue
if event.AddressFrom in addresses:
print(f"[{event.AddressFrom}]: Sent {string_from_amount(token, event.Amount)}"
f" {token.symbol} to {event.AddressTo}")
if event.AddressTo in addresses:
print(f"[{event.AddressTo}]: Received {string_from_amount(token, event.Amount)}"
f" {token.symbol} from {event.AddressFrom}")
print("-----------------------------------------------------------")
else:
print("History contains no transactions")
return True
def command_desc(self):
p1 = ParameterDesc('symbol', 'token symbol or script hash')
return CommandDesc('history', 'show transaction history', [p1])
class CommandTokenApprove(CommandBase):
def __init__(self):
super().__init__()
def execute(self, arguments):
wallet = PromptData.Wallet
if len(arguments) < 4:
print("Please specify the required parameters")
return False
arguments, priority_fee = PromptUtils.get_fee(arguments)
token_str = arguments[0]
from_addr = arguments[1]
to_addr = arguments[2]
try:
amount = float(arguments[3])
except ValueError:
print(f"{arguments[3]} is not a valid amount")
return False
p_fee = Fixed8.Zero()
if priority_fee is not None:
p_fee = priority_fee
if p_fee is False:
logger.debug("invalid fee")
return False
try:
token = _validate_nep5_args(wallet, token_str, from_addr, to_addr, amount)
except ValueError as e:
print(str(e))
return False
decimal_amount = amount_from_string(token, amount)
tx, fee, results = token.Approve(wallet, from_addr, to_addr, decimal_amount)
if tx and results:
if results[0].GetBigInteger() == 1:
print("\n-----------------------------------------------------------")
print(f"Approve allowance of {amount} {token.symbol} from {from_addr} to {to_addr}")
print(f"Invocation fee: {fee.value / Fixed8.D}")
print("-------------------------------------------------------------\n")
comb_fee = p_fee + fee
if comb_fee != fee:
print(f"Priority Fee ({p_fee.value / Fixed8.D}) + Invocation Fee ({fee.value / Fixed8.D}) = {comb_fee.value / Fixed8.D}\n")
print("Enter your password to send to the network")
try:
passwd = prompt("[Password]> ", is_password=True)
except KeyboardInterrupt:
print("Allowance approval cancelled")
return False
if not wallet.ValidatePassword(passwd):
print("incorrect password")
return False
return InvokeContract(wallet, tx, comb_fee)
print("Failed to approve tokens. Make sure you are entitled for approving.")
return False
def command_desc(self):
p1 = ParameterDesc('symbol', 'token symbol or script hash')
p2 = ParameterDesc('from_addr', 'address to send token from')
p3 = ParameterDesc('to_addr', 'address to send token to')
p4 = ParameterDesc('amount', 'number of tokens to send')
p5 = ParameterDesc('--fee', 'Attach GAS amount to give your transaction priority (> 0.001) e.g. --fee=0.01', optional=True)
return CommandDesc('approve', 'approve an allowance', [p1, p2, p3, p4, p5])
def handle_help(self, arguments):
super().handle_help(arguments)
print(
"\nThis is an optional NEP-5 command (now legacy).\nFor more information see https://github.com/neo-project/proposals/blob/c357f5965afc2155615b6b96c7d15da688f81982/nep-5.mediawiki#approve_optional")
class CommandTokenAllowance(CommandBase):
def __init__(self):
super().__init__()
def execute(self, arguments):
wallet = PromptData.Wallet
if len(arguments) != 3:
print("Please specify the required parameters")
return False
token_str = arguments[0]
from_addr = arguments[1]
to_addr = arguments[2]
try:
token = PromptUtils.get_token(wallet, token_str)
except ValueError as e:
print(str(e))
return False
try:
allowance = token_get_allowance(wallet, token_str, from_addr, to_addr)
print(f"{token.symbol} allowance for {from_addr} from {to_addr} : {allowance} ")
return True
except ValueError as e:
print(str(e))
return False
def command_desc(self):
p1 = ParameterDesc('symbol', 'token symbol or script hash')
p2 = ParameterDesc('from_addr', 'address to send token from')
p3 = ParameterDesc('to_addr', 'address to send token to')
return CommandDesc('allowance', 'get the amount an account can transfer from another account', [p1, p2, p3])
class CommandTokenMint(CommandBase):
def __init__(self):
super().__init__()
def execute(self, arguments):
wallet = PromptData.Wallet
if len(arguments) < 2:
print("Please specify the required parameters")
return False
# File: test/base/interactor.py
from __future__ import print_function
import os
import re
from json import dumps
from logging import getLogger
from requests import get, post, delete, patch
from six import StringIO
from six import text_type
from galaxy import util
from galaxy.tools.parser.interface import TestCollectionDef
from galaxy.util.bunch import Bunch
from galaxy.util.odict import odict
log = getLogger( __name__ )
# Off by default because it can pound the database pretty heavily
# and result in sqlite errors on larger tests or larger numbers of
# tests.
VERBOSE_ERRORS = util.asbool( os.environ.get( "GALAXY_TEST_VERBOSE_ERRORS", False ) )
UPLOAD_ASYNC = util.asbool( os.environ.get( "GALAXY_TEST_UPLOAD_ASYNC", True ) )
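# Illustration (not part of the original file): both switches are read from the
# environment, so a run can toggle them without code changes, e.g.
#   GALAXY_TEST_VERBOSE_ERRORS=1 GALAXY_TEST_UPLOAD_ASYNC=0 <your test command>
# util.asbool() treats the usual truthy strings ("1", "true", "yes", ...) as True.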
ERROR_MESSAGE_DATASET_SEP = "--------------------------------------"
def build_interactor( test_case, type="api" ):
interactor_class = GALAXY_INTERACTORS[ type ]
return interactor_class( test_case )
def stage_data_in_history( galaxy_interactor, all_test_data, history, shed_tool_id=None ):
# Upload any needed files
upload_waits = []
if UPLOAD_ASYNC:
for test_data in all_test_data:
upload_waits.append( galaxy_interactor.stage_data_async( test_data, history, shed_tool_id ) )
for upload_wait in upload_waits:
upload_wait()
else:
for test_data in all_test_data:
upload_wait = galaxy_interactor.stage_data_async( test_data, history, shed_tool_id )
upload_wait()
class GalaxyInteractorApi( object ):
def __init__( self, twill_test_case, test_user=None ):
self.twill_test_case = twill_test_case
self.api_url = "%s/api" % twill_test_case.url.rstrip("/")
self.master_api_key = twill_test_case.master_api_key
self.api_key = self.__get_user_key( twill_test_case.user_api_key, twill_test_case.master_api_key, test_user=test_user )
self.uploads = {}
def verify_output( self, history_id, jobs, output_data, output_testdef, shed_tool_id, maxseconds ):
outfile = output_testdef.outfile
attributes = output_testdef.attributes
name = output_testdef.name
self.wait_for_jobs( history_id, jobs, maxseconds )
hid = self.__output_id( output_data )
# TODO: Twill version verifies dataset is 'ok' in here.
self.verify_output_dataset( history_id=history_id, hda_id=hid, outfile=outfile, attributes=attributes, shed_tool_id=shed_tool_id )
primary_datasets = attributes.get( 'primary_datasets', {} )
if primary_datasets:
job_id = self._dataset_provenance( history_id, hid )[ "job_id" ]
outputs = self._get( "jobs/%s/outputs" % ( job_id ) ).json()
for designation, ( primary_outfile, primary_attributes ) in primary_datasets.items():
primary_output = None
for output in outputs:
if output[ "name" ] == '__new_primary_file_%s|%s__' % ( name, designation ):
primary_output = output
break
if not primary_output:
msg_template = "Failed to find primary dataset with designation [%s] for output with name [%s]"
msg_args = ( designation, name )
raise Exception( msg_template % msg_args )
primary_hda_id = primary_output[ "dataset" ][ "id" ]
self.verify_output_dataset( history_id, primary_hda_id, primary_outfile, primary_attributes, shed_tool_id=shed_tool_id )
def wait_for_jobs( self, history_id, jobs, maxseconds ):
for job in jobs:
self.wait_for_job( job[ 'id' ], history_id, maxseconds )
def verify_output_dataset( self, history_id, hda_id, outfile, attributes, shed_tool_id ):
fetcher = self.__dataset_fetcher( history_id )
self.twill_test_case.verify_hid(
outfile,
hda_id=hda_id,
attributes=attributes,
dataset_fetcher=fetcher,
shed_tool_id=shed_tool_id
)
self._verify_metadata( history_id, hda_id, attributes )
def _verify_metadata( self, history_id, hid, attributes ):
"""Check dataset metadata.
ftype on output maps to `file_ext` on the hda's API description, `name`, `info`,
and `dbkey` all map to the API description directly. Other metadata attributes
are assumed to be datatype-specific and mapped with a prefix of `metadata_`.
"""
metadata = attributes.get( 'metadata', {} ).copy()
for key, value in metadata.copy().items():
if key not in ['name', 'info']:
new_key = "metadata_%s" % key
metadata[ new_key ] = metadata[ key ]
del metadata[ key ]
elif key == "info":
metadata[ "misc_info" ] = metadata[ "info" ]
del metadata[ "info" ]
expected_file_type = attributes.get( 'ftype', None )
if expected_file_type:
metadata[ "file_ext" ] = expected_file_type
if metadata:
import time
time.sleep(5)
dataset = self._get( "histories/%s/contents/%s" % ( history_id, hid ) ).json()
for key, value in metadata.items():
try:
dataset_value = dataset.get( key, None )
def compare(val, expected):
if text_type(val) != text_type(expected):
msg = "Dataset metadata verification for [%s] failed, expected [%s] but found [%s]. Dataset API value was [%s]."
msg_params = ( key, value, dataset_value, dataset )
msg = msg % msg_params
raise Exception( msg )
if isinstance(dataset_value, list):
value = text_type(value).split(",")
if len(value) != len(dataset_value):
msg = "Dataset metadata verification for [%s] failed, expected [%s] but found [%s], lists differ in length. Dataset API value was [%s]."
msg_params = ( key, value, dataset_value, dataset )
msg = msg % msg_params
raise Exception( msg )
for val, expected in zip(dataset_value, value):
compare(val, expected)
else:
compare(dataset_value, value)
except KeyError:
msg = "Failed to verify dataset metadata, metadata key [%s] was not found." % key
raise Exception( msg )
def wait_for_job( self, job_id, history_id, maxseconds ):
self.twill_test_case.wait_for( lambda: not self.__job_ready( job_id, history_id ), maxseconds=maxseconds)
def get_job_stdio( self, job_id ):
job_stdio = self.__get_job_stdio( job_id ).json()
return job_stdio
def __get_job( self, job_id ):
return self._get( 'jobs/%s' % job_id )
def __get_job_stdio( self, job_id ):
return self._get( 'jobs/%s?full=true' % job_id )
def new_history( self ):
history_json = self._post( "histories", {"name": "test_history"} ).json()
return history_json[ 'id' ]
def __output_id( self, output_data ):
# Allow data structure coming out of tools API - {id: <id>, output_name: <name>, etc...}
# or simple id as comes out of workflow API.
try:
output_id = output_data.get( 'id' )
except AttributeError:
output_id = output_data
return output_id
def stage_data_async( self, test_data, history_id, shed_tool_id, async=True ):
fname = test_data[ 'fname' ]
tool_input = {
"file_type": test_data[ 'ftype' ],
"dbkey": test_data[ 'dbkey' ],
}
for elem in test_data.get('metadata', []):
tool_input["files_metadata|%s" % elem.get( 'name' )] = elem.get( 'value' )
composite_data = test_data[ 'composite_data' ]
if composite_data:
files = {}
for i, composite_file in enumerate( composite_data ):
file_name = self.twill_test_case.get_filename( composite_file.get( 'value' ), shed_tool_id=shed_tool_id )
files["files_%s|file_data" % i] = open( file_name, 'rb' )
tool_input.update({
# "files_%d|NAME" % i: name,
"files_%d|type" % i: "upload_dataset",
# TODO:
# "files_%d|space_to_tab" % i: composite_file.get( 'space_to_tab', False )
})
name = test_data[ 'name' ]
else:
file_name = self.twill_test_case.get_filename( fname, shed_tool_id=shed_tool_id )
name = test_data.get( 'name', None )
if not name:
name = os.path.basename( file_name )
tool_input.update({
"files_0|NAME": name,
"files_0|type": "upload_dataset",
})
files = {
"files_0|file_data": open( file_name, 'rb')
}
submit_response_object = self.__submit_tool( history_id, "upload1", tool_input, extra_data={"type": "upload_dataset"}, files=files )
submit_response = submit_response_object.json()
try:
dataset = submit_response["outputs"][0]
except KeyError:
raise Exception(submit_response)
# raise Exception(str(dataset))
hid = dataset['id']
self.uploads[ os.path.basename(fname) ] = self.uploads[ fname ] = self.uploads[ name ] = {"src": "hda", "id": hid}
return self.__wait_for_history( history_id )
def run_tool( self, testdef, history_id ):
# We need to handle the case where we've uploaded a valid compressed file since the upload
# tool will have uncompressed it on the fly.
inputs_tree = testdef.inputs.copy()
for key, value in inputs_tree.items():
values = [value] if not isinstance(value, list) else value
new_values = []
for value in values:
if isinstance( value, TestCollectionDef ):
hdca_id = self._create_collection( history_id, value )
new_values = [ dict( src="hdca", id=hdca_id ) ]
elif value in self.uploads:
new_values.append( self.uploads[ value ] )
else:
new_values.append( value )
inputs_tree[ key ] = new_values
# HACK: Flatten single-value lists. Required when using expand_grouping
for key, value in inputs_tree.items():
if isinstance(value, list) and len(value) == 1:
inputs_tree[key] = value[0]
submit_response = self.__submit_tool( history_id, tool_id=testdef.tool.id, tool_input=inputs_tree )
submit_response_object = submit_response.json()
try:
return Bunch(
inputs=inputs_tree,
outputs=self.__dictify_outputs( submit_response_object ),
output_collections=self.__dictify_output_collections( submit_response_object ),
jobs=submit_response_object[ 'jobs' ],
)
except KeyError:
message = "Error creating a job for these tool inputs - %s" % submit_response_object[ 'err_msg' ]
raise RunToolException( message, inputs_tree )
def _create_collection( self, history_id, collection_def ):
create_payload = dict(
name=collection_def.name,
element_identifiers=dumps( self._element_identifiers( collection_def ) ),
collection_type=collection_def.collection_type,
history_id=history_id,
)
return self._post( "dataset_collections", data=create_payload ).json()[ "id" ]
def _element_identifiers( self, collection_def ):
element_identifiers = []
for ( element_identifier, element ) in collection_def.elements:
if isinstance( element, TestCollectionDef ):
subelement_identifiers = self._element_identifiers( element )
element = dict(
name=element_identifier,
src="new_collection",
collection_type=element.collection_type,
element_identifiers=subelement_identifiers
)
else:
element_name = element[ 0 ]
element = self.uploads[ element[ 1 ] ].copy()
element[ "name" ] = element_name
element_identifiers.append( element )
return element_identifiers
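# Example of the structure built above (hypothetical element names and ids): a paired
# collection nested inside an outer collection yields element identifiers like
#   [{"name": "pair1", "src": "new_collection", "collection_type": "paired",
#     "element_identifiers": [{"src": "hda", "id": "<id>", "name": "forward"},
#                             {"src": "hda", "id": "<id>", "name": "reverse"}]}]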
def __dictify_output_collections( self, submit_response ):
output_collections_dict = odict()
for output_collection in submit_response[ 'output_collections' ]:
output_collections_dict[ output_collection.get("output_name") ] = output_collection
return output_collections_dict
def __dictify_outputs( self, datasets_object ):
# Convert outputs list to a dictionary that can be accessed by
# output_name so we can be more flexible about ordering of outputs
# but also allows fallback to legacy access as list mode.
outputs_dict = odict()
index = 0
for output in datasets_object[ 'outputs' ]:
outputs_dict[ index ] = outputs_dict[ output.get("output_name") ] = output
index += 1
# Adding each item twice (once with index for backward compat),
# overriding length to reflect the real number of outputs.
outputs_dict.__len__ = lambda: index
return outputs_dict
def output_hid( self, output_data ):
return output_data[ 'id' ]
def delete_history( self, history ):
return None
def __wait_for_history( self, history_id ):
import abc
import numpy as np
import scipy.integrate as scint
from . import sv_abc as sv
class OusvSchobelZhu1998(sv.SvABC):
"""
    The implementation of Schobel & Zhu (1998)'s inverse FT pricing formula for European
    options under the Ornstein-Uhlenbeck driven stochastic volatility process.
References:
        - Schobel, R., & Zhu, J. (1999). Stochastic Volatility With an Ornstein–Uhlenbeck Process: an Extension. Review of Finance, 3(1), 23–46. https://doi.org/10.1023/A:1009803506170
Examples:
>>> import pyfeng as pfex
>>> model = pfex.OusvSchobelZhu1998(0.2, mr=4, vov=0.1, rho=-0.7, intr=0.09531)
>>> model.price(100, 100, texp=np.array([1, 5, 10]))
array([13.21493, 40.79773, 62.76312])
>>> model = pfex.OusvSchobelZhu1998(0.25, mr=8, vov=0.3, rho=-0.6, intr=0.09531)
>>> model.price(np.array([90, 100, 110]), 100, texp=1)
array([21.41873, 15.16798, 10.17448])
"""
model_type = "OUSV"
var_process = False
def D_B_C(self, s1, s2, s3, texp):
# implement the formula for D(t,T), B(t,T), C(t,T) in paper appendix
mr, theta, vov = self.mr, self.theta, self.vov
gamma1 = np.sqrt(2 * vov**2 * s1 + mr**2)
gamma2 = (mr - 2 * vov**2 * s3) / gamma1
gamma3 = mr**2 * theta - s2 * vov**2
sinh = np.sinh(gamma1 * texp)
cosh = np.cosh(gamma1 * texp)
sincos = sinh + gamma2 * cosh
cossin = cosh + gamma2 * sinh
ktg3 = mr * theta * gamma1 - gamma2 * gamma3
s2g3 = vov**2 * gamma1**3
D = (mr - gamma1 * sincos / cossin) / vov**2
B = ((ktg3 + gamma3 * sincos) / cossin - mr * theta * gamma1) / (
vov**2 * gamma1
)
C = (
-0.5 * np.log(cossin)
+ 0.5 * mr * texp
+ ((mr * theta * gamma1)**2 - gamma3**2)
/ (2 * s2g3)
* (sinh / cossin - gamma1 * texp)
+ ktg3 * gamma3 / s2g3 * ((cosh - 1) / cossin)
)
return D, B, C
def f_1(self, phi, texp):
# implement the formula (12)
mr, theta, vov, rho = self.mr, self.theta, self.vov, self.rho
tmp = 1 + 1j * phi
s1 = 0.5 * tmp * (-tmp * (1 - rho**2) + (1 - 2 * mr * rho / vov))
s2 = tmp * mr * theta * rho / vov
s3 = 0.5 * tmp * rho / vov
res = -0.5 * rho * tmp * (self.sigma**2 / vov + vov * texp)
D, B, C = self.D_B_C(s1, s2, s3, texp)
res += (D/2 * self.sigma + B) * self.sigma + C
return np.exp(res)
def f_2(self, phi, texp):
# implement the formula (13)
mr, theta, vov, rho = self.mr, self.theta, self.vov, self.rho
s1 = 0.5 * phi * (phi * (1 - rho**2) + 1j * (1 - 2 * mr * rho / vov))
s2 = 1j * phi * mr * theta * rho / vov
s3 = 0.5 * 1j * phi * rho / vov
res = -0.5 * 1j * phi * rho * (self.sigma**2 / vov + vov * texp)
D, B, C = self.D_B_C(s1, s2, s3, texp)
res += (D/2 * self.sigma + B) * self.sigma + C
return np.exp(res)
def price(self, strike, spot, texp, cp=1):
# implement the formula (14) and (15)
fwd, df, _ = self._fwd_factor(spot, texp)
kk = strike / fwd
log_k = np.log(kk)
J, h = 100001, 0.001 # need to take these as parameters
phi = (np.arange(J)[:, None] + 1) * h # shape=(J,1)
ff = self.f_1(phi, texp) - kk * self.f_2(phi, texp)
## Need to convert using iFFT later
price = scint.simps(
(ff * np.exp(-1j * phi * log_k) / (1j * phi)).real,
dx=h, axis=0,
) / np.pi
price += (1 - kk) / 2 * np.where(cp > 0, 1, -1)
if len(price) == 1:
price = price[0]
return df * fwd * price
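# Minimal usage sketch (mirrors the docstring examples above; values are illustrative,
# not verified outputs). Calls and puts share the same Fourier-inversion integral and
# differ only through the (1 - kk)/2 parity term toggled by `cp`, so call - put equals
# the discounted forward minus strike.
def _example_ousv_put_call_parity():
    model = OusvSchobelZhu1998(0.2, mr=4, vov=0.1, rho=-0.7, intr=0.09531)
    call = model.price(100, 100, texp=1, cp=1)
    put = model.price(100, 100, texp=1, cp=-1)
    return call, put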
class OusvMcABC(sv.SvABC, sv.CondMcBsmABC, abc.ABC):
model_type = "OUSV"
var_process = False
@abc.abstractmethod
def cond_states(self, vol_0, texp):
"""
        Final volatility and time-averaged variance/volatility over texp, given vol_0
        The integrated variance and volatility are normalized by texp (i.e., time averages)
Args:
vol_0: initial volatility
texp: time-to-expiry
Returns:
            (vol_final, var_mean, vol_mean)
"""
        raise NotImplementedError
def vol_step(self, vol_0, dt):
"""
Stepping volatility according to OU process dynamics
Args:
vol_0: initial volatility
dt: time step
Returns:
volatility after dt
"""
mr_t = self.mr * dt
e_mr = np.exp(-mr_t)
sinh = np.sinh(mr_t)
zz = self.rv_normal()
vol_t = self.vov * np.sqrt(e_mr * sinh / self.mr) * zz
vol_t += self.theta + (vol_0 - self.theta) * e_mr
return vol_t
def cond_spot_sigma(self, vol_0, texp):
vol_texp, var_mean, vol_mean = self.cond_states(vol_0, texp)
spot_cond = (vol_texp**2 - vol_0**2) / (2 * self.vov) - self.vov * texp / 2 \
- (self.mr * self.theta / self.vov) * texp * vol_mean \
+ (self.mr / self.vov - self.rho / 2) * texp * var_mean
np.exp(self.rho * spot_cond, out=spot_cond)
sigma_cond = np.sqrt((1 - self.rho**2) * var_mean) / vol_0
return spot_cond, sigma_cond
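    # Reading of the two factors above (a sketch of the conditional-MC idea, assuming
    # the usual contract of sv.CondMcBsmABC): `spot_cond` is the exp(rho * ...) forward
    # adjustment for each simulated volatility path, and `sigma_cond` is the
    # path-conditional BSM volatility ratio sqrt((1 - rho^2) * var_mean) / vol_0, so
    # each path can be priced with a plain BSM formula and the results averaged.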
class OusvMcTimeStep(OusvMcABC):
"""
OUSV model with conditional Monte-Carlo simulation
The SDE of SV is: d sigma_t = mr (theta - sigma_t) dt + vov dB_T
"""
def vol_paths(self, tobs):
# 2d array of (time, path) including t=0
exp_tobs = np.exp(self.mr * tobs)
bm_path = self._bm_incr(exp_tobs**2 - 1, cum=True) # B_s (0 <= s <= 1)
sigma_t = self.theta + (
self.sigma - self.theta + self.vov / np.sqrt(2 * self.mr) * bm_path
) / exp_tobs[:, None]
sigma_t = np.insert(sigma_t, 0, self.sigma, axis=0)
return sigma_t
def cond_states_full(self, sig_0, texp):
tobs = self.tobs(texp)
n_dt = len(tobs)
sigma_paths = self.vol_paths(tobs)
s_t = sigma_paths[-1, :]
u_t_std = scint.simps(sigma_paths, dx=1, axis=0) / n_dt
v_t_std = scint.simps(sigma_paths**2, dx=1, axis=0) / n_dt
return s_t, v_t_std, u_t_std
def cond_states(self, vol_0, texp):
tobs = self.tobs(texp)
n_dt = len(tobs)
dt = np.diff(tobs, prepend=0)
# precalculate the Simpson's rule weight
weight = np.ones(n_dt + 1)
weight[1:-1:2] = 4
weight[2:-1:2] = 2
weight /= weight.sum()
vol_t = np.full(self.n_path, vol_0)
mean_vol = weight[0] * vol_t
mean_var = weight[0] * vol_t**2
for i in range(n_dt):
vol_t = self.vol_step(vol_t, dt[i])
mean_vol += weight[i+1] * vol_t
mean_var += weight[i+1] * vol_t**2
return vol_t, mean_var, mean_vol
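    # Example of the weights built above: for n_dt = 4 the unnormalized composite
    # Simpson weights are [1, 4, 2, 4, 1]; after dividing by their sum (12) the loop
    # accumulates time averages of vol_t and vol_t**2 over [0, texp].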
class OusvMcChoi2023(OusvMcABC):
def set_mc_params(self, n_path=10000, dt=None, rn_seed=None, antithetic=True, n_sin=2, n_sin_max=None):
"""
Set MC parameters
Args:
n_path: number of paths
dt: time step for Euler/Milstein steps
rn_seed: random number seed
            antithetic: use antithetic variates if True
            n_sin: number of sine terms in the expansion (must be even)
            n_sin_max: maximum number of sine terms (must be even); defaults to n_sin
"""
assert n_sin % 2 == 0
if n_sin_max is not None:
assert n_sin_max % 2 == 0
self.n_sin = n_sin
self.n_sin_max = n_sin_max or n_sin
super().set_mc_params(n_path, dt, rn_seed, antithetic)
@classmethod
def _a2sum(cls, mr_t, ns=0, odd=None):
if odd == 2: # even
rv = cls._a2sum(mr_t / 2) / 2**2
elif odd == 1: # odd
rv = (mr_t / np.tanh(mr_t) - 1) / mr_t**2 - cls._a2sum(mr_t / 2) / 2**2
else: # all
rv = (mr_t / np.tanh(mr_t) - 1) / mr_t**2
if ns == 0:
return rv
n_pi_2 = (np.arange(1, ns + 1) * np.pi)**2
a2 = 2 / (mr_t**2 + n_pi_2)
if odd == 2: # even
rv -= np.sum(a2[1::2])
elif odd == 1: # odd
rv -= np.sum(a2[::2])
else: # all
rv -= np.sum(a2)
return rv
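    # The closed form above appears to use the classical expansion
    #   coth(x) = 1/x + sum_{n>=1} 2x / (x^2 + (n*pi)^2),
    # so sum_{n>=1} 2 / (mr_t^2 + (n*pi)^2) = (mr_t/tanh(mr_t) - 1) / mr_t^2, which is
    # the `rv` computed for the "all" branch; subtracting the first `ns` terms leaves
    # the tail of the series.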
@classmethod
def _a2overn2sum(cls, mr_t, ns=0, odd=None):
if odd == 2: # even
rv = cls._a2overn2sum(mr_t / 2) / 2**4
elif odd == 1: # odd
rv = (1/3 - (mr_t / np.tanh(mr_t) - 1) / mr_t**2) / mr_t**2 - cls._a2overn2sum(mr_t / 2) / 2**4
else: # all
rv = (1/3 - (mr_t / np.tanh(mr_t) - 1) / mr_t**2) / mr_t**2
if ns == 0:
return rv
n_pi_2 = (np.arange(1, ns + 1) * np.pi)**2
a2overn2 = 2 / n_pi_2 / (mr_t**2 + n_pi_2)
if odd == 2: # even
rv -= np.sum(a2overn2[1::2])
elif odd == 1: # odd
rv -= np.sum(a2overn2[::2])
else: # all
rv -= np.sum(a2overn2)
return rv
@classmethod
def _a4sum(cls, mr_t, ns=0, odd=None):
if odd == 2: # even
rv = cls._a4sum(mr_t / 2) / 2**4
elif odd == 1: # odd
rv = (mr_t / np.tanh(mr_t) + mr_t**2 / np.sinh(mr_t)**2 - 2) / mr_t**4 - cls._a4sum(mr_t / 2) / 2**4
else: # all
rv = (mr_t / np.tanh(mr_t) + mr_t**2 / np.sinh(mr_t)**2 - 2) / mr_t**4
if ns == 0:
            return rv
# -*- coding: utf-8 -*-
"""This module provides access to the auth REST api of Camunda."""
from __future__ import annotations
import typing
import dataclasses
import enum
import datetime as dt
import pycamunda
import pycamunda.base
import pycamunda.resource
from pycamunda.request import QueryParameter, PathParameter, BodyParameter
URL_SUFFIX = '/auth'
__all__ = ['AuthorizationType', 'GetList', 'Count', 'Get', 'Check', 'Options', 'Update', 'Create',
'Delete']
class AuthorizationType(enum.IntEnum):
global_ = 0
grant = 1
revoke = 2
@dataclasses.dataclass
class Authorization:
"""Data class of authorization as returned by the REST api of Camunda."""
id_: str
type_: AuthorizationType
permissions: typing.Tuple[str]
user_id: str
group_id: str
resource_type: pycamunda.resource.ResourceType
resource_id: str
links: typing.Tuple[pycamunda.resource.Link] = None
root_process_instance_id: str = None
removal_time: dt.datetime = None
@classmethod
def load(cls, data: typing.Mapping[str, typing.Any]) -> Authorization:
authorization = cls(
id_=data['id'],
type_=data['type'],
permissions=data['permissions'],
user_id=data['userId'],
group_id=data['groupId'],
resource_type=pycamunda.resource.ResourceType(data['resourceType']),
resource_id=data['resourceId']
)
try:
authorization.links = tuple(
pycamunda.resource.Link.load(data=link) for link in data['links']
)
except KeyError:
pass
try:
authorization.removal_time = pycamunda.base.from_isoformat(data['removalTime'])
except KeyError:
pass
try:
authorization.root_process_instance_id = data['rootProcessInstanceId']
except KeyError:
pass
return authorization
@dataclasses.dataclass
class Permission:
"""Data class of permission as returned by the REST api of Camunda."""
permission_name: str
resource_name: str
resource_id: str
authorized: bool
@classmethod
def load(cls, data: typing.Mapping[str, typing.Any]) -> Permission:
return cls(
permission_name=data['permissionName'],
resource_name=data['resourceName'],
resource_id=data['resourceId'],
authorized=data['authorized']
)
class GetList(pycamunda.base.CamundaRequest):
id_ = QueryParameter('id')
type_ = QueryParameter('type')
user_id_in = QueryParameter('userIdIn')
group_id_in = QueryParameter('groupIdIn')
resource_type = QueryParameter('resourceType')
resource_id = QueryParameter('resourceId')
sort_by = QueryParameter(
'sortBy',
mapping={
'resource_type': 'resourceType',
'resource_id': 'resourceId'
}
)
ascending = QueryParameter(
'sortOrder',
mapping={True: 'asc', False: 'desc'},
provide=lambda self, obj, obj_type: vars(obj).get('sort_by', None) is not None
)
first_result = QueryParameter('firstResult')
max_results = QueryParameter('maxResults')
def __init__(
self,
url: str,
id_: str = None,
type_: typing.Union[str, AuthorizationType] = None,
user_id_in: typing.Iterable[str] = None,
group_id_in: typing.Iterable[str] = None,
resource_type: typing.Union[str, pycamunda.resource.ResourceType] = None,
resource_id: int = None,
sort_by: str = None,
ascending: bool = True,
first_result: int = None,
max_results: int = None
):
"""Query for a list of authorizations using a list of parameters. The size of the result set
can be retrieved by using the Get Count request.
:param url: Camunda Rest engine URL.
:param id_: Filter by the id of the authorization.
:param type_: Filter by the authorization type.
:param user_id_in: Filter whether the user id is one of multiple ones.
:param group_id_in: Filter whether the group id is one of multiple ones.
:param resource_type: Filter by the resource type.
:param resource_id: Filter by the resource id.
        :param sort_by: Sort the results by `resource_type` or `resource_id`.
:param ascending: Sort order.
:param first_result: Pagination of results. Index of the first result to return.
:param max_results: Pagination of results. Maximum number of results to return.
"""
super().__init__(url=url + URL_SUFFIX)
self.id_ = id_
self.type_ = None
if type_ is not None:
self.type_ = AuthorizationType(type_)
self.user_id_in = user_id_in
self.group_id_in = group_id_in
self.resource_type = None
        if resource_type is not None:
self.resource_type = pycamunda.resource.ResourceType(resource_type)
self.resource_id = resource_id
self.sort_by = sort_by
self.ascending = ascending
self.first_result = first_result
self.max_results = max_results
def __call__(self, *args, **kwargs) -> typing.Tuple[Authorization]:
"""Send the request."""
response = super().__call__(pycamunda.base.RequestMethod.GET, *args, **kwargs)
return tuple(Authorization.load(auth_json) for auth_json in response.json())
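    # Hedged usage sketch (the engine URL and user id are placeholders):
    #
    #   get_auths = GetList(url='http://localhost:8080/engine-rest',
    #                       type_=AuthorizationType.grant, user_id_in=['demo'])
    #   for auth in get_auths():
    #       print(auth.id_, auth.permissions)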
class Count(pycamunda.base.CamundaRequest):
id_ = QueryParameter('id')
type_ = QueryParameter('type')
user_id_in = QueryParameter('userIdIn')
group_id_in = QueryParameter('groupIdIn')
resource_type = QueryParameter('resourceType')
resource_id = QueryParameter('resourceId')
def __init__(
self,
url: str,
id_: str = None,
type_: typing.Union[str, AuthorizationType] = None,
user_id_in: typing.Iterable[str] = None,
group_id_in: typing.Iterable[str] = None,
resource_type: typing.Union[str, pycamunda.resource.ResourceType] = None,
resource_id: int = None,
):
"""Get the size of the result returned by the Get List request.
:param url: Camunda Rest engine URL.
:param id_: Filter by the id of the authorization.
:param type_: Filter by the authorization type.
:param user_id_in: Filter whether the user id is one of multiple ones.
:param group_id_in: Filter whether the group id is one of multiple ones.
:param resource_type: Filter by the resource type.
:param resource_id: Filter by the resource id.
"""
super().__init__(url=url + URL_SUFFIX + '/count')
self.id_ = id_
self.type_ = None
if type_ is not None:
self.type_ = AuthorizationType(type_)
self.user_id_in = user_id_in
self.group_id_in = group_id_in
self.resource_type = None
        if resource_type is not None:
self.resource_type = pycamunda.resource.ResourceType(resource_type)
self.resource_id = resource_id
def __call__(self, *args, **kwargs) -> int:
"""Send the request."""
response = super().__call__(pycamunda.base.RequestMethod.GET, *args, **kwargs)
return int(response.json()['count'])
class Get(pycamunda.base.CamundaRequest):
id_ = PathParameter('id')
def __init__(self, url: str, id_: str):
"""Get an auth.
:param url: Camunda Rest engine URL.
:param id_: Id of the authorization.
"""
super().__init__(url=url + URL_SUFFIX + '/{id}')
self.id_ = id_
def __call__(self, *args, **kwargs) -> Authorization:
"""Send the request."""
response = super().__call__(pycamunda.base.RequestMethod.GET, *args, **kwargs)
return Authorization.load(data=response.json())
class Check(pycamunda.base.CamundaRequest):
permission_name = QueryParameter('permissionName')
permission_value = QueryParameter('permissionValue')
resource_name = QueryParameter('resourceName')
resource_type = QueryParameter('resourceType')
resource_id = QueryParameter('resourceId')
def __init__(
self,
url: str,
permission_name: str,
permission_value: int,
resource_name: str,
resource_type: typing.Union[str, pycamunda.resource.ResourceType],
resource_id: str = None
):
"""Check the authorization of the currently authenticated user.
:param url: Camunda Rest engine URL.
:param permission_name: Name of the permission to check.
:param permission_value: Value of the permission to check for.
:param resource_name: Name of the resource to check for.
:param resource_type: Type of the resource to check for.
:param resource_id: Id of the resource to check for.
"""
super().__init__(url=url + URL_SUFFIX + '/check')
self.permission_name = permission_name
self.permission_value = permission_value
self.resource_name = resource_name
self.resource_type = resource_type
self.resource_id = resource_id
def __call__(self, *args, **kwargs) -> Permission:
"""Send the request."""
response = super().__call__(pycamunda.base.RequestMethod.GET, *args, **kwargs)
return Permission.load(data=response.json())
class Options(pycamunda.base.CamundaRequest):
id_ = PathParameter('id')
def __init__(self, url: str, id_: str = None):
"""Get a list of options the currently authenticated user can perform on the authorization
resource.
:param url: Camunda Rest engine URL.
:param id_: Id of the authorization
"""
super().__init__(url=url + URL_SUFFIX + '{path}')
self.id_ = id_
@property
def url(self):
return self._url.format(path='/{id}'.format(id=self.id_) if self.id_ is not None else '')
def __call__(self, *args, **kwargs) -> pycamunda.resource.ResourceOptions:
"""Send the request."""
response = super().__call__(pycamunda.base.RequestMethod.OPTIONS, *args, **kwargs)
return pycamunda.resource.ResourceOptions.load(data=response.json())
class Create(pycamunda.base.CamundaRequest):
type_ = BodyParameter('type')
permissions = BodyParameter('permissions')
user_id = BodyParameter('userId')
group_id = BodyParameter('groupId')
resource_type = BodyParameter('resourceType')
resource_id = BodyParameter('resourceId')
def __init__(
self,
url: str,
type_: typing.Union[int, AuthorizationType],
permissions: typing.Iterable[str],
resource_type: typing.Union[str, pycamunda.resource.ResourceType],
resource_id: str,
user_id: str = None,
group_id: str = None
):
"""Create an auth.
:param url: Camunda Rest engine URL.
        :param type_: Type of the authorization.
        :param permissions: Permissions provided by this authorization. A permission can be
            'READ' or 'CREATE', for example.
:param user_id: Id of the user this authorization is for. The value '*' means all users.
:param group_id: Id of the group this authorization is for.
:param resource_type: Resource type this authorization is for.
:param resource_id: Id of the resource. The value '*' means all instances of a resource.
"""
super().__init__(url=url + URL_SUFFIX + '/create')
self.type_ = type_
self.permissions = permissions
self.user_id = user_id
self.group_id = group_id
self.resource_type = resource_type
self.resource_id = resource_id
def __call__(self, *args, **kwargs) -> Authorization:
"""Send the request."""
assert (self.user_id is not None) != (self.group_id is not None), (
'Either \'user_id\' or \'group_id\' has to be provided, not both.'
)
response = super().__call__(pycamunda.base.RequestMethod.POST, *args, **kwargs)
return Authorization.load(data=response.json())
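    # Hedged usage sketch (URL, ids and the resource type value are placeholders;
    # exactly one of user_id / group_id may be set, as asserted above):
    #
    #   create = Create(url='http://localhost:8080/engine-rest',
    #                   type_=AuthorizationType.grant, permissions=['READ'],
    #                   resource_type=0,  # placeholder resource type code
    #                   resource_id='*', user_id='demo')
    #   new_auth = create()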
class Update(pycamunda.base.CamundaRequest):
id_ = PathParameter('id')
permissions = BodyParameter('permissions')
user_id = BodyParameter('userId')
group_id = BodyParameter('groupId')
resource_type = BodyParameter('resourceType')
resource_id = BodyParameter('resourceId')
def __init__(
self,
url: str,
id_: str,
permissions: typing.Iterable[str],
resource_type: typing.Union[str, pycamunda.resource.ResourceType],
resource_id: str,
user_id: str = None,
group_id: str = None
):
"""Update an auth.
:param url: Camunda Rest engine URL.
:param id_: Id of the authorization.
        :param permissions: Permissions provided by this authorization. A permission can be
            'READ' or 'CREATE', for example.
:param user_id: Id of the user this authorization is for. The value '*' means all users.
:param group_id: Id of the group this authorization is for.
:param resource_type: Resource type this authorization is for.
:param resource_id: Id of the resource. The value '*' means all instances of a resource.
"""
super().__init__(url=url + URL_SUFFIX + '/{id}')
self.id_ = id_
self.permissions = permissions
self.user_id = user_id
self.group_id = group_id
self.resource_type = resource_type
self.resource_id = resource_id
def __call__(self, *args, **kwargs) -> None:
"""Send the request."""
assert (self.user_id is not None) != (self.group_id is not None), (
'Either \'user_id\' or \'group_id\' has to be provided, not both.'
)
super().__call__(pycamunda.base.RequestMethod.PUT, *args, **kwargs)
class Delete(pycamunda.base.CamundaRequest):
id_ = PathParameter('id')
    def __init__(self, url: str, id_: str):
        """Delete an auth.
        :param url: Camunda Rest engine URL.
        :param id_: Id of the authorization.
        """
        super().__init__(url=url + URL_SUFFIX + '/{id}')
        self.id_ = id_
# Source: jkalleberg/NEAT, source/SequenceContainer.py
import random
import copy
import pathlib
import bisect
import pickle
import sys
import numpy as np
from Bio.Seq import Seq
from source.neat_cigar import CigarString
from source.probability import DiscreteDistribution, poisson_list
# TODO This whole file is in desperate need of refactoring
"""
Constants needed for analysis
"""
MAX_ATTEMPTS = 100 # max attempts to insert a mutation into a valid position
MAX_MUTFRAC = 0.3 # the maximum percentage of a window that can contain mutations
NUCL = ['A', 'C', 'G', 'T']
TRI_IND = {'AA': 0, 'AC': 1, 'AG': 2, 'AT': 3, 'CA': 4, 'CC': 5, 'CG': 6, 'CT': 7,
'GA': 8, 'GC': 9, 'GG': 10, 'GT': 11, 'TA': 12, 'TC': 13, 'TG': 14, 'TT': 15}
NUC_IND = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
ALL_TRI = [NUCL[i] + NUCL[j] + NUCL[k] for i in range(len(NUCL)) for j in range(len(NUCL)) for k in range(len(NUCL))]
ALL_IND = {ALL_TRI[i]: i for i in range(len(ALL_TRI))}
# DEBUG
IGNORE_TRINUC = False
# percentile resolution used for fraglen quantizing
COV_FRAGLEN_PERCENTILE = 10.
LARGE_NUMBER = 9999999999
"""
DEFAULT MUTATION MODELS
"""
DEFAULT_1_OVERALL_MUT_RATE = 0.001
DEFAULT_1_HOMOZYGOUS_FREQ = 0.010
DEFAULT_1_INDEL_FRACTION = 0.05
DEFAULT_1_INS_VS_DEL = 0.6
DEFAULT_1_INS_LENGTH_VALUES = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
DEFAULT_1_INS_LENGTH_WEIGHTS = [0.4, 0.2, 0.1, 0.05, 0.05, 0.05, 0.05, 0.034, 0.033, 0.033]
DEFAULT_1_DEL_LENGTH_VALUES = [1, 2, 3, 4, 5]
DEFAULT_1_DEL_LENGTH_WEIGHTS = [0.3, 0.2, 0.2, 0.2, 0.1]
example_matrix_1 = [[0.0, 0.15, 0.7, 0.15],
[0.15, 0.0, 0.15, 0.7],
[0.7, 0.15, 0.0, 0.15],
[0.15, 0.7, 0.15, 0.0]]
DEFAULT_1_TRI_FREQS = [copy.deepcopy(example_matrix_1) for _ in range(16)]
DEFAULT_1_TRINUC_BIAS = [1. / float(len(ALL_TRI)) for _ in ALL_TRI]
DEFAULT_MODEL_1 = [DEFAULT_1_OVERALL_MUT_RATE,
DEFAULT_1_HOMOZYGOUS_FREQ,
DEFAULT_1_INDEL_FRACTION,
DEFAULT_1_INS_VS_DEL,
DEFAULT_1_INS_LENGTH_VALUES,
DEFAULT_1_INS_LENGTH_WEIGHTS,
DEFAULT_1_DEL_LENGTH_VALUES,
DEFAULT_1_DEL_LENGTH_WEIGHTS,
DEFAULT_1_TRI_FREQS,
DEFAULT_1_TRINUC_BIAS]
DEFAULT_2_OVERALL_MUT_RATE = 0.002
DEFAULT_2_HOMOZYGOUS_FREQ = 0.200
DEFAULT_2_INDEL_FRACTION = 0.1
DEFAULT_2_INS_VS_DEL = 0.3
DEFAULT_2_INS_LENGTH_VALUES = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
DEFAULT_2_INS_LENGTH_WEIGHTS = [0.1, 0.1, 0.2, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05]
# noinspection DuplicatedCode
DEFAULT_2_DEL_LENGTH_VALUES = [1, 2, 3, 4, 5]
DEFAULT_2_DEL_LENGTH_WEIGHTS = [0.3, 0.2, 0.2, 0.2, 0.1]
example_matrix_2 = [[0.0, 0.15, 0.7, 0.15],
[0.15, 0.0, 0.15, 0.7],
[0.7, 0.15, 0.0, 0.15],
[0.15, 0.7, 0.15, 0.0]]
DEFAULT_2_TRI_FREQS = [copy.deepcopy(example_matrix_2) for _ in range(16)]
DEFAULT_2_TRINUC_BIAS = [1. / float(len(ALL_TRI)) for _ in ALL_TRI]
DEFAULT_MODEL_2 = [DEFAULT_2_OVERALL_MUT_RATE,
DEFAULT_2_HOMOZYGOUS_FREQ,
DEFAULT_2_INDEL_FRACTION,
DEFAULT_2_INS_VS_DEL,
DEFAULT_2_INS_LENGTH_VALUES,
DEFAULT_2_INS_LENGTH_WEIGHTS,
DEFAULT_2_DEL_LENGTH_VALUES,
DEFAULT_2_DEL_LENGTH_WEIGHTS,
DEFAULT_2_TRI_FREQS,
DEFAULT_2_TRINUC_BIAS]
class SequenceContainer:
"""
Container for reference sequences, applies mutations
"""
def __init__(self, x_offset, sequence, ploidy, window_overlap, read_len, mut_models=None, mut_rate=None,
only_vcf=False):
# initialize basic variables
self.only_vcf = only_vcf
self.x = x_offset
self.ploidy = ploidy
self.read_len = read_len
self.sequences = [Seq(str(sequence)) for _ in range(self.ploidy)]
self.seq_len = len(sequence)
self.indel_list = [[] for _ in range(self.ploidy)]
self.snp_list = [[] for _ in range(self.ploidy)]
self.all_cigar = [[] for _ in range(self.ploidy)]
self.fm_pos = [[] for _ in range(self.ploidy)]
self.fm_span = [[] for _ in range(self.ploidy)]
# Blacklist explanation:
# black_list[ploid][pos] = 0 safe to insert variant here
# black_list[ploid][pos] = 1 indel inserted here
# black_list[ploid][pos] = 2 snp inserted here
# black_list[ploid][pos] = 3 invalid position for various processing reasons
self.black_list = [np.zeros(self.seq_len, dtype='<i4') for _ in range(self.ploidy)]
# disallow mutations to occur on window overlap points
self.win_buffer = window_overlap
for p in range(self.ploidy):
self.black_list[p][-self.win_buffer] = 3
self.black_list[p][-self.win_buffer - 1] = 3
# initialize mutation models
if not mut_models:
default_model = [copy.deepcopy(DEFAULT_MODEL_1) for _ in range(self.ploidy)]
self.model_data = default_model[:self.ploidy]
else:
if len(mut_models) != self.ploidy:
print('\nError: Number of mutation models received is not equal to specified ploidy\n')
sys.exit(1)
self.model_data = copy.deepcopy(mut_models)
# do we need to rescale mutation frequencies?
mut_rate_sum = sum([n[0] for n in self.model_data])
self.mut_rescale = mut_rate
if self.mut_rescale is None:
self.mut_scalar = 1.0
else:
            self.mut_scalar = float(self.mut_rescale) / (mut_rate_sum / float(len(self.model_data)))  # true division; the rescale factor may be fractional
# how are mutations spread to each ploid, based on their specified mut rates?
self.ploid_mut_frac = [float(n[0]) / mut_rate_sum for n in self.model_data]
self.ploid_mut_prior = DiscreteDistribution(self.ploid_mut_frac, range(self.ploidy))
# init mutation models
#
# self.models[ploid][0] = average mutation rate
# self.models[ploid][1] = p(mut is homozygous | mutation occurs)
# self.models[ploid][2] = p(mut is indel | mut occurs)
# self.models[ploid][3] = p(insertion | indel occurs)
# self.models[ploid][4] = distribution of insertion lengths
# self.models[ploid][5] = distribution of deletion lengths
# self.models[ploid][6] = distribution of trinucleotide SNP transitions
# self.models[ploid][7] = p(trinuc mutates)
self.models = []
for n in self.model_data:
self.models.append([self.mut_scalar * n[0], n[1], n[2], n[3], DiscreteDistribution(n[5], n[4]),
DiscreteDistribution(n[7], n[6]), []])
for m in n[8]:
# noinspection PyTypeChecker
self.models[-1][6].append([DiscreteDistribution(m[0], NUCL), DiscreteDistribution(m[1], NUCL),
DiscreteDistribution(m[2], NUCL), DiscreteDistribution(m[3], NUCL)])
self.models[-1].append([m for m in n[9]])
# initialize poisson attributes
self.indel_poisson, self.snp_poisson = self.init_poisson()
# sample the number of variants that will be inserted into each ploid
self.indels_to_add = [n.sample() for n in self.indel_poisson]
self.snps_to_add = [n.sample() for n in self.snp_poisson]
# initialize trinuc snp bias
# compute mutation positional bias given trinucleotide strings of the sequence (ONLY AFFECTS SNPs)
#
# note: since indels are added before snps, it's possible these positional biases aren't correctly utilized
# at positions affected by indels. At the moment I'm going to consider this negligible.
trinuc_snp_bias = [[0. for _ in range(self.seq_len)] for _ in range(self.ploidy)]
self.trinuc_bias = [None for _ in range(self.ploidy)]
for p in range(self.ploidy):
for i in range(self.win_buffer + 1, self.seq_len - 1):
trinuc_snp_bias[p][i] = self.models[p][7][ALL_IND[str(self.sequences[p][i - 1:i + 2])]]
self.trinuc_bias[p] = DiscreteDistribution(trinuc_snp_bias[p][self.win_buffer + 1:self.seq_len - 1],
range(self.win_buffer + 1, self.seq_len - 1))
# initialize coverage attributes
self.window_size = None
self.coverage_distribution = None
self.fraglen_ind_map = None
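    # Hedged usage sketch (toy values; only_vcf=True skips the coverage setup, and
    # mut_models=None falls back to DEFAULT_MODEL_1 for every ploid):
    #
    #   from Bio.Seq import Seq
    #   container = SequenceContainer(x_offset=0, sequence=Seq("ACGT" * 50), ploidy=2,
    #                                 window_overlap=10, read_len=20, only_vcf=True)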
def update_basic_vars(self, x_offset, sequence, ploidy, window_overlap, read_len):
self.x = x_offset
self.ploidy = ploidy
self.read_len = read_len
self.sequences = [Seq(str(sequence)) for _ in range(self.ploidy)]
self.seq_len = len(sequence)
self.indel_list = [[] for _ in range(self.ploidy)]
self.snp_list = [[] for _ in range(self.ploidy)]
self.all_cigar = [[] for _ in range(self.ploidy)]
self.fm_pos = [[] for _ in range(self.ploidy)]
self.fm_span = [[] for _ in range(self.ploidy)]
self.black_list = [np.zeros(self.seq_len, dtype='<i4') for _ in range(self.ploidy)]
# disallow mutations to occur on window overlap points
self.win_buffer = window_overlap
for p in range(self.ploidy):
self.black_list[p][-self.win_buffer] = 3
self.black_list[p][-self.win_buffer - 1] = 3
def update_mut_models(self, mut_models, mut_rate):
if not mut_models:
default_model = [copy.deepcopy(DEFAULT_MODEL_1) for _ in range(self.ploidy)]
self.model_data = default_model[:self.ploidy]
else:
if len(mut_models) != self.ploidy:
print('\nError: Number of mutation models received is not equal to specified ploidy\n')
sys.exit(1)
self.model_data = copy.deepcopy(mut_models)
# do we need to rescale mutation frequencies?
mut_rate_sum = sum([n[0] for n in self.model_data])
self.mut_rescale = mut_rate
if self.mut_rescale is None:
self.mut_scalar = 1.0
else:
            self.mut_scalar = float(self.mut_rescale) / (mut_rate_sum / float(len(self.model_data)))  # true division; the rescale factor may be fractional
# how are mutations spread to each ploid, based on their specified mut rates?
self.ploid_mut_frac = [float(n[0]) / mut_rate_sum for n in self.model_data]
self.ploid_mut_prior = DiscreteDistribution(self.ploid_mut_frac, range(self.ploidy))
self.models = []
for n in self.model_data:
self.models.append([self.mut_scalar * n[0], n[1], n[2], n[3], DiscreteDistribution(n[5], n[4]),
DiscreteDistribution(n[7], n[6]), []])
for m in n[8]:
# noinspection PyTypeChecker
self.models[-1][6].append([DiscreteDistribution(m[0], NUCL), DiscreteDistribution(m[1], NUCL),
DiscreteDistribution(m[2], NUCL), DiscreteDistribution(m[3], NUCL)])
self.models[-1].append([m for m in n[9]])
def update_trinuc_bias(self):
trinuc_snp_bias = [[0. for _ in range(self.seq_len)] for _ in range(self.ploidy)]
self.trinuc_bias = [None for _ in range(self.ploidy)]
for p in range(self.ploidy):
for i in range(self.win_buffer + 1, self.seq_len - 1):
trinuc_snp_bias[p][i] = self.models[p][7][ALL_IND[str(self.sequences[p][i - 1:i + 2])]]
self.trinuc_bias[p] = DiscreteDistribution(trinuc_snp_bias[p][self.win_buffer + 1:self.seq_len - 1],
range(self.win_buffer + 1, self.seq_len - 1))
def init_coverage(self, coverage_data, frag_dist=None):
"""
Initializes coverage for the sequence container. Only makes changes if we are not in vcf-only mode.
:param coverage_data: A tuple containing the window size, gc scalars and target coverage values.
:param frag_dist: A probability distribution of the fragment size.
:return: Mean coverage value
"""
# TODO this section is also quite slow and will need further investigation
# If we're only creating a vcf, skip some expensive initialization related to coverage depth
if not self.only_vcf:
(self.window_size, gc_scalars, target_cov_vals) = coverage_data
gc_cov_vals = [[] for _ in self.sequences]
tr_cov_vals = [[] for _ in self.sequences]
avg_out = []
self.coverage_distribution = []
for i in range(len(self.sequences)):
# Zach implemented a change here but I can't remember if I changed it back for some reason.
# If second line below doesn't work, reactivate the first line.
# max_coord = min([len(self.sequences[i]) - self.read_len, len(self.all_cigar[i]) - self.read_len])
max_coord = min([len(self.sequences[i]) - self.read_len, len(self.all_cigar[i]) - 1])
# Trying to fix a problem wherein the above line gives a negative answer
if max_coord <= 0:
max_coord = min([len(self.sequences[i]), len(self.all_cigar[i])])
# compute gc-bias
j = 0
while j + self.window_size < len(self.sequences[i]):
| |
or (key_resp_light.keys == path1_corr):
key_resp_light.corr = 1
else:
key_resp_light.corr = 0
# a response ends the routine
continueRoutine = False
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in lightComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "light"-------
for thisComponent in lightComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
loop_light1.addData('concentration.started', concentration.tStartRefresh)
loop_light1.addData('concentration.stopped', concentration.tStopRefresh)
loop_light1.addData('image.started', image.tStartRefresh)
loop_light1.addData('image.stopped', image.tStopRefresh)
# check responses
if key_resp_light.keys in ['', [], None]: # No response was made
key_resp_light.keys = None
# was no response the correct answer?!
if str(path1_corr).lower() == 'none':
key_resp_light.corr = 1; # correct non-response
else:
key_resp_light.corr = 0; # failed to respond (incorrectly)
# store data for loop_light1 (TrialHandler)
loop_light1.addData('key_resp_light.keys',key_resp_light.keys)
loop_light1.addData('key_resp_light.corr', key_resp_light.corr)
if key_resp_light.keys != None: # we had a response
loop_light1.addData('key_resp_light.rt', key_resp_light.rt)
loop_light1.addData('key_resp_light.started', key_resp_light.tStartRefresh)
loop_light1.addData('key_resp_light.stopped', key_resp_light.tStopRefresh)
thisExp.nextEntry()
# completed 12 repeats of 'loop_light1'
# ------Prepare to start Routine "introduction3"-------
continueRoutine = True
# update component parameters for each repeat
key_resp_3.keys = []
key_resp_3.rt = []
_key_resp_3_allKeys = []
# keep track of which components have finished
introduction3Components = [introduction_3, key_resp_3]
for thisComponent in introduction3Components:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
introduction3Clock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "introduction3"-------
while continueRoutine:
# get current time
t = introduction3Clock.getTime()
tThisFlip = win.getFutureFlipTime(clock=introduction3Clock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *introduction_3* updates
if introduction_3.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
introduction_3.frameNStart = frameN # exact frame index
introduction_3.tStart = t # local t and not account for scr refresh
introduction_3.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(introduction_3, 'tStartRefresh') # time at next scr refresh
introduction_3.setAutoDraw(True)
# *key_resp_3* updates
waitOnFlip = False
if key_resp_3.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
key_resp_3.frameNStart = frameN # exact frame index
key_resp_3.tStart = t # local t and not account for scr refresh
key_resp_3.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(key_resp_3, 'tStartRefresh') # time at next scr refresh
key_resp_3.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(key_resp_3.clock.reset) # t=0 on next screen flip
win.callOnFlip(key_resp_3.clearEvents, eventType='keyboard') # clear events on next screen flip
if key_resp_3.status == STARTED and not waitOnFlip:
theseKeys = key_resp_3.getKeys(keyList=['space'], waitRelease=False)
_key_resp_3_allKeys.extend(theseKeys)
if len(_key_resp_3_allKeys):
key_resp_3.keys = _key_resp_3_allKeys[-1].name # just the last key pressed
key_resp_3.rt = _key_resp_3_allKeys[-1].rt
# a response ends the routine
continueRoutine = False
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in introduction3Components:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "introduction3"-------
for thisComponent in introduction3Components:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
thisExp.addData('introduction_3.started', introduction_3.tStartRefresh)
thisExp.addData('introduction_3.stopped', introduction_3.tStopRefresh)
# the Routine "introduction3" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# set up handler to look after randomisation of conditions etc
loop_face1 = data.TrialHandler(nReps=2, method='random',
extraInfo=expInfo, originPath=-1,
trialList=data.importConditions('documents\\face.xlsx'),
seed=None, name='loop_face1')
thisExp.addLoop(loop_face1) # add the loop to the experiment
thisLoop_face1 = loop_face1.trialList[0] # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb = thisLoop_face1.rgb)
if thisLoop_face1 != None:
for paramName in thisLoop_face1:
exec('{} = thisLoop_face1[paramName]'.format(paramName))
for thisLoop_face1 in loop_face1:
currentLoop = loop_face1
# abbreviate parameter names if possible (e.g. rgb = thisLoop_face1.rgb)
if thisLoop_face1 != None:
for paramName in thisLoop_face1:
exec('{} = thisLoop_face1[paramName]'.format(paramName))
# ------Prepare to start Routine "face"-------
continueRoutine = True
routineTimer.add(2.400000)
# update component parameters for each repeat
image_2.setImage(path2)
key_resp_face.keys = []
key_resp_face.rt = []
_key_resp_face_allKeys = []
# keep track of which components have finished
faceComponents = [concentration2, image_2, key_resp_face]
for thisComponent in faceComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
faceClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "face"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = faceClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=faceClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *concentration2* updates
if concentration2.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
concentration2.frameNStart = frameN # exact frame index
concentration2.tStart = t # local t and not account for scr refresh
concentration2.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(concentration2, 'tStartRefresh') # time at next scr refresh
concentration2.setAutoDraw(True)
if concentration2.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > concentration2.tStartRefresh + 0.4-frameTolerance:
# keep track of stop time/frame for later
concentration2.tStop = t # not accounting for scr refresh
concentration2.frameNStop = frameN # exact frame index
win.timeOnFlip(concentration2, 'tStopRefresh') # time at next scr refresh
concentration2.setAutoDraw(False)
# *image_2* updates
if image_2.status == NOT_STARTED and tThisFlip >= 0.4-frameTolerance:
# keep track of start time/frame for later
image_2.frameNStart = frameN # exact frame index
image_2.tStart = t # local t and not account for scr refresh
image_2.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(image_2, 'tStartRefresh') # time at next scr refresh
image_2.setAutoDraw(True)
if image_2.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > image_2.tStartRefresh + 2-frameTolerance:
# keep track of stop time/frame for later
image_2.tStop = t # not accounting for scr refresh
image_2.frameNStop = frameN # exact frame index
win.timeOnFlip(image_2, 'tStopRefresh') # time at next scr refresh
image_2.setAutoDraw(False)
# *key_resp_face* updates
waitOnFlip = False
if key_resp_face.status == NOT_STARTED and tThisFlip >= 0.4-frameTolerance:
# keep track of start time/frame for later
key_resp_face.frameNStart = frameN # exact frame index
key_resp_face.tStart = t # local t and not account for scr refresh
key_resp_face.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(key_resp_face, 'tStartRefresh') # time at next scr refresh
key_resp_face.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(key_resp_face.clock.reset) # t=0 on next screen flip
win.callOnFlip(key_resp_face.clearEvents, eventType='keyboard') # clear events on next screen flip
if key_resp_face.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > key_resp_face.tStartRefresh + 2-frameTolerance:
# keep track of stop time/frame for later
key_resp_face.tStop = t # not accounting for scr refresh
key_resp_face.frameNStop = frameN # exact frame index
win.timeOnFlip(key_resp_face, 'tStopRefresh') # time at next scr refresh
key_resp_face.status = FINISHED
if key_resp_face.status == STARTED and not waitOnFlip:
theseKeys = key_resp_face.getKeys(keyList=['space'], waitRelease=False)
_key_resp_face_allKeys.extend(theseKeys)
if len(_key_resp_face_allKeys):
key_resp_face.keys = _key_resp_face_allKeys[-1].name # just the last key pressed
key_resp_face.rt = _key_resp_face_allKeys[-1].rt
# was this correct?
if (key_resp_face.keys == str(path2_corr)) or (key_resp_face.keys == path2_corr):
key_resp_face.corr = 1
else:
key_resp_face.corr = 0
# a response ends the routine
continueRoutine = False
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = | |
# Generated from names extracted from https://support.google.com/docs/table/25273
from gigamonkeys.formulas import Function
def DATE(*args) -> Function:
"""
Converts a provided year, month, and day into a date.
Learn more: https//support.google.com/docs/answer/3092969
"""
return Function("DATE", args)
def DATEDIF(*args) -> Function:
"""
Calculates the number of days, months, or years between two dates.
Learn more: https//support.google.com/docs/answer/6055612
"""
return Function("DATEDIF", args)
def DATEVALUE(*args) -> Function:
"""
Converts a provided date string in a known format to a date value.
Learn more: https//support.google.com/docs/answer/3093039
"""
return Function("DATEVALUE", args)
def DAY(*args) -> Function:
"""
Returns the day of the month that a specific date falls on, in numeric format.
Learn more: https//support.google.com/docs/answer/3093040
"""
return Function("DAY", args)
def DAYS(*args) -> Function:
"""
Returns the number of days between two dates.
    Learn more: https://support.google.com/docs/answer/9061296
"""
return Function("DAYS", args)
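# Hedged usage sketch: each wrapper in this module only builds a Function node; how the
# node is rendered into a spreadsheet formula is up to gigamonkeys.formulas.Function
# (not shown here). The call below composes nested nodes equivalent to
# DAYS(DATE(2024, 1, 31), DATE(2024, 1, 1)).
#
#   days_in_january = DAYS(DATE(2024, 1, 31), DATE(2024, 1, 1))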
def DAYS360(*args) -> Function:
"""
Returns the difference between two days based on the 360 day year used in some
financial interest calculations.
Learn more: https//support.google.com/docs/answer/3093042
"""
return Function("DAYS360", args)
def EDATE(*args) -> Function:
"""
Returns a date a specified number of months before or after another date.
Learn more: https//support.google.com/docs/answer/3092974
"""
return Function("EDATE", args)
def EOMONTH(*args) -> Function:
"""
Returns a date representing the last day of a month which falls a specified
number of months before or after another date.
Learn more: https//support.google.com/docs/answer/3093044
"""
return Function("EOMONTH", args)
def HOUR(*args) -> Function:
"""
Returns the hour component of a specific time, in numeric format.
Learn more: https//support.google.com/docs/answer/3093045
"""
return Function("HOUR", args)
def ISOWEEKNUM(*args) -> Function:
"""
Returns the number of the ISO week of the year where the provided date falls.
Learn more: https//support.google.com/docs/answer/7368793
"""
return Function("ISOWEEKNUM", args)
def MINUTE(*args) -> Function:
"""
Returns the minute component of a specific time, in numeric format.
Learn more: https//support.google.com/docs/answer/3093048
"""
return Function("MINUTE", args)
def MONTH(*args) -> Function:
"""
Returns the month of the year a specific date falls in, in numeric format.
Learn more: https//support.google.com/docs/answer/3093052
"""
return Function("MONTH", args)
def NETWORKDAYS(*args) -> Function:
"""
Returns the number of net working days between two provided days.
Learn more: https//support.google.com/docs/answer/3092979
"""
return Function("NETWORKDAYS", args)
def NETWORKDAYS_INTL(*args) -> Function:
"""
Returns the number of net working days between two provided days excluding
specified weekend days and holidays.
Learn more: https//support.google.com/docs/answer/3295902
"""
return Function("NETWORKDAYS_INTL", args)
def NOW(*args) -> Function:
"""
Returns the current date and time as a date value.
Learn more: https//support.google.com/docs/answer/3092981
"""
return Function("NOW", args)
def SECOND(*args) -> Function:
"""
Returns the second component of a specific time, in numeric format.
Learn more: https//support.google.com/docs/answer/3093054
"""
return Function("SECOND", args)
def TIME(*args) -> Function:
"""
Converts a provided hour, minute, and second into a time.
Learn more: https//support.google.com/docs/answer/3093056
"""
return Function("TIME", args)
def TIMEVALUE(*args) -> Function:
"""
Returns the fraction of a 24-hour day the time represents.
Learn more: https//support.google.com/docs/answer/3267350
"""
return Function("TIMEVALUE", args)
def TODAY(*args) -> Function:
"""
Returns the current date as a date value.
Learn more: https//support.google.com/docs/answer/3092984
"""
return Function("TODAY", args)
def WEEKDAY(*args) -> Function:
"""
Returns a number representing the day of the week of the date provided.
Learn more: https//support.google.com/docs/answer/3092985
"""
return Function("WEEKDAY", args)
def WEEKNUM(*args) -> Function:
"""
Returns a number representing the week of the year where the provided date
falls.
Learn more: https//support.google.com/docs/answer/3294949
"""
return Function("WEEKNUM", args)
def WORKDAY(*args) -> Function:
"""
Calculates the end date after a specified number of working days.
Learn more: https//support.google.com/docs/answer/3093059
"""
return Function("WORKDAY", args)
def WORKDAY_INTL(*args) -> Function:
"""
Calculates the date after a specified number of workdays excluding specified
weekend days and holidays.
Learn more: https//support.google.com/docs/answer/3294972
"""
return Function("WORKDAY_INTL", args)
def YEAR(*args) -> Function:
"""
Returns the year specified by a given date.
Learn more: https//support.google.com/docs/answer/3093061
"""
return Function("YEAR", args)
def YEARFRAC(*args) -> Function:
"""
Returns the number of years, including fractional years, between two dates using
a specified day count convention.
Learn more: https//support.google.com/docs/answer/3092989
"""
return Function("YEARFRAC", args)
def BIN2DEC(*args) -> Function:
"""
Converts a signed binary number to decimal format.
Learn more: https//support.google.com/docs/answer/3092991
"""
return Function("BIN2DEC", args)
def BIN2HEX(*args) -> Function:
"""
Converts a signed binary number to signed hexadecimal format.
Learn more: https//support.google.com/docs/answer/3093133
"""
return Function("BIN2HEX", args)
def BIN2OCT(*args) -> Function:
"""
Converts a signed binary number to signed octal format.
Learn more: https//support.google.com/docs/answer/3092993
"""
return Function("BIN2OCT", args)
def BITAND(*args) -> Function:
"""
Bitwise boolean AND of two numbers.
    Learn more: https://support.google.com/docs/answer/9061440
"""
return Function("BITAND", args)
def BITLSHIFT(*args) -> Function:
"""
Shifts the bits of the input a certain number of places to the left.
    Learn more: https://support.google.com/docs/answer/9061443
"""
return Function("BITLSHIFT", args)
def BITOR(*args) -> Function:
"""
Bitwise boolean OR of 2 numbers.
    Learn more: https://support.google.com/docs/answer/9083934
"""
return Function("BITOR", args)
def BITRSHIFT(*args) -> Function:
"""
Shifts the bits of the input a certain number of places to the right.
    Learn more: https://support.google.com/docs/answer/9084100
"""
return Function("BITRSHIFT", args)
def BITXOR(*args) -> Function:
"""
Bitwise XOR (exclusive OR) of 2 numbers.
    Learn more: https://support.google.com/docs/answer/9083935
"""
return Function("BITXOR", args)
def COMPLEX(*args) -> Function:
"""
Creates a complex number given real and imaginary coefficients.
Learn more: https//support.google.com/docs/answer/7407888
"""
return Function("COMPLEX", args)
def DEC2BIN(*args) -> Function:
"""
Converts a decimal number to signed binary format.
Learn more: https//support.google.com/docs/answer/3092997
"""
return Function("DEC2BIN", args)
def DEC2HEX(*args) -> Function:
"""
Converts a decimal number to signed hexadecimal format.
Learn more: https//support.google.com/docs/answer/3093137
"""
return Function("DEC2HEX", args)
def DEC2OCT(*args) -> Function:
"""
Converts a decimal number to signed octal format.
Learn more: https//support.google.com/docs/answer/3093138
"""
return Function("DEC2OCT", args)
def DELTA(*args) -> Function:
"""
Compare two numeric values, returning 1 if they're equal.
Learn more: https//support.google.com/docs/answer/3401147
"""
return Function("DELTA", args)
def ERF(*args) -> Function:
"""
The ERF function returns the integral of the Gauss error function over an
interval of values.
    Learn more: https://support.google.com/docs/answer/9116267.
"""
return Function("ERF", args)
def ERF_PRECISE(*args) -> Function:
"See ERF"
return Function("ERF_PRECISE", args)
def GESTEP(*args) -> Function:
"""
Returns 1 if the rate is strictly greater than or equal to the provided step
value or 0 otherwise. If no step value is provided then the default value of 0
will be used.
    Learn more: https://support.google.com/docs/answer/9061379
"""
return Function("GESTEP", args)
def HEX2BIN(*args) -> Function:
"""
Converts a signed hexadecimal number to signed binary format.
Learn more: https//support.google.com/docs/answer/3093139
"""
return Function("HEX2BIN", args)
def HEX2DEC(*args) -> Function:
"""
Converts a signed hexadecimal number to decimal format.
Learn more: https//support.google.com/docs/answer/3093192
"""
return Function("HEX2DEC", args)
def HEX2OCT(*args) -> Function:
"""
Converts a signed hexadecimal number to signed octal format.
Learn more: https//support.google.com/docs/answer/3093142
"""
return Function("HEX2OCT", args)
def IMABS(*args) -> Function:
"""
Returns absolute value of a complex number.
Learn more: https//support.google.com/docs/answer/7411899
"""
return Function("IMABS", args)
def IMAGINARY(*args) -> Function:
"""
Returns the imaginary coefficient of a complex number.
Learn more: https//support.google.com/docs/answer/7408639
"""
return Function("IMAGINARY", args)
def IMARGUMENT(*args) -> Function:
"""
    The IMARGUMENT function returns the angle (also known as the argument or theta)
of the given complex number in radians.
Learn more: https//support.google.com/docs/answer/9116360.
"""
return Function("IMARGUMENT", args)
def IMCONJUGATE(*args) -> Function:
"""
Returns the complex conjugate of a number.
Learn more: https//support.google.com/docs/answer/7410791
"""
return Function("IMCONJUGATE", args)
def IMCOS(*args) -> Function:
"""
The IMCOS function returns the cosine of the given complex number.
Learn more: https//support.google.com/docs/answer/9116546.
"""
return Function("IMCOS", args)
def IMCOSH(*args) -> Function:
"""
Returns the hyperbolic cosine of the given complex number. For example, a given
complex number "x+yi" returns "cosh(x+yi)."
Learn more: https//support.google.com/docs/answer/9366233.
"""
return Function("IMCOSH", args)
def IMCOT(*args) -> Function:
"""
Returns the cotangent of the given complex number. For example, a given complex
number "x+yi" returns "cot(x+yi)."
Learn more: https//support.google.com/docs/answer/9366254.
"""
return Function("IMCOT", args)
def IMCOTH(*args) -> Function:
"""
Returns the hyperbolic cotangent of the given complex number. For example, a
given complex number "x+yi" returns "coth(x+yi)."
Learn more: https//support.google.com/docs/answer/9366256.
"""
return Function("IMCOTH", args)
def IMCSC(*args) -> Function:
"""
Returns the cosecant of the given complex number.
Learn more: https//support.google.com/docs/answer/9199155.
"""
return Function("IMCSC", args)
def IMCSCH(*args) -> Function:
"""
Returns the hyperbolic cosecant of the given complex number. For example, a
given complex number "x+yi" returns "csch(x+yi)."
Learn more: https//support.google.com/docs/answer/9366258.
"""
return Function("IMCSCH", args)
def IMDIV(*args) -> Function:
"""
Returns one complex number divided by another.
Learn more: https//support.google.com/docs/answer/7411898
"""
return Function("IMDIV", args)
def IMEXP(*args) -> Function:
"""
Returns Euler's number, e (~2.718) raised to a complex power.
Learn more: https//support.google.com/docs/answer/9198277.
"""
return Function("IMEXP", args)
def IMLOG(*args) -> Function:
    """
    Returns the logarithm of a complex number.
    """
    return Function("IMLOG", args)
class nameAndAddressRequestType(GeneratedsSuper):
    """The participant that is interacting with
    the consignment.
Examples of a participant are:
The Sender - the company sending the consignment
The Receiver - the company receiving the consignment
The Collection Address - the address from which the consignment is picked
up
The Delivery Address - the address to which the consignment should be
delivered"""
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, name=None, addressLine1=None, addressLine2=None, addressLine3=None, town=None, exactMatch='Y', province=None, postcode=None, country=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
self.name = name
self.validate_stringMaxLength40(self.name)
self.name_nsprefix_ = None
self.addressLine1 = addressLine1
self.validate_stringMaxLength30(self.addressLine1)
self.addressLine1_nsprefix_ = None
self.addressLine2 = addressLine2
self.validate_stringMaxLength30(self.addressLine2)
self.addressLine2_nsprefix_ = None
self.addressLine3 = addressLine3
self.validate_stringMaxLength30(self.addressLine3)
self.addressLine3_nsprefix_ = None
self.town = town
self.validate_stringMaxLength40(self.town)
self.town_nsprefix_ = None
self.exactMatch = exactMatch
self.validate_booleanEnum(self.exactMatch)
self.exactMatch_nsprefix_ = None
self.province = province
self.validate_stringMaxLength30(self.province)
self.province_nsprefix_ = None
self.postcode = postcode
self.validate_stringMaxLength9(self.postcode)
self.postcode_nsprefix_ = None
self.country = country
self.validate_stringMinLength2MaxLength2(self.country)
self.country_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, nameAndAddressRequestType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if nameAndAddressRequestType.subclass:
return nameAndAddressRequestType.subclass(*args_, **kwargs_)
else:
return nameAndAddressRequestType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_name(self):
return self.name
def set_name(self, name):
self.name = name
def get_addressLine1(self):
return self.addressLine1
def set_addressLine1(self, addressLine1):
self.addressLine1 = addressLine1
def get_addressLine2(self):
return self.addressLine2
def set_addressLine2(self, addressLine2):
self.addressLine2 = addressLine2
def get_addressLine3(self):
return self.addressLine3
def set_addressLine3(self, addressLine3):
self.addressLine3 = addressLine3
def get_town(self):
return self.town
def set_town(self, town):
self.town = town
def get_exactMatch(self):
return self.exactMatch
def set_exactMatch(self, exactMatch):
self.exactMatch = exactMatch
def get_province(self):
return self.province
def set_province(self, province):
self.province = province
def get_postcode(self):
return self.postcode
def set_postcode(self, postcode):
self.postcode = postcode
def get_country(self):
return self.country
def set_country(self, country):
self.country = country
def validate_stringMaxLength40(self, value):
result = True
# Validate type stringMaxLength40, a restriction on xsd:string.
if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
if not isinstance(value, str):
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
return False
if len(value) > 40:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd maxLength restriction on stringMaxLength40' % {"value" : encode_str_2_3(value), "lineno": lineno} )
result = False
return result
def validate_stringMaxLength30(self, value):
result = True
# Validate type stringMaxLength30, a restriction on xsd:string.
if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
if not isinstance(value, str):
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
return False
if len(value) > 30:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd maxLength restriction on stringMaxLength30' % {"value" : encode_str_2_3(value), "lineno": lineno} )
result = False
return result
def validate_booleanEnum(self, value):
result = True
# Validate type booleanEnum, a restriction on xsd:string.
if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
if not isinstance(value, str):
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
return False
value = value
enumerations = ['N', 'Y']
if value not in enumerations:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd enumeration restriction on booleanEnum' % {"value" : encode_str_2_3(value), "lineno": lineno} )
result = False
return result
def validate_stringMaxLength9(self, value):
result = True
# Validate type stringMaxLength9, a restriction on xsd:string.
if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
if not isinstance(value, str):
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
return False
if len(value) > 9:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd maxLength restriction on stringMaxLength9' % {"value" : encode_str_2_3(value), "lineno": lineno} )
result = False
return result
def validate_stringMinLength2MaxLength2(self, value):
result = True
# Validate type stringMinLength2MaxLength2, a restriction on xsd:string.
if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
if not isinstance(value, str):
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
return False
if len(value) > 2:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd maxLength restriction on stringMinLength2MaxLength2' % {"value" : encode_str_2_3(value), "lineno": lineno} )
result = False
if len(value) < 2:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd minLength restriction on stringMinLength2MaxLength2' % {"value" : encode_str_2_3(value), "lineno": lineno} )
result = False
return result
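# Note: the validate_* helpers above never raise; they only record problems through
# gds_collector_.add_message() and return a boolean, so callers that care about schema
# violations should inspect the collector after __init__ or build().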
def hasContent_(self):
if (
self.name is not None or
self.addressLine1 is not None or
self.addressLine2 is not None or
self.addressLine3 is not None or
self.town is not None or
self.exactMatch != "Y" or
self.province is not None or
self.postcode is not None or
self.country is not None
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='nameAndAddressRequestType', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('nameAndAddressRequestType')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None and name_ == 'nameAndAddressRequestType':
name_ = self.original_tagname_
if UseCapturedNS_ and self.ns_prefix_:
namespaceprefix_ = self.ns_prefix_ + ':'
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='nameAndAddressRequestType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='nameAndAddressRequestType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='nameAndAddressRequestType'):
pass
def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='nameAndAddressRequestType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.name is not None:
namespaceprefix_ = self.name_nsprefix_ + ':' if (UseCapturedNS_ and self.name_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sname>%s</%sname>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.name), input_name='name')), namespaceprefix_ , eol_))
if self.addressLine1 is not None:
namespaceprefix_ = self.addressLine1_nsprefix_ + ':' if (UseCapturedNS_ and self.addressLine1_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%saddressLine1>%s</%saddressLine1>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.addressLine1), input_name='addressLine1')), namespaceprefix_ , eol_))
if self.addressLine2 is not None:
namespaceprefix_ = self.addressLine2_nsprefix_ + ':' if (UseCapturedNS_ and self.addressLine2_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%saddressLine2>%s</%saddressLine2>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.addressLine2), input_name='addressLine2')), namespaceprefix_ , eol_))
if self.addressLine3 is not None:
namespaceprefix_ = self.addressLine3_nsprefix_ + ':' if (UseCapturedNS_ and self.addressLine3_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%saddressLine3>%s</%saddressLine3>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.addressLine3), input_name='addressLine3')), namespaceprefix_ , eol_))
if self.town is not None:
namespaceprefix_ = self.town_nsprefix_ + ':' if (UseCapturedNS_ and self.town_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%stown>%s</%stown>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.town), input_name='town')), namespaceprefix_ , eol_))
if self.exactMatch != "Y":
namespaceprefix_ = self.exactMatch_nsprefix_ + ':' if (UseCapturedNS_ and self.exactMatch_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sexactMatch>%s</%sexactMatch>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.exactMatch), input_name='exactMatch')), namespaceprefix_ , eol_))
if self.province is not None:
namespaceprefix_ = self.province_nsprefix_ + ':' if (UseCapturedNS_ and self.province_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sprovince>%s</%sprovince>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.province), input_name='province')), namespaceprefix_ , eol_))
if self.postcode is not None:
namespaceprefix_ = self.postcode_nsprefix_ + ':' if (UseCapturedNS_ and self.postcode_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%spostcode>%s</%spostcode>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.postcode), input_name='postcode')), namespaceprefix_ , eol_))
if self.country is not None:
namespaceprefix_ = self.country_nsprefix_ + ':' if (UseCapturedNS_ and self.country_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%scountry>%s</%scountry>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.country), input_name='country')), namespaceprefix_ , eol_))
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed = set()
self.ns_prefix_ = node.prefix
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
if nodeName_ == 'name':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'name')
value_ = self.gds_validate_string(value_, node, 'name')
self.name = value_
self.name_nsprefix_ = child_.prefix
# validate type stringMaxLength40
self.validate_stringMaxLength40(self.name)
elif nodeName_ == 'addressLine1':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'addressLine1')
value_ = self.gds_validate_string(value_, node, 'addressLine1')
self.addressLine1 = value_
self.addressLine1_nsprefix_ = child_.prefix
# validate type stringMaxLength30
self.validate_stringMaxLength30(self.addressLine1)
elif nodeName_ == 'addressLine2':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'addressLine2')
value_ = self.gds_validate_string(value_, node, 'addressLine2')
self.addressLine2 = value_
self.addressLine2_nsprefix_ = child_.prefix
# validate type stringMaxLength30
self.validate_stringMaxLength30(self.addressLine2)
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_f4():
"""
Test case extra f4: Collapsed link inside of full link
"""
# Arrange
source_markdown = """a[foo [bar][]][bar]a
[bar]: /url 'title'"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):a:]",
"[text(1,2):[:]",
"[text(1,3):foo :]",
"[link(1,7):collapsed:/url:title::::bar:::::]",
"[text(1,8):bar:]",
"[end-link::]",
"[text(1,14):]:]",
"[link(1,15):shortcut:/url:title::::bar:::::]",
"[text(1,16):bar:]",
"[end-link::]",
"[text(1,20):a:]",
"[end-para:::True]",
"[BLANK(2,1): ]",
"[link-ref-def(3,1):True::bar:: :/url:: :title:'title':]",
]
expected_gfm = """<p>a[foo <a href="/url" title="title">bar</a>]<a href="/url" title="title">bar</a>a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
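# Reading aid (not part of the original test suite): each expected_tokens entry is the
# parser's serialized token, and its leading "(line,column)" pair records where that
# element starts inside source_markdown.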
@pytest.mark.gfm
def test_paragraph_extra_f4a():
"""
Test case extra f4a: Collapsed link inside of full link
"""
# Arrange
source_markdown = """a[foo ![bar][]][bar]a
[bar]: /url 'title'"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):a:]",
"[link(1,2):full:/url:title:::bar:foo ![bar][]:::::]",
"[text(1,3):foo :]",
"[image(1,7):collapsed:/url:title:bar::::bar:::::]",
"[end-link::]",
"[text(1,21):a:]",
"[end-para:::True]",
"[BLANK(2,1): ]",
"[link-ref-def(3,1):True::bar:: :/url:: :title:'title':]",
]
expected_gfm = """<p>a<a href="/url" title="title">foo <img src="/url" alt="bar" title="title" /></a>a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_f4b():
"""
Test case extra f4b: Collapsed link inside of full link
"""
# Arrange
source_markdown = """a![foo ![bar][]][bar]a
[bar]: /url 'title'"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):a:]",
"[image(1,2):full:/url:title:foo bar:::bar:foo ![bar][]:::::]",
"[text(1,22):a:]",
"[end-para:::True]",
"[BLANK(2,1): ]",
"[link-ref-def(3,1):True::bar:: :/url:: :title:'title':]",
]
expected_gfm = """<p>a<img src="/url" alt="foo bar" title="title" />a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_f4c():
"""
Test case extra f4c: Collapsed link inside of full link
"""
# Arrange
source_markdown = """a![foo [bar][]][bar]a
[bar]: /url 'title'"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):a:]",
"[image(1,2):full:/url:title:foo bar:::bar:foo [bar][]:::::]",
"[text(1,21):a:]",
"[end-para:::True]",
"[BLANK(2,1): ]",
"[link-ref-def(3,1):True::bar:: :/url:: :title:'title':]",
]
expected_gfm = """<p>a<img src="/url" alt="foo bar" title="title" />a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_f5():
"""
Test case extra f5: Shortcut link inside of full link
"""
# Arrange
source_markdown = """a[foo [bar2]][bar]a
[bar]: /url 'title'"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):a:]",
"[link(1,2):full:/url:title:::bar:foo [bar2]:::::]",
"[text(1,3):foo :]",
"[text(1,7):[:]",
"[text(1,8):bar2:]",
"[text(1,12):]:]",
"[end-link::]",
"[text(1,19):a:]",
"[end-para:::True]",
"[BLANK(2,1): ]",
"[link-ref-def(3,1):True::bar:: :/url:: :title:'title':]",
]
expected_gfm = """<p>a<a href="/url" title="title">foo [bar2]</a>a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_f5a():
"""
Test case extra f5a: Shortcut link inside of full link
"""
# Arrange
source_markdown = """a[foo ![bar2]][bar]a
[bar]: /url 'title'"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):a:]",
"[link(1,2):full:/url:title:::bar:foo ![bar2]:::::]",
"[text(1,3):foo :]",
"[text(1,7):![:]",
"[text(1,9):bar2:]",
"[text(1,13):]:]",
"[end-link::]",
"[text(1,20):a:]",
"[end-para:::True]",
"[BLANK(2,1): ]",
"[link-ref-def(3,1):True::bar:: :/url:: :title:'title':]",
]
expected_gfm = """<p>a<a href="/url" title="title">foo ![bar2]</a>a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_f5b():
"""
Test case extra f5b: Shortcut link inside of full link
"""
# Arrange
source_markdown = """a![foo ![bar2]][bar]a
[bar]: /url 'title'"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):a:]",
"[image(1,2):full:/url:title:foo ![bar2]:::bar:foo ![bar2]:::::]",
"[text(1,21):a:]",
"[end-para:::True]",
"[BLANK(2,1): ]",
"[link-ref-def(3,1):True::bar:: :/url:: :title:'title':]",
]
expected_gfm = """<p>a<img src="/url" alt="foo ![bar2]" title="title" />a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_f5c():
"""
Test case extra f5c: Shortcut link inside of full link
"""
# Arrange
source_markdown = """a![foo [bar2]][bar]a
[bar]: /url 'title'"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):a:]",
"[image(1,2):full:/url:title:foo [bar2]:::bar:foo [bar2]:::::]",
"[text(1,20):a:]",
"[end-para:::True]",
"[BLANK(2,1): ]",
"[link-ref-def(3,1):True::bar:: :/url:: :title:'title':]",
]
expected_gfm = """<p>a<img src="/url" alt="foo [bar2]" title="title" />a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_f6():
"""
Test case extra f6: Shortcut link inside of full link
"""
# Arrange
source_markdown = """a[foo [bar]][bar]a
[bar]: /url 'title'"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):a:]",
"[text(1,2):[:]",
"[text(1,3):foo :]",
"[link(1,7):shortcut:/url:title::::bar:::::]",
"[text(1,8):bar:]",
"[end-link::]",
"[text(1,12):]:]",
"[link(1,13):shortcut:/url:title::::bar:::::]",
"[text(1,14):bar:]",
"[end-link::]",
"[text(1,18):a:]",
"[end-para:::True]",
"[BLANK(2,1): ]",
"[link-ref-def(3,1):True::bar:: :/url:: :title:'title':]",
]
expected_gfm = """<p>a[foo <a href="/url" title="title">bar</a>]<a href="/url" title="title">bar</a>a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_f6a():
"""
Test case extra f6a: Shortcut link inside of full link
"""
# Arrange
source_markdown = """a[foo ![bar]][bar]a
[bar]: /url 'title'"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):a:]",
"[link(1,2):full:/url:title:::bar:foo ![bar]:::::]",
"[text(1,3):foo :]",
"[image(1,7):shortcut:/url:title:bar::::bar:::::]",
"[end-link::]",
"[text(1,19):a:]",
"[end-para:::True]",
"[BLANK(2,1): ]",
"[link-ref-def(3,1):True::bar:: :/url:: :title:'title':]",
]
expected_gfm = """<p>a<a href="/url" title="title">foo <img src="/url" alt="bar" title="title" /></a>a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_f6b():
"""
Test case extra f6b: Shortcut link inside of full link
"""
# Arrange
source_markdown = """a![foo ![bar]][bar]a
[bar]: /url 'title'"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):a:]",
"[image(1,2):full:/url:title:foo bar:::bar:foo ![bar]:::::]",
"[text(1,20):a:]",
"[end-para:::True]",
"[BLANK(2,1): ]",
"[link-ref-def(3,1):True::bar:: :/url:: :title:'title':]",
]
expected_gfm = """<p>a<img src="/url" alt="foo bar" title="title" />a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_f6c():
"""
Test case extra f6c: Shortcut link inside of full link
"""
# Arrange
source_markdown = """a![foo [bar]][bar]a
[bar]: /url 'title'"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):a:]",
"[image(1,2):full:/url:title:foo bar:::bar:foo [bar]:::::]",
"[text(1,19):a:]",
"[end-para:::True]",
"[BLANK(2,1): ]",
"[link-ref-def(3,1):True::bar:: :/url:: :title:'title':]",
]
expected_gfm = """<p>a<img src="/url" alt="foo bar" title="title" />a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_f7():
"""
Test case extra f7: Inline link inside of collapsed link
"""
# Arrange
source_markdown = """a[foo [bar2](/url2)][]a
[bar]: /url 'title'"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):a:]",
"[text(1,2):[:]",
"[text(1,3):foo :]",
"[link(1,7):inline:/url2:::::bar2:False::::]",
"[text(1,8):bar2:]",
"[end-link::]",
"[text(1,20):]:]",
"[text(1,21):[:]",
"[text(1,22):]:]",
"[text(1,23):a:]",
"[end-para:::True]",
"[BLANK(2,1): ]",
"[link-ref-def(3,1):True::bar:: :/url:: :title:'title':]",
]
expected_gfm = """<p>a[foo <a href="/url2">bar2</a>][]a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_f7a():
"""
Test case extra f7a: Inline link inside of collapsed link
"""
# Arrange
source_markdown = """a[foo ][]a
[bar]: /url 'title'"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):a:]",
"[text(1,2):[:]",
"[text(1,3):foo :]",
"[image(1,7):inline:/url2::bar2::::bar2:False::::]",
"[text(1,21):]:]",
"[text(1,22):[:]",
"[text(1,23):]:]",
"[text(1,24):a:]",
"[end-para:::True]",
"[BLANK(2,1): ]",
"[link-ref-def(3,1):True::bar:: :/url:: :title:'title':]",
]
expected_gfm = """<p>a[foo <img src="/url2" alt="bar2" />][]a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_f7b():
"""
Test case extra f7b: Inline link inside of collapsed link
"""
# Arrange
source_markdown = """a][]a
[bar]: /url 'title'"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):a:]",
"[text(1,2):![:]",
"[text(1,4):foo :]",
"[image(1,8):inline:/url2::bar2::::bar2:False::::]",
"[text(1,22):]:]",
"[text(1,23):[:]",
"[text(1,24):]:]",
"[text(1,25):a:]",
"[end-para:::True]",
"[BLANK(2,1): ]",
"[link-ref-def(3,1):True::bar:: :/url:: :title:'title':]",
]
expected_gfm = """<p>a![foo <img src="/url2" alt="bar2" />][]a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_f7c():
"""
Test case extra f7c: Inline link inside of collapsed link
"""
# Arrange
source_markdown = """a][]a
[bar]: /url 'title'"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):a:]",
"[text(1,2):![:]",
"[text(1,4):foo :]",
"[link(1,8):inline:/url2:::::bar2:False::::]",
"[text(1,9):bar2:]",
"[end-link::]",
"[text(1,21):]:]",
"[text(1,22):[:]",
"[text(1,23):]:]",
"[text(1,24):a:]",
"[end-para:::True]",
"[BLANK(2,1): ]",
"[link-ref-def(3,1):True::bar:: :/url:: :title:'title':]",
]
expected_gfm = """<p>a![foo <a href="/url2">bar2</a>][]a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_f8():
"""
Test case extra f8: Inline link inside of collapsed link
"""
# Arrange
source_markdown = """a[foo [bar](/url2)][]a
[bar]: /url 'title'"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):a:]",
"[text(1,2):[:]",
"[text(1,3):foo :]",
"[link(1,7):inline:/url2:::::bar:False::::]",
"[text(1,8):bar:]",
"[end-link::]",
"[text(1,19):]:]",
"[text(1,20):[:]",
"[text(1,21):]:]",
"[text(1,22):a:]",
"[end-para:::True]",
"[BLANK(2,1): ]",
"[link-ref-def(3,1):True::bar:: :/url:: :title:'title':]",
]
expected_gfm = """<p>a[foo <a href="/url2">bar</a>][]a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_f8a():
"""
Test case extra f8a: Inline link inside of collapsed link
"""
# Arrange
source_markdown = """a[foo ][]a
[bar]: /url 'title'"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):a:]",
"[text(1,2):[:]",
"[text(1,3):foo :]",
"[image(1,7):inline:/url2::bar::::bar:False::::]",
"[text(1,20):]:]",
"[text(1,21):[:]",
"[text(1,22):]:]",
"[text(1,23):a:]",
"[end-para:::True]",
"[BLANK(2,1): ]",
"[link-ref-def(3,1):True::bar:: :/url:: :title:'title':]",
]
expected_gfm = """<p>a[foo <img src="/url2" alt="bar" />][]a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_f8b():
"""
Test case extra f8b: Inline link inside of collapsed link
"""
# Arrange
source_markdown = """a][]a
[bar]: /url 'title'"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):a:]",
"[text(1,2):![:]",
"[text(1,4):foo :]",
"[image(1,8):inline:/url2::bar::::bar:False::::]",
"[text(1,21):]:]",
"[text(1,22):[:]",
"[text(1,23):]:]",
"[text(1,24):a:]",
"[end-para:::True]",
"[BLANK(2,1): ]",
"[link-ref-def(3,1):True::bar:: :/url:: :title:'title':]",
]
expected_gfm = """<p>a![foo <img src="/url2" alt="bar" />][]a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_f8c():
"""
Test case extra f8c: Inline link inside of collapsed link
"""
# Arrange
source_markdown = """a][]a
[bar]: /url 'title'"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):a:]",
"[text(1,2):![:]",
"[text(1,4):foo :]",
"[link(1,8):inline:/url2:::::bar:False::::]",
"[text(1,9):bar:]",
"[end-link::]",
"[text(1,20):]:]",
"[text(1,21):[:]",
"[text(1,22):]:]",
"[text(1,23):a:]",
"[end-para:::True]",
"[BLANK(2,1): ]",
"[link-ref-def(3,1):True::bar:: :/url:: :title:'title':]",
]
expected_gfm = """<p>a![foo <a href="/url2">bar</a>][]a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_f9():
"""
Test case extra f9: Full link inside of collapsed link
"""
# Arrange
source_markdown = """a[foo [bar2][bar]][]a
[bar]: /url 'title'"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):a:]",
"[text(1,2):[:]",
"[text(1,3):foo :]",
"[link(1,7):full:/url:title:::bar:bar2:::::]",
"[text(1,8):bar2:]",
"[end-link::]",
"[text(1,18):]:]",
"[text(1,19):[:]",
"[text(1,20):]:]",
"[text(1,21):a:]",
"[end-para:::True]",
"[BLANK(2,1): ]",
"[link-ref-def(3,1):True::bar:: :/url:: :title:'title':]",
]
expected_gfm = """<p>a[foo <a href="/url" title="title">bar2</a>][]a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_f9a():
"""
Test case extra f9a: Full link inside of collapsed link
"""
# Arrange
source_markdown = """a[foo ![bar2][bar]][]a
[bar]: /url 'title'"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):a:]",
"[text(1,2):[:]",
"[text(1,3):foo :]",
"[image(1,7):full:/url:title:bar2:::bar:bar2:::::]",
"[text(1,19):]:]",
"[text(1,20):[:]",
"[text(1,21):]:]",
"[text(1,22):a:]",
"[end-para:::True]",
"[BLANK(2,1): ]",
"[link-ref-def(3,1):True::bar:: :/url:: :title:'title':]",
]
expected_gfm = """<p>a[foo <img src="/url" alt="bar2" title="title" />][]a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_f9b():
"""
Test case extra f9b: Full link inside of collapsed link
"""
# Arrange
source_markdown = """a![foo ![bar2][bar]][]a
[bar]: /url 'title'"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):a:]",
"[text(1,2):![:]",
"[text(1,4):foo :]",
"[image(1,8):full:/url:title:bar2:::bar:bar2:::::]",
"[text(1,20):]:]",
"[text(1,21):[:]",
"[text(1,22):]:]",
"[text(1,23):a:]",
"[end-para:::True]",
"[BLANK(2,1): ]",
"[link-ref-def(3,1):True::bar:: :/url:: :title:'title':]",
]
expected_gfm = """<p>a![foo <img | |
df[Y[0]] = pd.to_numeric(df[Y[0]])
# ate = df.groupby(X)[Y[0]].mean()
# print(ate)
return df
def old_marginal_repair(self, data, X, Y, method='MF'):
self.data = data # A contingency table as a matrix
self.columns = data.columns
self.X = X
self.Y = Y
rest = []
for att in self.columns:
if att not in X and att not in Y and att:
rest.insert(0, att)
# computes the contingency table of X and Y and repairs it
Contin_XY = ContinTable()
Contin_XY.data_to_cnt(data, X, Y)
print('Original: \n', Contin_XY.matrix)
print('Coupling: \n', Contin_XY.indep_cop())
print('MI: \n', Contin_XY.mi())
#shuffle_table = ContinTable()
#data_shuff = data.copy()
#mi = 100
'''
while mi>0.0001:
data_shuff[Y] = data[Y].transform(np.random.permutation)
shuffle_table.data_to_cnt(data_shuff,X,Y)
print('shuff_Table',shuffle_table.matrix)
mi=shuffle_table.mi()
print('MI Shuffle: \n', mi)
print('Low rank',Contin_XY.low_rank_approx())
contin_matrix_XY_REP=shuffle_table.matrix
'''
#data_shuff[Y] = data[Y].transform(np.random.permutation)
#shuffle_table.data_to_cnt(data_shuff, X, Y)
#print('shuff_Table', shuffle_table.matrix)
if method == 'MF':
#print('SVD')
contin_matrix_XY_REP = Contin_XY.low_rank_approx() ## repaired contingency table
elif method == 'IC':
#print('method=coupling')
contin_matrix_XY_REP = Contin_XY.indep_cop()
# print(Contin_XY.row_index)
# print(Contin_XY.col_index)
Contin_XY.matrix = contin_matrix_XY_REP
Contin_XY_REP = ContinTable()
Contin_XY_REP.matrix_to_cnt(contin_matrix_XY_REP)
print('Repaired: \n', Contin_XY_REP.matrix)
print('MI: \n', Contin_XY_REP.mi())
'''
if Contin_XY_REP.matrix.shape==(2,2):
c1=Contin_XY_REP.matrix.item(0,0)+Contin_XY_REP.matrix.item(0,1)
c2 = Contin_XY_REP.matrix.item(1, 0) + Contin_XY_REP.matrix.item(1, 1)
rep=(c1*Contin_XY_REP.matrix.item(0,1)/c2)-Contin_XY_REP.matrix.item(0, 1)
Contin_XY_REP.matrix[0, 1]=int(rep)+ Contin_XY_REP.matrix[0, 1]
print('repared \n',Contin_XY_REP.matrix)
# computes conditional probabilities of P(Z|XY)
'''
Contin_Full = ContinTable()
Contin_Full.data_to_cnt(data, X + Y, rest)
dim = Contin_Full.matrix.shape
w, h = dim[1], dim[0];
x = Contin_XY.matrix.shape[0] * Contin_XY.matrix.shape[1]
y = dim[0]
if y != x:
return pd.DataFrame(columns=X + Y + rest)
if dim[1] < 2 or dim[0] < 2:
return pd.DataFrame(columns=X + Y + rest)
con_prop = [[0.0 for x in range(w)] for y in range(h)]
con_prop = asmatrix(con_prop)
ct_dim = Contin_XY.matrix.shape
devisor = ct_dim[1]
for i in range(0, dim[0]):
e = Contin_Full.matrix[i, :]
mrg = sum(e)
# (i)
# print(int(i / devisor), i % devisor)
for j in range(0, dim[1]):
el1 = Contin_Full.matrix.item(i, j)
# el=el1/float(mrg)
freq = Contin_XY.matrix.item(int(i / devisor), i % devisor)
a = random.randint(0, 10)
con_prop[i, j] = el1 * freq / float(mrg)
# if a<=4:
# con_prop[i, j]=math.ceil(el*freq)
# print(el*freq)
# print(con_prop[i, j])
# #print(con_prop[i, j], freq, el1)
# else:
# con_prop[i, j] = math.ceil(el * freq)
# #print(con_prop[i, j],freq,el1)
### repair
for i in range(0, dim[0]):
A = con_prop[i, :]
# print(A, type(A))
dim = A.shape
A = np.array(A).flatten().tolist()
freq = Contin_XY.matrix.item(int(i / devisor), i % devisor)
B = [int(e) + (random.random() < e - int(e)) for e in A]
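# Stochastic rounding: each fractional count is rounded down and then bumped up with
# probability equal to its fractional part, so every entry (and hence the row total)
# is preserved in expectation.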
print(sum(B), freq)
# rounded_A=smart_round(A,int(freq))
# The optimal Lagrange multiplier for a constraint
# is stored in constraint.dual_value.
con_prop[i, :] = B
# con_prop=con_prop.astype(int)
final_table = ContinTable()
final_table.matrix_to_cnt(con_prop)
# final_table.matrix=final_table.matrix.astype(int)
# print(final_table.matrix.sum())
# print(Contin_Full.matrix.sum())
# print(final_table.Xmrn)
# print(Contin_XY_REP)
# final_table.Xmrn.reshape(Contin_XY.matrix.shape)
print(final_table.Xmrn, Contin_XY.matrix.shape)
m = np.reshape(final_table.Xmrn, Contin_XY.matrix.shape)
tbl3 = ContinTable()
tbl3.matrix_to_cnt(m)
# print(tbl3.mi(m))
# print(Contin_XY.mi())
# print(final_table.Xmrn)
Contin_Full.matrix = final_table.matrix
# print(Contin_Full.matrix,Contin_Full.col_index,Contin_Full.row_index)
Contin_Full.matrix = np.asarray(Contin_Full.matrix.astype(int))
# print(Contin_Full.matrix)
# print(Contin_Full.row_index,Contin_Full.col_index)
# print(Contin_Full.matrix)
df = pd.DataFrame(columns=X + Y + rest)
j = 0
for row in Contin_Full.row_index:
i = 0
for col in Contin_Full.col_index:
# print([Contin_Full.matrix.item(j, i)] + list(row) + list(col))
for z in range(0, Contin_Full.matrix.item(j, i)):
df.loc[-1] = list(row) + list(col)
df.index = df.index + 1 # shifting index
df = df.sort_index()
i += 1
j += 1
Contin_XY = ContinTable()
Contin_XY.data_to_cnt(df, X, Y)
print('Original: \n', Contin_XY.matrix)
print('MI: \n', Contin_XY.mi())
# df[Y[0]] = pd.to_numeric(df[Y[0]])
# ate = df.groupby(X)[Y[0]].mean()
# print(ate)
return df
def marginal_repair(self, data, X, Y, method='MF'):
print("marginal_repair")
self.data = data # A contingency table as a matrix
self.columns = data.columns
self.X = X
self.Y = Y
rest = []
for att in self.columns:
if att not in X and att not in Y and att:
rest.insert(0, att)
# computes the contingency table of X and Y and repairs it
Contin_XY = ContinTable()
Contin_XY.data_to_cnt(data, X, Y)
#print('Original: \n', Contin_XY.matrix)
#print('Coupling: \n', Contin_XY.indep_cop())
#print('MI: \n', Contin_XY.mi())
if method == 'MF':
#print('SVD')
contin_matrix_XY_REP = Contin_XY.low_rank_approx() ## repaired contingency table
if method == 'kl':
#print('SVD')
contin_matrix_XY_REP = Contin_XY.low_rank_approx(loss='kullback-leibler')
elif method == 'IC':
#print('method=coupling')
contin_matrix_XY_REP = Contin_XY.indep_cop()
print(Contin_XY.row_index)
print(Contin_XY.col_index)
print(Contin_XY.matrix)
print(contin_matrix_XY_REP)
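# Both repair paths replace the observed X-Y contingency table with one in which X and Y
# are (approximately) independent: 'MF'/'kl' rely on ContinTable.low_rank_approx(), assumed
# here to return a low-rank (rank-1 style) approximation, while 'IC' uses the independent
# coupling outer(row marginals, column marginals) / N.
# Illustrative numbers (not from any dataset): for counts [[30, 10], [10, 50]] the row and
# column marginals are both (40, 60) with N = 100, so the independent coupling is
# [[16, 24], [24, 36]] -- same marginals, zero mutual information.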
#Contin_XY.matrix = contin_matrix_XY_REP
Contin_XY_REP = ContinTable()
Contin_XY_REP.matrix_to_cnt(contin_matrix_XY_REP)
#print('Repaired: \n', Contin_XY_REP.matrix)
#print('MI: \n', Contin_XY_REP.mi())
'''
if Contin_XY_REP.matrix.shape==(2,2):
c1=Contin_XY_REP.matrix.item(0,0)+Contin_XY_REP.matrix.item(0,1)
c2 = Contin_XY_REP.matrix.item(1, 0) + Contin_XY_REP.matrix.item(1, 1)
rep=(c1*Contin_XY_REP.matrix.item(0,1)/c2)-Contin_XY_REP.matrix.item(0, 1)
Contin_XY_REP.matrix[0, 1]=int(rep)+ Contin_XY_REP.matrix[0, 1]
print('repared \n',Contin_XY_REP.matrix)
# computes conditional probabilities of P(Z|XY)
'''
#dim=Contin_XY_REP.matrix.shape
#for i in range(0, dim[0]):
# for j in range(0, dim[1]):
# e = Contin_XY_REP.matrix.item(i, j)
# e=int(e) + (random.random() < e - int(e))
# Contin_XY_REP.matrix[i, j]=e
#Contin_XY_REP = ContinTable()
#Contin_XY_REP.matrix_to_cnt(contin_matrix_XY_REP)
#print('Repaired: \n', Contin_XY_REP.matrix)
#print('MI: \n', Contin_XY_REP.mi())
# a contingency table over all attributes of the dataset
Contin_Full = ContinTable()
Contin_Full.data_to_cnt(data, X ,Y)
Contin_Full.matrix=contin_matrix_XY_REP
final_table = ContinTable()
final_table.matrix_to_cnt(Contin_Full.matrix)
# final_table.matrix=final_table.matrix.astype(int)
# print(final_table.matrix.sum())
# print(Contin_Full.matrix.sum())
# print(final_table.Xmrn)
# print(Contin_XY_REP)
# final_table.Xmrn.reshape(Contin_XY.matrix.shape)
# print(tbl3.mi(m))
# print(Contin_XY.mi())
# print(final_table.Xmrn)
Contin_Full.matrix = final_table.matrix
# print(Contin_Full.matrix,Contin_Full.col_index,Contin_Full.row_index)
Contin_Full.matrix = np.asarray(Contin_Full.matrix.astype(int))
# print(Contin_Full.matrix)
# print(Contin_Full.row_index,Contin_Full.col_index)
# print(Contin_Full.matrix)
df = pd.DataFrame(columns=X + Y)
j = 0
for row in Contin_Full.row_index:
i = 0
for col in Contin_Full.col_index:
# print([Contin_Full.matrix.item(j, i)] + list(row) + list(col))
for z in range(0, Contin_Full.matrix.item(j, i)):
if type(row)!=tuple:
row=tuple([row])
if type(col)!=tuple:
col=tuple([col])
df.loc[-1] = row + col
df.index = df.index + 1 # shifting index
df = df.sort_index()
i += 1
j += 1
Contin_XY = ContinTable()
Contin_XY.data_to_cnt(df, X, Y)
#print('Original: \n', Contin_XY.matrix)
#print('MI: \n', Contin_XY.mi())
# df[Y[0]] = pd.to_numeric(df[Y[0]])
# ate = df.groupby(X)[Y[0]].mean()
# print(ate)
return df
def new_marginal_repair(self, data, X, Y, method='MF'):
self.data = data # A contingency table as a matrix
self.columns = data.columns
self.X = X
self.Y = Y
rest = []
for att in self.columns:
if att not in X and att not in Y and att:
rest.insert(0, att)
# computes the contingency table of X and Y and repairs it
Contin_XY = ContinTable()
Contin_XY.data_to_cnt(data, X, Y)
print('Original: \n', Contin_XY.matrix)
#print('Coupling: \n', Contin_XY.indep_cop())
print('MI: \n', Contin_XY.mi())
if method == 'MF':
print('SVD')
contin_matrix_XY_REP = Contin_XY.low_rank_approx() ## repaired contingency table
elif method == 'IC':
print('method=coupling')
contin_matrix_XY_REP = Contin_XY.indep_cop()
# print(Contin_XY.row_index)
# print(Contin_XY.col_index)
#Contin_XY.matrix = contin_matrix_XY_REP
Contin_XY_REP = ContinTable()
Contin_XY_REP.matrix_to_cnt(contin_matrix_XY_REP)
print('Repaired: \n', Contin_XY_REP.matrix)
print('MI: \n', Contin_XY_REP.mi())
'''
if Contin_XY_REP.matrix.shape==(2,2):
c1=Contin_XY_REP.matrix.item(0,0)+Contin_XY_REP.matrix.item(0,1)
c2 = Contin_XY_REP.matrix.item(1, 0) + Contin_XY_REP.matrix.item(1, 1)
rep=(c1*Contin_XY_REP.matrix.item(0,1)/c2)-Contin_XY_REP.matrix.item(0, 1)
Contin_XY_REP.matrix[0, 1]=int(rep)+ Contin_XY_REP.matrix[0, 1]
print('repared \n',Contin_XY_REP.matrix)
# computes conditional probabilities of P(Z|XY)
'''
#dim=Contin_XY_REP.matrix.shape
#for i in range(0, dim[0]):
# for j in range(0, dim[1]):
# e = Contin_XY_REP.matrix.item(i, j)
# e=int(e) + (random.random() < e - int(e))
# Contin_XY_REP.matrix[i, j]=e
#Contin_XY_REP = ContinTable()
#Contin_XY_REP.matrix_to_cnt(contin_matrix_XY_REP)
#print('Repaired: \n', Contin_XY_REP.matrix)
#print('MI: \n', Contin_XY_REP.mi())
# a contingency table over all attributes of the dataset
Contin_Full = ContinTable()
Contin_Full.data_to_cnt(data, X + Y, rest)
dim = Contin_Full.matrix.shape
org=Contin_XY.matrix.flatten()
org=org.tolist()
if type(org[0])==list:
org = org[0]
rep=contin_matrix_XY_REP.flatten()
rep=rep.tolist()
if type(rep[0])==list:
rep = rep[0]
print(Contin_XY.row_index)
print(Contin_XY.col_index)
print(Contin_Full.row_index)
print(Contin_Full.col_index)
for i in range(0, dim[0]):
for j in range(0, dim[1]):
if org[i]>0:
el=Contin_Full.matrix.item(i, j)
el1 = (el*rep[i])/(org[i])
else:
el = Contin_Full.matrix.item(i, j)
el1 = (el * rep[i])
el1=int(el1) + (random.random() < el1 - int(el1))
# el=el1/float(mrg)
Contin_Full.matrix[i, j]=el1
# if a<=4:
# con_prop[i, j]=math.ceil(el*freq)
# print(el*freq)
# print(con_prop[i, j])
# #print(con_prop[i, j], freq, el1)
# else:
# con_prop[i, j] = math.ceil(el * freq)
# #print(con_prop[i, j],freq,el1)
### repair
print('**********************')
#print(Contin_Full.matrix)
final_table = ContinTable()
final_table.matrix_to_cnt(Contin_Full.matrix)
# final_table.matrix=final_table.matrix.astype(int)
# print(final_table.matrix.sum())
# print(Contin_Full.matrix.sum())
# print(final_table.Xmrn)
# print(Contin_XY_REP)
# final_table.Xmrn.reshape(Contin_XY.matrix.shape)
# print(tbl3.mi(m))
# print(Contin_XY.mi())
# print(final_table.Xmrn)
Contin_Full.matrix = final_table.matrix
# print(Contin_Full.matrix,Contin_Full.col_index,Contin_Full.row_index)
Contin_Full.matrix = np.asarray(Contin_Full.matrix.astype(int))
# print(Contin_Full.matrix)
# print(Contin_Full.row_index,Contin_Full.col_index)
# print(Contin_Full.matrix)
df = pd.DataFrame(columns=X + Y + rest)
j = 0
for row in Contin_Full.row_index:
i = 0
for col in Contin_Full.col_index:
# print([Contin_Full.matrix.item(j, i)] + list(row) + list(col))
for z in range(0, Contin_Full.matrix.item(j, i)):
df.loc[-1] = list(row) + list(col)
df.index = df.index + 1 # shifting index
df = df.sort_index()
i += 1
j += 1
Contin_XY = ContinTable()
Contin_XY.data_to_cnt(df, X, Y)
print('Original: \n', Contin_XY.matrix)
print('MI: \n', Contin_XY.mi())
# df[Y[0]] = pd.to_numeric(df[Y[0]])
# ate = df.groupby(X)[Y[0]].mean()
# print(ate)
return df
def marginal_repair1(self,data,X,Y,method='IC'):
Z=[] ## Attributes other than X and Y
for att in data.columns:
if att not in X and att not in Y and att:
Z.insert(0,att)
# computes the contingency table of X and Y and repairs it
Contin_XY=ContinTable()
Contin_XY.data_to_cnt(data,X,Y)
print('Original: \n', Contin_XY.matrix)
print('Coupling: \n', Contin_XY.indep_cop())
| |
# Copyright (c) 2020 <NAME> - <EMAIL>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numpy
import _multiscale
from CGLutil.AdaptiveTree import AdaptiveTree
import chimera
import FitMap
import os
import Matrix
import VolumeData
import VolumeViewer
import _contour
import _gaussian
chargedIons = { "MG":2, "NA":1, "CL":-1, "CA":2, "ZN":2, "MN":2, "FE":3, "CO":2, "NI":2 }
# returns the min and max density value in a map
def MinMaxD ( dmap ) :
# dmap - the map
M = dmap.data.full_matrix()
maxM = numpy.max(M)
minM = numpy.min(M)
maxD = min ( numpy.average(M)+numpy.std(M)*10, maxM )
minD = max ( numpy.average(M)-numpy.std(M)*1, minM )
# xray
#maxD = min ( numpy.average(M)+numpy.std(M)*3.5, maxM )
#minD = max ( numpy.average(M)-numpy.std(M)*0.77, minM )
#print "%s - %.2f->%.2f, %.2f->%.2f" % (dmap.name, minD, maxD, minM, maxM )
#minD = numpy.min(M)
#minD, maxD = numpy.min(M), numpy.max(M)
return minD, maxD
# attempt to do Q-score with volume-volume CC rather than sphere points
# works ok, but is not faster - speed was the main reason to try it
# another difference is that with sphere points, the same number of points
# is used at each distance, so the map values at each radial distance weigh evenly
def QscoreM ( atoms, dmap, sigma, agrid=None, allAtTree=None, show=0, log=0, toRAD=2.0, step=0.2, minD=None, maxD=None, useMask=False ) :
xyz = _multiscale.get_atom_coordinates(atoms, transformed = False)
#_contour.affine_transform_vertices ( points1, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )
li,lj,lk = numpy.min ( xyz, axis=0 ) - (toRAD, toRAD, toRAD)
hi,hj,hk = numpy.max ( xyz, axis=0 ) + (toRAD, toRAD, toRAD)
nO = ( li, lj, lk )
#print nO
#print " - bounds - %d %d %d --> %d %d %d --> %d %d %d" % ( li,lj,lk, hi,hj,hk, d1,d2,d3 )
d1, d2, d3 = hi - li, hj - lj, hk - lk
nstep = (step, step, step)
#nstep = (fmap.data.step[0]/2.0, fmap.data.step[1]/2.0, fmap.data.step[2]/2.0 )
nn1 = int ( numpy.ceil ( float(d1) / step) )
nn2 = int ( numpy.ceil ( float(d2) / step) )
nn3 = int ( numpy.ceil ( float(d3) / step) )
#print " - step %.2f, n: %d %d %d" % (S, nn1, nn2, nn3)
nmat = numpy.zeros ( (nn3,nn2,nn1), numpy.float32 )
ii = 1.0 / step
ni = -ii
xyz_to_ijk = ((ii, 0.0, 0.0, ni*nO[0]), (0.0, ii, 0.0, ni*nO[1]), (0.0, 0.0, ii, ni*nO[2]))
ijk_to_xyz = ((step, 0.0, 0.0, nO[0]), (0.0, step, 0.0, nO[1]), (0.0, 0.0, step, nO[2]))
#print ijk_to_xyz
#ijk[:] = xyz
weights = [ 1.0 for a in atoms]
sdevs = [ [sigma, sigma, sigma] for a in atoms ]
cutoff_range = 5
A, B = maxD - minD, minD
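# A and B are intended to rescale the unit reference Gaussian onto the density range of
# the experimental map: roughly maxD at an atom centre, decaying toward minD away from it
# (the masked branch below makes this explicit: A * exp(-0.5 * (d / sigma)**2) + B).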
#ndata = VolumeData.Array_Grid_Data ( nmat, nO, nstep, dmap.data.cell_angles )
#print ndata.xyz_to_ijk_transform
#print ndata.ijk_to_xyz_transform
#Matrix.transform_points(xyz, ndata.xyz_to_ijk_transform)
if useMask == False :
Matrix.transform_points(xyz, xyz_to_ijk)
_gaussian.sum_of_gaussians(xyz, weights, sdevs, cutoff_range, nmat)
#print " -gm max %.3f" % numpy.max ( nmat )
nmat *= A
nmat += B
#print " -gm max %.3f" % numpy.max ( nmat )
# make smaller atom tree...
if 1 and allAtTree != None :
ats_near = []
for at in atoms :
anear = allAtTree.searchTree ( at.coord().data(), toRAD*2.0 )
ats_near.extend ( anear )
points = _multiscale.get_atom_coordinates ( ats_near, transformed = False )
if log :
print " - new search tree: %d pts" % ( len(ats_near) )
allAtTree = AdaptiveTree ( points.tolist(), ats_near, 1.0)
if useMask :
nearAts = []
if agrid != None :
for at in atoms :
nats = agrid.AtsNearPtLocal ( at.coord() )
for nat, v in nats :
if at != nat :
nearAts.append ( nat )
#print " - %s, %d.%s - %.3f" % (nat.name, nat.residue.id.position, nat.residue.id.chainId, v.length)
if allAtTree != None :
for at in atoms :
opointsNear = allAtTree.searchTree ( at.coord(), toRAD )
for nat in opointsNear :
if nat == at :
continue
v = at.coord() - nat.coord()
if v.length < toRAD :
nearAts.append (nat)
if len(nearAts) == 0 :
print " - no near ats?"
#print " - %d near ats" % len(nearAts)
for k in range(nn3) :
pz = nO[2] + float(k)*step
for j in range(nn2) :
py = nO[1] + float(j)*step
for i in range(nn1) :
px = nO[0] + float(i)*step
P = chimera.Point(px, py, pz)
minDToAt = 1e9
for at in atoms :
v = at.coord() - P
if v.length < minDToAt :
minDToAt = v.length
if minDToAt > toRAD :
nmat[k,j,i] = B-0.1
continue
closestToAt = True
for nat in nearAts :
v = nat.coord() - P
if v.length < minDToAt :
closestToAt = False
#break
if not closestToAt :
nmat[k,j,i] = minD-0.1
else :
nmat[k,j,i] = A * numpy.exp ( -0.5 * numpy.power(minDToAt/sigma,2) ) + B
if 0 and agrid :
nearAts = []
for at in atoms :
nats = agrid.AtsNearPtLocal ( at.coord() )
for nat, v in nats :
if at != nat :
print " - %s, %d.%s - %.3f" % (nat.name, nat.residue.id.position, nat.residue.id.chainId, v.length)
nearAts.append ( at )
#print "%d near ats" % len(nearAts)
mat1 = numpy.ones ( (nn1,nn2,nn3), numpy.float32 )
ndata = VolumeData.Array_Grid_Data ( mat1, nO, nstep, dmap.data.cell_angles )
points = _multiscale.get_atom_coordinates(nearAts, transformed = False)
mdata = VolumeData.zone_masked_grid_data ( ndata, points, toRAD, invert_mask=False )
#nmat = mdata.matrix()
nv = VolumeViewer.volume.volume_from_grid_data ( mdata )
nv.openState.xform = dmap.openState.xform
mdata = mask
fpoints = VolumeData.grid_indices ( (nn3,nn2,nn1), numpy.single) # i,j,k indices
_contour.affine_transform_vertices ( fpoints, ijk_to_xyz )
fpoint_weights = numpy.ravel(nmat).astype(numpy.single)
#print " - %d points" % len(fpoints)
ge = numpy.greater_equal(fpoint_weights, B)
fpoints = numpy.compress(ge, fpoints, 0)
fpoint_weights = numpy.compress(ge, fpoint_weights)
#print " - %d above thr" % len(fpoint_weights)
#nz = numpy.nonzero( fpoint_weights )[0]
#print " - %d above thr" % len(nz)
#map_values, outside = VolumeData.interpolate_volume_data(pts, xyz_to_ijk_tf, darray)
#olap0, cc0, other = overlap_and_correlation ( wts, map_values )
map_values = dmap.interpolated_values ( fpoints, atoms[0].molecule.openState.xform )
#print map_values
olap, cc, ccm = FitMap.overlap_and_correlation ( fpoint_weights, map_values )
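# FitMap.overlap_and_correlation returns (overlap, correlation, correlation about the
# mean); the mean-subtracted value ccm is what this function reports as its Q-score-like
# measure.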
#print olap, cc, ccm
if show :
ndata = VolumeData.Array_Grid_Data ( nmat, nO, nstep, dmap.data.cell_angles )
nv = VolumeViewer.volume.volume_from_grid_data ( ndata )
nv.openState.xform = dmap.openState.xform
nv.name = "bam"
return ccm
def zone_mask ( grid_data, zone_points, zone_radius, invert_mask = False, zone_point_mask_values = None ):
from numpy import single as floatc, array, ndarray, zeros, int8, intc
if not isinstance(zone_points, ndarray):
zone_points = array(zone_points, floatc)
if (not zone_point_mask_values is None and not isinstance(zone_point_mask_values, ndarray)):
zone_point_mask_values = array(zone_point_mask_values, int8)
shape = tuple(reversed(grid_data.size))
mask_3d = zeros(shape, int8)
mask_1d = mask_3d.ravel()
if zone_point_mask_values is None:
if invert_mask:
mask_value = 0
mask_1d[:] = 1
else:
mask_value = 1
from VolumeData import grid_indices
from _contour import affine_transform_vertices
from _closepoints import find_closest_points, BOXES_METHOD
size_limit = 2 ** 22 # 4 Mvoxels
if mask_3d.size > size_limit:
# Calculate plane by plane to save memory with grid point array
xsize, ysize, zsize = grid_data.size
grid_points = grid_indices((xsize,ysize,1), floatc)
affine_transform_vertices(grid_points, grid_data.ijk_to_xyz_transform)
zstep = [grid_data.ijk_to_xyz_transform[a][2] for a in range(3)]
for z in range(zsize):
i1, i2, n1 = find_closest_points(BOXES_METHOD, grid_points, zone_points, zone_radius)
offset = xsize*ysize*z
if zone_point_mask_values is None:
mask_1d[i1 + offset] = mask_value
else:
mask_1d[i1 + offset] = zone_point_mask_values[n1]
grid_points[:,:] += zstep
else :
grid_points = grid_indices(grid_data.size, floatc)
affine_transform_vertices(grid_points, grid_data.ijk_to_xyz_transform)
i1, i2, n1 = find_closest_points(BOXES_METHOD, grid_points, zone_points, zone_radius)
if zone_point_mask_values is | |
# elif coin_flip == 1:
# filename.append(raw_filename + '_R')
# right = True
if self.parity == 'combined':
filename.append(raw_filename + '_L')
filename.append(raw_filename+'_R')
# filename is now a list of the correct filenames.
# now add warps if required
if self.number_of_warps != 0:
warp_choice = str(random.randint(0,self.number_of_warps))
if warp_choice !='0':
filename = [s + '_W'+warp_choice for s in filename ]
return filename
def __getitem__(self, idx):
"""
First load the images and collect them as numpy arrays
then collect the label
then collect the metadata (though might be None)
"""
if self.parity == 'both':
T = self.__len__()//2
idx, right = idx % T, idx // T
filename = self.__genfilename__(idx, right)
else:
right = False
filename = self.__genfilename__(idx, right)
image_gifti = [nb.load(self.directory + '/'+individual_filename+'.shape.gii').darrays for individual_filename in filename]
image = []
if self.rotations == True:
rotation_choice = random.randint(0, len(rotation_arr)-1)
if rotation_choice !=0:
for file in image_gifti:
image.extend(item.data[rotation_arr[rotation_choice]] for item in file)
else:
for file in image_gifti:
image.extend(item.data for item in file)
else:
for file in image_gifti:
image.extend(item.data for item in file)
if right == True:
image = [item[reversing_arr] for item in image]
### labels
# if self.number_of_warps != 0:
#
# idx = idx%len(self.input_arr)
# label = self.label[idx]
###### metadata grabbing if necessary
label = self.label[idx]
if self.input_arr.shape[1] > 2:
self.metadata = self.input_arr[:,1:-1]
metadata = self.metadata[idx]
else:
metadata = None
if self.smoothing != False:
for i in range(len(image)):
image[i] = np.clip(image[i], lower_bound[i%len(lower_bound)].item(), upper_bound[i%len(upper_bound)].item())
# torchify if required:
if self.normalisation != None:
if self.normalisation == 'std':
for i in range(len(image)):
image[i] = ( image[i] - means[i%len(means)].item()) / stds[i%len(stds)].item()
elif self.normalisation == 'range':
for i in range(len(image)):
image[i] = image[i] - minima[i%len(minima)].item()
image[i] = image[i] / (maxima[i%len(maxima)].item()- minima[i%len(minima)].item())
if self.output_as_torch:
image = torch.Tensor( image )
label = torch.Tensor( [label] )
if isinstance(metadata,np.ndarray):
metadata = torch.Tensor( [metadata] )#.squeeze(1)
if hasattr(metadata,'shape'):
sample = Data(x = image.permute(1,0), metadata = metadata, edge_index = self.edges,
y = label)
else:
sample = Data(x = image.permute(1,0), edge_index = self.edges,
y = label)
return sample
class My_dHCP_Data_Graph_Test_Rot(torch.utils.data.Dataset):
def __init__(self, input_arr, warped_files_directory, unwarped_files_directory, edges, rotations = False,
number_of_warps = 0, parity_choice = 'left', smoothing = False, normalisation = None, projected =False,
sample_only = True, output_as_torch = True):
"""
A Full Dataset for the dHCP Data. Can include warps, rotations and parity flips.
Filename style:
in the array: only 'sub-X-ses-Y'
but for the filenames themselves
Left = 'sub-X_ses-Y_L'
Right = 'sub-X_ses-Y_R'
if warped:
'sub-X_ses-Y_L_W1'
INPUT ARGS:
1. input_arr:
Numpy array size Nx2
FIRST index MUST be the filename (excluding directory AND L or R ) of MERGED nibabel files
LAST index must be the (float) label
(OPTIONAL) Middle index if size 3 (optional) is any confounding metadata (Also float, e.g scan age for predicting birth age)
2 . rotations - boolean: to add rotations or not to add rotations
3. number of warps to include - INT
NB WARPS ARE INCLUDED AS FILENAME CHANGES. WARP NUMBER X IS WRITTEN AS filename_WX
NUMBER OF WARPS CANNOT EXCEED NUMBER OF WARPS PRESENT IN FILES
4. Parity Choice (IMPORTANT!) - defines how left and right hemispheres are handled
If: 'left'- will output ONLY LEFT
If: 'both' - will randomly choose L or R
If 'combined' - will output a combined array (left first), eventually read as a file with twice the number of input channels, as the two hemispheres are stacked together
5. smoothing - boolean, will clip extremal values according to the smoothing_array
6. normalisation - str. Will normalise according to 'range', 'std' or 'None'
Range is from -1 to 1
Std is mean = 0, std = 1
7. output_as_torch - boolean:
outputs values as torch Tensors if you want (usually yes)
"""
self.input_arr = input_arr
self.image_files = input_arr[:,0]
self.label = input_arr[:,-1]
self.edges = edges
self.rotations = rotations
self.projected = False
self.number_of_warps = number_of_warps
self.parity = parity_choice
self.smoothing = smoothing
self.normalisation = normalisation
self.sample_only = sample_only
self.output_as_torch = output_as_torch
if self.number_of_warps != 0 and self.number_of_warps != None:
self.directory = warped_files_directory
else:
self.directory = unwarped_files_directory
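# Illustrative usage (a sketch, not part of this file; the array, directories and the
# `edges` index tensor are assumptions supplied by the caller):
#   ds = My_dHCP_Data_Graph_Test_Rot(train_arr, warped_dir, unwarped_dir, edges,
#                                    rotations=True, number_of_warps=99,
#                                    parity_choice='both', normalisation='std')
#   sample = ds[0]   # a torch_geometric Data object
#   sample.x.shape   # [num_vertices, num_channels]; sample.y holds the label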
def __len__(self):
L = len(self.input_arr)
if self.number_of_warps !=0:
if self.sample_only == False:
L = L*self.number_of_warps
if self.parity == 'both':
L = 2* L
return L
def __test_input_params__(self):
assert self.input_arr.shape[1] >=2, 'check your input array is a numpy array of files and labels'
assert type(self.number_of_warps) == int, "number of warps must be an integer (can be 0)"
assert self.parity in ['left', 'both', 'combined'], "parity choice must be either left, combined or both"
if self.number_of_rotations != 0:
assert self.rotation_arr != None,'Must specify a rotation file containing rotation vertex ids if rotations are non-zero'
assert isinstance(self.rotations, bool), 'rotations must be boolean'
assert self.normalisation in [None, 'none', 'std', 'range'], 'Normalisation must be either std or range'
def __genfilename__(self,idx, right):
"""
gets the appropriate file based on input parameters on PARITY and on WARPS
"""
# grab raw filename
raw_filename = self.image_files[idx]
# add parity to it. IN THE FORM OF A LIST! If requries both will output a list of length 2
filename = []
if self.parity != 'combined':
if right == True:
filename.append(raw_filename + '_R')
else:
filename.append(raw_filename + '_L')
# if self.parity == 'left':
# filename.append(raw_filename + '_L')
#
# elif self.parity == 'both':
# coin_flip = random.randint(0,1)
# if coin_flip == 0:
# filename.append(raw_filename + '_L')
# elif coin_flip == 1:
# filename.append(raw_filename + '_R')
# right = True
if self.parity == 'combined':
filename.append(raw_filename + '_L')
filename.append(raw_filename+'_R')
# filename is now a list of the correct filenames.
# now add warps if required
if self.number_of_warps != 0:
warp_choice = str(random.randint(0,self.number_of_warps))
if warp_choice !='0':
filename = [s + '_W'+warp_choice for s in filename ]
return filename
def __getitem__(self, idx):
"""
First load the images and collect them as numpy arrays
then collect the label
then collect the metadata (though might be None)
"""
if self.parity == 'both':
T = self.__len__()//2
idx, right = idx % T, idx // T
filename = self.__genfilename__(idx, right)
else:
right = False
filename = self.__genfilename__(idx, right)
image_gifti = [nb.load(self.directory + '/'+individual_filename+'.shape.gii').darrays for individual_filename in filename]
image = []
if self.rotations == True:
rotation_choice = random.randint(0, len(test_rotation_arr)-1)
if rotation_choice !=0:
for file in image_gifti:
image.extend(item.data[test_rotation_arr[rotation_choice]] for item in file)
else:
for file in image_gifti:
image.extend(item.data for item in file)
else:
for file in image_gifti:
image.extend(item.data for item in file)
if right == True:
image = [item[reversing_arr] for item in image]
### labels
# if self.number_of_warps != 0:
#
# idx = idx%len(self.input_arr)
# label = self.label[idx]
###### metadata grabbing if necessary
label = self.label[idx]
if self.input_arr.shape[1] > 2:
self.metadata = self.input_arr[:,1:-1]
metadata = self.metadata[idx]
else:
metadata = None
if self.smoothing != False:
for i in range(len(image)):
image[i] = np.clip(image[i], lower_bound[i%len(lower_bound)].item(), upper_bound[i%len(upper_bound)].item())
# torchify if required:
if self.normalisation != None:
if self.normalisation == 'std':
for i in range(len(image)):
image[i] = ( image[i] - means[i%len(means)].item()) / stds[i%len(stds)].item()
elif self.normalisation == 'range':
for i in range(len(image)):
image[i] = image[i] - minima[i%len(minima)].item()
image[i] = image[i] / (maxima[i%len(maxima)].item()- minima[i%len(minima)].item())
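# Note: as written, 'range' normalisation rescales each channel to [0, 1]; the "-1 to 1"
# mentioned in the class docstring would need a further "* 2 - 1".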
if self.output_as_torch:
image = torch.Tensor( image )
label = torch.Tensor( [label] )
if isinstance(metadata,np.ndarray):
metadata = torch.Tensor( [metadata] )#.squeeze(1)
if hasattr(metadata,'shape'):
sample = Data(x = image.permute(1,0), metadata = metadata, | |
-> Optional[pulumi.Input[Sequence[pulumi.Input['CheckAlertChannelSubscriptionArgs']]]]:
return pulumi.get(self, "alert_channel_subscriptions")
@alert_channel_subscriptions.setter
def alert_channel_subscriptions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['CheckAlertChannelSubscriptionArgs']]]]):
pulumi.set(self, "alert_channel_subscriptions", value)
@property
@pulumi.getter(name="alertSettings")
def alert_settings(self) -> Optional[pulumi.Input['CheckAlertSettingsArgs']]:
"""
. Supported values documented below.
"""
return pulumi.get(self, "alert_settings")
@alert_settings.setter
def alert_settings(self, value: Optional[pulumi.Input['CheckAlertSettingsArgs']]):
pulumi.set(self, "alert_settings", value)
@property
@pulumi.getter(name="degradedResponseTime")
def degraded_response_time(self) -> Optional[pulumi.Input[int]]:
"""
The response time in milliseconds where a check should be considered degraded. Possible values are between 0 and 30000. Defaults to `15000`.
"""
return pulumi.get(self, "degraded_response_time")
@degraded_response_time.setter
def degraded_response_time(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "degraded_response_time", value)
@property
@pulumi.getter(name="doubleCheck")
def double_check(self) -> Optional[pulumi.Input[bool]]:
"""
Setting this to "true" will trigger a retry when a check fails from the failing region and another, randomly selected region before marking the check as failed. Possible values `true`, and `false`.
"""
return pulumi.get(self, "double_check")
@double_check.setter
def double_check(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "double_check", value)
@property
@pulumi.getter(name="environmentVariables")
def environment_variables(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
Key/value pairs for setting environment variables during check execution. These are only relevant for Browser checks. Use global environment variables whenever possible.
"""
return pulumi.get(self, "environment_variables")
@environment_variables.setter
def environment_variables(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "environment_variables", value)
@property
@pulumi.getter
def frequency(self) -> Optional[pulumi.Input[int]]:
"""
The frequency in minutes to run the check. Possible values are `0`, `1`, `5`, `10`, `15`, `30`, `60`, `720`, and `1440`.
"""
return pulumi.get(self, "frequency")
@frequency.setter
def frequency(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "frequency", value)
@property
@pulumi.getter(name="frequencyOffset")
def frequency_offset(self) -> Optional[pulumi.Input[int]]:
"""
This property is only valid for high-frequency API checks. To create a high-frequency check, the property `frequency` must be `0` and `frequency_offset` must be one of `10`, `20` or `30`.
"""
return pulumi.get(self, "frequency_offset")
@frequency_offset.setter
def frequency_offset(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "frequency_offset", value)
@property
@pulumi.getter(name="groupId")
def group_id(self) -> Optional[pulumi.Input[int]]:
"""
The ID of the check group this check is part of.
"""
return pulumi.get(self, "group_id")
@group_id.setter
def group_id(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "group_id", value)
@property
@pulumi.getter(name="groupOrder")
def group_order(self) -> Optional[pulumi.Input[int]]:
"""
The position of this check in a check group. It determines in what order checks are run when a group is triggered from the API or from CI/CD.
"""
return pulumi.get(self, "group_order")
@group_order.setter
def group_order(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "group_order", value)
@property
@pulumi.getter(name="localSetupScript")
def local_setup_script(self) -> Optional[pulumi.Input[str]]:
"""
A valid piece of Node.js code to run in the setup phase.
"""
return pulumi.get(self, "local_setup_script")
@local_setup_script.setter
def local_setup_script(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "local_setup_script", value)
@property
@pulumi.getter(name="localTeardownScript")
def local_teardown_script(self) -> Optional[pulumi.Input[str]]:
"""
A valid piece of Node.js code to run in the teardown phase.
"""
return pulumi.get(self, "local_teardown_script")
@local_teardown_script.setter
def local_teardown_script(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "local_teardown_script", value)
@property
@pulumi.getter
def locations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
An array of one or more data center locations where this check should run. Defaults to `["us-east-1"]`.
"""
return pulumi.get(self, "locations")
@locations.setter
def locations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "locations", value)
@property
@pulumi.getter(name="maxResponseTime")
def max_response_time(self) -> Optional[pulumi.Input[int]]:
"""
The response time in milliseconds where a check should be considered failing. Possible values are between 0 and 30000. Defaults to `30000`.
"""
return pulumi.get(self, "max_response_time")
@max_response_time.setter
def max_response_time(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_response_time", value)
@property
@pulumi.getter
def muted(self) -> Optional[pulumi.Input[bool]]:
"""
Determines if any notifications will be sent out when a check fails and/or recovers. Possible values are `true` and `false`.
"""
return pulumi.get(self, "muted")
@muted.setter
def muted(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "muted", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the check.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def request(self) -> Optional[pulumi.Input['CheckRequestArgs']]:
"""
An API check can have one request config. Supported values are documented below.
"""
return pulumi.get(self, "request")
@request.setter
def request(self, value: Optional[pulumi.Input['CheckRequestArgs']]):
pulumi.set(self, "request", value)
@property
@pulumi.getter(name="runtimeId")
def runtime_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the runtime to use for this check.
"""
return pulumi.get(self, "runtime_id")
@runtime_id.setter
def runtime_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "runtime_id", value)
@property
@pulumi.getter
def script(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "script")
@script.setter
def script(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "script", value)
@property
@pulumi.getter(name="setupSnippetId")
def setup_snippet_id(self) -> Optional[pulumi.Input[int]]:
"""
An ID reference to a snippet to use in the setup phase of an API check.
"""
return pulumi.get(self, "setup_snippet_id")
@setup_snippet_id.setter
def setup_snippet_id(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "setup_snippet_id", value)
@property
@pulumi.getter(name="shouldFail")
def should_fail(self) -> Optional[pulumi.Input[bool]]:
"""
Inverts the behaviour of when a check is considered failing, which allows validating error statuses like 404. Possible values are `true` and `false`.
"""
return pulumi.get(self, "should_fail")
@should_fail.setter
def should_fail(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "should_fail", value)
@property
@pulumi.getter(name="sslCheck")
def ssl_check(self) -> Optional[pulumi.Input[bool]]:
"""
Determines if the SSL certificate should be validated for expiry. Possible values are `true` and `false`.
"""
return pulumi.get(self, "ssl_check")
@ssl_check.setter
def ssl_check(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "ssl_check", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="teardownSnippetId")
def teardown_snippet_id(self) -> Optional[pulumi.Input[int]]:
"""
An ID reference to a snippet to use in the teardown phase of an API check.
"""
return pulumi.get(self, "teardown_snippet_id")
@teardown_snippet_id.setter
def teardown_snippet_id(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "teardown_snippet_id", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
The type of the check. Possible values are `API`, and `BROWSER`.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="useGlobalAlertSettings")
def use_global_alert_settings(self) -> Optional[pulumi.Input[bool]]:
"""
When `true`, the account-level alert settings are used instead of the alert settings defined on this check. Possible values are `true` and `false`.
"""
return pulumi.get(self, "use_global_alert_settings")
@use_global_alert_settings.setter
def use_global_alert_settings(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "use_global_alert_settings", value)
class Check(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
activated: Optional[pulumi.Input[bool]] = None,
alert_channel_subscriptions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['CheckAlertChannelSubscriptionArgs']]]]] = None,
alert_settings: Optional[pulumi.Input[pulumi.InputType['CheckAlertSettingsArgs']]] = None,
degraded_response_time: Optional[pulumi.Input[int]] = None,
double_check: Optional[pulumi.Input[bool]] = None,
environment_variables: Optional[pulumi.Input[Mapping[str, Any]]] = None,
frequency: Optional[pulumi.Input[int]] = None,
frequency_offset: Optional[pulumi.Input[int]] = None,
group_id: Optional[pulumi.Input[int]] = None,
group_order: Optional[pulumi.Input[int]] = None,
local_setup_script: Optional[pulumi.Input[str]] = None,
local_teardown_script: Optional[pulumi.Input[str]] = None,
locations: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
max_response_time: Optional[pulumi.Input[int]] = None,
muted: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
request: Optional[pulumi.Input[pulumi.InputType['CheckRequestArgs']]] = None,
runtime_id: Optional[pulumi.Input[str]] = None,
script: Optional[pulumi.Input[str]] = None,
setup_snippet_id: Optional[pulumi.Input[int]] = None,
should_fail: Optional[pulumi.Input[bool]] = None,
ssl_check: Optional[pulumi.Input[bool]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
teardown_snippet_id: Optional[pulumi.Input[int]] = None,
type: Optional[pulumi.Input[str]] = None,
use_global_alert_settings: Optional[pulumi.Input[bool]] = None,
__props__=None):
"""
Create a Check resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] activated: Determines if the check is running or not. Possible values are `true` and `false`.
:param pulumi.Input[pulumi.InputType['CheckAlertSettingsArgs']] alert_settings: Supported values are documented below.
:param pulumi.Input[int] degraded_response_time: The response time in milliseconds where a check should be considered degraded. Possible values are between 0 and 30000. Defaults to `15000`.
:param pulumi.Input[bool] double_check: Setting this to `true` will trigger a retry from the failing region and another, randomly selected region before the check is marked as failed. Possible values are `true` and `false`.
:param pulumi.Input[Mapping[str, Any]] environment_variables: Key/value pairs for setting environment variables during check execution. These are only relevant for Browser checks. Use global environment variables whenever possible.
:param pulumi.Input[int] frequency: The frequency in minutes to run the check. Possible values are `0`, `1`, `5`, `10`, `15`, `30`, `60`, `720`, and `1440`.
:param pulumi.Input[int] frequency_offset: This property is only valid for high-frequency API checks. To create a high-frequency check, the property `frequency` must be `0` and `frequency_offset` must be one of `10`, `20` or `30`.
:param pulumi.Input[int] group_id: The ID of the check group this check is part of.
:param pulumi.Input[int] group_order: The position of this check in a check group. It determines in what order checks are run when a group is triggered from the API or from CI/CD.
:param pulumi.Input[str] local_setup_script: A valid piece of Node.js code to run in the setup phase.
:param pulumi.Input[str] local_teardown_script: A valid piece of Node.js code to run in the teardown phase.
:param pulumi.Input[Sequence[pulumi.Input[str]]] locations: An array of one or more data center locations where this check should run. Defaults to `["us-east-1"]`.
:param pulumi.Input[int] max_response_time: The response time in milliseconds where a check should be considered failing. Possible values are between 0 and 30000. Defaults to `30000`.
import unittest
import numpy as np
import tinygfx.g3d as cg
from tinygfx.g3d import primitives
class TestCountedObject(unittest.TestCase):
def setUp(self):
self.obj = cg.CountedObject()
def test_count_incrementing(self):
obj_id = self.obj.get_id()
for _ in range(20):
next_id = cg.CountedObject().get_id()
self.assertTrue(next_id > obj_id)
obj_id = next_id
class WorldObjectTestCase(unittest.TestCase):
"""
a base class whose setup method creates a new world object
"""
def setUp(self):
self._obj = cg.WorldObject()
class TestWorldObjectCreation(WorldObjectTestCase):
def test_object_creation(self):
# the object should be centered at the origin facing the positive z-axis
self.assertTrue(np.array_equal(self._obj.get_position(), primitives.Point(0, 0, 0)))
self.assertTrue(np.array_equal(self._obj.get_orientation(), primitives.Vector(0, 0, 1)))
def test_modifying_transform_matrix(self):
# mutating a returned transform matrix should not affect the object's stored transform
tx_matrix = self._obj.get_world_transform()
tx_matrix[3, 0] = 5
tx_matrix = self._obj.get_world_transform()
self.assertNotEqual(tx_matrix[3, 0], 5)
tx_matrix = self._obj.get_object_transform()
tx_matrix[3, 0] = 5
tx_matrix = self._obj.get_object_transform()
self.assertNotEqual(tx_matrix[3, 0], 5)
def test_updating_world_transform(self):
# updating the world transform should be reflected in the object transform the next time it is queried
self._obj.scale_all(10)
to_world_mat = self._obj.get_object_transform()
expected_matrix = np.identity(4, dtype=float)
for x in range(3):
expected_matrix[x, x] = 0.1
self.assertTrue(np.allclose(expected_matrix, to_world_mat))
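# Note on the expected values above (a sketch of the relationship the assertions rely on): if the
# world transform scales the object by 10, the object (world -> local) transform is its inverse,
# so its diagonal holds 1/10 = 0.1, which is exactly what expected_matrix encodes.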
def test_getting_world_coordinates(self):
self._obj.scale_all(10)
local_point = primitives.Point(1, 1, 1)
world_point = self._obj.to_world_coordinates(local_point)
self.assertTrue(np.allclose(world_point, primitives.Point(10, 10, 10)))
class TestWorldObjectScaling(WorldObjectTestCase):
def setUp(self):
super().setUp() # call the parent setup function to make the object
self._obj.move(1, 1, 1) # move the object to 1,1,1 so scale operations work
def test_orientation_does_not_scale(self):
self._obj.scale(100, 100, 100)
self.assertTrue(np.allclose(self._obj.get_orientation(), primitives.Vector(0, 0, 1.)))
def test_single_axis_scale(self):
# the object was moved to 1,1,1 in setUp so that scaling affects its position
scale_axes = "xyz"
scale_fns = [getattr(self._obj, "scale_" + axis) for axis in scale_axes]
scale_values = [3, 4, 5]
for fn, scale in zip(scale_fns, scale_values):
fn(scale)
self.assertTrue(np.allclose(self._obj.get_position(), primitives.Point(*scale_values)),
f"{self._obj.get_position()}")
def test_3axis_scale(self):
scale_values = (3, 4, 5)
self._obj.scale(*scale_values)
self.assertTrue(np.allclose(self._obj.get_position(), primitives.Point(*scale_values)),
f"{self._obj.get_position()}")
def test_chained_scale(self):
scale_values = (3, 4, 5)
self._obj.scale_x(scale_values[0]).scale_y(scale_values[1]).scale_z(scale_values[2])
self.assertTrue(np.allclose(self._obj.get_position(), primitives.Point(*scale_values)),
f"{self._obj.get_position()}")
def test_scale_all(self):
scale_factor = 1000
expected_pos = scale_factor * np.ones(3)
self._obj.scale_all(scale_factor)
self.assertTrue(np.allclose(self._obj.get_position(), primitives.Point(*expected_pos)),
f"{self._obj.get_position()}")
def test_negative_scaling(self):
# negative values should raise an exception
scale_fns = (getattr(self._obj, x) for x in ['scale', 'scale_x', 'scale_y', 'scale_z', 'scale_all'])
for fn in scale_fns:
with self.assertRaises(ValueError):
fn(-1)
def test_invalid_norm(self):
# force a scale of zero and assert an error is raised
scale_factor = 0
with self.assertRaises(ValueError):
self._obj.scale_all(scale_factor)
class TestWorldObjectTranslation(WorldObjectTestCase):
def test_3axis_movement(self):
my_obj = cg.WorldObject()
# moving the object multiple times should change its position but not its orientation
move_vector = np.array((1, 2, -5))
my_obj.move(*move_vector) # move the object in space
self.assertTrue(np.array_equal(my_obj.get_position(), primitives.Point(*move_vector)))
# reversing the move gets you back to the origin
my_obj.move(*(-move_vector)) # move the object in space
self.assertTrue(np.array_equal(my_obj.get_position(), primitives.Point()))
def test_single_axis_movement(self):
# individual move functions execute properly
attr_names = ["move_" + direction for direction in "xyz"]
move_attrs = [getattr(self._obj, attribute) for attribute in attr_names]
movement = 3
for n, fn_call in enumerate(move_attrs):
fn_call(movement)
self.assertEqual(self._obj.get_position()[n], movement)
def test_chained_movement(self):
movement = 3
self._obj.move_x(movement).move_y(movement).move_z(movement)
self.assertTrue(np.array_equal(self._obj.get_position(), primitives.Point(movement, movement, movement)))
class TestWorldObjectRotation(WorldObjectTestCase):
def test_rotation(self):
my_obj = self._obj
# rotation about the y-axis by 90 degrees should change the direction vector to x
my_obj.rotate_y(90, units="deg")
self.assertTrue(np.allclose(my_obj.get_orientation(), primitives.Vector(1., 0, 0)))
# now rotating it about the z-axis by 90 degrees should have it point to positive y
my_obj.rotate_z(90, units="deg")
self.assertTrue(np.allclose(my_obj.get_orientation(), primitives.Vector(0, 1., 0)))
# rotating 90 degrees about the x-axis should reset it to positive z
my_obj.rotate_x(90, units="deg")
self.assertTrue(np.allclose(my_obj.get_orientation(), primitives.Vector(0, 0, 1.)))
def test_rotation_chain(self):
# rotations should be able to be cascaded
self._obj.rotate_y(90, units="deg").rotate_z(90, units="deg").rotate_x(90, units="deg")
self.assertTrue(np.allclose(self._obj.get_orientation(), primitives.Vector(0, 0, 1.)))
def test_rotation_units(self):
# rotations should work for radians and degrees
rotation_angles = [90, np.pi / 2]
rotation_units = ["deg", "rad"]
for angle, unit in zip(rotation_angles, rotation_units):
self._obj.rotate_y(angle, units=unit).rotate_z(angle, units=unit).rotate_x(angle, units=unit)
self.assertTrue(np.allclose(self._obj.get_orientation(), primitives.Vector(0, 0, 1.)),
f"Test Failed for unit {unit}, has orientation {self._obj.get_orientation()}")
# make sure that an invalid rotation option raises an error
with self.assertRaises(ValueError):
self._obj.rotate_x(90, "Chickens")
class TestWorldObjectQuaternion(WorldObjectTestCase):
def test_default_quat(self):
quat = self._obj.get_quaternion()
self.assertTrue(np.allclose(quat[:3], np.zeros(3)))
self.assertAlmostEqual(quat[-1], 1.)
def test_single_axis_quat(self):
rotation_angle = np.pi / 2
self._obj.rotate_y(rotation_angle, "rad") # rotate along the y-axis by 90 degrees
quat = self._obj.get_quaternion()
expected_vect = np.asarray((0, 1, 0)) * np.sqrt(2) / 2
expected_scalar = np.sqrt(2) / 2
self.assertTrue(np.allclose(quat[:3], expected_vect))
self.assertAlmostEqual(quat[-1], expected_scalar)
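# Reasoning behind the expected values above: for a rotation of angle t about a unit axis n the
# quaternion is (n * sin(t/2), cos(t/2)); with t = pi/2 about the y-axis both sin(pi/4) and
# cos(pi/4) equal sqrt(2)/2, matching expected_vect and expected_scalar.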
class TestObjectGroup(unittest.TestCase):
def setUp(self):
self.group = cg.ObjectGroup()
self.obj1 = cg.WorldObject()
self.obj2 = cg.WorldObject()
self.group.append(self.obj1)
self.group.append(self.obj2)
def testing_list_properties(self):
# the group should have two elements in it
self.assertEqual(len(self.group), 2)
# the group should be iterable
expected_refs = (self.obj1, self.obj2)
for expected, actual in zip(expected_refs, self.group):
self.assertEqual(expected, actual)
self.assertTrue(hasattr(self.group, '__iter__'))
def testing_operations_on_group(self):
# objects can be moved individually before group-level operations are applied
self.obj1.move(1, 0, 0)
self.obj2.move(-1, 0, 0)
scale = 2
self.group.scale_all(scale) # now scale the group by 2
self.assertTrue(np.allclose(self.obj1.get_position(), primitives.Point(scale, 0, 0)))
self.assertTrue(np.allclose(self.obj2.get_position(), primitives.Point(-scale, 0, 0)))
# rotation also applies
self.group.rotate_z(90)
self.assertTrue(np.allclose(self.obj1.get_position(), primitives.Point(0, scale, 0)))
self.assertTrue(np.allclose(self.obj2.get_position(), primitives.Point(0, -scale, 0)))
def test_nesting_object_group(self):
# make a subgroup and append it to the top level group
subgroup = cg.ObjectGroup()
sub_object = cg.WorldObject()
sub_object.move(1, 0, 0)
subgroup.append(sub_object)
self.group.append(subgroup)
x_movement = 3
self.group.move_x(x_movement) # move the top level group
self.assertTrue(np.allclose(subgroup.get_position(), primitives.Point(x_movement, 0, 0)))
self.assertTrue(np.allclose(sub_object.get_position(), primitives.Point(x_movement + 1, 0, 0)))
class TestTracerSurface(unittest.TestCase):
def setUp(self) -> None:
self.radius = 3
self.sphere = cg.Sphere(self.radius)
def test_multiple_surface_id(self):
sphere_id = self.sphere.get_id()
cylinder_id = cg.Cylinder().get_id()
self.assertNotEqual(sphere_id, cylinder_id)
def test_getting_surface_id(self):
sphere_id = self.sphere.get_id()
id_tuple = self.sphere.surface_ids[0]
self.assertEqual(sphere_id, id_tuple[0])
# make sure we can reference the surface
self.assertEqual(id_tuple[1].primitive.get_radius(), 3)
def test_intersection_results(self):
# intersection should return 2 arrays
n_rays = 11
y_vals = np.linspace(-2, 2, n_rays)
rays = cg.bundle_of_rays(n_rays)
rays[1, 0] = 1 # point towards the right axis
rays[0, 0] = -5
rays[0, 1] = y_vals
hits, surface_ids = self.sphere.intersect(rays)
self.assertEqual(hits.shape, surface_ids.shape)
self.assertTrue(np.allclose(surface_ids, self.sphere.get_id()))
def test_intersection_moved_object(self):
# if we move the sphere by its radius, a ray at (0,0,-1) with v=(0,0,1) will intersect at t=1
self.sphere.move_x(3)
test_ray = primitives.Ray(origin=cg.Point(0,0,-1), direction = cg.Vector(0,0,1))
hit, surface = self.sphere.intersect(test_ray)
self.assertTrue(np.allclose(hit, 1), f"expected 1 for all hits but got {hit}")
class TestTracerSurfaceBoundingBox(unittest.TestCase):
def setUp(self) -> None:
self.surface = cg.Sphere(1)
def test_default_bounding_box(self):
"""
The Default bounding box should be a regular cube
:return:
"""
corners = self.surface.bounding_volume.bounding_points
expected_corners = {
(-1, -1, -1),
(-1, -1, 1),
(-1, 1, -1),
(-1, 1, 1),
(1, -1, -1),
(1, -1, 1),
(1, 1, -1),
(1, 1, 1),
}
self.assertEqual(expected_corners, set(map(tuple, corners[:3].T)))
def test_scaling_bounding_box(self):
"""
Scaling the object should scale the bounding box
:return:
"""
self.surface.scale_z(3)
corners = self.surface.bounding_volume.bounding_points
expected_corners = {
(-1, -1, -3),
(-1, -1, 3),
(-1, 1, -3),
(-1, 1, 3),
(1, -1, -3),
(1, -1, 3),
(1, 1, -3),
(1, 1, 3),
}
self.assertEqual(expected_corners, set(map(tuple, corners[:3].T)))
def test_moving_bounding_box(self):
"""
Moving the object should translate the bounding box
:return:
"""
self.surface.move_x(1)
corners = self.surface.bounding_volume.bounding_points
expected_corners = {
(0, -1, -1),
(0, -1, 1),
(0, 1, -1),
(0, 1, 1),
(2, -1, -1),
(2, -1, 1),
(2, 1, -1),
(2, 1, 1),
}
self.assertEqual(expected_corners, set(map(tuple, corners[:3].T)))
def test_rotating_bounding_box(self):
"""
Can rotate the object which will rotate the bounding box
:return:
"""
self.surface.scale_z(2)
self.surface.move_z(1)
self.surface.rotate_y(90)
corners = self.surface.bounding_volume.bounding_points
expected_corners = {
(-1, -1, -1),
(-1, -1, 1),
(-1, 1, -1),
(-1, 1, 1),
(3, -1, -1),
(3, -1, 1),
(3, 1, -1),
(3, 1, 1),
}
actual = set(map(tuple, corners[:3].astype(int).T))
self.assertEqual(expected_corners, actual, f"Expected:\n {expected_corners}\n\ngot:\n{actual}")
# These should be moved into a TracerSurface test to make sure the right values are returned
# class TestSphere(unittest.TestCase):
# def setUp(self) -> None:
# self.radius = 3
# self.sphere = cg.Sphere(self.radius)
# self.ray = primitives.Ray(direction=primitives.Vector(1, 0, 0))
#
# self.intersection_points = ((0, 0, -1), (0, 0, 1), (0, 1, 0), (0, -1, 0), (1, 0, 0), (-1, 0, 0))
# self.intersections = [primitives.Point(*intersection) for intersection in self.intersection_points]
#
# def test_intersection_scaled_sphere(self):
# # if the sphere is scaled, the intersection should grow with the scaling
# scale_factor = 10
# self.sphere.scale_all(scale_factor)
# hit = self.sphere.intersect(self.ray)
# self.assertAlmostEqual(hit[0], scale_factor * self.radius)
#
# def test_intersection_translated_sphere(self):
# movement = 10
# self.sphere.move_x(movement)
# hit = self.sphere.intersect(self.ray)
# self.assertAlmostEqual(hit[0], movement - self.radius)
#
# def test_normals_scaled_sphere(self):
# # scaling a sphere should have no effect on the normals
# scaling = 5
# self.sphere.scale_all(scaling)
# scaled_intersection_points = ((0, 0, -5), (0, 0, 5), (0, 5, 0), (0, -5, 0), (5, 0, 0), (-5, 0, 0))
# self.intersections = [primitives.Point(*intersection) for intersection in scaled_intersection_points]
# # for a nontransformed sphere the normals should be vectors of the coordinates
# normals = [self.sphere.get_world_normals(intersection) for intersection in self.intersections]
# for normal, intersection in zip(normals, scaled_intersection_points):
# expected = primitives.Vector(*intersection) / scaling
# self.assertTrue(np.allclose(normal, expected))
# self.assertAlmostEqual(np.linalg.norm(normal), 1.0)
#!/usr/bin/env python3
# System imports
from math import sqrt
import sys
import unittest
# Import NumPy
import numpy_demo as np
major, minor = [ int(d) for d in np.__version__.split(".")[:2] ]
if major == 0: BadListError = TypeError
else: BadListError = ValueError
import Tensor
######################################################################
class TensorTestCase(unittest.TestCase):
def __init__(self, methodName="runTests"):
unittest.TestCase.__init__(self, methodName)
self.typeStr = "double"
self.typeCode = "d"
self.result = sqrt(28.0/8)
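# Where 28/8 comes from (assuming the wrapped Norm routine returns the root-mean-square of all
# elements): the tensor used in testNorm below has squared entries 0+1+4+9+9+4+1+0 = 28 spread
# over 8 elements, so the expected norm is sqrt(28/8).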
# Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
def testNorm(self):
"Test norm function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
norm = Tensor.__dict__[self.typeStr + "Norm"]
tensor = [[[0, 1], [2, 3]],
[[3, 2], [1, 0]]]
if isinstance(self.result, int):
self.assertEqual(norm(tensor), self.result)
else:
self.assertAlmostEqual(norm(tensor), self.result, 6)
# Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
def testNormBadList(self):
"Test norm function with bad list"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
norm = Tensor.__dict__[self.typeStr + "Norm"]
tensor = [[[0, "one"], [2, 3]],
[[3, "two"], [1, 0]]]
self.assertRaises(BadListError, norm, tensor)
# Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
def testNormWrongDim(self):
"Test norm function with wrong dimensions"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
norm = Tensor.__dict__[self.typeStr + "Norm"]
tensor = [[0, 1, 2, 3],
[3, 2, 1, 0]]
self.assertRaises(TypeError, norm, tensor)
# Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
def testNormWrongSize(self):
"Test norm function with wrong size"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
norm = Tensor.__dict__[self.typeStr + "Norm"]
tensor = [[[0, 1, 0], [2, 3, 2]],
[[3, 2, 3], [1, 0, 1]]]
self.assertRaises(TypeError, norm, tensor)
# Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
def testNormNonContainer(self):
"Test norm function with non-container"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
norm = Tensor.__dict__[self.typeStr + "Norm"]
self.assertRaises(TypeError, norm, None)
# Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testMax(self):
"Test max function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
max = Tensor.__dict__[self.typeStr + "Max"]
tensor = [[[1, 2], [3, 4]],
[[5, 6], [7, 8]]]
self.assertEqual(max(tensor), 8)
# Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testMaxBadList(self):
"Test max function with bad list"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
max = Tensor.__dict__[self.typeStr + "Max"]
tensor = [[[1, "two"], [3, 4]],
[[5, "six"], [7, 8]]]
self.assertRaises(BadListError, max, tensor)
# Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testMaxNonContainer(self):
"Test max function with non-container"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
max = Tensor.__dict__[self.typeStr + "Max"]
self.assertRaises(TypeError, max, None)
# Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testMaxWrongDim(self):
"Test max function with wrong dimensions"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
max = Tensor.__dict__[self.typeStr + "Max"]
self.assertRaises(TypeError, max, [0, -1, 2, -3])
# Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap
def testMin(self):
"Test min function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
min = Tensor.__dict__[self.typeStr + "Min"]
tensor = [[[9, 8], [7, 6]],
[[5, 4], [3, 2]]]
self.assertEqual(min(tensor), 2)
# Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap
def testMinBadList(self):
"Test min function with bad list"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
min = Tensor.__dict__[self.typeStr + "Min"]
tensor = [[["nine", 8], [7, 6]],
[["five", 4], [3, 2]]]
self.assertRaises(BadListError, min, tensor)
# Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap
def testMinNonContainer(self):
"Test min function with non-container"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
min = Tensor.__dict__[self.typeStr + "Min"]
self.assertRaises(TypeError, min, True)
# Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap
def testMinWrongDim(self):
"Test min function with wrong dimensions"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
min = Tensor.__dict__[self.typeStr + "Min"]
self.assertRaises(TypeError, min, [[1, 3], [5, 7]])
# Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap
def testScale(self):
"Test scale function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
scale = Tensor.__dict__[self.typeStr + "Scale"]
tensor = np.array([[[1, 0, 1], [0, 1, 0], [1, 0, 1]],
[[0, 1, 0], [1, 0, 1], [0, 1, 0]],
[[1, 0, 1], [0, 1, 0], [1, 0, 1]]], self.typeCode)
scale(tensor, 4)
self.assertEqual((tensor == [[[4, 0, 4], [0, 4, 0], [4, 0, 4]],
[[0, 4, 0], [4, 0, 4], [0, 4, 0]],
[[4, 0, 4], [0, 4, 0], [4, 0, 4]]]).all(), True)
# Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap
def testScaleWrongType(self):
"Test scale function with wrong type"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
scale = Tensor.__dict__[self.typeStr + "Scale"]
tensor = np.array([[[1, 0, 1], [0, 1, 0], [1, 0, 1]],
[[0, 1, 0], [1, 0, 1], [0, 1, 0]],
[[1, 0, 1], [0, 1, 0], [1, 0, 1]]], 'c')
self.assertRaises(TypeError, scale, tensor)
# Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap
def testScaleWrongDim(self):
"Test scale function with wrong dimensions"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
scale = Tensor.__dict__[self.typeStr + "Scale"]
tensor = np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1],
[0, 1, 0], [1, 0, 1], [0, 1, 0]], self.typeCode)
self.assertRaises(TypeError, scale, tensor)
# Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap
def testScaleWrongSize(self):
"Test scale function with wrong size"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
scale = Tensor.__dict__[self.typeStr + "Scale"]
tensor = np.array([[[1, 0], [0, 1], [1, 0]],
[[0, 1], [1, 0], [0, 1]],
[[1, 0], [0, 1], [1, 0]]], self.typeCode)
self.assertRaises(TypeError, scale, tensor)
# Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap
def testScaleNonArray(self):
"Test scale function with non-array"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
scale = Tensor.__dict__[self.typeStr + "Scale"]
self.assertRaises(TypeError, scale, True)
# Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testFloor(self):
"Test floor function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
floor = Tensor.__dict__[self.typeStr + "Floor"]
tensor = np.array([[[1, 2], [3, 4]],
[[5, 6], [7, 8]]], self.typeCode)
floor(tensor, 4)
np.testing.assert_array_equal(tensor, np.array([[[4, 4], [4, 4]],
[[5, 6], [7, 8]]]))
# Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testFloorWrongType(self):
"Test floor function with wrong type"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
floor = Tensor.__dict__[self.typeStr + "Floor"]
tensor = np.array([[[1, 2], [3, 4]],
[[5, 6], [7, 8]]], 'c')
self.assertRaises(TypeError, floor, tensor)
# Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testFloorWrongDim(self):
"Test floor function with wrong type"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
floor = Tensor.__dict__[self.typeStr + "Floor"]
tensor = np.array([[1, 2], [3, 4], [5, 6], [7, 8]], self.typeCode)
self.assertRaises(TypeError, floor, tensor)
# Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testFloorNonArray(self):
"Test floor function with non-array"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
floor = Tensor.__dict__[self.typeStr + "Floor"]
self.assertRaises(TypeError, floor, object)
# Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap
def testCeil(self):
"Test ceil function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
ceil = Tensor.__dict__[self.typeStr + "Ceil"]
tensor = np.array([[[9, 8], [7, 6]],
[[5, 4], [3, 2]]], self.typeCode)
ceil(tensor, 5)
np.testing.assert_array_equal(tensor, np.array([[[5, 5], [5, 5]],
[[5, 4], [3, 2]]]))
# Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap
def testCeilWrongType(self):
"Test ceil function with wrong type"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
ceil = Tensor.__dict__[self.typeStr + "Ceil"]
tensor = np.array([[[9, 8], [7, 6]],
[[5, 4], [3, 2]]], 'c')
self.assertRaises(TypeError, ceil, tensor)
# Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap
def testCeilWrongDim(self):
"Test ceil function with wrong dimensions"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
ceil = Tensor.__dict__[self.typeStr + "Ceil"]
tensor = np.array([[9, 8], [7, 6], [5, 4], [3, 2]], self.typeCode)
self.assertRaises(TypeError, ceil, tensor)
# Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap
def testCeilNonArray(self):
"Test ceil function with non-array"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
ceil = Tensor.__dict__[self.typeStr + "Ceil"]
tensor = [[[9, 8], [7, 6]],
[[5, 4], [3, 2]]]
self.assertRaises(TypeError, ceil, tensor)
# Test (type ARGOUT_ARRAY3[ANY][ANY][ANY]) typemap
def testLUSplit(self):
"Test luSplit function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
luSplit = Tensor.__dict__[self.typeStr + "LUSplit"]
lower, upper = luSplit([[[1, 1], [1, 1]],
[[1, 1], [1, 1]]])
self.assertEqual((lower == [[[1, 1], [1, 0]],
[[1, 0], [0, 0]]]).all(), True)
self.assertEqual((upper == [[[0, 0], [0, 1]],
[[0, 1], [1, 1]]]).all(), True)
######################################################################
class scharTestCase(TensorTestCase):
def __init__(self, methodName="runTest"):
TensorTestCase.__init__(self, methodName)
self.typeStr = "schar"
self.typeCode = "b"
self.result = int(self.result)
######################################################################
class ucharTestCase(TensorTestCase):
def __init__(self, methodName="runTest"):
TensorTestCase.__init__(self, methodName)
self.typeStr = "uchar"
self.typeCode = "B"
self.result = int(self.result)
######################################################################
class shortTestCase(TensorTestCase):
def __init__(self, methodName="runTest"):
TensorTestCase.__init__(self, methodName)
self.typeStr = "short"
self.typeCode = "h"
self.result = int(self.result)
######################################################################
class ushortTestCase(TensorTestCase):
def __init__(self, methodName="runTest"):
TensorTestCase.__init__(self, methodName)
self.typeStr = "ushort"
self.typeCode = "H"
self.result = int(self.result)
######################################################################
class intTestCase(TensorTestCase):
def __init__(self, methodName="runTest"):
TensorTestCase.__init__(self, methodName)
self.typeStr = "int"
self.typeCode = "i"
self.result = int(self.result)
######################################################################
class uintTestCase(TensorTestCase):
def __init__(self, methodName="runTest"):
TensorTestCase.__init__(self, methodName)
self.typeStr = "uint"
self.typeCode = "I"
self.result = int(self.result)
# frigate/object_processing.py
import base64
import copy
import datetime
import hashlib
import itertools
import json
import logging
import os
import queue
import threading
import time
from collections import Counter, defaultdict
from statistics import mean, median
from typing import Callable, Dict
import cv2
import numpy as np
from frigate.config import CameraConfig, SnapshotsConfig, RecordConfig, FrigateConfig
from frigate.const import CACHE_DIR, CLIPS_DIR, RECORD_DIR
from frigate.util import (
SharedMemoryFrameManager,
calculate_region,
draw_box_with_label,
draw_timestamp,
load_labels,
)
logger = logging.getLogger(__name__)
def on_edge(box, frame_shape):
if (
box[0] == 0
or box[1] == 0
or box[2] == frame_shape[1] - 1
or box[3] == frame_shape[0] - 1
):
return True
def is_better_thumbnail(current_thumb, new_obj, frame_shape) -> bool:
# larger is better
# cutoff images are less ideal, but they should also be smaller?
# better scores are obviously better too
# if the new_thumb is on an edge, and the current thumb is not
if on_edge(new_obj["box"], frame_shape) and not on_edge(
current_thumb["box"], frame_shape
):
return False
# if the score is better by more than 5%
if new_obj["score"] > current_thumb["score"] + 0.05:
return True
# if the area is 10% larger
if new_obj["area"] > current_thumb["area"] * 1.1:
return True
return False
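# Illustrative example (hypothetical numbers): with current_thumb score 0.70 / area 1000 and
# new_obj score 0.80 / area 900, the new object wins on the score rule (0.80 > 0.70 + 0.05)
# unless its box lies on the frame edge while the current thumbnail's box does not.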
class TrackedObject:
def __init__(
self, camera, colormap, camera_config: CameraConfig, frame_cache, obj_data
):
self.obj_data = obj_data
self.camera = camera
self.colormap = colormap
self.camera_config = camera_config
self.frame_cache = frame_cache
self.current_zones = []
self.entered_zones = []
self.false_positive = True
self.has_clip = False
self.has_snapshot = False
self.top_score = self.computed_score = 0.0
self.thumbnail_data = None
self.last_updated = 0
self.last_published = 0
self.frame = None
self.previous = self.to_dict()
# start the score history
self.score_history = [self.obj_data["score"]]
def _is_false_positive(self):
# once a true positive, always a true positive
if not self.false_positive:
return False
threshold = self.camera_config.objects.filters[self.obj_data["label"]].threshold
return self.computed_score < threshold
def compute_score(self):
scores = self.score_history[:]
# pad with zeros if you don't have at least 3 scores
if len(scores) < 3:
scores += [0.0] * (3 - len(scores))
return median(scores)
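# Example of the padding above (hypothetical scores): a brand-new object with
# score_history == [0.9] is padded to [0.9, 0.0, 0.0], whose median is 0.0, so a single
# high-confidence detection is not enough on its own to clear the false-positive threshold.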
def update(self, current_frame_time, obj_data):
thumb_update = False
significant_change = False
# if the object is not in the current frame, add a 0.0 to the score history
if obj_data["frame_time"] != current_frame_time:
self.score_history.append(0.0)
else:
self.score_history.append(obj_data["score"])
# only keep the last 10 scores
if len(self.score_history) > 10:
self.score_history = self.score_history[-10:]
# calculate if this is a false positive
self.computed_score = self.compute_score()
if self.computed_score > self.top_score:
self.top_score = self.computed_score
self.false_positive = self._is_false_positive()
if not self.false_positive:
# determine if this frame is a better thumbnail
if self.thumbnail_data is None or is_better_thumbnail(
self.thumbnail_data, obj_data, self.camera_config.frame_shape
):
self.thumbnail_data = {
"frame_time": obj_data["frame_time"],
"box": obj_data["box"],
"area": obj_data["area"],
"region": obj_data["region"],
"score": obj_data["score"],
}
thumb_update = True
# check zones
current_zones = []
bottom_center = (obj_data["centroid"][0], obj_data["box"][3])
# check each zone
for name, zone in self.camera_config.zones.items():
# if the zone is not for this object type, skip
if len(zone.objects) > 0 and not obj_data["label"] in zone.objects:
continue
contour = zone.contour
# check if the object is in the zone
if cv2.pointPolygonTest(contour, bottom_center, False) >= 0:
# if the object passed the filters once, don't apply them again
if name in self.current_zones or not zone_filtered(self, zone.filters):
current_zones.append(name)
if name not in self.entered_zones:
self.entered_zones.append(name)
if not self.false_positive:
# if the zones changed, signal an update
if set(self.current_zones) != set(current_zones):
significant_change = True
# if the position changed, signal an update
if self.obj_data["position_changes"] != obj_data["position_changes"]:
significant_change = True
# if the motionless_count reaches the stationary threshold
if (
self.obj_data["motionless_count"]
== self.camera_config.detect.stationary.threshold
):
significant_change = True
# update at least once per minute
if self.obj_data["frame_time"] - self.previous["frame_time"] > 60:
significant_change = True
self.obj_data.update(obj_data)
self.current_zones = current_zones
return (thumb_update, significant_change)
def to_dict(self, include_thumbnail: bool = False):
snapshot_time = (
self.thumbnail_data["frame_time"]
if not self.thumbnail_data is None
else 0.0
)
event = {
"id": self.obj_data["id"],
"camera": self.camera,
"frame_time": self.obj_data["frame_time"],
"snapshot_time": snapshot_time,
"label": self.obj_data["label"],
"top_score": self.top_score,
"false_positive": self.false_positive,
"start_time": self.obj_data["start_time"],
"end_time": self.obj_data.get("end_time", None),
"score": self.obj_data["score"],
"box": self.obj_data["box"],
"area": self.obj_data["area"],
"region": self.obj_data["region"],
"stationary": self.obj_data["motionless_count"]
> self.camera_config.detect.stationary.threshold,
"motionless_count": self.obj_data["motionless_count"],
"position_changes": self.obj_data["position_changes"],
"current_zones": self.current_zones.copy(),
"entered_zones": self.entered_zones.copy(),
"has_clip": self.has_clip,
"has_snapshot": self.has_snapshot,
}
if include_thumbnail:
event["thumbnail"] = base64.b64encode(self.get_thumbnail()).decode("utf-8")
return event
def get_thumbnail(self):
if (
self.thumbnail_data is None
or self.thumbnail_data["frame_time"] not in self.frame_cache
):
ret, jpg = cv2.imencode(".jpg", np.zeros((175, 175, 3), np.uint8))
jpg_bytes = self.get_jpg_bytes(
timestamp=False, bounding_box=False, crop=True, height=175
)
if jpg_bytes:
return jpg_bytes
else:
ret, jpg = cv2.imencode(".jpg", np.zeros((175, 175, 3), np.uint8))
return jpg.tobytes()
def get_clean_png(self):
if self.thumbnail_data is None:
return None
try:
best_frame = cv2.cvtColor(
self.frame_cache[self.thumbnail_data["frame_time"]],
cv2.COLOR_YUV2BGR_I420,
)
except KeyError:
logger.warning(
f"Unable to create clean png because frame {self.thumbnail_data['frame_time']} is not in the cache"
)
return None
ret, png = cv2.imencode(".png", best_frame)
if ret:
return png.tobytes()
else:
return None
def get_jpg_bytes(
self, timestamp=False, bounding_box=False, crop=False, height=None, quality=70
):
if self.thumbnail_data is None:
return None
try:
best_frame = cv2.cvtColor(
self.frame_cache[self.thumbnail_data["frame_time"]],
cv2.COLOR_YUV2BGR_I420,
)
except KeyError:
logger.warning(
f"Unable to create jpg because frame {self.thumbnail_data['frame_time']} is not in the cache"
)
return None
if bounding_box:
thickness = 2
color = self.colormap[self.obj_data["label"]]
# draw the bounding boxes on the frame
box = self.thumbnail_data["box"]
draw_box_with_label(
best_frame,
box[0],
box[1],
box[2],
box[3],
self.obj_data["label"],
f"{int(self.thumbnail_data['score']*100)}% {int(self.thumbnail_data['area'])}",
thickness=thickness,
color=color,
)
if crop:
box = self.thumbnail_data["box"]
box_size = 300
region = calculate_region(
best_frame.shape,
box[0],
box[1],
box[2],
box[3],
box_size,
multiplier=1.1,
)
best_frame = best_frame[region[1] : region[3], region[0] : region[2]]
if height:
width = int(height * best_frame.shape[1] / best_frame.shape[0])
best_frame = cv2.resize(
best_frame, dsize=(width, height), interpolation=cv2.INTER_AREA
)
if timestamp:
color = self.camera_config.timestamp_style.color
draw_timestamp(
best_frame,
self.thumbnail_data["frame_time"],
self.camera_config.timestamp_style.format,
font_effect=self.camera_config.timestamp_style.effect,
font_thickness=self.camera_config.timestamp_style.thickness,
font_color=(color.blue, color.green, color.red),
position=self.camera_config.timestamp_style.position,
)
ret, jpg = cv2.imencode(
".jpg", best_frame, [int(cv2.IMWRITE_JPEG_QUALITY), quality]
)
if ret:
return jpg.tobytes()
else:
return None
def zone_filtered(obj: TrackedObject, object_config):
object_name = obj.obj_data["label"]
if object_name in object_config:
obj_settings = object_config[object_name]
# if the min area is larger than the
# detected object, don't add it to detected objects
if obj_settings.min_area > obj.obj_data["area"]:
return True
# if the detected object is larger than the
# max area, don't add it to detected objects
if obj_settings.max_area < obj.obj_data["area"]:
return True
# if the score is lower than the threshold, skip
if obj_settings.threshold > obj.computed_score:
return True
return False
# Maintains the state of a camera
class CameraState:
def __init__(
self, name, config: FrigateConfig, frame_manager: SharedMemoryFrameManager
):
self.name = name
self.config = config
self.camera_config = config.cameras[name]
self.frame_manager = frame_manager
self.best_objects: Dict[str, TrackedObject] = {}
self.object_counts = defaultdict(int)
self.tracked_objects: Dict[str, TrackedObject] = {}
self.frame_cache = {}
self.zone_objects = defaultdict(list)
self._current_frame = np.zeros(self.camera_config.frame_shape_yuv, np.uint8)
self.current_frame_lock = threading.Lock()
self.current_frame_time = 0.0
self.motion_boxes = []
self.regions = []
self.previous_frame_id = None
self.callbacks = defaultdict(list)
def get_current_frame(self, draw_options={}):
with self.current_frame_lock:
frame_copy = np.copy(self._current_frame)
frame_time = self.current_frame_time
tracked_objects = {k: v.to_dict() for k, v in self.tracked_objects.items()}
motion_boxes = self.motion_boxes.copy()
regions = self.regions.copy()
frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_YUV2BGR_I420)
# draw on the frame
if draw_options.get("bounding_boxes"):
# draw the bounding boxes on the frame
for obj in tracked_objects.values():
if obj["frame_time"] == frame_time:
thickness = 2
color = self.config.model.colormap[obj["label"]]
else:
thickness = 1
color = (255, 0, 0)
# draw the bounding boxes on the frame
box = obj["box"]
draw_box_with_label(
frame_copy,
box[0],
box[1],
box[2],
box[3],
obj["label"],
f"{obj['score']:.0%} {int(obj['area'])}",
thickness=thickness,
color=color,
)
if draw_options.get("regions"):
for region in regions:
cv2.rectangle(
frame_copy,
(region[0], region[1]),
(region[2], region[3]),
(0, 255, 0),
2,
)
if draw_options.get("zones"):
for name, zone in self.camera_config.zones.items():
thickness = (
8
if any(
name in obj["current_zones"] for obj in tracked_objects.values()
)
else 2
)
cv2.drawContours(frame_copy, [zone.contour], -1, zone.color, thickness)
if draw_options.get("mask"):
mask_overlay = np.where(self.camera_config.motion.mask == [0])
frame_copy[mask_overlay] = [0, 0, 0]
if draw_options.get("motion_boxes"):
for m_box in motion_boxes:
cv2.rectangle(
frame_copy,
(m_box[0], m_box[1]),
(m_box[2], m_box[3]),
(0, 0, 255),
2,
)
if draw_options.get("timestamp"):
color = self.camera_config.timestamp_style.color
draw_timestamp(
frame_copy,
frame_time,
self.camera_config.timestamp_style.format,
font_effect=self.camera_config.timestamp_style.effect,
font_thickness=self.camera_config.timestamp_style.thickness,
font_color=(color.blue, color.green, color.red),
position=self.camera_config.timestamp_style.position,
)
return frame_copy
def finished(self, obj_id):
del self.tracked_objects[obj_id]
def on(self, event_type: str, callback: Callable[[Dict], None]):
self.callbacks[event_type].append(callback)
def update(self, frame_time, current_detections, motion_boxes, regions):
# get the new frame
frame_id = f"{self.name}{frame_time}"
current_frame = self.frame_manager.get(
frame_id, self.camera_config.frame_shape_yuv
)
tracked_objects = self.tracked_objects.copy()
current_ids = set(current_detections.keys())
previous_ids = set(tracked_objects.keys())
removed_ids = previous_ids.difference(current_ids)
new_ids = current_ids.difference(previous_ids)
updated_ids = current_ids.intersection(previous_ids)
for id in new_ids:
new_obj = tracked_objects[id] = TrackedObject(
self.name,
self.config.model.colormap,
self.camera_config,
self.frame_cache,
current_detections[id],
)
# call event handlers
for c in self.callbacks["start"]:
c(self.name, new_obj, frame_time)
for id in updated_ids:
updated_obj = tracked_objects[id]
thumb_update, significant_update = updated_obj.update(
frame_time,
= ''
plansopinstuid = ''
planseriesinstuid = ''
doseseriesuid = ''
doseinstuid = ''
planfilename = ''
dosexdim = 0
doseydim = 0
dosezdim = 0
doseoriginx = ""
doseoriginy = ""
doseoriginz = ""
beamdosefiles = []
pixspacingx = ""
pixspacingy = ""
pixspacingz = ""
posrefind = ""
image_orientation = []
imagesetnumber = ""
point_names = []
point_values = []
numfracs = ""
flag_nobinaryfile = False
flag_noimages = False
no_setup_file = False
no_beams = False
softwarev = ""
slicethick = 0
x_dim = 0
y_dim = 0
z_dim = 0
xpixdim = 0
ypixdim = 0
####################################################################################################################################################
####################################################################################################################################################
####################################################################################################################################################
# function: convertimages
# The purpose of this function is to read in the image DICOM files
# and to change the patient's name to match the name of the patient
# in the Pinnacle files. It also fills the list values for slice location
# and UID. This function needs to run even if the image files have already been converted.
####################################################################################################################################################
def convertimages():
#print("Converting image patient name, birthdate and id to match pinnacle\n")
global patientname
global pid
global dob
global FrameUID
global imageslice
global SeriesUID
global StudyInstanceUID
global imageuid
global patientfolder
global posrefind
global imagesetnumber
global image_orientation
global flag_noimages
if not os.path.exists("%s%s/ImageSet_%s.DICOM"%(Inputf,patientfolder, imagesetnumber)):
#Image set folder not found, need to ignore patient
#Will want to call a function to be written that will create image set files from the condensed pixel data file
#print("Image files do not exist. Creating image files")
createimagefiles()
return
for file in os.listdir("%s%s/ImageSet_%s.DICOM"%(Inputf,patientfolder, imagesetnumber)):
if file == '11026.1.img':
continue
imageds = pydicom.read_file("%s%s/ImageSet_%s.DICOM/%s"%(Inputf, patientfolder, imagesetnumber, file), force=True)
imageds.PatientName = patientname
imageds.PatientID = pid
imageds.PatientBirthDate = dob
imageslice.append(imageds.SliceLocation)
imageuid.append(imageds.SOPInstanceUID)
image_orientation = imageds.ImageOrientationPatient
tempinstuid = imageds.SOPInstanceUID
posrefind = imageds.PositionReferenceIndicator
imageds.SOPInstanceUID = tempinstuid
imageds.FrameOfReferenceUID = FrameUID
imageds.StudyInstanceUID = StudyInstanceUID
imageds.SeriesInstanceUID = SeriesUID
file_meta = Dataset()
file_meta.TransferSyntaxUID = GTransferSyntaxUID
file_meta.MediaStorageSOPClassUID = '1.2.840.10008.5.1.4.1.1.2'
file_meta.MediaStorageSOPInstanceUID = tempinstuid
file_meta.ImplementationClassUID = gImplementationClassUID
imageds.file_meta = file_meta
preamble = getattr(imageds, "preamble", None)
if not preamble:
preamble = b'\x00'*128
currfile = DicomFile(Outputf+"%s/CT.%s.dcm"%(patientfolder, tempinstuid), 'wb')
currfile.write(preamble)
currfile.write(b'DICM')
pydicom.write_file(Outputf+"%s/CT.%s.dcm"%(patientfolder,tempinstuid), imageds, False)
#print("Current image: ", file)
#print(imageds)
####################################################################################################################################################
####################################################################################################################################################
####################################################################################################################################################
# Function: createimagefiles()
# This function will create DICOM image files for each slice using the condensed pixel data from the file ImageSet_%s.img
####################################################################################################################################################
def createimagefiles():
global slicethick
global x_dim
global y_dim
global z_dim
global xpixdim
global ypixdim
global patientname
global pid
global dob
global FrameUID
global imageslice
global SeriesUID
global StudyInstanceUID
global imageuid
global patientfolder
global posrefind
global imagesetnumber
global image_orientation
currentpatientposition = getheaderinfo()
if os.path.isfile("%s%s/ImageSet_%s.img"%(Inputf, patientfolder, imagesetnumber)):
allframeslist = []
pixel_array = np.fromfile("%s%s/ImageSet_%s.img"%(Inputf, patientfolder, imagesetnumber), dtype = np.short)
for i in range(0, int(z_dim)): # will loop over every frame
frame_array = pixel_array[i*int(x_dim)*int(y_dim):(i+1)*int(x_dim)*int(y_dim)]
allframeslist.append(frame_array)
"""frame_array = np.array([])
temp_frame_array = pixel_array[i*int(x_dim)*int(y_dim):(i+1)*int(x_dim)*int(y_dim)]
for j in range(0, int(y_dim)):
temprow = temp_frame_array[j*int(x_dim):(j+1)*int(x_dim)][::-1]
frame_array = np.append(frame_array, temprow)
allframeslist.append(frame_array)
"""
#print("Length of frames list: " + str(len(allframeslist)))
with open("%s%s/ImageSet_%s.ImageInfo"%(Inputf, patientfolder, imagesetnumber), 'rt', encoding='latin1') as f:
image_info = f.readlines()
curframe = 0
for i, line in enumerate(image_info, 0):
if "ImageInfo ={" in line:
sliceloc = -float(re.findall(r"[-+]?\d*\.\d+|\d+", image_info[i + 1])[0])*10
instuid = re.findall(r'"([^"]*)"', image_info[i + 8])[0]
seriesuid = re.findall(r'"([^"]*)"', image_info[i + 4])[0]
classuid = re.findall(r'"([^"]*)"', image_info[i + 7])[0]
frameuid = re.findall(r'"([^"]*)"', image_info[i + 6])[0]
studyinstuid = re.findall(r'"([^"]*)"', image_info[i + 5])[0]
slicenum = int(re.findall(r"[-+]?\d*\.\d+|\d+", image_info[i + 3])[0])
dateofscan, timeofscan = getdateandtime()
file_meta = Dataset()
file_meta.MediaStorageSOPClassUID = classuid
file_meta.MediaStorageSOPInstanceUID = instuid
file_meta.ImplementationClassUID = gImplementationClassUID #this value remains static since implementation for creating file is the same
ds = FileDataset(planfilename, {}, file_meta=file_meta, preamble=b'\x00'*128)
ds.SpecificCharacterSet = "ISO_IR 100"
ds.ImageType = ['ORIGINAL', 'PRIMARY', 'AXIAL']
ds.AccessionNumber = ''
ds.SOPClassUID = classuid
ds.SOPInstanceUID = instuid
ds.StudyDate = dateofscan
ds.SeriesDate = dateofscan
ds.AcquisitionDate = dateofscan
ds.ContentDate = dateofscan
ds.AcquisitionTime = timeofscan
ds.Modality = "CT" # Also should come from header file, but not always present
ds.Manufacturer = "GE MEDICAL SYSTEMS" #This should come from Manufacturer in header, but for some patients it isn't set??
ds.StationName = "CT"
ds.PatientName = patientname
ds.PatientID = pid
ds.PatientBirthDate = dob
ds.BitsAllocated = 16
ds.BitsStored = 16
ds.HighBit = 15
ds.PixelRepresentation = 1
ds.RescaleIntercept = -1024
#ds.RescaleIntercept = 0.0
ds.RescaleSlope = 1.0
# ds.kvp = ?? This should be peak kilovoltage output of x ray generator used
ds.PatientPosition = currentpatientposition
ds.DataCollectionDiameter = xpixdim*float(x_dim) # this is probably x_pixdim * xdim = y_pixdim * ydim
ds.SpatialResolution = 0.35#???????
#ds.DistanceSourceToDetector = #???
#ds.DistanceSourceToPatient = #????
ds.GantryDetectorTilt = 0.0 #??
ds.TableHeight = -158.0#??
ds.RotationDirection = "CW"#???
ds.ExposureTime = 1000#??
ds.XRayTubeCurrent = 398#??
ds.GeneratorPower = 48#??
ds.FocalSpots = 1.2#??
ds.ConvolutionKernel = "STND" #????
ds.SliceThickness = slicethick
ds.NumberOfSlices = int(z_dim)
#ds.StudyInstanceUID = studyinstuid
#ds.SeriesInstanceUID = seriesuid
ds.FrameOfReferenceUID = FrameUID
ds.StudyInstanceUID = StudyInstanceUID
ds.SeriesInstanceUID = SeriesUID
ds.InstanceNumber = slicenum # problem, some of these are repeated in image file so not sure what to do with that
ds.ImagePositionPatient = [-xpixdim*float(x_dim)/2, -ypixdim*float(y_dim)/2, sliceloc]
if "HFS" in currentpatientposition or "FFS" in currentpatientposition:
ds.ImageOrientationPatient = [1.0, 0.0, 0.0, 0.0, 1.0, -0.0]
elif "HFP" in currentpatientposition or "FFP" in currentpatientposition:
ds.ImageOrientationPatient = [-1.0, 0.0, 0.0, 0.0, -1.0, -0.0]
ds.PositionReferenceIndicator = "LM" #???
ds.SliceLocation = sliceloc
ds.SamplesPerPixel = 1
ds.PhotometricInterpretation = "MONOCHROME2"
ds.Rows = int(x_dim)
ds.Columns = int(y_dim)
ds.PixelSpacing = [xpixdim, ypixdim]
#ds.PixelData = allframeslist[curframe]
#ds.PixelData = allframeslist[slicenum - 1]
ds.PixelData = allframeslist[curframe].tobytes()  # tobytes() replaces the deprecated tostring()
imageslice.append(sliceloc)
imageuid.append(instuid)
image_orientation = ds.ImageOrientationPatient
posrefind = ds.PositionReferenceIndicator
#print("Creating image: " + Outputf + "%s/CT.%s.dcm"%(patientfolder, instuid))
#ds.save_as(Outputf + "%s/CT.%s.dcm"%(patientfolder, instuid),write_like_original=False)
ds.save_as(Outputf + "%s/CT.%s.dcm"%(patientfolder, instuid))
curframe = curframe + 1
####################################################################################################################################################
####################################################################################################################################################
####################################################################################################################################################
# Function: getheaderinfo
# This function will only be called in cases where image files do not already exist
####################################################################################################################################################
def getheaderinfo():
global slicethick
global x_dim
global y_dim
global z_dim
global xpixdim
global ypixdim
temp_pos = ""
with open("%s%s/ImageSet_%s.header"%(Inputf, patientfolder, imagesetnumber), "rt", encoding='latin1') as f2:
for line in f2:
#print("line in header: " + line)
if "x_dim =" in line:
x_dim = (line.split(" ")[-1]).replace(';','').replace('\n', '')
if "y_dim =" in line:
y_dim = (line.split(" ")[-1]).replace(';','').replace('\n', '')
if "x_pixdim =" in line:
xpixdim = float((line.split(" ")[-1]).replace(';',''))*10
if "y_pixdim =" in line:
ypixdim = float((line.split(" ")[-1]).replace(';',''))*10
if "x_start =" in line and "index" not in line:
xstart = float((line.split(" ")[-1]).replace(';',''))
print("xstart = ", xstart)
if "y_start =" in line:
ystart = float((line.split(" ")[-1]).replace(';',''))
if "z_dim =" in line:
z_dim = (line.split(" ")[-1]).replace(';','').replace('\n', '')
if "z_pixdim =" in line:
slicethick = float((line.split(" ")[-1]).replace(';',''))*10
if "z_start =" in line and "index" not in line:
zstart = float((line.split(" ")[-1]).replace(';',''))
if "patient_position" in line:
temp_pos = (line.split(" ")[-1]).replace("\n","")
print("Patient_position is: " + temp_pos)
return temp_pos
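# Example of the ImageSet_%s.header lines parsed above (all values and the exact
# layout are illustrative assumptions inferred from the parsing code, not taken from
# a real patient file). The *10 factors suggest the header stores pixel sizes in cm
# while DICOM expects mm:
#   x_dim = 512;
#   y_dim = 512;
#   x_pixdim = 0.09375;
#   y_pixdim = 0.09375;
#   z_dim = 136;
#   z_pixdim = 0.3;
#   x_start = -23.9;
#   patient_position : HFS
# (only the last space-separated token of the patient_position line is kept, and no
# trailing ';' is stripped from it)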
####################################################################################################################################################
####################################################################################################################################################
####################################################################################################################################################
# Function: getdateandtime
# Will read the ImageSet_%s.ImageSet file to get the date and time of CT image acquisition; only used in cases where image files have not been created
####################################################################################################################################################
def getdateandtime():
#with open("//Testfile", "rt", encoding='latin1') as g:
with open("%s%s/ImageSet_%s.ImageSet"%(Inputf, patientfolder, imagesetnumber), "rt", encoding='latin1') as g:
for line in g:
if "ScanTimeFromScanner" in line:
dateandtimestring = re.findall(r'"([^"]*)"', line)[0]
dateandtimelist = dateandtimestring.split(' ')
date = dateandtimelist[0].replace("-", "")
time = dateandtimelist[1].replace(":","")
return date, time
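# Illustrative ImageSet_%s.ImageSet line assumed by the parser above (the date/time
# value is made up):
#   ScanTimeFromScanner = "2015-06-01 12:34:56";
# which getdateandtime() would return as ("20150601", "123456").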
####################################################################################################################################################
####################################################################################################################################################
####################################################################################################################################################
# Function: readImageInfo
# Reads the ImageSet_%s.ImageInfo file to get UID values
# Saves the general UIDs as global variables
####################################################################################################################################################
def readImageInfo():
#print("Reading image information for all image files\n")
global SeriesUID
global StudyInstanceUID
global FrameUID
global ClassUID
global patientfolder
global randval
global imagesetnumber
#print("Path to image info file: " + "%s%s/ImageSet_%s.ImageInfo"%(Inputf, patientfolder, imagesetnumber))
if not os.path.exists("%s%s/ImageSet_%s.ImageInfo"%(Inputf, patientfolder, imagesetnumber)):
#print("Leaving readImageInfo before getting info")
return
with open("%s%s/ImageSet_%s.ImageInfo"%(Inputf, patientfolder, imagesetnumber), 'rt', encoding='latin1') as f1:
for line in f1:
#print("For loop in readImageInfo")
if "SeriesUID" in line:
SeriesUID = re.findall(r'"([^"]*)"', line)[0]
#print("setting series uid: " + str(SeriesUID))
#SeriesUID = SeriesUID + "." + "0" + str(randval)
if "StudyInstanceUID" in line:
StudyInstanceUID = re.findall(r'"([^"]*)"', line)[0]
#print("setting study uid: " + str(StudyInstanceUID))
#StudyInstanceUID = StudyInstanceUID + "." + "0" + str(randval)
if "FrameUID" in line:
FrameUID = re.findall(r'"([^"]*)"', line)[0]
#print("setting frame uid: " + str(FrameUID))
# FrameUID = FrameUID[:-4] + "." + "0" + str(randval)
if "ClassUID" in line:
ClassUID = re.findall(r'"([^"]*)"', line)[0]
#print("setting class uid: " + str(ClassUID))
#ClassUID = ClassUID + "." + "0" + str(randval)
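# Illustrative ImageSet_%s.ImageInfo lines assumed by the parser above (the UID
# values are made up); each UID is taken from the first double-quoted string on its
# line:
#   SeriesUID = "1.2.840.113704.1.111.1111.1111111111.1"
#   StudyInstanceUID = "1.2.840.113704.1.111.1111.1111111111.2"
#   FrameUID = "1.2.840.113704.1.111.1111.1111111111.3"
#   ClassUID = "1.2.840.10008.5.1.4.1.1.2"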
####################################################################################################################################################
####################################################################################################################################################
####################################################################################################################################################
# Creating a data structure to write to an RT struct DICOM file
# Based on the example file write_new.py from C:\Python27\Lib\site-packages\dicom\examples\write_new.py
# Returns the data structure
# filename: pyunfurl/provider_data/oembed.py
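# Each entry below pairs a URL-matching regular expression with the oEmbed endpoint
# to query when a URL matches it. The helper that follows is a minimal lookup sketch
# for illustration only; its name and the use of re.match are assumptions, not part
# of pyunfurl's public API.
import re


def _find_oembed_endpoint(url, providers=None):
    """Return the endpoint of the first provider whose pattern matches url, or None."""
    table = OEMBED_PROVIDER_LIST if providers is None else providers
    for pattern, endpoint in table:
        if re.match(pattern, url):
            return endpoint
    return None
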
OEMBED_PROVIDER_LIST = [
[
"https://(\\S*\\.)?youtu(\\.be/|be\\.com/watch)\\S+",
"https://www.youtube.com/oembed?scheme=https&",
],
[
"http://(\\S*\\.)?youtu(\\.be/|be\\.com/watch)\\S+",
"https://www.youtube.com/oembed",
],
["https?://wordpress\\.tv/\\S+", "http://wordpress.tv/oembed/"],
["http://\\S+\\.wordpress\\.com/\\S+", "http://public-api.wordpress.com/oembed/"],
["https://vimeo\\.com/\\S+", "https://vimeo.com/api/oembed.json"],
["http://vimeo\\.com/\\S+", "http://vimeo.com/api/oembed.json"],
[
"https?://(www\\.)?twitter\\.com/\\S+/status(es)?/\\S+",
"https://api.twitter.com/1/statuses/oembed.json",
],
["https?://(?:www\\.)?scribd\\.com/\\S*", "http://www.scribd.com/services/oembed"],
["https?://speakerdeck\\.com/\\S*", "https://speakerdeck.com/oembed.json"],
["https://\\S*?soundcloud\\.com/\\S+", "http://soundcloud.com/oembed"],
["http://\\S*\\.smugmug\\.com/\\S*", "http://api.smugmug.com/services/oembed/"],
["https?://slidesha\\.re/\\S*", "http://www.slideshare.net/api/oembed/2"],
[
"https?://(?:www\\.)?slideshare\\.net/[^\\/]+/\\S+",
"http://www.slideshare.net/api/oembed/2",
],
["https?://(.+\\.)?polldaddy\\.com/\\S*", "http://polldaddy.com/oembed/"],
[
"http://www\\.polleverywhere\\.com/(polls|multiple_choice_polls|free_text_polls)/\\S+",
"http://www.polleverywhere.com/services/oembed/",
],
["http://gi\\S*\\.photobucket\\.com/groups/\\S+", "http://photobucket.com/oembed"],
["http://i\\S*\\.photobucket\\.com/albums/\\S+", "http://photobucket.com/oembed"],
["http://moby\\.to/\\S*", "http://api.mobypicture.com/oEmbed"],
[
"http://www\\.mobypicture\\.com/user/\\S*?/view/\\S*",
"http://api.mobypicture.com/oEmbed",
],
[
"https?://(www\\.)?instagr(\\.am|am\\.com)/p/\\S+",
"http://api.instagram.com/oembed",
],
["https?://\\S*imgur\\.com/\\S+", "https://api.imgur.com/oembed"],
["http://(?:www\\.)hulu\\.com/watch/\\S+", "http://www.hulu.com/api/oembed.json"],
["https?://gist\\.github\\.com/\\S*", "https://github.com/api/oembed"],
[
"https?://(?:www\\.)?funnyordie\\.com/videos/\\S+",
"http://www.funnyordie.com/oembed",
],
["https?://flic\\.kr/\\S*", "https://www.flickr.com/services/oembed/"],
["https?://\\S*?flickr\\.com/\\S+", "https://www.flickr.com/services/oembed/"],
[
"https?://(?:www\\.)?dailymotion\\.com/\\S+",
"http://www.dailymotion.com/services/oembed",
],
[
"https?://www\\.circuitlab\\.com/circuit/\\S+",
"https://www.circuitlab.com/circuit/oembed",
],
["http://chirb\\.it/\\S+", "http://chirb.it/oembed.json"],
[
"https://(\\S*\\.)?youtu(\\.be/|be\\.com/watch)\\S+",
"https://www.youtube.com/oembed?scheme=https&",
],
[
"http://(\\S*\\.)?youtu(\\.be/|be\\.com/watch)\\S+",
"https://www.youtube.com/oembed",
],
[
"https://reports.zoho.com/ZDBDataSheetView.cc\\?OBJID=1432535000000003002&STANDALONE=true&INTERVAL=120&DATATYPESYMBOL=false&REMTOOLBAR=false&SEARCHBOX=true&INCLUDETITLE=true&INCLUDEDESC=true&SHOWHIDEOPT=true",
"http://api.provider.com/oembed.json",
],
[
"https://[^\\/\\s\\?&]+?.znipe.tv/[^\\/\\s\\?&]+?",
"https://api.znipe.tv/v3/oembed/",
],
["https://youtu.be/[^\\/\\s\\?&]+?", "https://www.youtube.com/oembed"],
[
"https://[^\\/\\s\\?&]+?.youtube.com/v/[^\\/\\s\\?&]+?",
"https://www.youtube.com/oembed",
],
[
"https://[^\\/\\s\\?&]+?.youtube.com/watch[^\\/\\s\\?&]+?",
"https://www.youtube.com/oembed",
],
["http://yfrog.us/[^\\/\\s\\?&]+?", "http://www.yfrog.com/api/oembed"],
[
"http://[^\\/\\s\\?&]+?.yfrog.com/[^\\/\\s\\?&]+?",
"http://www.yfrog.com/api/oembed",
],
["http://www.yesik.it/[^\\/\\s\\?&]+?", "http://yesik.it/s/oembed"],
["http://yesik.it/[^\\/\\s\\?&]+?", "http://yesik.it/s/oembed"],
[
"https://[^\\/\\s\\?&]+?.wizer.me/preview/[^\\/\\s\\?&]+?",
"http://app.wizer.me/api/oembed.json",
],
[
"http://[^\\/\\s\\?&]+?.wizer.me/preview/[^\\/\\s\\?&]+?",
"http://app.wizer.me/api/oembed.json",
],
[
"https://[^\\/\\s\\?&]+?.wizer.me/learn/[^\\/\\s\\?&]+?",
"http://app.wizer.me/api/oembed.json",
],
[
"http://[^\\/\\s\\?&]+?.wizer.me/learn/[^\\/\\s\\?&]+?",
"http://app.wizer.me/api/oembed.json",
],
[
"https://[^\\/\\s\\?&]+?.wistia.com/medias/[^\\/\\s\\?&]+?",
"https://fast.wistia.com/oembed.json",
],
[
"https://fast.wistia.com/embed/playlists/[^\\/\\s\\?&]+?",
"https://fast.wistia.com/oembed.json",
],
[
"https://fast.wistia.com/embed/iframe/[^\\/\\s\\?&]+?",
"https://fast.wistia.com/oembed.json",
],
[
"https://[^\\/\\s\\?&]+?.wiredrive.com/[^\\/\\s\\?&]+?",
"http://*.wiredrive.com/present-oembed/",
],
[
"https://article.voxsnap.com/[^\\/\\s\\?&]+?/[^\\/\\s\\?&]+?",
"https://data.voxsnap.com/oembed",
],
["https://vlurb.co/video/[^\\/\\s\\?&]+?", "https://vlurb.co/oembed.json"],
["http://vlurb.co/video/[^\\/\\s\\?&]+?", "https://vlurb.co/oembed.json"],
["https://www.vlive.tv/video/[^\\/\\s\\?&]+?", "https://www.vlive.tv/oembed"],
["https://vlipsy.com/[^\\/\\s\\?&]+?", "https://vlipsy.com/oembed"],
[
"http://viziosphere.com/3dphoto[^\\/\\s\\?&]+?",
"http://viziosphere.com/services/oembed/",
],
[
"https://player.vimeo.com/video/[^\\/\\s\\?&]+?",
"https://vimeo.com/api/oembed.json",
],
[
"https://vimeo.com/ondemand/[^\\/\\s\\?&]+?/[^\\/\\s\\?&]+?",
"https://vimeo.com/api/oembed.json",
],
[
"https://vimeo.com/groups/[^\\/\\s\\?&]+?/videos/[^\\/\\s\\?&]+?",
"https://vimeo.com/api/oembed.json",
],
[
"https://vimeo.com/channels/[^\\/\\s\\?&]+?/[^\\/\\s\\?&]+?",
"https://vimeo.com/api/oembed.json",
],
[
"https://vimeo.com/album/[^\\/\\s\\?&]+?/video/[^\\/\\s\\?&]+?",
"https://vimeo.com/api/oembed.json",
],
["https://vimeo.com/[^\\/\\s\\?&]+?", "https://vimeo.com/api/oembed.json"],
[
"http://[^\\/\\s\\?&]+?.hubs.vidyard.com/[^\\/\\s\\?&]+?",
"https://api.vidyard.com/dashboard/v1.1/oembed",
],
[
"http://share.vidyard.com/[^\\/\\s\\?&]+?",
"https://api.vidyard.com/dashboard/v1.1/oembed",
],
[
"http://play.vidyard.com/[^\\/\\s\\?&]+?",
"https://api.vidyard.com/dashboard/v1.1/oembed",
],
[
"http://embed.vidyard.com/[^\\/\\s\\?&]+?",
"https://api.vidyard.com/dashboard/v1.1/oembed",
],
[
"https://players.vidmizer.com/[^\\/\\s\\?&]+?",
"https://app-v2.vidmizer.com/api/oembed",
],
["https://vidl.it/[^\\/\\s\\?&]+?", "https://api.vidl.it/oembed"],
[
"http://www.videojug.com/interview/[^\\/\\s\\?&]+?",
"http://www.videojug.com/oembed.json",
],
[
"http://www.videojug.com/film/[^\\/\\s\\?&]+?",
"http://www.videojug.com/oembed.json",
],
["https://www.vevo.com/[^\\/\\s\\?&]+?", "https://www.vevo.com/oembed"],
["http://www.vevo.com/[^\\/\\s\\?&]+?", "https://www.vevo.com/oembed"],
["http://veervr.tv/videos/[^\\/\\s\\?&]+?", "https://api.veervr.tv/oembed"],
["http://veer.tv/videos/[^\\/\\s\\?&]+?", "https://api.veer.tv/oembed"],
["http://uttles.com/uttle/[^\\/\\s\\?&]+?", "http://uttles.com/api/reply/oembed"],
[
"http://utposts.com/products/[^\\/\\s\\?&]+?",
"https://www.utposts.com/api/oembed",
],
[
"https://utposts.com/products/[^\\/\\s\\?&]+?",
"https://www.utposts.com/api/oembed",
],
[
"http://www.utposts.com/products/[^\\/\\s\\?&]+?",
"https://www.utposts.com/api/oembed",
],
[
"https://www.utposts.com/products/[^\\/\\s\\?&]+?",
"https://www.utposts.com/api/oembed",
],
[
"http://[^\\/\\s\\?&]+?.ustream.com/[^\\/\\s\\?&]+?",
"http://www.ustream.tv/oembed",
],
[
"http://[^\\/\\s\\?&]+?.ustream.tv/[^\\/\\s\\?&]+?",
"http://www.ustream.tv/oembed",
],
[
"https://[^\\/\\s\\?&]+?.uol.com.br/video/[^\\/\\s\\?&]+?",
"https://mais.uol.com.br/apiuol/v3/oembed/view",
],
[
"https://[^\\/\\s\\?&]+?.uol.com.br/view/[^\\/\\s\\?&]+?",
"https://mais.uol.com.br/apiuol/v3/oembed/view",
],
["https://map.cam.ac.uk/[^\\/\\s\\?&]+?", "https://map.cam.ac.uk/oembed/"],
[
"https://player.ubideo.com/[^\\/\\s\\?&]+?",
"https://player.ubideo.com/api/oembed.json",
],
["https://play.typecast.ai/[^\\/\\s\\?&]+?", "https://play.typecast.ai/oembed"],
["https://play.typecast.ai/e/[^\\/\\s\\?&]+?", "https://play.typecast.ai/oembed"],
["https://play.typecast.ai/s/[^\\/\\s\\?&]+?", "https://play.typecast.ai/oembed"],
[
"https://[^\\/\\s\\?&]+?.twitter.com/[^\\/\\s\\?&]+?/status/[^\\/\\s\\?&]+?",
"https://publish.twitter.com/oembed",
],
[
"https://twitter.com/[^\\/\\s\\?&]+?/status/[^\\/\\s\\?&]+?",
"https://publish.twitter.com/oembed",
],
["https://twitch.tv/[^\\/\\s\\?&]+?", "https://api.twitch.tv/v4/oembed"],
["http://twitch.tv/[^\\/\\s\\?&]+?", "https://api.twitch.tv/v4/oembed"],
["https://www.twitch.tv/[^\\/\\s\\?&]+?", "https://api.twitch.tv/v4/oembed"],
["http://www.twitch.tv/[^\\/\\s\\?&]+?", "https://api.twitch.tv/v4/oembed"],
["https://clips.twitch.tv/[^\\/\\s\\?&]+?", "https://api.twitch.tv/v4/oembed"],
["http://clips.twitch.tv/[^\\/\\s\\?&]+?", "https://api.twitch.tv/v4/oembed"],
[
"http://www.tvcf.co.kr/v/[^\\/\\s\\?&]+?",
"http://www.tvcf.co.kr/services/oembed",
],
["https://www.tuxx.be/[^\\/\\s\\?&]+?", "https://www.tuxx.be/services/oembed"],
["http://www.topy.se/image/[^\\/\\s\\?&]+?", "http://www.topy.se/oembed/"],
[
"https://www.toornament.com/tournaments/[^\\/\\s\\?&]+?/stages/[^\\/\\s\\?&]+?/",
"https://widget.toornament.com/oembed",
],
[
"https://www.toornament.com/tournaments/[^\\/\\s\\?&]+?/matches/schedule",
"https://widget.toornament.com/oembed",
],
[
"https://www.toornament.com/tournaments/[^\\/\\s\\?&]+?/registration/",
"https://widget.toornament.com/oembed",
],
[
"https://www.toornament.com/tournaments/[^\\/\\s\\?&]+?/information",
"https://widget.toornament.com/oembed",
],
[
"https://www.tickcounter.com/worldclock/[^\\/\\s\\?&]+?",
"https://www.tickcounter.com/oembed",
],
[
"https://www.tickcounter.com/ticker/[^\\/\\s\\?&]+?",
"https://www.tickcounter.com/oembed",
],
[
"https://www.tickcounter.com/countup/[^\\/\\s\\?&]+?",
"https://www.tickcounter.com/oembed",
],
[
"https://www.tickcounter.com/countdown/[^\\/\\s\\?&]+?",
"https://www.tickcounter.com/oembed",
],
[
"http://www.tickcounter.com/worldclock/[^\\/\\s\\?&]+?",
"https://www.tickcounter.com/oembed",
],
[
"http://www.tickcounter.com/ticker/[^\\/\\s\\?&]+?",
"https://www.tickcounter.com/oembed",
],
[
"http://www.tickcounter.com/countup/[^\\/\\s\\?&]+?",
"https://www.tickcounter.com/oembed",
],
[
"http://www.tickcounter.com/countdown/[^\\/\\s\\?&]+?",
"https://www.tickcounter.com/oembed",
],
[
"https://theysaidso.com/image/[^\\/\\s\\?&]+?",
"https://theysaidso.com/extensions/oembed/",
],
[
"https://[^\\/\\s\\?&]+?.nytimes.com/[^\\/\\s\\?&]+?",
"https://www.nytimes.com/svc/oembed/json/",
],
["https://nytimes.com/[^\\/\\s\\?&]+?", "https://www.nytimes.com/svc/oembed/json/"],
["https://www.nytimes.com/svc/oembed", "https://www.nytimes.com/svc/oembed/json/"],
[
"https://www.ted.com/talks/[^\\/\\s\\?&]+?",
"https://www.ted.com/talks/oembed.json",
],
["https://ted.com/talks/[^\\/\\s\\?&]+?", "https://www.ted.com/talks/oembed.json"],
["http://ted.com/talks/[^\\/\\s\\?&]+?", "https://www.ted.com/talks/oembed.json"],
["https://www.sway.com/[^\\/\\s\\?&]+?", "https://sway.com/api/v1.0/oembed"],
["https://sway.com/[^\\/\\s\\?&]+?", "https://sway.com/api/v1.0/oembed"],
[
"https://www.sutori.com/story/[^\\/\\s\\?&]+?",
"https://www.sutori.com/api/oembed",
],
[
"https://content.streamonecloud.net/embed/[^\\/\\s\\?&]+?",
"https://content.streamonecloud.net/oembed",
],
[
"https://streamable.com/[^\\/\\s\\?&]+?",
"https://api.streamable.com/oembed.json",
],
["http://streamable.com/[^\\/\\s\\?&]+?", "https://api.streamable.com/oembed.json"],
[
"https://purl.stanford.edu/[^\\/\\s\\?&]+?",
"https://purl.stanford.edu/embed.json",
],
[
"https://[^\\/\\s\\?&]+?.spreaker.com/[^\\/\\s\\?&]+?",
"https://api.spreaker.com/oembed",
],
[
"http://[^\\/\\s\\?&]+?.spreaker.com/[^\\/\\s\\?&]+?",
"https://api.spreaker.com/oembed",
],
["spotify:[^\\/\\s\\?&]+?", "https://embed.spotify.com/oembed/"],
[
"https://[^\\/\\s\\?&]+?.spotify.com/[^\\/\\s\\?&]+?",
"https://embed.spotify.com/oembed/",
],
["http://play.bespotful.com/[^\\/\\s\\?&]+?", "https://api.bespotful.com/oembed"],
[
"https://speakerdeck.com/[^\\/\\s\\?&]+?/[^\\/\\s\\?&]+?",
"https://speakerdeck.com/oembed.json",
],
[
"http://speakerdeck.com/[^\\/\\s\\?&]+?/[^\\/\\s\\?&]+?",
"https://speakerdeck.com/oembed.json",
],
[
"https://soundsgood.co/playlist/[^\\/\\s\\?&]+?",
"https://play.soundsgood.co/oembed",
],
[
"https://play.soundsgood.co/playlist/[^\\/\\s\\?&]+?",
"https://play.soundsgood.co/oembed",
],
["https://soundcloud.com/[^\\/\\s\\?&]+?", "https://soundcloud.com/oembed"],
["http://soundcloud.com/[^\\/\\s\\?&]+?", "https://soundcloud.com/oembed"],
["https://song.link/[^\\/\\s\\?&]+?", "https://song.link/oembed"],
[
"https://www.socialexplorer.com/[^\\/\\s\\?&]+?/embed",
"https://www.socialexplorer.com/services/oembed/",
],
[
"https://www.socialexplorer.com/[^\\/\\s\\?&]+?/edit",
"https://www.socialexplorer.com/services/oembed/",
],
[
"https://www.socialexplorer.com/[^\\/\\s\\?&]+?/view",
"https://www.socialexplorer.com/services/oembed/",
],
[
"https://www.socialexplorer.com/[^\\/\\s\\?&]+?/explore",
"https://www.socialexplorer.com/services/oembed/",
],
[
"http://[^\\/\\s\\?&]+?.smugmug.com/[^\\/\\s\\?&]+?",
"http://api.smugmug.com/services/oembed/",
],
[
"https://smashnotes.com/p/[^\\/\\s\\?&]+?/e/[^\\/\\s\\?&]+? - https://smashnotes.com/p/[^\\/\\s\\?&]+?/e/[^\\/\\s\\?&]+?/s/[^\\/\\s\\?&]+?",
"https://smashnotes.com/services/oembed",
],
[
"https://smashnotes.com/p/[^\\/\\s\\?&]+?",
"https://smashnotes.com/services/oembed",
],
[
"http://pt.slideshare.net/[^\\/\\s\\?&]+?/[^\\/\\s\\?&]+?",
"http://www.slideshare.net/api/oembed/2",
],
[
"http://es.slideshare.net/[^\\/\\s\\?&]+?/[^\\/\\s\\?&]+?",
"http://www.slideshare.net/api/oembed/2",
],
[
"http://de.slideshare.net/[^\\/\\s\\?&]+?/[^\\/\\s\\?&]+?",
"http://www.slideshare.net/api/oembed/2",
],
[
"http://fr.slideshare.net/[^\\/\\s\\?&]+?/[^\\/\\s\\?&]+?",
"http://www.slideshare.net/api/oembed/2",
],
[
"http://www.slideshare.net/[^\\/\\s\\?&]+?/[^\\/\\s\\?&]+?",
"http://www.slideshare.net/api/oembed/2",
],
[
"https://sketchfab.com/[^\\/\\s\\?&]+?/folders/[^\\/\\s\\?&]+?",
"http://sketchfab.com/oembed",
],
["https://sketchfab.com/models/[^\\/\\s\\?&]+?", "http://sketchfab.com/oembed"],
["http://sketchfab.com/models/[^\\/\\s\\?&]+?", "http://sketchfab.com/oembed"],
["https://onsizzle.com/i/[^\\/\\s\\?&]+?", "https://onsizzle.com/oembed"],
["https://simplecast.com/s/[^\\/\\s\\?&]+?", "https://simplecast.com/oembed"],
["https://showtheway.io/to/[^\\/\\s\\?&]+?", "https://showtheway.io/oembed"],
["http://shoud.io/[^\\/\\s\\?&]+?", "http://shoudio.com/api/oembed"],
["http://shoudio.com/[^\\/\\s\\?&]+?", "http://shoudio.com/api/oembed"],
[
"https://www.shortnote.jp/view/notes/[^\\/\\s\\?&]+?",
"https://www.shortnote.jp/oembed/",
],
[
"https://embed.sendtonews.com/oembed/[^\\/\\s\\?&]+?",
"https://embed.sendtonews.com/services/oembed",
],
[
"http://www.scribd.com/doc/[^\\/\\s\\?&]+?",
"http://www.scribd.com/services/oembed/",
],
[
"https://scribblemaps.com/maps/view/[^\\/\\s\\?&]+?",
"https://scribblemaps.com/api/services/oembed.json",
],
[
"http://scribblemaps.com/maps/view/[^\\/\\s\\?&]+?",
"https://scribblemaps.com/api/services/oembed.json",
],
[
"https://www.scribblemaps.com/maps/view/[^\\/\\s\\?&]+?",
"https://scribblemaps.com/api/services/oembed.json",
],
[
"http://www.scribblemaps.com/maps/view/[^\\/\\s\\?&]+?",
"https://scribblemaps.com/api/services/oembed.json",
],
[
"http://www.screenr.com/[^\\/\\s\\?&]+?/",
"http://www.screenr.com/api/oembed.json",
],
[
"https://[^\\/\\s\\?&]+?.screen9.tv/[^\\/\\s\\?&]+?",
"https://api.screen9.com/oembed",
],
["https://console.screen9.com/[^\\/\\s\\?&]+?", "https://api.screen9.com/oembed"],
["http://videos.sapo.pt/[^\\/\\s\\?&]+?", "http://videos.sapo.pt/oembed"],
["https://roosterteeth.com/[^\\/\\s\\?&]+?", "https://roosterteeth.com/oembed"],
[
"http://roomshare.jp/en/post/[^\\/\\s\\?&]+?",
"http://roomshare.jp/en/oembed.json",
],
["http://roomshare.jp/post/[^\\/\\s\\?&]+?", "http://roomshare.jp/en/oembed.json"],
[
"https://www.reverbnation.com/[^\\/\\s\\?&]+?/songs/[^\\/\\s\\?&]+?",
"https://www.reverbnation.com/oembed",
],
[
"https://www.reverbnation.com/[^\\/\\s\\?&]+?",
"https://www.reverbnation.com/oembed",
],
[
"http://repubhub.icopyright.net/freePost.act\\?[^\\/\\s\\?&]+?",
"http://repubhub.icopyright.net/oembed.act",
],
["https://repl.it/@[^\\/\\s\\?&]+?/[^\\/\\s\\?&]+?", "https://repl.it/data/oembed"],
["http://rwire.com/[^\\/\\s\\?&]+?", "http://publisher.releasewire.com/oembed/"],
[
"https://www.reddit.com/r/[^\\/\\s\\?&]+?/comments/[^\\/\\s\\?&]+?/[^\\/\\s\\?&]+?",
"https://www.reddit.com/oembed",
],
[
"https://reddit.com/r/[^\\/\\s\\?&]+?/comments/[^\\/\\s\\?&]+?/[^\\/\\s\\?&]+?",
"https://www.reddit.com/oembed",
],
["https://rapidengage.com/s/[^\\/\\s\\?&]+?", "https://rapidengage.com/api/oembed"],
[
"http://www.quizz.biz/quizz-[^\\/\\s\\?&]+?.html",
"http://www.quizz.biz/api/oembed",
],
[
"http://www.quiz.biz/quizz-[^\\/\\s\\?&]+?.html",
"http://www.quiz.biz/api/oembed",
],
[
"https://posixion.com/[^\\/\\s\\?&]+?/question/[^\\/\\s\\?&]+?",
"http://posixion.com/services/oembed/",
],
[
"https://posixion.com/question/[^\\/\\s\\?&]+?",
"http://posixion.com/services/oembed/",
],
[
"https://portfolium.com/entry/[^\\/\\s\\?&]+?",
"https://api.portfolium.com/oembed",
],
[
"https://app.sellwithport.com/#/buyer/[^\\/\\s\\?&]+?",
"https://api.sellwithport.com/v1.0/buyer/oembed",
],
[
"http://[^\\/\\s\\?&]+?.polldaddy.com/ratings/[^\\/\\s\\?&]+?",
"http://polldaddy.com/oembed/",
],
[
"http://[^\\/\\s\\?&]+?.polldaddy.com/poll/[^\\/\\s\\?&]+?",
"http://polldaddy.com/oembed/",
],
[
"http://[^\\/\\s\\?&]+?.polldaddy.com/s/[^\\/\\s\\?&]+?",
"http://polldaddy.com/oembed/",
],
[
"http://[^\\/\\s\\?&]+?.podbean.com/e/[^\\/\\s\\?&]+?",
"https://api.podbean.com/v1/oembed",
],
[
"https://[^\\/\\s\\?&]+?.podbean.com/e/[^\\/\\s\\?&]+?",
"https://api.podbean.com/v1/oembed",
],
[
"https://store.pixdor.com/map/[^\\/\\s\\?&]+?/show",
"https://store.pixdor.com/oembed",
],
[
"https://store.pixdor.com/place-marker-widget/[^\\/\\s\\?&]+?/show",
"https://store.pixdor.com/oembed",
],
["https://www.pastery.net/[^\\/\\s\\?&]+?", "https://www.pastery.net/oembed"],
["http://www.pastery.net/[^\\/\\s\\?&]+?", "https://www.pastery.net/oembed"],
["https://pastery.net/[^\\/\\s\\?&]+?", "https://www.pastery.net/oembed"],
["http://pastery.net/[^\\/\\s\\?&]+?", "https://www.pastery.net/oembed"],
[
"https://overflow.io/embed/[^\\/\\s\\?&]+?",
"https://overflow.io/services/oembed",
],
["https://overflow.io/s/[^\\/\\s\\?&]+?", "https://overflow.io/services/oembed"],
["https://outplayed.tv/media/[^\\/\\s\\?&]+?", "https://outplayed.tv/oembed"],
["https://www.oumy.com/v/[^\\/\\s\\?&]+?", "https://www.oumy.com/oembed"],
[
"http://orbitvu.co/001/[^\\/\\s\\?&]+?/1/2/orbittour/[^\\/\\s\\?&]+?/view",
"http://orbitvu.co/service/oembed",
],
[
"http://orbitvu.co/001/[^\\/\\s\\?&]+?/2/orbittour/[^\\/\\s\\?&]+?/view",
"http://orbitvu.co/service/oembed",
],
[
"http://orbitvu.co/001/[^\\/\\s\\?&]+?/ov3602/[^\\/\\s\\?&]+?/view",
"http://orbitvu.co/service/oembed",
],
[
"http://orbitvu.co/001/[^\\/\\s\\?&]+?/ov3601/[^\\/\\s\\?&]+?/view",
"http://orbitvu.co/service/oembed",
],
[
"http://orbitvu.co/001/[^\\/\\s\\?&]+?/ov3601/view",
"http://orbitvu.co/service/oembed",
],
[
"https://orbitvu.co/001/[^\\/\\s\\?&]+?/1/2/orbittour/[^\\/\\s\\?&]+?/view",
"http://orbitvu.co/service/oembed",
],
[
"https://orbitvu.co/001/[^\\/\\s\\?&]+?/2/orbittour/[^\\/\\s\\?&]+?/view",
"http://orbitvu.co/service/oembed",
],
[
"https://orbitvu.co/001/[^\\/\\s\\?&]+?/ov3602/[^\\/\\s\\?&]+?/view",
"http://orbitvu.co/service/oembed",
],
[
"https://orbitvu.co/001/[^\\/\\s\\?&]+?/ov3601/[^\\/\\s\\?&]+?/view",
"http://orbitvu.co/service/oembed",
],
[
"https://orbitvu.co/001/[^\\/\\s\\?&]+?/ov3601/view",
"http://orbitvu.co/service/oembed",
],
["http://on.aol.com/video/[^\\/\\s\\?&]+?", "http://on.aol.com/api"],
[
"https://omniscope.me/[^\\/\\s\\?&]+?",
"https://omniscope.me/_global_/oembed/json",
],
[
"http://official.fm/playlists/[^\\/\\s\\?&]+?",
"http://official.fm/services/oembed.json",
],
[
"http://official.fm/tracks/[^\\/\\s\\?&]+?",
"http://official.fm/services/oembed.json",
],
["https://odds.com.au/[^\\/\\s\\?&]+?", "https://www.odds.com.au/api/oembed/"],
["https://www.odds.com.au/[^\\/\\s\\?&]+?", "https://www.odds.com.au/api/oembed/"],
[
"http://[^\\/\\s\\?&]+?.nfb.ca/film/[^\\/\\s\\?&]+?",
"http://www.nfb.ca/remote/services/oembed/",
],
[
"https://naturalatlas.com/[^\\/\\s\\?&]+?/[^\\/\\s\\?&]+?/[^\\/\\s\\?&]+?/[^\\/\\s\\?&]+?",
"https://naturalatlas.com/oembed.json",
],
[
"https://naturalatlas.com/[^\\/\\s\\?&]+?/[^\\/\\s\\?&]+?/[^\\/\\s\\?&]+?",
"https://naturalatlas.com/oembed.json",
],
[
"https://naturalatlas.com/[^\\/\\s\\?&]+?/[^\\/\\s\\?&]+?",
"https://naturalatlas.com/oembed.json",
],
[
"https://naturalatlas.com/[^\\/\\s\\?&]+?",
"https://naturalatlas.com/oembed.json",
],
["https://www.nb.no/items/[^\\/\\s\\?&]+?", "https://api.nb.no/catalog/v1/oembed"],
[
"https://new.media.zhdk.ch/signatur/[^\\/\\s\\?&]+?",
"https://www.nanoo.tv/services/oembed",
],
[
"https://media.zhdk.ch/signatur/[^\\/\\s\\?&]+?",
"https://www.nanoo.tv/services/oembed",
],
[
"http://new.media.zhdk.ch/signatur/[^\\/\\s\\?&]+?",
"https://www.nanoo.tv/services/oembed",
],
[
"http://media.zhdk.ch/signatur/[^\\/\\s\\?&]+?",
"https://www.nanoo.tv/services/oembed",
],
["https://nanoo.pro/link/[^\\/\\s\\?&]+?", "https://www.nanoo.tv/services/oembed"],
[
"https://[^\\/\\s\\?&]+?.nanoo.pro/link/[^\\/\\s\\?&]+?",
"https://www.nanoo.tv/services/oembed",
],
["https://nanoo.tv/link/[^\\/\\s\\?&]+?", "https://www.nanoo.tv/services/oembed"],
[
"https://[^\\/\\s\\?&]+?.nanoo.tv/link/[^\\/\\s\\?&]+?",
"https://www.nanoo.tv/services/oembed",
],
["http://nanoo.pro/link/[^\\/\\s\\?&]+?", "https://www.nanoo.tv/services/oembed"],
[
"http://[^\\/\\s\\?&]+?.nanoo.pro/link/[^\\/\\s\\?&]+?",
"https://www.nanoo.tv/services/oembed",
],
["http://nanoo.tv/link/[^\\/\\s\\?&]+?", "https://www.nanoo.tv/services/oembed"],
[
"http://[^\\/\\s\\?&]+?.nanoo.tv/link/[^\\/\\s\\?&]+?",
"https://www.nanoo.tv/services/oembed",
],
["https://namchey.com/embeds/[^\\/\\s\\?&]+?", "https://namchey.com/api/oembed"],
["https://mybeweeg.com/w/[^\\/\\s\\?&]+?", "https://mybeweeg.com/services/oembed"],
[
"https://musicboxmaniacs.com/explore/melody/[^\\/\\s\\?&]+?",
"https://musicboxmaniacs.com/embed/",
],
[
"https://m-roll.morphcast.com/mroll/[^\\/\\s\\?&]+?",
"https://m-roll.morphcast.com/service/oembed",
],
[
"https://beta.modelo.io/embedded/[^\\/\\s\\?&]+?",
"https://portal.modelo.io/oembed",
],
["http://moby.to/[^\\/\\s\\?&]+?", "http://api.mobypicture.com/oEmbed"],
[
"http://www.mobypicture.com/user/[^\\/\\s\\?&]+?/view/[^\\/\\s\\?&]+?",
"http://api.mobypicture.com/oEmbed",
],
[
"https://www.mixcloud.com/[^\\/\\s\\?&]+?/[^\\/\\s\\?&]+?/",
"https://www.mixcloud.com/oembed/",
],
[
"http://www.mixcloud.com/[^\\/\\s\\?&]+?/[^\\/\\s\\?&]+?/",
"https://www.mixcloud.com/oembed/",
],
["http://meetu.ps/[^\\/\\s\\?&]+?", "https://api.meetup.com/oembed"],
["https://meetup.com/[^\\/\\s\\?&]+?", "https://api.meetup.com/oembed"],
["https://www.meetup.com/[^\\/\\s\\?&]+?", "https://api.meetup.com/oembed"],
["http://meetup.com/[^\\/\\s\\?&]+?", "https://api.meetup.com/oembed"],
[
"https://medienarchiv.zhdk.ch/entries/[^\\/\\s\\?&]+?",
"https://medienarchiv.zhdk.ch/oembed.json",
],
["https://me.me/i/[^\\/\\s\\?&]+?", "https://me.me/oembed"],
[
"http://mathembed.com/latex\\?inputText=[^\\/\\s\\?&]+?",
"http://mathembed.com/oembed",
],
["https://app.ludus.one/[^\\/\\s\\?&]+?", "https://app.ludus.one/oembed"],
[
"https://livestream.com/[^\\/\\s\\?&]+?/[^\\/\\s\\?&]+?/videos/[^\\/\\s\\?&]+?",
"https://livestream.com/oembed",
],
[
"https://livestream.com/[^\\/\\s\\?&]+?/[^\\/\\s\\?&]+?",
"https://livestream.com/oembed",
],
[
"https://livestream.com/[^\\/\\s\\?&]+?/events/[^\\/\\s\\?&]+?/videos/[^\\/\\s\\?&]+?",
"https://livestream.com/oembed",
],
[
"https://livestream.com/[^\\/\\s\\?&]+?/events/[^\\/\\s\\?&]+?",
"https://livestream.com/oembed",
],
[
"https://livestream.com/accounts/[^\\/\\s\\?&]+?/events/[^\\/\\s\\?&]+?/videos/[^\\/\\s\\?&]+?",
"https://livestream.com/oembed",
],
[
"https://livestream.com/accounts/[^\\/\\s\\?&]+?/events/[^\\/\\s\\?&]+?",
"https://livestream.com/oembed",
],
[
"https://pod.univ-lille.fr/video/[^\\/\\s\\?&]+?",
"https://pod.univ-lille.fr/oembed",
],
["http://learningapps.org/[^\\/\\s\\?&]+?", "http://learningapps.org/oembed.php"],
["https://jdr.knacki.info/meuh/[^\\/\\s\\?&]+?", "https://jdr.knacki.info/oembed"],
["http://jdr.knacki.info/meuh/[^\\/\\s\\?&]+?", "https://jdr.knacki.info/oembed"],
[
"http://www.kitchenbowl.com/recipe/[^\\/\\s\\?&]+?",
"http://www.kitchenbowl.com/oembed",
],
["https://kit.com/[^\\/\\s\\?&]+?/[^\\/\\s\\?&]+?", "https://embed.kit.com/oembed"],
["http://kit.com/[^\\/\\s\\?&]+?/[^\\/\\s\\?&]+?", "https://embed.kit.com/oembed"],
[
"https://www.kidoju.com/fr/x/[^\\/\\s\\?&]+?/[^\\/\\s\\?&]+?",
"https://www.kidoju.com/api/oembed",
],
[
"https://www.kidoju.com/en/x/[^\\/\\s\\?&]+?/[^\\/\\s\\?&]+?",
"https://www.kidoju.com/api/oembed",
],
[
"http://www.kickstarter.com/projects/[^\\/\\s\\?&]+?",
"http://www.kickstarter.com/services/oembed",
],
["https://tv.kakao.com/channel/l/[^\\/\\s\\?&]+?", "https://tv.kakao.com/oembed"],
[
"https://tv.kakao.com/channel/[^\\/\\s\\?&]+?/livelink/[^\\/\\s\\?&]+?",
"https://tv.kakao.com/oembed",
],
["https://tv.kakao.com/channel/v/[^\\/\\s\\?&]+?", "https://tv.kakao.com/oembed"],
[
"https://tv.kakao.com/channel/[^\\/\\s\\?&]+?/cliplink/[^\\/\\s\\?&]+?",
"https://tv.kakao.com/oembed",
],
[
"https://issuu.com/[^\\/\\s\\?&]+?/docs/[^\\/\\s\\?&]+?",
"https://issuu.com/oembed",
],
["https://www.isnare.com/[^\\/\\s\\?&]+?", "https://www.isnare.com/oembed/"],
["https://www.instagr.am/p/[^\\/\\s\\?&]+?", "https://api.instagram.com/oembed"],
["https://www.instagram.com/p/[^\\/\\s\\?&]+?", "https://api.instagram.com/oembed"],
["https://instagr.am/p/[^\\/\\s\\?&]+?", "https://api.instagram.com/oembed"],
["https://instagram.com/p/[^\\/\\s\\?&]+?", "https://api.instagram.com/oembed"],
["http://www.instagr.am/p/[^\\/\\s\\?&]+?", "https://api.instagram.com/oembed"],
["http://www.instagram.com/p/[^\\/\\s\\?&]+?", "https://api.instagram.com/oembed"],
["http://instagr.am/p/[^\\/\\s\\?&]+?", "https://api.instagram.com/oembed"],
["http://instagram.com/p/[^\\/\\s\\?&]+?", "https://api.instagram.com/oembed"],
[
"http://[^\\/\\s\\?&]+?.inphood.com/[^\\/\\s\\?&]+?",
"http://api.inphood.com/oembed",
],
["https://www.inoreader.com/oembed/", "https://www.inoreader.com/oembed/api/"],
[
"https://www.injurymap.com/exercises/[^\\/\\s\\?&]+?",
"https://www.injurymap.com/services/oembed",
],
[
"https://[^\\/\\s\\?&]+?.infoveave.net/P/[^\\/\\s\\?&]+?",
"https://infoveave.net/services/oembed/",
],
[
"https://[^\\/\\s\\?&]+?.infoveave.net/E/[^\\/\\s\\?&]+?",
"https://infoveave.net/services/oembed/",
],
["https://infogr.am/[^\\/\\s\\?&]+?", "https://infogr.am/oembed"],
[
"https://player.indacolive.com/player/jwp/clients/[^\\/\\s\\?&]+?",
"https://player.indacolive.com/services/oembed",
],
["http://ifttt.com/recipes/[^\\/\\s\\?&]+?", "http://www.ifttt.com/oembed/"],
["http://www.ifixit.com/Guide/View/[^\\/\\s\\?&]+?", "http://www.ifixit.com/Embed"],
[
"http://www.hulu.com/watch/[^\\/\\s\\?&]+?",
"http://www.hulu.com/api/oembed.json",
],
[
"http://huffduffer.com/[^\\/\\s\\?&]+?/[^\\/\\s\\?&]+?",
"http://huffduffer.com/oembed",
],
[
"https://hearthis.at/[^\\/\\s\\?&]+?/[^\\/\\s\\?&]+?/",
"https://hearthis.at/oembed/",
],
["https://gyazo.com/[^\\/\\s\\?&]+?", "https://api.gyazo.com/api/oembed"],
["https://gtchannel.com/watch/[^\\/\\s\\?&]+?", "https://api.luminery.com/oembed"],
[
"https://media.giphy.com/media/[^\\/\\s\\?&]+?/giphy.gif",
"https://giphy.com/services/oembed",
],
["http://gph.is/[^\\/\\s\\?&]+?", "https://giphy.com/services/oembed"],
["https://giphy.com/gifs/[^\\/\\s\\?&]+?", "https://giphy.com/services/oembed"],
[
"https://www.gifnote.com/play/[^\\/\\s\\?&]+?",
"https://www.gifnote.com/services/oembed",
],
["https://www.gfycat.com/[^\\/\\s\\?&]+?", "https://api.gfycat.com/v1/oembed"],
["https://gfycat.com/[^\\/\\s\\?&]+?", "https://api.gfycat.com/v1/oembed"],
["http://www.gfycat.com/[^\\/\\s\\?&]+?", "https://api.gfycat.com/v1/oembed"],
["http://gfycat.com/[^\\/\\s\\?&]+?", "https://api.gfycat.com/v1/oembed"],
["http://gty.im/[^\\/\\s\\?&]+?", "http://embed.gettyimages.com/oembed"],
[
"http://germany.geograph.org/[^\\/\\s\\?&]+?",
"http://geo.hlipp.de/restapi.php/api/oembed",
],
[
"http://geo.hlipp.de/[^\\/\\s\\?&]+?",
"http://geo.hlipp.de/restapi.php/api/oembed",
],
[
"http://geo-en.hlipp.de/[^\\/\\s\\?&]+?",
"http://geo.hlipp.de/restapi.php/api/oembed",
],
[
"http://[^\\/\\s\\?&]+?.channel.geographs.org/[^\\/\\s\\?&]+?",
"http://www.geograph.org.gg/api/oembed",
],
[
"http://channel-islands.geographs.org/[^\\/\\s\\?&]+?",
"http://www.geograph.org.gg/api/oembed",
],
[
"http://channel-islands.geograph.org/[^\\/\\s\\?&]+?",
"http://www.geograph.org.gg/api/oembed",
],
[
"http://[^\\/\\s\\?&]+?.geograph.org.je/[^\\/\\s\\?&]+?",
"http://www.geograph.org.gg/api/oembed",
],
[
"http://[^\\/\\s\\?&]+?.geograph.org.gg/[^\\/\\s\\?&]+?",
"http://www.geograph.org.gg/api/oembed",
],
[
"http://[^\\/\\s\\?&]+?.wikimedia.org/[^\\/\\s\\?&]+?_geograph.org.uk_[^\\/\\s\\?&]+?",
"http://api.geograph.org.uk/api/oembed",
],
[
"http://[^\\/\\s\\?&]+?.geograph.ie/[^\\/\\s\\?&]+?",
"http://api.geograph.org.uk/api/oembed",
],
[
"http://[^\\/\\s\\?&]+?.geograph.co.uk/[^\\/\\s\\?&]+?",
"http://api.geograph.org.uk/api/oembed",
],
[
"http://[^\\/\\s\\?&]+?.geograph.org.uk/[^\\/\\s\\?&]+?",
"http://api.geograph.org.uk/api/oembed",
],
[
"http://www.funnyordie.com/videos/[^\\/\\s\\?&]+?",
"http://www.funnyordie.com/oembed.json",
],
["https://framebuzz.com/v/[^\\/\\s\\?&]+?", "https://framebuzz.com/oembed/"],
["http://framebuzz.com/v/[^\\/\\s\\?&]+?", "https://framebuzz.com/oembed/"],
[
"https://fiso.foxsports.com.au/isomorphic-widget/[^\\/\\s\\?&]+?",
"https://fiso.foxsports.com.au/oembed",
],
[
"http://fiso.foxsports.com.au/isomorphic-widget/[^\\/\\s\\?&]+?",
"https://fiso.foxsports.com.au/oembed",
],
["https://catapult.fontself.com/[^\\/\\s\\?&]+?", "https://oembed.fontself.com/"],
[
"https://public.flourish.studio/story/[^\\/\\s\\?&]+?",
"https://app.flourish.studio/api/v1/oembed",
],
[
"https://public.flourish.studio/visualisation/[^\\/\\s\\?&]+?",
"https://app.flourish.studio/api/v1/oembed",
],
["https://flic.kr/p/[^\\/\\s\\?&]+?", "https://www.flickr.com/services/oembed/"],
[
"https://[^\\/\\s\\?&]+?.flickr.com/photos/[^\\/\\s\\?&]+?",
"https://www.flickr.com/services/oembed/",
],
["http://flic.kr/p/[^\\/\\s\\?&]+?", "https://www.flickr.com/services/oembed/"],
[
"http://[^\\/\\s\\?&]+?.flickr.com/photos/[^\\/\\s\\?&]+?",
"https://www.flickr.com/services/oembed/",
],
[
"https://[^\\/\\s\\?&]+?.flat.io/score/[^\\/\\s\\?&]+?",
"https://flat.io/services/oembed",
],
["https://flat.io/score/[^\\/\\s\\?&]+?", "https://flat.io/services/oembed"],
["https://www.fite.tv/watch/[^\\/\\s\\?&]+?", "https://www.fite.tv/oembed"],
[
"https://faithlifetv.com/media/resource/[^\\/\\s\\?&]+?/[^\\/\\s\\?&]+?",
"https://faithlifetv.com/api/oembed",
],
[
"https://faithlifetv.com/media/assets/[^\\/\\s\\?&]+?",
"https://faithlifetv.com/api/oembed",
],
[
"https://faithlifetv.com/media/[^\\/\\s\\?&]+?",
"https://faithlifetv.com/api/oembed",
],
[
"https://faithlifetv.com/items/resource/[^\\/\\s\\?&]+?/[^\\/\\s\\?&]+?",
"https://faithlifetv.com/api/oembed",
],
[
"https://faithlifetv.com/items/[^\\/\\s\\?&]+?",
"https://faithlifetv.com/api/oembed",
],
[
"https://app.getfader.com/projects/[^\\/\\s\\?&]+?/publish",
"https://app.getfader.com/api/oembed",
],
[
"https://www.facebook.com/video.php",
"https://www.facebook.com/plugins/video/oembed.json",
],
[
"https://www.facebook.com/[^\\/\\s\\?&]+?/videos/[^\\/\\s\\?&]+?",
"https://www.facebook.com/plugins/video/oembed.json",
],
[
"https://www.facebook.com/notes/[^\\/\\s\\?&]+?/[^\\/\\s\\?&]+?/[^\\/\\s\\?&]+?",
"https://www.facebook.com/plugins/post/oembed.json",
],
[
"https://www.facebook.com/questions/[^\\/\\s\\?&]+?",
"https://www.facebook.com/plugins/post/oembed.json",
],
[
"https://www.facebook.com/media/set\\?set=[^\\/\\s\\?&]+?",
"https://www.facebook.com/plugins/post/oembed.json",
],
[
"https://www.facebook.com/permalink.php",
"https://www.facebook.com/plugins/post/oembed.json",
],
[
"https://www.facebook.com/[^\\/\\s\\?&]+?/activity/[^\\/\\s\\?&]+?",
"https://www.facebook.com/plugins/post/oembed.json",
],
[
"https://www.facebook.com/photo.php",
"https://www.facebook.com/plugins/post/oembed.json",
],
[
"https://www.facebook.com/photo.php[^\\/\\s\\?&]+?",
"https://www.facebook.com/plugins/post/oembed.json",
],
[
"https://www.facebook.com/[^\\/\\s\\?&]+?/photos/[^\\/\\s\\?&]+?",
"https://www.facebook.com/plugins/post/oembed.json",
],
[
"https://www.facebook.com/photos/[^\\/\\s\\?&]+?",
"https://www.facebook.com/plugins/post/oembed.json",
],
[
"https://www.facebook.com/[^\\/\\s\\?&]+?/posts/[^\\/\\s\\?&]+?",
"https://www.facebook.com/plugins/post/oembed.json",
],
["https://eyrie.io/sparkfun/[^\\/\\s\\?&]+?", "https://eyrie.io/v1/oembed"],
["https://eyrie.io/board/[^\\/\\s\\?&]+?", "https://eyrie.io/v1/oembed"],
["https://ethfiddle.com/[^\\/\\s\\?&]+?", "https://ethfiddle.com/services/oembed/"],
["http://embedarticles.com/[^\\/\\s\\?&]+?", "http://embedarticles.com/oembed/"],
[
"http://egliseinfo.catholique.fr/[^\\/\\s\\?&]+?",
"http://egliseinfo.catholique.fr/api/oembed",
],
["http://edocr.com/docs/[^\\/\\s\\?&]+?", "http://edocr.com/api/oembed"],
["https://d.tube/v/[^\\/\\s\\?&]+?", "https://api.d.tube/oembed"],
["http://dotsub.com/view/[^\\/\\s\\?&]+?", "http://dotsub.com/services/oembed"],
["http://docdro.id/[^\\/\\s\\?&]+?", "https://www.docdroid.net/api/oembed"],
["https://docdro.id/[^\\/\\s\\?&]+?", "https://www.docdroid.net/api/oembed"],
[
"http://[^\\/\\s\\?&]+?.docdroid.net/[^\\/\\s\\?&]+?",
"https://www.docdroid.net/api/oembed",
],
[
"https://[^\\/\\s\\?&]+?.docdroid.net/[^\\/\\s\\?&]+?",
"https://www.docdroid.net/api/oembed",
],
[
"http://www.dipity.com/[^\\/\\s\\?&]+?/[^\\/\\s\\?&]+?/",
"http://www.dipity.com/oembed/timeline/",
],
[
"https://www.ultimedia.com/default/index/videogeneric/id/[^\\/\\s\\?&]+?",
"https://www.ultimedia.com/api/search/oembed",
],
[
"https://www.ultimedia.com/default/index/videogeneric/id/[^\\/\\s\\?&]+?/showtitle/1/viewnc/1",
"https://www.ultimedia.com/api/search/oembed",
],
[
"https://www.ultimedia.com/central/video/edit/id/[^\\/\\s\\?&]+?/topic_id/[^\\/\\s\\?&]+?/",
"https://www.ultimedia.com/api/search/oembed",
],
[
"https://[^\\/\\s\\?&]+?.didacte.com/a/course/[^\\/\\s\\?&]+?",
"https://*.didacte.com/cards/oembed'",
],
[
"https://[^\\/\\s\\?&]+?.deviantart.com/[^\\/\\s\\?&]+?#/d[^\\/\\s\\?&]+?",
"http://backend.deviantart.com/oembed",
],
["https://sta.sh/[^\\/\\s\\?&]+?", "http://backend.deviantart.com/oembed"],
[
"https://[^\\/\\s\\?&]+?.deviantart.com/[^\\/\\s\\?&]+?/art/[^\\/\\s\\?&]+?",
"http://backend.deviantart.com/oembed",
],
[
"https://[^\\/\\s\\?&]+?.deviantart.com/art/[^\\/\\s\\?&]+?",
"http://backend.deviantart.com/oembed",
],
["http://sta.sh/[^\\/\\s\\?&]+?", "http://backend.deviantart.com/oembed"],
["http://fav.me/[^\\/\\s\\?&]+?", "http://backend.deviantart.com/oembed"],
[
"http://[^\\/\\s\\?&]+?.deviantart.com/[^\\/\\s\\?&]+?#/d[^\\/\\s\\?&]+?",
"http://backend.deviantart.com/oembed",
],
[
"http://[^\\/\\s\\?&]+?.deviantart.com/art/[^\\/\\s\\?&]+?",
"http://backend.deviantart.com/oembed",
],
[
"https://[^\\/\\s\\?&]+?.deseretnews.com/[^\\/\\s\\?&]+?",
"https://embed.deseretnews.com/",
],
[
"https://www.dailymotion.com/video/[^\\/\\s\\?&]+?",
"https://www.dailymotion.com/services/oembed",
],
[
"http://www.dailymile.com/people/[^\\/\\s\\?&]+?/entries/[^\\/\\s\\?&]+?",
"http://api.dailymile.com/oembed?format=json",
],
[
"https://app.cyranosystems.com/msg/[^\\/\\s\\?&]+?",
"https://staging.cyranosystems.com/oembed",
],
[
"https://staging.cyranosystems.com/msg/[^\\/\\s\\?&]+?",
"https://staging.cyranosystems.com/oembed",
],
[
"http://crowdranking.com/[^\\/\\s\\?&]+?/[^\\/\\s\\?&]+?",
"http://crowdranking.com/api/oembed.json",
],
["http://coub.com/embed/[^\\/\\s\\?&]+?", "http://coub.com/api/oembed.json"],
["http://coub.com/view/[^\\/\\s\\?&]+?", "http://coub.com/api/oembed.json"],
["https://commaful.com/play/[^\\/\\s\\?&]+?", "https://commaful.com/api/oembed/"],
[
"http://www.collegehumor.com/video/[^\\/\\s\\?&]+?",
"http://www.collegehumor.com/oembed.json",
],
["https://codesandbox.io/embed/[^\\/\\s\\?&]+?", "https://codesandbox.io/oembed"],
["https://codesandbox.io/s/[^\\/\\s\\?&]+?", "https://codesandbox.io/oembed"],
[
"https://www.codepoints.net/[^\\/\\s\\?&]+?",
"https://codepoints.net/api/v1/oembed",
],
[
"http://www.codepoints.net/[^\\/\\s\\?&]+?",
"https://codepoints.net/api/v1/oembed",
],
["https://codepoints.net/[^\\/\\s\\?&]+?", "https://codepoints.net/api/v1/oembed"],
["http://codepoints.net/[^\\/\\s\\?&]+?", "https://codepoints.net/api/v1/oembed"],
["https://codepen.io/[^\\/\\s\\?&]+?", "http://codepen.io/api/oembed"],
["http://codepen.io/[^\\/\\s\\?&]+?", "http://codepen.io/api/oembed"],
[
"https://codehs.com/editor/share_abacus/[^\\/\\s\\?&]+?",
"https://codehs.com/api/sharedprogram/*/oembed/",
],
["http://clyp.it/playlist/[^\\/\\s\\?&]+?", "http://api.clyp.it/oembed/"],
["http://clyp.it/[^\\/\\s\\?&]+?", "http://api.clyp.it/oembed/"],
[
"https://www.clipland.com/v/[^\\/\\s\\?&]+?",
"https://www.clipland.com/api/oembed",
],
[
"http://www.clipland.com/v/[^\\/\\s\\?&]+?",
"https://www.clipland.com/api/oembed",
],
[
"https://www.circuitlab.com/circuit/[^\\/\\s\\?&]+?",
"https://www.circuitlab.com/circuit/oembed/",
],
["http://chirb.it/[^\\/\\s\\?&]+?", |