""" Functionality for analysis of single quantum dots
For more details see https://arxiv.org/abs/1603.02274
"""
# %%
import scipy
import scipy.ndimage
import scipy.optimize
import numpy as np
import matplotlib.pyplot as plt
import warnings
import logging
import qcodes
from qcodes.plots.qcmatplotlib import MatPlot
from qtt.data import dataset2Dmetadata, dataset2image, show2D
import qtt.data
import qtt.pgeometry as pgeometry
import qtt.utilities.tools
from qtt.pgeometry import plot2Dline
from qtt.algorithms.generic import detect_blobs_binary, weightedCentroid
try:
import cv2
except ImportError:
import qtt.exceptions
warnings.warn('could not find opencv, not all functionality available',
qtt.exceptions.MissingOptionalPackageWarning)
# %%
def _onedotGetBlobs(fimg, fig=None):
""" Extract blobs for a 2D scan of a one-dot """
# thr=otsu(fimg)
thr = np.median(fimg)
x = np.percentile(fimg, 99.5)
thr = thr + (x - thr) * .5
bim = 30 * (fimg > thr).astype(np.uint8)
xx = detect_blobs_binary(bim)
if int(cv2.__version__[0]) >= 4:
contours, _ = cv2.findContours(
bim.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
elif int(cv2.__version__[0]) >= 3:
_, contours, _ = cv2.findContours(
bim.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
else:
contours, _ = cv2.findContours(
bim.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
qq = []
for ii in range(len(contours)):
qq += [weightedCentroid(fimg, contours, contourIdx=ii, fig=None)]
xxw = np.array(qq)
if fig is not None:
plt.figure(fig)
plt.clf()
pgeometry.imshowz(fimg, interpolation='nearest')
plt.axis('image')
plt.colorbar()
pgeometry.plotPoints(xx.T, '.g', markersize=16, label='blob centres')
plt.title('Response image with detected blobs')
plt.figure(fig + 1)
plt.clf()
pgeometry.imshowz(bim, interpolation='nearest')
plt.axis('image')
plt.colorbar()
pgeometry.plotPoints(xxw.T, '.g', markersize=16, label='blob centres')
pgeometry.plotPoints(xx.T, '.m', markersize=12, label='blob centres (alternative)')
plt.title('Binary blobs')
pgeometry.tilefigs([fig, fig + 1], [2, 2])
return xxw, (xx, contours)
def _onedotSelectBlob(im, xx, fimg=None, verbose=0):
""" Select the best blob from a list of blob positions """
ims = qtt.algorithms.generic.smoothImage(im)
lowvalue = np.percentile(ims, 5)
highvalue = np.percentile(ims, 95)
thrvalue = lowvalue + (highvalue - lowvalue) * .1
goodidx = np.ones(len(xx))
for jj, p in enumerate(xx):
v = qtt.algorithms.generic.getValuePixel(ims, p)
if verbose:
print('_onedotSelectBlob %d: v %.2f/%.2f' % (jj, v, thrvalue))
if v < thrvalue:
goodidx[jj] = 0
lowvalue = np.percentile(im, 5)
highvalue = np.percentile(im, 95)
if verbose:
print('_onedotSelectBlob: good %s' % goodidx)
if xx.size == 0:
print('FIXME: better return value... ')
return np.array([1, 1])
score = xx[:, 0] - xx[:, 1]
score[goodidx == 0] += 10000
idx = np.argmin(score)
pt = xx[idx]
return pt
def onedotGetBalanceFine(impixel=None, dd=None, verbose=1, fig=None, baseangle=-np.pi / 4, units=None,
full_output=False):
""" Determine central position of Coulomb peak in 2D scan
The position is determined by scanning with Gabor filters and then performing blob detection
The image should be in pixel coordinates
Returns:
pt (array): detected point
results (dict): dictionary with all results
"""
extentscan, g0, g2, vstep, vsweep, arrayname = dataset2Dmetadata(dd, arrayname=None)
tr = qtt.data.image_transform(dd)
if impixel is None:
impixel, tr = dataset2image(dd, mode='pixel')
im = np.array(impixel)
else:
im = np.array(impixel)
theta0 = baseangle
step = np.abs(np.nanmean(np.diff(vstep)))
filters, angles, _ = qtt.algorithms.generic.makeCoulombFilter(theta0=theta0, step=step, fig=None)
lowvalue = np.percentile(im, 5)
highvalue = np.percentile(im, 95)
gfilter = filters[0]
fimg = cv2.filter2D(im, -1, gfilter)
bestvalue = highvalue * gfilter[gfilter > 0].sum() + lowvalue * gfilter[gfilter < 0].sum()
xxw, _ = _onedotGetBlobs(fimg, fig=None)
vv = _onedotSelectBlob(im, xxw, fimg=None)
ptpixel = np.array(vv).reshape((1, 2))
pt = tr.pixel2scan(ptpixel.T)
ptvalue = fimg[int(ptpixel[0, 1]), int(ptpixel[0, 0])]
if verbose:
print('onedotGetBalanceFine: point/best filter value: %.2f/%.2f' % (ptvalue, bestvalue))
if fig is not None:
od = None
xx = show2D(dd, impixel=im, fig=fig, verbose=1, title='input image for gabor', units=units)
if od is not None:
pt0 = od['balancepoint'].reshape((2, 1))
pgeometry.plotPoints(pt0, '.m', markersize=12)
plt.plot(pt[0], pt[1], '.', color=(0, .8, 0), markersize=16)
plt.axis('image')
xx = show2D(dd, impixel=fimg, fig=fig + 1, verbose=1, title='response image for gabor', units=units)
plt.plot(pt[0], pt[1], '.', color=(0, .8, 0), markersize=16, label='balance point fine')
plt.axis('image')
acc = 1
if np.abs(ptvalue) / bestvalue < 0.05:
acc = 0
logging.debug('accuracy: %d: %.2f' % (acc, (np.abs(ptvalue) / bestvalue)))
results = dict({'step': step, 'ptv': pt, 'ptpixel': ptpixel, 'accuracy': acc, 'gfilter': gfilter})
if full_output:
results['fimg'] = fimg
return pt, results
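# Illustrative usage (a sketch; assumes `dataset` is a qcodes 2D scan of a one-dot device):
#   pt, results = onedotGetBalanceFine(dd=dataset, fig=100)
#   print('Coulomb peak at %s (scan coordinates), accuracy %d' % (pt, results['accuracy']))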
# %%
def costscoreOD(a, b, pt, ww, verbose=0, output=False):
""" Cost function for simple fit of one-dot open area
Args:
a, b (float): positions along the horizontal (a) and vertical (b) image edges
pt (numpy array): point in the image
ww (array): binary image of the thresholded open area
verbose (int): verbosity level
output (bool): if True, also return the polygon points and the filled image
Returns:
cost (float)
"""
pts = np.array(
[[a, 0], pt, [ww.shape[1] - 1, b], [ww.shape[1] - 1, 0], [a, 0]])
pts = pts.reshape((5, 1, 2)).astype(int)
imx = 0 * ww.copy().astype(np.uint8)
cv2.fillConvexPoly(imx, pts, color=[1])
area = np.abs(pgeometry.polyarea(pts.reshape((-1, 2))))
cost = -(imx == ww).sum()
# add penalty for moving out of range
cost += (.025 * ww.size) * np.maximum(b - ww.shape[0] - 1, 0) / ww.shape[0]
cost += (.025 * ww.size) * np.maximum(-a, 0) / ww.shape[1]
cost += (.025 * ww.size) * 2 * (pts[2, 0, 1] < 0)
cost += (.025 * ww.size) * 2 * (pt[0] < 0) # x too far left
cost += (.025 * ww.size) * 2 * (pt[1] > ww.shape[0]) # y too far down
cost += 1e-3 * area
if verbose:
print('costscore %.2f' % cost)
if output:
return cost, pts, imx
else:
return cost
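# Worked example (illustrative values): for a binary open-area image ww of shape (100, 100),
#   cost = costscoreOD(10, 80, np.array([40, 20]), ww)
# scores the quadrilateral (10, 0) -> (40, 20) -> (99, 80) -> (99, 0); the better the filled
# polygon matches ww, the lower (more negative) the cost, while the penalty terms discourage
# polygons that leave the image and the area term favours smaller polygons.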
# %%
def onedotGetBalance(dataset, verbose=1, fig=None, drawpoly=False, polylinewidth=2,
linecolor='c', full_output=False, od=None):
""" Determine tuning point from a 2D scan of a 1-dot
This function performs a simple fitting of the open (conducting region).
Args:
dataset (qcodes DataSet): 2D scan containing the charge stability diagram
od (one-dot structure or None): deprecated argument carrying one-dot data
Returns:
fitresults (dict): dictionary with fitting results
od (obj): modified one-dot object
"""
if od is not None:
warnings.warn('od argument will be removed in the future', DeprecationWarning)
extentscan, g0, g2, vstep, vsweep, arrayname = dataset2Dmetadata(dataset, arrayname=None)
im, tr = qtt.data.dataset2image(dataset)
extentImageMatlab = tr.matplotlib_image_extent()
ims = im.copy()
# simple smoothing of the image
kk = np.ones((3, 3)) / 9.
for _ in range(2):
ims = scipy.ndimage.convolve(ims, kk, mode='nearest', cval=0.0)
r = np.percentile(ims, 99) - np.percentile(ims, 1)
lv = np.percentile(ims, 2) + r / 100
x = ims.flatten()
lvstd = np.std(x[x < lv])
lv = lv + lvstd / 2 # works for very smooth images
lv = (.45 * pgeometry.otsu(ims) + .55 * lv) # more robust
if verbose >= 2:
print('onedotGetBalance: threshold for low value %.1f' % lv)
# balance point: method 1 (first point above threshold of 45 degree line)
try:
ww = np.nonzero(ims > lv)
zz = -ww[0] + ww[1]
idx = zz.argmin()
pt = np.array([[ww[1][idx]], [ww[0][idx]]])
ptv = tr.pixel2scan(pt)
except Exception:
print('qtt: error in onedotGetBalance: please debug')
idx = 0
pt = np.array([[int(vstep.size / 2)], [int(vsweep.size / 2)]])
ptv = np.array([[vstep[pt[0, 0]]], [vsweep[-pt[1, 0]]]])
# balance point: method 2 (fit quadrilateral)
wwarea = ims > lv
x0 = np.array([pt[0] - .1 * im.shape[1], pt[1] + .1 * im.shape[0], pt[0], pt[1]]).reshape(4,) # initial square
def ff(x): return costscoreOD(x[0], x[1], x[2:4], wwarea)
# scipy.optimize.show_options(method='Nelder-Mead')
opts = dict({'disp': verbose >= 2, 'fatol': 1e-6, 'xatol': 1e-5})
powell_opts = dict({'disp': verbose >= 2, 'ftol': 1e-6, 'xtol': 1e-5})
xx = scipy.optimize.minimize(ff, x0, method='Nelder-Mead', options=opts)
# print(' optimize: %f->%f' % (ff(x0), ff(xx.x)) )
opts['disp'] = verbose >= 2
xx = scipy.optimize.minimize(ff, xx.x, method='Powell', options=powell_opts)
x = xx.x
cost, pts, imx = costscoreOD(x0[0], x0[1], x0[2:4], wwarea, output=True)
balancefitpixel0 = pts.reshape((-1, 2)).T.copy()
cost, pts, imx = costscoreOD(x[0], x[1], x[2:4], wwarea, output=True)
pt = pts[1, :, :].transpose()
fitresults = {}
fitresults['balancepoint0'] = ptv
fitresults['balancepointpixel'] = pt
fitresults['balancepointpolygon'] = tr.pixel2scan(pt)
fitresults['balancepoint'] = tr.pixel2scan(pt)
fitresults['balancefitpixel'] = pts.reshape((-1, 2)).T.copy()
fitresults['balancefit'] = tr.pixel2scan(fitresults['balancefitpixel'])
fitresults['balancefit1'] = tr.pixel2scan(balancefitpixel0)
fitresults['setpoint'] = fitresults['balancepoint'] + 8
fitresults['x0'] = x0
fitresults['gatevalues'] = dataset.metadata.get('allgatevalues', None)
if od is not None:
fitresults['gatevalues'][od['gates'][2]] = float(fitresults['balancepoint'][0])
fitresults['gatevalues'][od['gates'][0]] = float(fitresults['balancepoint'][1])
ptv = fitresults['balancepoint']
if od is not None:
# copy results into od structure
for k in fitresults:
od[k] = fitresults[k]
od['onedotbalance'] = fitresults
odname = od['name']
else:
odname = 'one-dot'
if verbose:
print('onedotGetBalance %s: balance point 0 at: %.1f %.1f [mV]' % (odname, ptv[0, 0], ptv[1, 0]))
print('onedotGetBalance: balance point at: %.1f %.1f [mV]' % (
fitresults['balancepoint'][0, 0], fitresults['balancepoint'][1, 0]))
if verbose >= 3:
# %
plt.figure(9)
plt.clf()
plt.imshow(im, interpolation='nearest')
pgeometry.plotPoints(balancefitpixel0, '.-r', label='balancefitpixel0')
pgeometry.plotLabels(balancefitpixel0)
pgeometry.plotPoints(fitresults['balancefitpixel'], '.-m')
pgeometry.plotLabels(fitresults['balancefitpixel'])
cost, pts, imx = costscoreOD(x[0], x[1], x[2:4], wwarea, output=True, verbose=1)
# %
if fig is not None:
plot_onedot(fitresults, ds=dataset, verbose=2, fig=fig, linecolor='c',
ims=ims, extentImageMatlab=extentImageMatlab, lv=lv)
qtt.utilities.tools.showImage(im, extentImageMatlab, fig=fig+1)
if verbose >= 2 or drawpoly:
pgeometry.plotPoints(fitresults['balancefit'], '--', color=linecolor,
linewidth=polylinewidth, label='balancefit')
if verbose >= 2:
pgeometry.plotPoints(fitresults['balancepoint0'], '.r', markersize=13, label='balancepoint0')
pgeometry.plotPoints(fitresults['balancepoint'], '.m', markersize=17, label='balancepoint')
plt.axis('image')
if full_output:
fitresults['ims'] = ims
fitresults['lv'] = lv
else:
bot.say("Unknown error: " + block)
def doltaBlock(bot, name, project, target):
creds = getCreds(name)
if creds is None:
bot.say(CONTACT_OP)
return
site = getWiki(project)
if site is None:
bot.say("I don't know that wiki.")
return
csrfToken = getCSRF(bot, site, creds, "csrf")
if csrfToken is False:
return
reqBlock = {
"action": "block",
"user": target,
"expiry": "1week",
"reason": "LTA / Block evasion",
"token": csrfToken,
"noemail":"",
"nocreate":"",
"reblock":"",
"autoblock":"",
"format": "json"
}
# Send block request
block = xmit(site, creds, reqBlock, "post")
if 'error' in block:
reason = block['error']['code']
if reason == "badtoken":
bot.say("Received CSRF token error. Try again...")
elif reason == "alreadyblocked":
bot.say(target + " is already blocked. Use !reblock to change the current block.")
elif reason == "permissiondenied":
bot.say("Received permission denied error. Are you a sysop on " + project + "?")
else:
info = block['error']['info']
bot.say("Unhandled error: " + info)
elif 'block' in block:
user = block['block']['user']
expiry = block['block']['expiry']
reason = block['block']['reason']
bot.say(user + " was blocked until " + expiry + " with reason: " + reason)
else:
bot.say("Unknown error: " + block)
def doSoftblock(bot, name, project, target, until, reason):
creds = getCreds(name)
if creds is None:
bot.say(CONTACT_OP)
return
site = getWiki(project)
if site is None:
bot.say("I don't know that wiki.")
return
csrfToken = getCSRF(bot, site, creds, "csrf")
if csrfToken is False:
return
if until == "indef" or until == "forever":
until = "never"
reqBlock = {
"action": "block",
"user": target,
"expiry": until,
"reason": reason,
"token": csrfToken,
"allowusertalk":"",
"format": "json"
}
# Send block request
block = xmit(site, creds, reqBlock, "post")
if 'error' in block:
reason = block['error']['code']
if reason == "badtoken":
bot.say("Received CSRF token error. Try again...")
elif reason == "alreadyblocked":
bot.say(target + " is already blocked. Use !reblock to change the current block.")
elif reason == "permissiondenied":
bot.say("Received permission denied error. Are you a sysop on " + project + "?")
else:
info = block['error']['info']
bot.say("Unhandled error: " + info)
elif 'block' in block:
user = block['block']['user']
expiry = block['block']['expiry']
reason = block['block']['reason']
bot.say(user + " was blocked until " + expiry + " with reason: " + reason)
else:
bot.say("Unknown error: " + block)
def doUnblock(bot, name, project, target, reason):
creds = getCreds(name)
if creds is None:
bot.say(CONTACT_OP)
return
site = getWiki(project)
if site is None:
bot.say("I don't know that wiki.")
return
csrfToken = getCSRF(bot, site, creds, "csrf")
if csrfToken is False:
return
reqBlock = {
"action": "unblock",
"user": target,
"reason": reason,
"token": csrfToken,
"format": "json"
}
# Send block request
unblock = xmit(site, creds, reqBlock, "post")
if 'error' in unblock:
reason = unblock['error']['info']
bot.say(reason)
elif 'unblock' in unblock:
user = unblock['unblock']['user']
reason = unblock['unblock']['reason']
bot.say(user + " was unblocked with reason: " + reason)
else:
bot.say("Unhandled error: " + unblock)
def addUser(bot, name):
# Setup dbase connection
db = sqlite3.connect(SAM_DB)
c = db.cursor()
# Check for user already existing
check = c.execute('''SELECT * FROM auth WHERE account=?;''', (name,)).fetchall()
if len(check) != 0:
bot.say("User already exists!")
db.close()
return
else:
# Add new user to database
c.execute('''INSERT INTO auth VALUES(?, NULL, NULL, NULL, NULL);''', (name,))
db.commit()
db.close()
bot.say("User added.")
def remUser(bot, name):
# Setup dbase connection
db = sqlite3.connect(SAM_DB)
c = db.cursor()
# Check for user already existing
check = c.execute('''SELECT * FROM auth WHERE account=?;''', (name,)).fetchall()
if len(check) == 0:
bot.say("User does not exist!")
db.close()
else:
c.execute('''DELETE FROM auth WHERE account=?;''', (name,))
db.commit()
db.close()
bot.say("User deleted.")
def addKeys(bot, name, info):
# Setup dbase connection
db = sqlite3.connect(SAM_DB)
c = db.cursor()
try:
c_token, c_secret, a_token, a_secret = info.split(" ")
except Exception as e:
bot.say(str(e))
db.close()
return
check = c.execute('''SELECT * FROM auth WHERE account=?;''', (name,)).fetchall()
if len(check) == 0:
bot.say("You are not approved to add tokens. Contact Operator873.")
db.close()
return
else:
try:
c.execute('''UPDATE auth SET consumer_token=?, consumer_secret=?, access_token=?, access_secret=? WHERE account=?;''', (c_token, c_secret, a_token, a_secret, name))
bot.say("Keys added.")
except Exception as e:
bot.say(str(e))
finally:
db.commit()
db.close()
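# The queries above assume an auth table with five columns in this order (a sketch based on
# the INSERT/UPDATE statements in this file, not an authoritative schema):
#
#   CREATE TABLE auth (
#       account TEXT PRIMARY KEY,
#       consumer_token TEXT,
#       consumer_secret TEXT,
#       access_token TEXT,
#       access_secret TEXT
#   );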
def processinfo(info):
info = "a=" + info
l = re.split(r"(\w)=", info)[1:]
data = {l[i]: l[i+1] for i in range(0, len(l), 2)}
for key in data:
data[key] = data[key].strip()
if 'd' in data:
adjust = re.sub(r"([0-9]+([0-9]+)?)",r" \1 ", data['d'])
data['d'] = re.sub(' +', ' ', adjust).strip()
return data
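# Worked example of the parser above (illustrative input): calling
#   processinfo("Some Nick Here p=enwiki d=31hours r=block evasion")
# returns {'a': 'Some Nick Here', 'p': 'enwiki', 'd': '31 hours', 'r': 'block evasion'}:
# the leading free-form text becomes the 'a' (account) entry, and a space is inserted
# between the number and the unit of the duration.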
@module.commands('testblock')
@module.nickname_commands('testblock')
def commandtestBlock(bot, trigger):
# New syntax: !block Some Nick Here p=project d=duration r=reason
data = processinfo(trigger.group(2))
if len(data) < 4:
bot.say("Command missing arguements: !block <target account> p=project d=duration r=reason for block")
return
elif data['a'] == '':
bot.say("Target of block must go first or be indicated with 'a=target account'. !block <target account> p=project d=duration r=reason for block")
return
else:
try:
project = data['p']
target = data['a']
until = data['d']
reason = data['r']
except Exception as e:
bot.say("Error! " + str(e))
return
bot.say(target + " would be blocked on " + project + " for " + until + " with reason: " + reason)
@module.commands('block')
@module.nickname_commands('block')
def commandBlock(bot, trigger):
# New syntax: !block Some Nick Here p=project d=duration r=reason
data = processinfo(trigger.group(2))
if len(data) < 4:
bot.say("Command missing arguements: !block <target account> p=project d=duration r=reason for block")
return
elif data['a'] == '':
bot.say("Target of block must go first or be indicated with 'a=target account'. !block <target account> p=project d=duration r=reason for block")
return
else:
try:
project = data['p']
target = data['a']
until = data['d']
reason = data['r']
except Exception as e:
bot.say("Error! " + str(e))
return
doBlock(bot, trigger.account, project, target, until, reason)
@module.commands('lta')
@module.nickname_commands('lta')
def commandltablock(bot, trigger):
# New syntax: !lta Some Nick Here p=project
data = processinfo(trigger.group(2))
if len(data) < 2:
bot.say("Command missing arguements: !lta Some Nick Here p=project")
return
elif data['a'] == '':
bot.say("Target of block must go first or be indicated with 'a=target account'. !lta Some Nick Here p=project")
return
else:
try:
project = data['p']
target = data['a']
except Exception as e:
bot.say("Error! " + str(e))
return
doltaBlock(bot, trigger.account, project, target)
@module.commands('tpa')
@module.nickname_commands('tpa')
def commandRevoketpa(bot, trigger):
# New syntax: !tpa Some Nick Here p=project d=duration r=reason
data = processinfo(trigger.group(2))
if len(data) < 4:
bot.say("Command missing arguements: !tpa <target account> p=project d=duration r=reason for block")
return
elif data['a'] == '':
bot.say("Target of block must go first or be indicated with 'a=target account'. !tpa <target account> p=project d=duration r=reason for block")
return
else:
try:
project = data['p']
target = data['a']
until = data['d']
reason = data['r']
except Exception as e:
bot.say("Error! " + str(e))
return
dorevokeTPA(bot, trigger.account, project, target, until, reason)
@module.commands('reblock')
@module.nickname_commands('reblock')
def commandreBlock(bot, trigger):
# New syntax: !reblock Some Nick Here p=project d=duration r=reason
data = processinfo(trigger.group(2))
if len(data) < 4:
bot.say("Command missing arguements: !reblock <target account> p=project d=duration r=reason for block")
return
elif data['a'] == '':
bot.say("Target of block must go first or be indicated with 'a=target account'. !reblock <target account> p=project d=duration r=reason for block")
return
else:
try:
project = data['p']
target = data['a']
until = data['d']
reason = data['r']
except Exception as e:
bot.say("Error! " + str(e))
return
doReblock(bot, trigger.account, project, target, until, reason)
@module.commands('proxyblock')
@module.nickname_commands('proxyblock')
def commandproxyBlock(bot, trigger):
# New syntax: !proxyblock Some Nick Here p=project d=duration
data = processinfo(trigger.group(2))
if len(data) < 3:
bot.say("Command missing arguements: !proxyblock Some Nick Here p=project d=duration")
return
elif data['a'] == '':
bot.say("Target of block must go first or be indicated with 'a=target account'. !proxyblock Some Nick Here p=project d=duration")
return
else:
try:
project = data['p']
target = data['a']
until = data['d']
except Exception as e:
bot.say("Error! " + str(e))
return
reason = "[[m:NOP|Open proxy]]"
doReblock(bot, trigger.account, project, target, until, reason)
@module.commands('gblock')
@module.nickname_commands('gblock')
def commandglobalBlock(bot, trigger):
# New syntax: !gblock Some IP Here d=duration r=reason
data = processinfo(trigger.group(2))
if len(data) < 3:
bot.say("Command missing arguements: !gblock Some IP Here d=duration r=reason")
return
elif data['a'] == '':
bot.say("Target of block must go first or be indicated with 'a=target account'. !gblock Some | |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import datetime
import re
from . import utilities
from . import media
from .base import loadable
class MalformedAnimePageError(media.MalformedMediaPageError):
"""Indicates that an anime-related page on MAL has irreparably broken markup in some way.
"""
pass
class InvalidAnimeError(media.InvalidMediaError):
"""Indicates that the anime requested does not exist on MAL.
"""
pass
class Anime(media.Media):
"""Primary interface to anime resources on MAL.
"""
_status_terms = [
'Unknown',
'Currently Airing',
'Finished Airing',
'Not yet aired'
]
_consuming_verb = "watch"
def __init__(self, session, anime_id):
"""Creates a new instance of Anime.
:type session: :class:`myanimelist.session.Session`
:param session: A valid MAL session
:type anime_id: int
:param anime_id: The desired anime's ID on MAL
:raises: :class:`.InvalidAnimeError`
"""
if not isinstance(anime_id, int) or int(anime_id) < 1:
raise InvalidAnimeError(anime_id)
super(Anime, self).__init__(session, anime_id)
self._episodes = None
self._aired = None
self._producers = None
self._licensors = None
self._studios = None
self._duration = None
self._rating = None
self._voice_actors = None
self._staff = None
self._promotion_videos = None
self._broadcast = None
self._source = None
self._premiered = None
def parse_promotion_videos(self, media_page):
container = utilities.css_select_first("#content", media_page)
if container is None:
return None
result = []
video_tags = utilities.css_select("a.iframe", media_page)
for tag in video_tags:
embed_link = tag.get('href')
title_tag = tag.xpath("//div[@class='info-container']/span")
title = ""
if title_tag is not None and len(title_tag) > 0:
title = title_tag[0].text
result.append({"embed_link": embed_link, "title": title})
self._promotion_videos = result
return result
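# The parsed result is a list of dicts, one per promotion video, e.g. (illustrative;
# the link is a placeholder):
#   [{'embed_link': 'https://www.youtube.com/embed/...', 'title': 'PV 1'}]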
def parse_sidebar(self, anime_page):
"""Parses the DOM and returns anime attributes in the sidebar.
:type anime_page: :class:`lxml.html.HtmlElement`
:param anime_page: MAL anime page's DOM
:rtype: dict
:return: anime attributes
:raises: :class:`.InvalidAnimeError`, :class:`.MalformedAnimePageError`
"""
# if MAL says the series doesn't exist, raise an InvalidAnimeError.
if not self._validate_page(anime_page):
raise InvalidAnimeError(self.id)
title_tag = anime_page.xpath(".//div[@id='contentWrapper']//h1")
if len(title_tag) == 0:
raise MalformedAnimePageError(self.id, anime_page.text, message="Could not find title div")
anime_info = super(Anime, self).parse_sidebar(anime_page)
info_panel_first = None
try:
container = utilities.css_select("#content", anime_page)
if container is None:
raise MalformedAnimePageError(self.id, anime_page.text, message="Could not find the info table")
info_panel_first = container[0].find(".//table/tr/td")
temp = info_panel_first.xpath(".//div/span[text()[contains(.,'Episodes:')]]")
if len(temp) == 0:
raise Exception("Couldn't find episode tag.")
episode_tag = temp[0].getparent().xpath(".//text()")[-1]
anime_info['episodes'] = int(episode_tag.strip()) if episode_tag.strip() != 'Unknown' else 0
except:
if not self.session.suppress_parse_exceptions:
raise
try:
temp = info_panel_first.xpath(".//div/span[text()[contains(.,'Aired:')]]")
if len(temp) == 0:
raise Exception("Couldn't find aired tag.")
aired_tag = temp[0].getparent().xpath(".//text()")[2]
aired_parts = aired_tag.strip().split(' to ')
if len(aired_parts) == 1:
# this aired once.
try:
aired_date = utilities.parse_profile_date(aired_parts[0],
suppress=self.session.suppress_parse_exceptions)
except ValueError:
raise MalformedAnimePageError(self.id, aired_parts[0], message="Could not parse single air date")
anime_info['aired'] = (aired_date,)
else:
# two airing dates.
try:
air_start = utilities.parse_profile_date(aired_parts[0],
suppress=self.session.suppress_parse_exceptions)
except ValueError:
raise MalformedAnimePageError(self.id, aired_parts[0],
message="Could not parse first of two air dates")
try:
air_end = utilities.parse_profile_date(aired_parts[1],
suppress=self.session.suppress_parse_exceptions)
except ValueError:
raise MalformedAnimePageError(self.id, aired_parts[1],
message="Could not parse second of two air dates")
anime_info['aired'] = (air_start, air_end)
except:
if not self.session.suppress_parse_exceptions:
raise
try:
temp = info_panel_first.xpath(".//div/span[text()[contains(.,'Producers:')]]")
if len(temp) == 0:
raise Exception("Couldn't find producers tag.")
producers_tags = temp[0].getparent().xpath(".//a")
anime_info['producers'] = []
for producer_link in producers_tags:
if producer_link.text == 'add some':
# MAL is saying "None found, add some".
break
link_parts = producer_link.get('href').split('p=')
# of the form: /anime.php?p=14
if len(link_parts) > 1:
anime_info['producers'].append(
self.session.producer(int(link_parts[1])).set({'name': producer_link.text}))
else:
# of the form: /anime/producer/65
link_parts = producer_link.get('href').split('/')
anime_info['producers'].append(
self.session.producer(int(link_parts[-2])).set({"name": producer_link.text}))
except:
if not self.session.suppress_parse_exceptions:
raise
try:
temp = info_panel_first.xpath(".//div/span[text()[contains(.,'Licensors:')]]")
if len(temp) == 0:
raise Exception("Couldn't find licensors tag.")
licensors_tags = temp[0].getparent().xpath(".//a")
anime_info['licensors'] = []
for producer_link in licensors_tags:
if producer_link.text == 'add some':
# MAL is saying "None found, add some".
break
link_parts = producer_link.get('href').split('p=')
# of the form: /anime.php?p=14
if len(link_parts) > 1:
anime_info['licensors'].append(
self.session.producer(int(link_parts[1])).set({'name': producer_link.text}))
else:
# of the form: /anime/producer/65
link_parts = producer_link.get('href').split('/')
anime_info['licensors'].append(
self.session.producer(int(link_parts[-2])).set({"name": producer_link.text}))
except:
if not self.session.suppress_parse_exceptions:
raise
try:
temp = info_panel_first.xpath(".//div/span[text()[contains(.,'Studios:')]]")
if len(temp) == 0:
raise Exception("Couldn't find studios tag.")
studios_tags = temp[0].getparent().xpath(".//a")
anime_info['studios'] = []
for producer_link in studios_tags:
if producer_link.text == 'add some':
# MAL is saying "None found, add some".
break
link_parts = producer_link.get('href').split('p=')
# of the form: /anime.php?p=14
if len(link_parts) > 1:
anime_info['studios'].append(
self.session.producer(int(link_parts[1])).set({'name': producer_link.text}))
else:
# of the form: /anime/producer/65
link_parts = producer_link.get('href').split('/')
anime_info['studios'].append(
self.session.producer(int(link_parts[-2])).set({"name": producer_link.text}))
except:
if not self.session.suppress_parse_exceptions:
raise
try:
temp = info_panel_first.xpath(".//div/span[text()[contains(.,'Duration:')]]")
if len(temp) == 0:
raise Exception("Couldn't find duration tag.")
duration_tag = temp[0].xpath("../text()")[-1]
anime_info['duration'] = duration_tag.strip()
duration_parts = [part.strip() for part in anime_info['duration'].split('.')]
duration_mins = 0
for part in duration_parts:
part_match = re.match('(?P<num>[0-9]+)', part)
if not part_match:
continue
part_volume = int(part_match.group('num'))
if part.endswith('hr'):
duration_mins += part_volume * 60
elif part.endswith('min'):
duration_mins += part_volume
anime_info['duration'] = datetime.timedelta(minutes=duration_mins)
except:
if not self.session.suppress_parse_exceptions:
raise
try:
temp = info_panel_first.xpath(".//div/span[text()[contains(.,'Rating:')]]")
if len(temp) == 0:
raise Exception("Couldn't find duration tag.")
rating_tag = temp[0].xpath("../text()")[-1]
anime_info['rating'] = rating_tag.strip()
except:
if not self.session.suppress_parse_exceptions:
raise
# parse broadcasting times - note: the tests don't cover this bit, because it's dynamic data
# todo: figure out a way to cover this bit in the unit tests
try:
temp = info_panel_first.xpath(".//div/span[text()[contains(.,'Broadcast:')]]")
anime_info['broadcast'] = None
if len(temp) > 0:
broadcast_tag = temp[0].xpath("../text()")[-1].strip()
rex = re.compile(r"[a-zA-Z]+.[a-z]+.[0-9]{1,2}:[0-9]{1,2}.\([A-Z]+\)")
if broadcast_tag != "Unknown" and rex.match(broadcast_tag) is not None:
anime_info['broadcast'] = {}
parts = broadcast_tag.split(" at ")
time_parts = parts[-1].split(" ")
subtime_parts = time_parts[0].split(':')
anime_info['broadcast']['weekday'] = parts[0].rstrip('s')
anime_info['broadcast']['hour'] = int(subtime_parts[0])
anime_info['broadcast']['minute'] = int(subtime_parts[1])
anime_info['broadcast']['timezone'] = time_parts[-1].replace('(', '').replace(')', '')
except:
if not self.session.suppress_parse_exceptions:
raise
try:
temp = info_panel_first.xpath(".//div/span[text()[contains(.,'Source:')]]")
anime_info['source'] = ''
if len(temp) == 0:
raise Exception("Couldnt find source tag.")
source_tag = temp[0].xpath("../text()")[-1].strip()
if source_tag != "Unknown":
anime_info['source'] = source_tag
except:
if not self.session.suppress_parse_exceptions:
raise
try:
temp = info_panel_first.xpath(".//div/span[text()[contains(.,'Premiered:')]]")
anime_info['premiered'] = ''
if len(temp) > 0:
premiered_tag = "".join(temp[0].getparent().xpath(".//text()")).strip().replace('\n', '') \
.split(": ")[-1].rstrip()
anime_info['premiered'] = premiered_tag.strip()
except:
if not self.session.suppress_parse_exceptions:
raise
return anime_info
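# In addition to the keys set by media.Media.parse_sidebar, the dict returned above carries:
# 'episodes' (int), 'aired' (tuple of one or two dates), 'producers'/'licensors'/'studios'
# (lists of producer objects), 'duration' (datetime.timedelta), 'rating' (str),
# 'broadcast' (dict with weekday/hour/minute/timezone, or None), 'source' (str)
# and 'premiered' (str).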
def parse_characters(self, character_page):
"""Parses the DOM and returns anime character attributes in the sidebar.
:type character_page: :class:`lxml.html.HtmlElement`
:param character_page: MAL anime character page's DOM
:rtype: dict
:return: anime character attributes
:raises: :class:`.InvalidAnimeError`, :class:`.MalformedAnimePageError`
"""
anime_info = self.parse_sidebar(character_page)
try:
temp = character_page.xpath(".//h2[text()[contains(.,'Characters')]]/following-sibling::table[1]")
anime_info['characters'] = {}
anime_info['voice_actors'] = {}
if len(temp) != 0:
curr_elt = temp[0]
while curr_elt is not None:
if curr_elt.tag != 'table':
break
curr_row = curr_elt.find('.//tr')
temp = curr_row.findall("./td")
# we got to the staff part, todo: fix the sibling part. this is ugly
if len(temp) != 3:
break
(_, character_col, va_col) = temp
character_link = character_col.find('.//a')
character_name = ' '.join(reversed(character_link.text.split(', ')))
link_parts = character_link.get('href').split('/')
# of the form /character/7373/Holo
if "myanimelist.net" not in link_parts:
character_id = int(link_parts[2])
# or of the form https://myanimelist.net/character/7373/Holo
else:
character_id = int(link_parts[4])
character = self.session.character(character_id).set({'name': character_name})
role = character_col.find('.//small').text
character_entry = {'role': role, 'voice_actors': {}}
va_table = va_col.find('.//table')
if va_table is not None:
for row in va_table.findall("tr"):
va_info_cols = row.findall('td')
if not va_info_cols or len(va_info_cols) == 0:
# MAL sometimes includes an extra blank table row; skip it
continue
va_info_col = va_info_cols[0]
va_link = va_info_col.find('.//a')
if va_link is not None:
va_name = ' '.join(reversed(va_link.text.split(', ')))
link_parts = va_link.get('href').split('/')
# of the form /people/70/Ami_Koshimizu
if "myanimelist.net" not in link_parts:
person_id = int(link_parts[2])
# or of the form https://myanimelist.net/people/70/Ami_Koshimizu
else:
person_id = int(link_parts[4])
person = self.session.person(person_id).set({'name': va_name})
language = va_info_col.find('.//small').text
# one person can be voice actor for many characters
if person not in anime_info['voice_actors'].keys():
anime_info['voice_actors'][person] = []
anime_info['voice_actors'][person].append({'role': role, 'character': character,
'language': language})
character_entry['voice_actors'][person] = language
anime_info['characters'][character] = character_entry
temp = curr_elt.xpath("./following-sibling::table[1]")
if len(temp) != 0:
curr_elt = temp[0]
else:
curr_elt = None
except:
if not self.session.suppress_parse_exceptions:
raise
try:
item_tables = character_page.xpath(".//h2[text()[contains(.,'Staff')]]/following-sibling::table")
anime_info['staff'] = {}
if len(item_tables) != 0:
for staff_table in item_tables:
for row in staff_table.findall('.//tr'):
# staff info in second col.
info = row.find('./td[2]')
staff_link = info.find('.//a')
if staff_link is not None:
staff_name = ' '.join(reversed(staff_link.text.split(', ')))
link_parts = staff_link.get('href').split('/')
# of the form /people/1870/Miyazaki_Hayao
person = self.session.person(int(link_parts[-2])).set({'name': staff_name})
# staff role(s).
smallTag = info.find('.//small')
if smallTag is not None:
anime_info['staff'][person] = set(smallTag.text.split(', '))
except:
if not self.session.suppress_parse_exceptions:
raise
return anime_info
def load_videos(self):
"""Fetches the MAL media videos page and sets the current media's promotion videos attribute.
:rtype: :class:`.Anime`
:return: current media object.
"""
self.session.wait()
videos_page = self.session.session.get(
'https://myanimelist.net/' + self.__class__.__name__.lower() + '/' + str(
self.id) + '/' + utilities.urlencode(self.title) + '/video').text
self.set({'promotion_videos': self.parse_promotion_videos(utilities.get_clean_dom(videos_page))})
return self
@property
@loadable('load')
def episodes(self):
"""The number of episodes | |
# stitch/stitch.py
#!/usr/bin/env python3
# -*- coding: utf8 -*-
import os
import cv2 as cv
import numpy as np
from scipy import optimize as so
import pandas as pd
from tqdm import tqdm
import hough as ld
import lineutils as ut
from lineutils import r, t, x, y
def fst(x): return x[0]
def snd(x): return x[1]
def stitch(method, imagedir, image_height, cachefile, lInfRadius=50, reverse_rotation=False, output=None):
print('Reading Hough lines')
df = pd.read_csv(cachefile)
lines_per_file = {file: list(zip(group_df['rho'], group_df['theta']))
for file, group_df in df.groupby(by='file')}
line_files = [LineImage(os.path.join(imagedir, p), l)
for p, l
in sorted(lines_per_file.items(), key=fst)]
# dict holding final translation values
translations = {}
# pairs of images n and n+1
for current_image, next_image in tqdm(zip(line_files, line_files[1:]), total=len(line_files) - 1):
# find out which line in the current image
# corresponds to which line in the next image
# we call these pairs twins
current_image.init_twins(next_image)
# current lines
c_lines = current_image.lines
# next lines
n_lines = [current_image.twins[line]
if line in current_image.twins
else None
for line in c_lines]
# filter out entries where no twin was found
line_pairs = list(
filter(
lambda p: p[1] is not None,
zip(c_lines,
n_lines)
)
)
if method == 'analytical':
# we need two twins (that is, four lines) for each translation computation,
# so we generate all possible combinations of twins (pairs of twins)
count = len(line_pairs)
twin_combinations = [(line_pairs[l], line_pairs[r])
for l in range(0, count)
for r in range(l + 1, count)]
# translation values for each pair of twins
image_translations = []
# Naming conventions:
# Prefix c_ stands for C_urrent set of lines
# Prefix n_ stands for N_ext set of lines
# Postfix _l stands for _Left line
# Postfix _b stands for _Bisection line
# Postfix _r stands for _Right line
# x means x coord, y means y coord of foot point
# rho, theta are simply x, y in polar coords
# left twin right twin
for (c_l, n_l), (c_r, n_r) in twin_combinations:
# Compute bisecting lines
c_b = ut.get_bisecting_line(c_l, c_r)
n_b = ut.get_bisecting_line(n_l, n_r)
# We might need the original lines later on
c_b_backup, n_b_backup = c_b, n_b
# Move this distance to align bisec foot points
x_diff_b, y_diff_b = x(n_b) - x(c_b), y(n_b) - y(c_b)
# Use these four variables to track the overall vertical translation for each side
translate_x_l = x_diff_b
translate_x_r = x_diff_b
translate_y_l = y_diff_b
translate_y_r = y_diff_b
# Move current lines
c_l = ut.translate(c_l, x_diff_b, y_diff_b)
c_b = ut.translate(c_b, x_diff_b, y_diff_b)
c_r = ut.translate(c_r, x_diff_b, y_diff_b)
# Foot points should now be "equal" (deviate less than 1 pixel) for the bisecting lines
bft_x, bft_y = x(n_b), y(n_b) # = x(c_b), y(c_b)
# Rotate current lines and next lines
# such that the bisection lines are both vertical
c_rotate, n_rotate = -t(c_b), -t(n_b)
c_l = ut.rotate(c_l, c_rotate, bft_x, bft_y)
c_b = ut.rotate(c_b, c_rotate, bft_x, bft_y)
c_r = ut.rotate(c_r, c_rotate, bft_x, bft_y)
n_l = ut.rotate(n_l, n_rotate, bft_x, bft_y)
n_b = ut.rotate(n_b, n_rotate, bft_x, bft_y)
n_r = ut.rotate(n_r, n_rotate, bft_x, bft_y)
if reverse_rotation:
# Compute how far both current lines
# need to be translated in vertical direction
# to match both next lines
translate_l = ut.vertical_distance(n_l, c_l)
translate_r = ut.vertical_distance(n_r, c_r)
# As we rotated the lines earlier,
# vertical actually refers to parallel to the bisections,
# so we need to take them into account
# (We use the bisection of the bisections as a simplifying assumption)
vertical_direction = t(ut.get_bisecting_line(c_b_backup,
n_b_backup))
# Distribute vertical translations among both axes according to bisection of bisections
# (Note how we swapped sin and cos to account for the pi/2 angle of difference)
translate_x_l += translate_l * np.sin(vertical_direction)
translate_x_r += translate_r * np.sin(vertical_direction)
translate_y_l += translate_l * np.cos(vertical_direction)
translate_y_r += translate_r * np.cos(vertical_direction)
# Take the average over the left and right translations
translate_x = (translate_x_l + translate_x_r) / 2
translate_y = (translate_y_l + translate_y_r) / 2
else:
# We do not take into account that we rotated our lines earlier because experiments show that this produces worse results.
# Compute how far the current lines
# need to be translated in vertical direction
# to match the next lines
translate_y_l += ut.vertical_distance(n_l, c_l)
translate_y_r += ut.vertical_distance(n_r, c_r)
# The only time we translated horizontally was in the beginning, so we just copy that value
translate_x = translate_x_l # = translate_x_r
# Take the average over the left and right translations
translate_y = 0.5 * (translate_y_l + translate_y_r)
# Store our translation results for the twin combination
image_translations.append([translate_x, translate_y])
if len(image_translations) > 0:
# average over all values and round to pixel accuracy
translation = np.rint(
np.array(image_translations)
.mean(0)
).astype(int)
if lInfRadius > 0:
# Optimize result based on error function
translation = optimize_line_distances(
line_pairs, translation, image_height,
lInfRadius=lInfRadius
)
else:
translation = (0, 0)
print('WARNING:', 'Insufficient lines in analytical mode for image pair',
current_image, next_image)
else: # method == 'iterative'
res = so.minimize(
lambda t: compute_error(line_pairs, t, image_height),
(0, 0)
)
translation = tuple(map(int, map(round, res.x)))
# store reference image and translation value in result dict
key = os.path.basename(current_image.img_path)
ref = os.path.basename(next_image.img_path)
translations[key] = (ref, translation)
paths = list(sorted(translations.keys()))
refs = [translations[p][0] for p in paths]
xs = [translations[p][1][0] for p in paths]
ys = [translations[p][1][1] for p in paths]
df = pd.DataFrame({'ref': refs, 'x': xs, 'y': ys}, index=paths)
if output is not None:
print('Done. Writing result to', output)
df.to_csv(output)
else:
print('Done. Result:')
print(df)
print('Result not written to disk as output file was not specified.')
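# Expected I/O of stitch(), inferred from the code above:
# - cachefile is a CSV with at least the columns file, rho, theta (one Hough line per row).
# - output, if given, receives a CSV indexed by image filename with columns ref, x, y,
#   where (x, y) is the integer translation from each image to its reference image ref.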
def optimize_line_distances(line_pairs, translation, image_height, lInfRadius=50):
tx, ty = translation
# move origin of lines
# so that we can also compute the error function
# based on the bottom border of the image
line_pairs_bottom = [(ut.move_origin(c, y=image_height),
ut.move_origin(n, y=image_height))
for c, n in line_pairs]
# create surrounding area around target translation value
attempts = [(x, y)
for x in range(tx - lInfRadius, tx + lInfRadius + 1)
for y in range(ty - lInfRadius, ty + lInfRadius + 1)]
errors = [(translation,
compute_error(line_pairs, translation, image_height))
for translation in attempts]
return min(errors, key=snd)[0]
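# Note: the exhaustive search above evaluates (2 * lInfRadius + 1) ** 2 candidate
# translations around the analytical estimate, e.g. 101 * 101 = 10201 error evaluations
# for the default lInfRadius of 50.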
def compute_error(line_pairs, translation, image_height):
"""
Takes a list of line pairs (current lines and next lines)
as well as a translation (x, y)
and returns the sum of squared distances of the lines' roots
at both the top and the bottom border of the image.
"""
tx, ty = translation
# [(current at origin of next, next)]
translated_lines = ((ut.move_origin(c, x=tx, y=ty),
n)
for c, n in line_pairs)
# [(current, next, current at bottom border, next at bottom border)]
border_lines = ((c,
n,
ut.move_origin(c, y=image_height),
ut.move_origin(n, y=image_height))
for c, n in translated_lines)
# [(distance current <-> next, ditto at bottom border)]
deviations = ((ut.root(n) - ut.root(c), # top error
ut.root(nb) - ut.root(cb)) # bottom error
for c, n, cb, nb in border_lines)
squared_error = (x * x + y * y for x, y in deviations)
return sum(squared_error)
class LineImage:
"""
`LineImage`s contain an image path and a list of Hough lines with it.
Hough lines will automatically be normalized upon instantiation
as specified by `lineutils.normalize(line)`.
"""
def __init__(self, img_path, lines=[]):
self.img_path = img_path
self.lines = [ut.normalize(l) for l in lines]
self.twins = {}
def init_twins(self, image):
"""
Takes an image and matches `self.lines` with image.lines to generate
pairs of closest lines. Result will be stored in `self.twins` property.
"""
# TODO: find metric that works more generically, create clusters with two elements each
# print('Finding neighbors for', len(
# self.lines), 'lines in', self.img_path)
for line in self.lines:
# print('Finding neighbor for', line, 'in', image.lines)
# Take lines that are similar and sort them by rho distance
neighbors = list(
sorted(
filter(
lambda l: ld.ut.are_lines_similar(l, line),
image.lines
),
# similarity heuristic: compare distances of foot points from origin
key=lambda l: abs(r(line) - r(l))
)
)
if(len(neighbors) > 0):
twin = neighbors[0]
if(len(neighbors) > 1):
print('WARNING: Ignoring other similar line(s) of',
ut.eq(line), 'besides', ut.eq(twin) + '!', '(', self.img_path, ')')
print([ut.eq(l) for l in neighbors[1:]])
self.twins[line] = twin
"""
Classes and functions to deal with hexagonal grids.
This module assumes that the hexagonal grid is aligned with the x-axis.
If you need it to be aligned with the y-axis instead, you will have to
swap x and y coordinates everywhere.
"""
from collections import namedtuple
from heapq import heappush, heappop
import operator
import math
import random
class InvalidHex(ValueError):
pass
class Hex(namedtuple("Hex", "x y")):
"A single hexagon in a hexagonal grid."""
_neighbours = ((2, 0), (1, 1), (-1, 1), (-2, 0), (-1, -1), (1, -1))
# E SE SW W NW NE
def __new__(cls, x, y):
if (x + y) % 2 != 0:
raise InvalidHex("x and y coordinate must sum to an even number")
return super().__new__(cls, x, y)
def neighbours(self):
"""Return the 6 direct neighbours of this hex."""
x, y = self
return [Hex(x+dx, y+dy) for dx, dy in self._neighbours]
def random_neighbour(self, random=random):
"""Return a random neighbour of this hexagon."""
x, y = self
dx, dy = random.choice(self._neighbours)
return Hex(x+dx, y+dy)
def right_neighbour(self, random=random):
"""Return a random neighbour of this hexagon."""
x, y = self
dx, dy = self._neighbours[0]
return Hex(x+dx, y+dy)
def down_right_neighbour(self, random=random):
"""Return a random neighbour of this hexagon."""
x, y = self
dx, dy = self._neighbours[1]
return Hex(x+dx, y+dy)
def down_left_neighbour(self, random=random):
"""Return a random neighbour of this hexagon."""
x, y = self
dx, dy = self._neighbours[2]
return Hex(x+dx, y+dy)
def left_neighbour(self, random=random):
"""Return a random neighbour of this hexagon."""
x, y = self
dx, dy = self._neighbours[3]
return Hex(x+dx, y+dy)
def up_left_neighbour(self, random=random):
"""Return a random neighbour of this hexagon."""
x, y = self
dx, dy = self._neighbours[4]
return Hex(x+dx, y+dy)
def up_right_neighbour(self, random=random):
"""Return a random neighbour of this hexagon."""
x, y = self
dx, dy = self._neighbours[5]
return Hex(x+dx, y+dy)
def random_walk(self, N, random=random):
"""Yield random walk of length N.
Returns a generator of length N+1 since it includes the start point.
"""
position = self
yield position
for i in range(N):
position = position.random_neighbour(random)
yield position
def square_grid(self, M, N):
"""Yield square walk of length N*M.
Returns a generator of length N*M.
"""
position = self
yield position
#for i in range(int(M/2)):
while M>0:
yield position
for j in range(N-1):
position = position.right_neighbour()
yield position
M=M-1
if M==0:
return
position = position.down_right_neighbour()
yield position
for j in range(N-1):
position = position.left_neighbour()
yield position
position = position.down_left_neighbour()
M=M-1
def __add__(self, other):
x1, y1 = self
x2, y2 = other
return Hex(x1+x2, y1+y2)
def __sub__(self, other):
x1, y1 = self
x2, y2 = other
return Hex(x1-x2, y1-y2)
def __neg__(self):
x, y = self
return Hex(-x, -y)
def distance(self, other):
"""Distance in number of hexagon steps.
Direct neighbours of this hex have distance 1.
"""
x1, y1 = self
x2, y2 = other
dx = abs(x1 - x2)
dy = abs(y1 - y2)
return dy + max(0, (dx - dy)//2)
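# Worked example: Hex(0, 0).distance(Hex(3, 1)) == 1 + max(0, (3 - 1) // 2) == 2,
# which matches the two steps (1, 1) followed by (2, 0).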
def rotate_left(self):
"""Given a hex return the hex when rotated 60° counter-clock-wise around the origin.
"""
x, y = self
return Hex((x - 3 * y) >> 1, (x + y) >> 1)
def rotate_right(self):
"""Given a hex return the hex when rotated 60° clock-wise around the origin.
"""
x, y = self
return Hex((x + 3 * y) >> 1, (y - x) >> 1)
def field_of_view(self, transparent, max_distance, visible=None):
"""Calculate field-of-view.
transparent -- from a Hex to a boolean, indicating of the Hex is transparent
max_distance -- maximum distance you can view
visible -- if provided, should be a dict which will be filled and returned
Returns a dict which has as its keys the hexagons which are visible.
The value is a bitmask which indicates which sides of the hexagon are visible.
The bitmask is useful if you want to use this function also to compute light sources.
view_set = player_pos.field_of_view(...)
light_set = light_source.field_of_view(...)
# Is pos visible?
if view_set.get(pos, 0) & light_set.get(pos, 0):
# yes it is
"""
if visible is None:
visible = {}
visible[self] = all_directions
for direction in range(6):
_fovtree._field_of_view(self, direction, transparent, max_distance, visible)
return visible
def find_path(self, destination, passable, cost=lambda pos: 1):
"""Perform path-finding.
self -- Starting position for path finding.
destination -- Destination position for path finding.
passable -- Function of one position, returning True if we can move through this hex.
cost -- cost function for moving through a hex. Should return a value ≥ 1. By default all costs are 1.
"""
pathfinder = HexPathFinder(self, destination, passable, cost)
pathfinder.run()
return pathfinder.path
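# Illustrative usage (HexPathFinder is defined elsewhere in this module; the hexes and the
# always-True passable function are arbitrary):
#   path = Hex(0, 0).find_path(Hex(6, 2), passable=lambda h: True)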
all_directions = (1 << 6) - 1
origin = Hex(0, 0)
Hex.rotations = (
lambda x: x,
operator.methodcaller("rotate_left"),
lambda x: -x.rotate_right(),
operator.neg,
lambda x: -x.rotate_left(),
operator.methodcaller("rotate_right")
)
class _FovTree:
_corners = ((0, -2), (1, -1), (1, 1), (0, 2))
_neighbours = (Hex(1, -1), Hex(2, 0), Hex(1, 1))
_cached_successors = None
def __init__(self, hexagon, direction, angle1, angle2):
self.hexagon = hexagon
self.angle1 = angle1
self.angle2 = angle2
self.direction = direction
self.hexagons = [rot(hexagon) for rot in Hex.rotations]
self.distance = hexagon.distance(origin)
def get_angle(self, corner):
cx, cy = corner
x, y = self.hexagon
return (3*y + cy)/float(x + cx)
def _field_of_view(self, offset, direction, transparent, max_distance, visible):
if self.distance > max_distance:
return
hexagon = offset + self.hexagons[direction]
if transparent(hexagon):
visible[hexagon] = all_directions
for succ in self.successors():
succ._field_of_view(offset, direction, transparent, max_distance, visible)
else:
directions = 1 << ((self.direction + direction) % 6)
visible[hexagon] = directions | visible.get(hexagon, 0)
def successors(self):
_cached_successors = self._cached_successors
if _cached_successors is None:
_cached_successors = []
angles = [self.get_angle(c) for c in self._corners]
hexagon = self.hexagon
for i in range(3):
c1 = max(self.angle1, angles[i])
c2 = min(self.angle2, angles[i+1])
if c1 < c2:
nb = self._neighbours[i]
_cached_successors.append(_FovTree(hexagon + nb, (i-1) % 6, c1, c2))
self._cached_successors = _cached_successors
return _cached_successors
_fovtree = _FovTree(Hex(2, 0), 0, -1.0, 1.0)
class Rectangle(namedtuple("Rectangle", "x y width height")):
"""Represents a rectangle.
x, y -- position of lower-left corner
width -- width of rectangle
height -- height of rectangle
"""
pass
def _tiled_range(lo, hi, tile_size):
return range(lo // tile_size, (hi + tile_size - 1) // tile_size)
def _make_range(x, width, bloat, grid_size):
return _tiled_range(x + grid_size - 1 - bloat, x + width + bloat, grid_size)
class HexGrid(namedtuple("HexGrid", "width height")):
"""Represents the dimensions of a hex grid as painted on the screen.
The hex grid is assumed to be aligned horizontally, like so:
/ \ / \ / \
| | | |
\ / \ / \ /
The center of hex (0, 0) is assumed to be on pixel (0, 0).
The hexgrid is determined by width and height, which are the screen coordinates
of the upper-right corner of the central hex.
To have equilateral hexes, width:height should be approximately √3 : 1.
If you only pass in width to the constructor, the height is computed to be
an integer as close as possible to width / √3 .
"""
_hex_factor = math.sqrt(1.0/3.0)
_corners = ((1, 1), (0, 2), (-1, 1), (-1, -1), (0, -2), (1, -1))
def __new__(cls, width, height=None):
if height is None:
height = round(cls._hex_factor * width)
return super().__new__(cls, width, height)
def corners(self, hex):
"""Get the 6 corners (in pixel coordinates) of the hex."""
width, height = self
x0, y0 = hex
y0 *= 3
return [(width * (x + x0), height * (y + y0)) for x, y in self._corners]
def center(self, hex):
"""Get the center (as (x, y) tuple) of a hexagon."""
width, height = self
x, y = hex
return (x*width, 3*height*y)
def bounding_box(self, hex):
"""Get the bounding box (as a Rectangle) of a hexagon."""
width, height = self
xc, yc = self.center(hex)
return Rectangle(xc - width, yc - 2*height, 2*width, 4*height)
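# Worked example: with grid = HexGrid(16, 9), grid.center(Hex(2, 0)) == (32, 0) and
# grid.bounding_box(Hex(2, 0)) == Rectangle(x=16, y=-18, width=32, height=36).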
def hex_at_coordinate(self, x, y):
"""Given pixel coordinates x and y, get the hexagon under it."""
width, height = self
x0 = x // width
δx = x % width
y0 = y // (3 * height)
δy = y % (3 * height)
if (x0 + y0) % 2 == 0:
if width *
for key, kind, val in self.ivarsData:
self.ivarsDict[self.munge(key)] = g.GeneralSetting(
kind, ivar=key, val=val, tag='ivars')
for key, kind, val in self.encodingIvarsData:
self.encodingIvarsDict[self.munge(key)] = g.GeneralSetting(
kind, encoding=val, ivar=key, tag='encoding')
#@+node:ekr.20041117065611.2: *4* gcm.initIvarsFromSettings & helpers
def initIvarsFromSettings(self):
for ivar in sorted(list(self.encodingIvarsDict.keys())):
self.initEncoding(ivar)
for ivar in sorted(list(self.ivarsDict.keys())):
self.initIvar(ivar)
#@+node:ekr.20041117065611.1: *5* initEncoding
def initEncoding(self, key):
"""Init g.app.config encoding ivars during initialization."""
# Important: The key is munged.
gs = self.encodingIvarsDict.get(key)
setattr(self, gs.ivar, gs.encoding)
if gs.encoding and not g.isValidEncoding(gs.encoding):
g.es('g.app.config: bad encoding:', f"{gs.ivar}: {gs.encoding}")
#@+node:ekr.20041117065611: *5* initIvar
def initIvar(self, key):
"""
Init g.app.config ivars during initialization.
This does NOT init the corresponding commander ivars.
Such initing must be done in setIvarsFromSettings.
"""
# Important: the key is munged.
d = self.ivarsDict
gs = d.get(key)
setattr(self, gs.ivar, gs.val)
#@+node:ekr.20041117083202.2: *4* gcm.initRecentFiles
def initRecentFiles(self):
self.recentFiles = []
#@+node:ekr.20041228042224: *4* gcm.setIvarsFromSettings
def setIvarsFromSettings(self, c):
"""
Init g.app.config ivars or c's ivars from settings.
- Called from c.initSettings with c = None to init g.app.config ivars.
- Called from c.initSettings to init corresponding commander ivars.
"""
if g.app.loadedThemes:
return
if not self.inited:
return
# Ignore temporary commanders created by readSettingsFiles.
d = self.ivarsDict
keys = list(d.keys())
keys.sort()
for key in keys:
gs = d.get(key)
if gs:
assert isinstance(gs, g.GeneralSetting)
ivar = gs.ivar # The actual name of the ivar.
kind = gs.kind
if c:
val = c.config.get(key, kind)
else:
val = self.get(key, kind) # Don't use bunch.val!
if c:
setattr(c, ivar, val)
if True: # Always set the global ivars.
setattr(self, ivar, val)
#@+node:ekr.20041117081009: *3* gcm.Getters...
#@+node:ekr.20041123070429: *4* gcm.canonicalizeSettingName (munge)
def canonicalizeSettingName(self, name):
if name is None:
return None
name = name.lower()
for ch in ('-', '_', ' ', '\n'):
name = name.replace(ch, '')
return name if name else None
munge = canonicalizeSettingName
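# Example: self.munge('Page-Width'), self.munge('page width') and self.munge('PAGE_WIDTH')
# all return 'pagewidth', which makes setting lookups insensitive to case, dashes,
# underscores and spaces.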
#@+node:ekr.20051011105014: *4* gcm.exists
def exists(self, setting, kind):
"""Return true if a setting of the given kind exists, even if it is None."""
lm = g.app.loadManager
d = lm.globalSettingsDict
if d:
junk, found = self.getValFromDict(d, setting, kind)
return found
return False
#@+node:ekr.20041117083141: *4* gcm.get & allies
def get(self, setting, kind):
"""Get the setting and make sure its type matches the expected type."""
lm = g.app.loadManager
#
# It *is* valid to call this method: it returns the global settings.
d = lm.globalSettingsDict
if d:
assert isinstance(d, g.TypedDict), repr(d)
val, junk = self.getValFromDict(d, setting, kind)
return val
return None
#@+node:ekr.20041121143823: *5* gcm.getValFromDict
def getValFromDict(self, d, setting, requestedType, warn=True):
"""
Look up the setting in d. If warn is True, warn if the requested type
does not (loosely) match the actual type.
returns (val,exists)
"""
gs = d.get(self.munge(setting))
if not gs:
return None, False
assert isinstance(gs, g.GeneralSetting), repr(gs)
val = gs.val
isNone = val in ('None', 'none', '')
if not self.typesMatch(gs.kind, requestedType):
# New in 4.4: make sure the types match.
# A serious warning: one setting may have destroyed another!
# Important: this is not a complete test of conflicting settings:
# The warning is given only if the code tries to access the setting.
if warn:
g.error('warning: ignoring', gs.kind, '', setting, 'is not', requestedType)
g.error('there may be conflicting settings!')
return None, False
if isNone:
return '', True
# 2011/10/24: Exists, a *user-defined* empty value.
return val, True
#@+node:ekr.20051015093141: *5* gcm.typesMatch
def typesMatch(self, type1, type2):
"""
Return True if type1, the actual type, matches type2, the requested type.
The following equivalences are allowed:
- None matches anything.
- An actual type of string or strings matches anything *except* shortcuts.
- Shortcut matches shortcuts.
"""
# The shortcuts logic no longer uses the get/set code.
shortcuts = ('shortcut', 'shortcuts',)
if type1 in shortcuts or type2 in shortcuts:
g.trace('oops: type in shortcuts')
return (
type1 is None or type2 is None or
type1.startswith('string') and type2 not in shortcuts or
type1 == 'int' and type2 == 'size' or
(type1 in shortcuts and type2 in shortcuts) or
type1 == type2
)
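# Examples of the equivalences above: self.typesMatch('string', 'directory') is True
# (an actual string setting satisfies any non-shortcut request), self.typesMatch('int', 'size')
# is True, and self.typesMatch('color', 'ratio') is False.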
#@+node:ekr.20060608224112: *4* gcm.getAbbrevDict
def getAbbrevDict(self):
"""Search all dictionaries for the setting & check it's type"""
d = self.get('abbrev', 'abbrev')
return d or {}
#@+node:ekr.20041117081009.3: *4* gcm.getBool
def getBool(self, setting, default=None):
"""Return the value of @bool setting, or the default if the setting is not found."""
val = self.get(setting, "bool")
if val in (True, False):
return val
return default
#@+node:ekr.20070926082018: *4* gcm.getButtons
def getButtons(self):
"""Return a list of tuples (x,y) for common @button nodes."""
return g.app.config.atCommonButtonsList
#@+node:ekr.20041122070339: *4* gcm.getColor
def getColor(self, setting):
"""Return the value of @color setting."""
col = self.get(setting, "color")
while col and col.startswith('@'):
col = self.get(col[1:], "color")
return col
#@+node:ekr.20080312071248.7: *4* gcm.getCommonCommands
def getCommonAtCommands(self):
"""Return the list of tuples (headline,script) for common @command nodes."""
return g.app.config.atCommonCommandsList
#@+node:ekr.20071214140900.1: *4* gcm.getData & getOutlineData
def getData(self, setting, strip_comments=True, strip_data=True):
"""Return a list of non-comment strings in the body text of @data setting."""
data = self.get(setting, "data")
# New in Leo 4.12.1: add two keyword arguments, with legacy defaults.
if data and strip_comments:
data = [z for z in data if not z.strip().startswith('#')]
if data and strip_data:
data = [z.strip() for z in data if z.strip()]
return data
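    # Sketch of the two keyword arguments, for a hypothetical @data body
    # ['# header', '  alpha  ', '', 'beta']: strip_comments drops '# header',
    # and strip_data strips whitespace and drops blank lines, leaving
    # ['alpha', 'beta'].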
def getOutlineData(self, setting):
"""Return the pastable (xml text) of the entire @outline-data tree."""
return self.get(setting, "outlinedata")
#@+node:ekr.20041117093009.1: *4* gcm.getDirectory
def getDirectory(self, setting):
"""Return the value of @directory setting, or None if the directory does not exist."""
# Fix https://bugs.launchpad.net/leo-editor/+bug/1173763
theDir = self.get(setting, 'directory')
if g.os_path_exists(theDir) and g.os_path_isdir(theDir):
return theDir
return None
#@+node:ekr.20070224075914.1: *4* gcm.getEnabledPlugins
def getEnabledPlugins(self):
"""Return the body text of the @enabled-plugins node."""
return g.app.config.enabledPluginsString
#@+node:ekr.20041117082135: *4* gcm.getFloat
def getFloat(self, setting):
"""Return the value of @float setting."""
val = self.get(setting, "float")
try:
val = float(val)
return val
except TypeError:
return None
#@+node:ekr.20041117062717.13: *4* gcm.getFontFromParams
def getFontFromParams(self, family, size, slant, weight, defaultSize=12):
"""Compute a font from font parameters.
        Arguments are the names of settings to be used.
Default to size=12, slant="roman", weight="normal".
Return None if there is no family setting so we can use system default fonts."""
family = self.get(family, "family")
if family in (None, ""):
family = self.defaultFontFamily
size = self.get(size, "size")
if size in (None, 0): size = defaultSize
slant = self.get(slant, "slant")
if slant in (None, ""): slant = "roman"
weight = self.get(weight, "weight")
if weight in (None, ""): weight = "normal"
return g.app.gui.getFontFromParams(family, size, slant, weight)
#@+node:ekr.20041117081513: *4* gcm.getInt
def getInt(self, setting):
"""Return the value of @int setting."""
val = self.get(setting, "int")
try:
val = int(val)
return val
except TypeError:
return None
#@+node:ekr.20041117093009.2: *4* gcm.getLanguage
def getLanguage(self, setting):
"""Return the setting whose value should be a language known to Leo."""
language = self.getString(setting)
return language
#@+node:ekr.20070926070412: *4* gcm.getMenusList
def getMenusList(self):
"""Return the list of entries for the @menus tree."""
aList = self.get('menus', 'menus')
# aList is typically empty.
return aList or g.app.config.menusList
#@+node:ekr.20070411101643: *4* gcm.getOpenWith
def getOpenWith(self):
"""Return a list of dictionaries corresponding to @openwith nodes."""
val = self.get('openwithtable', 'openwithtable')
return val
#@+node:ekr.20041122070752: *4* gcm.getRatio
def getRatio(self, setting):
"""Return the value of @float setting.
Warn if the value is less than 0.0 or greater than 1.0."""
val = self.get(setting, "ratio")
try:
val = float(val)
if 0.0 <= val <= 1.0:
return val
except TypeError:
pass
return None
#@+node:ekr.20041117062717.11: *4* gcm.getRecentFiles
def getRecentFiles(self):
"""Return the list of recently opened files."""
return self.recentFiles
#@+node:ekr.20041117081009.4: *4* gcm.getString
def getString(self, setting):
"""Return the value of @string setting."""
return self.get(setting, "string")
#@+node:ekr.20120222103014.10314: *3* gcm.config_iter
def config_iter(self, c):
"""Letters:
leoSettings.leo
D default settings
F loaded .leo File
M myLeoSettings.leo
@ @button, @command, @mode.
"""
lm = g.app.loadManager
d = c.config.settingsDict if c else lm.globalSettingsDict
limit = c.config.getInt('print-settings-at-data-limit')
if limit is None:
            limit = 20 # A reasonable default.
# pylint: disable=len-as-condition
for key in sorted(list(d.keys())):
gs = d.get(key)
assert isinstance(gs, g.GeneralSetting), repr(gs)
if gs and gs.kind:
letter = lm.computeBindingLetter(c, gs.path)
val = gs.val
if gs.kind == 'data':
# #748: Remove comments
aList = [' '*8 + z.rstrip() for z in val
if z.strip() and not z.strip().startswith('#')]
if not aList:
val = '[]'
elif limit == 0 or len(aList) < limit:
val | |
strings containing aligned sequences
Returns
-------
numpy.array
2D array containing sequence alignment
(first axis: sequences, second axis: columns)
"""
if len(sequences) == 0:
raise ValueError("Need at least one sequence")
N = len(sequences)
L = len(next(iter(sequences)))
    matrix = np.empty((N, L), dtype=str)  # builtin str (np.str was removed in NumPy 1.24)
for i, seq in enumerate(sequences):
if len(seq) != L:
raise ValueError(
"Sequences have differing lengths: i={} L_0={} L_i={}".format(
i, L, len(seq)
)
)
matrix[i] = np.array(list(seq))
return matrix
def map_from_alphabet(alphabet=ALPHABET_PROTEIN, default=GAP):
"""
Creates a mapping dictionary from a given alphabet.
Parameters
----------
alphabet : str
Alphabet for remapping. Elements will
be remapped according to alphabet starting
from 0
default : Elements in matrix that are not
contained in alphabet will be treated as
this character
    Returns
    -------
    defaultdict
        Mapping from character to integer index; characters that are not
        in the alphabet map to the index of `default`
    Raises
    ------
    ValueError
        For invalid default character
    """
map_ = {
c: i for i, c in enumerate(alphabet)
}
try:
default = map_[default]
except KeyError:
raise ValueError(
"Default {} is not in alphabet {}".format(default, alphabet)
)
return defaultdict(lambda: default, map_)
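# Illustrative sketch, assuming the protein alphabet starts with the gap
# character and GAP == "-":
#   map_ = map_from_alphabet("-ACDEFGHIKLMNPQRSTVWY")
#   map_["-"] == 0, map_["A"] == 1, and an unknown symbol such as "X"
#   falls back to the index of the default (gap) character.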
def map_matrix(matrix, map_):
"""
Map elements in a numpy array using alphabet
Parameters
----------
matrix : np.array
Matrix that should be remapped
map_ : defaultdict
Map that will be applied to matrix elements
Returns
-------
np.array
Remapped matrix
"""
return np.vectorize(map_.__getitem__)(matrix)
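# Illustrative sketch (assumes GAP == "-"):
#   m = map_from_alphabet("-AC")
#   map_matrix(np.array([["A", "C"], ["-", "X"]]), m)  ->  array([[1, 2], [0, 0]])
# ("X" is not in the alphabet, so it maps to the default/gap index 0.)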
class Alignment:
"""
Container to store and manipulate multiple sequence alignments.
.. note::
Important:
1. Sequence annotation currently is not transformed when
selecting subsets of columns or positions (e.g. affects GR and GC
lines in Stockholm alignments)
2. Sequence ranges in IDs are not adjusted when selecting
subsets of positions
"""
def __init__(self, sequence_matrix, sequence_ids=None, annotation=None,
alphabet=ALPHABET_PROTEIN):
"""
Create new alignment object from ready-made components.
.. note::
Use factory method Alignment.from_file to create alignment from file,
or Alignment.from_dict from dictionary of sequences.
Parameters
----------
sequence_matrix : np.array
N x L array of characters in the alignment
(N=number of sequences, L=width of alignment)
sequence_ids : list-like, optional (default=None)
Sequence names of alignment members (must have N elements).
If None, defaults sequence IDs to "0", "1", ...
annotation : dict-like
Annotation for sequence alignment
Raises
------
ValueError
If dimensions of sequence_matrix and sequence_ids
are inconsistent
"""
self.matrix = np.array(sequence_matrix)
self.N, self.L = self.matrix.shape
# characters coding for gaps in match-state and insert
# columns of the alignment
self._match_gap = MATCH_GAP
self._insert_gap = INSERT_GAP
# defined alphabet of alignment
self.alphabet = alphabet
self.alphabet_default = self._match_gap
self.alphabet_map = map_from_alphabet(
self.alphabet, default=self.alphabet_default
)
self.num_symbols = len(self.alphabet_map)
        # Alignment matrix remapped into integers
# Will only be calculated if necessary for downstream
# calculations
self.matrix_mapped = None
self.num_cluster_members = None
self.weights = None
self._frequencies = None
self._pair_frequencies = None
if sequence_ids is None:
# default to numbering sequences if not given
self.ids = [str(i) for i in range(self.N)]
else:
if len(sequence_ids) != self.N:
raise ValueError(
"Number of sequence IDs and length of "
"alignment do not match".format(
len(sequence_ids), self.L
)
)
# make sure we get rid of iterators etc.
self.ids = np.array(list(sequence_ids))
self.id_to_index = {
id_: i for i, id_ in enumerate(self.ids)
}
if annotation is not None:
self.annotation = annotation
else:
self.annotation = {}
@classmethod
def from_dict(cls, sequences, **kwargs):
"""
Construct an alignment object from a dictionary
with sequence IDs as keys and aligned sequences
as values.
Parameters
----------
sequences : dict-like
Dictionary with pairs of sequence ID (key) and
aligned sequence (value)
Returns
-------
Alignment
initialized alignment
"""
matrix = sequences_to_matrix(sequences.values())
return cls(
matrix, sequences.keys(), **kwargs
)
@classmethod
def from_file(cls, fileobj, format="fasta",
a3m_inserts="first", **kwargs):
"""
Construct an alignment object by reading in an
alignment file.
Parameters
----------
fileobj : file-like obj
Alignment to be read in
format : {"fasta", "stockholm", "a3m"}
Format of input alignment
a3m_inserts : {"first", "delete"}, optional (default: "first")
Strategy to deal with inserts in a3m alignment files
(see read_a3m documentation for details)
Returns
-------
Alignment
Parsed alignment
Raises
------
ValueError
For invalid alignments or alignment formats
"""
annotation = {}
# read in sequence alignment from file
if format == "fasta":
seqs = OrderedDict()
for seq_id, seq in read_fasta(fileobj):
seqs[seq_id] = seq
elif format == "stockholm":
# only reads first Stockholm alignment contained in file
ali = next(read_stockholm(fileobj, read_annotation=True))
seqs = ali.seqs
annotation["GF"] = ali.gf
annotation["GC"] = ali.gc
annotation["GS"] = ali.gs
annotation["GR"] = ali.gr
kwargs["annotation"] = annotation
elif format == "a3m":
seqs = read_a3m(fileobj, inserts=a3m_inserts)
else:
raise ValueError("Invalid alignment format: {}".format(format))
return cls.from_dict(seqs, **kwargs)
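    # Hypothetical usage sketch (the file name and format are assumptions):
    #   with open("family.fasta") as f:
    #       aln = Alignment.from_file(f, format="fasta")
    #   aln.N, aln.L  # number of sequences, width of the alignment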
def __getitem__(self, index):
"""
.. todo::
eventually this should allow fancy indexing and offer the functionality of select()
"""
if index in self.id_to_index:
return self.matrix[self.id_to_index[index], :]
elif index in range(self.N):
return self.matrix[index, :]
else:
raise KeyError(
"Not a valid index for sequence alignment: {}".format(index)
)
def __len__(self):
return self.N
def count(self, char, axis="pos", normalize=True):
"""
Count occurrences of a character in the sequence
alignment.
.. note::
The counts are raw counts not adjusted for
sequence redundancy.
Parameters
----------
char : str
Character which is counted
axis : {"pos", "seq"}, optional (default="pos")
Count along positions or sequences
normalize : bool, optional (default=True)
Normalize count for length of axis (i.e. relative count)
Returns
-------
np.array
Vector containing counts of char along the axis
Raises
------
ValueError
Upon invalid axis specification
"""
if axis == "pos":
naxis = 0
elif axis == "seq":
naxis = 1
else:
raise ValueError("Invalid axis: {}".format(axis))
c = np.sum(self.matrix == char, axis=naxis)
if normalize:
c = c / self.matrix.shape[naxis]
return c
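    # Illustrative, for a hypothetical alignment ``aln``: the per-column gap
    # fraction (values in [0, 1]) is aln.count("-", axis="pos", normalize=True).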
def select(self, columns=None, sequences=None):
"""
Create a sub-alignment that contains a subset of
sequences and/or columns.
.. note::
This does currently not adjust the indices
of the sequences. Annotation in the original alignment
will be lost and not passed on to the new object.
Parameters
----------
columns : np.array(bool) or np.array(int), optional
Vector containing True for each column that
should be retained, False otherwise; or the
indices of columns that should be selected
sequences : np.array(bool) or np.array(int), optional
Vector containing True for each sequence that
should be retained, False otherwise; or the
indices of sequences that should be selected
Returns
-------
Alignment
Alignment with selected columns and sequences
            (note this alignment loses annotation)
"""
if columns is None and sequences is None:
return self
sel_matrix = self.matrix
ids = self.ids
if columns is not None:
sel_matrix = sel_matrix[:, columns]
if sequences is not None:
sel_matrix = sel_matrix[sequences, :]
ids = ids[sequences]
# do not copy annotation since it may become
# inconsistent
return Alignment(
np.copy(sel_matrix), np.copy(ids),
alphabet=self.alphabet
)
def apply(self, columns=None, sequences=None, func=np.char.lower):
"""
Apply a function along columns and/or rows of alignment matrix,
or to entire matrix.
Parameters
----------
columns : np.array(bool) or np.array(int), optional
Vector containing True for each column that
should be retained, False otherwise; or the
indices of columns that should be selected
sequences : np.array(bool) or np.array(int), optional
Vector containing True for each sequence that
should be retained, False otherwise; or the
indices of sequences that should be selected
func : callable
Vectorized numpy function that will be applied to
the selected subset of the alignment matrix
Returns
-------
Alignment
Alignment with modified columns and sequences
(this alignment maintains annotation)
"""
mod_matrix = np.copy(self.matrix)
if columns is None and sequences is None:
return self
else:
if columns is not None:
mod_matrix[:, columns] = func(mod_matrix[:, columns])
if sequences is not None:
mod_matrix[sequences, :] = func(mod_matrix[sequences, :])
return Alignment(
mod_matrix, np.copy(self.ids), deepcopy(self.annotation),
alphabet=self.alphabet
)
def replace(self, original, replacement, columns=None, sequences=None):
"""
Replace character with another in full matrix or
subset of columns/sequences.
Parameters
----------
original : char
Character that should be replaced
replacement : char
Replacement character
columns : numpy index array
See self.apply for explanation
sequences : numpy index array
See self.apply for explanation
Returns
-------
Alignment
Alignment with replaced characters
"""
return self.apply(
columns, sequences,
func=lambda x: np.char.replace(
x, original, replacement
)
)
def lowercase_columns(self, columns):
"""
Change a subset of columns to lowercase character
and replace "-" gaps with "." gaps, e.g. to exclude
them from EC calculations
Parameters
----------
columns | |
# Source: PetreStegaroiu/python-esppy -- esppy/connectors/fs.py
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
''' ESP File and Socket Connectors '''
from __future__ import print_function, division, absolute_import, unicode_literals
import numbers
import re
import six
from .base import Connector, prop, map_properties
from ..utils import xml
from ..utils.data import gen_name
class FilePublisher(Connector):
'''
Publish events from a file
Parameters
----------
fsname : string
The filename or path of the file
fstype : string, optional
The data file type.
Valid values: 'binary', 'csv', 'xml', 'json', 'syslog', 'hdat', 'cef'
name : string, optional
Name of the connector object
snapshot : boolean, optional
Specifies whether to send snapshot data
addcsvflags : string, optional
Specifies the event type to insert into input CSV events.
Valid values: 'normal' or 'partialupdate'
addcsvopcode : string, optional
Prepends an opcode and comma to input CSV events.
blocksize : int, optional
Specifies the number of events to include in a published event block
cefsyslogprefix : string, optional
When fstype=cef, specifies that CEF events contain the syslog prefix
configfilesection : string, optional
Specifies the name of the section in the ESP connector config
file for parameters.
csvfielddelimiter : string, optional
Specifies the character delimiter for field data in input CSV events
dateformat : string, optional
Specifies the format of datetime and timestamp fields
growinginputfile : boolean, optional
Enables reading from a growing input file by publishers
header : int, optional
Specifies the number of input lines to skip before starting
publish operations.
ignorecsvparseerrors : boolean, optional
Specifies that when a field in an input CSV event cannot be parsed,
the event is dropped, an error is logged, and publishing continues.
maxevents : int, optional
Specifies the maximum number of events to publish
noautogenfield : boolean, optional
Specifies that input events are missing the key field that is
autogenerated by the source window.
prebuffer : boolean, optional
Controls whether event blocks are buffered to an event block vector
before doing any injects.
publishwithupsert : boolean, optional
Build events with opcode=upsert instead of insert
rate : int, optional
Specifies the requested transmit rate in events per second
repeatcount : int, optional
Specifies the number of times to repeat the publish operation
transactional : string, optional
Sets the event block type to transactional.
Returns
-------
:class:`FilePublisher`
'''
connector_key = dict(cls='fs', type='publish')
property_defs = dict(
fsname=prop('fsname', dtype='string', required=True),
fstype=prop('fstype', dtype='string', required=True,
default='csv',
valid_values=['binary', 'csv', 'xml', 'json',
'syslog', 'hdat', 'cef']),
snapshot=prop('snapshot', dtype='boolean', required=True, default=False),
addcsvflags=prop('addcsvflags', dtype='string',
valid_values=['normal', '']),
addcsvopcode=prop('addcsvopcode', dtype='string',
valid_values=['insert', '']),
blocksize=prop('blocksize', dtype='int', valid_expr='value > 0'),
        cefsyslogprefix=prop('cefsyslogprefix', dtype='string'),
configfilesection=prop('configfilesection', dtype='string'),
csvfielddelimiter=prop('csvfielddelimiter', dtype='string'),
dateformat=prop('dateformat', dtype='string'),
growinginputfile=prop('growinginputfile', dtype='boolean'),
header=prop('header', dtype=('boolean', 'string'),
valid_values=[True, False, 'full']),
ignorecsvparseerrors=prop('ignorecsvparseerrors', dtype='boolean'),
maxevents=prop('maxevents', dtype='int', valid_expr='value >= 0'),
noautogenfield=prop('noautogenfield', dtype='boolean'),
prebuffer=prop('prebuffer', dtype='boolean'),
publish_with_upsert=prop('publishwithupsert', dtype='boolean'),
rate=prop('rate', dtype='int'),
repeatcount=prop('repeatcount', dtype='int', valid_expr='value >= 0'),
transactional=prop('transactional', dtype='string'),
)
def __init__(self, fsname=None, fstype=None, name=None, is_active=None,
snapshot=None, addcsvflags=None,
addcsvopcode=None, blocksize=None, cefsyslogprefix=None,
configfilesection=None, csvfielddelimiter=None,
dateformat=None,
growinginputfile=None, header=None, ignorecsvparseerrors=None,
maxevents=None, noautogenfield=None, prebuffer=None,
publishwithupsert=None, rate=None, repeatcount=None,
transactional=None):
params = dict(**locals())
params.pop('self')
params.pop('is_active')
name = params.pop('name')
Connector.__init__(self, 'fs', name=name, type='publish', is_active=is_active,
properties=params)
@classmethod
def from_parameters(cls, conncls, type=None, name=None, is_active=None,
properties=None):
req, properties = map_properties(cls, properties, required='fsname',
delete='type')
return cls(req[0], name=name, is_active=is_active, **properties)
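# Hypothetical construction sketch (the file name and parameter values are
# assumptions; only parameters documented above are used):
#   pub = FilePublisher(fsname='input-events.csv', fstype='csv',
#                       blocksize=64, growinginputfile=True)
# The resulting connector publishes CSV rows from the file as ESP events.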
class FileSubscriber(Connector):
'''
Subscribe to events from a file
Parameters
----------
fsname : string
The filename or path of the file
fstype : string, optional
The data file type.
Valid values: 'binary', 'csv', 'xml', 'json', 'syslog', 'hdat', 'cef'
name : string, optional
Name of the connector object
snapshot : boolean, optional
Specifies whether to send snapshot data
collapse : boolean, optional
Converts UPDATE_BLOCK events to UPDATE events in order to make
subscriber output publishable.
configfilesection : string, optional
Specifies the name of the section in the ESP connector config
file for parameters.
dateformat : string, optional
Specifies the format of datetime and timestamp fields
hdatcashostport : string, optional
Specifies the CAS server host and port
hdatcaspassword : string, optional
Specifies the CAS server password
hdatcasusername : string, optional
Specifies the CAS server user name
hdatfilename : string, optional
Specifies the name of the Objective Analysis Package Data (HDAT)
file to be written to the Hadoop Distributed File System (HDFS).
hdatlasrhostport : string, optional
Specifies the SAS LASR Analytic Server host and port
hdatlasrkey : string, optional
Specifies the path to tklasrkey.sh
hdatmaxdatanodes : int, optional
Specifies the maximum number of data node connections
hdatmaxstringlength : int, optional
Specifies in bytes the fixed size of string fields in Objective
Analysis Package Data (HDAT) files
hdatnumthreads : int, optional
Specifies the size of the thread pool used for multi-threaded
writes to data node socket connections.
hdfsblocksize : int, optional
Specifies in Mbytes the block size used to write an Objective
Analysis Package Data (HDAT) file.
hdfsnumreplicas : int, optional
Specifies the number of Hadoop Distributed File System (HDFS)
replicas created with writing an Objective Analysis Package
Data (HDAT) file.
header : boolean or string, optional
For a CSV subscriber, specifies to write a header row that
shows comma-separated fields.
Valid values: True, False, or 'full' (include opcode flags in header)
maxfilesize : int, optional
Specifies the maximum size in bytes of the subscriber output file
periodicity : int, optional
Specifies the interval in seconds at which the subscriber output
file is closed and a new output file opened.
rate : boolean, optional
When latency mode is enabled, shows this specified rate in generated
output files.
rmretdel : boolean, optional
Specifies to remove all delete events from event blocks received
by a subscriber that were introduced by a window retention policy.
unbufferedoutputstreams : boolean, optional
Specifies to create an unbuffered stream when writing to a
file or socket.
Returns
-------
:class:`FileSubscriber`
'''
connector_key = dict(cls='fs', type='subscribe')
property_defs = dict(
fsname=prop('fsname', dtype='string', required=True),
fstype=prop('fstype', dtype='string', required=True,
default='csv',
valid_values=['binary', 'csv', 'xml', 'json',
'syslog', 'hdat', 'cef']),
snapshot=prop('snapshot', dtype='bool', required=True, default=False),
collapse=prop('collapse', dtype='bool'),
configfilesection=prop('configfilesection', dtype='string'),
dateformat=prop('dateformat', dtype='string'),
hdatcashostport=prop('hdatcashostport', dtype='string',
valid_values=re.compile(r'\w[\w\-\.]*:\d+')),
hdatcaspassword=prop('hdatcaspassword', dtype='string'),
hdatcasusername=prop('hdatcasusername', dtype='string'),
hdatfilename=prop('hdatfilename', dtype='string'),
hdatlasrhostport=prop('hdatlasrhostport', dtype='string'),
hdatlasrkey=prop('hdatlasrkey', dtype='string'),
hdatmaxdatanodes=prop('hdatmaxdatanodes',
dtype='int', valid_expr='value > 0'),
hdatmaxstringlength=prop('hdatmaxstringlength', dtype='int',
valid_expr='value > 0'),
hdatnumthreads=prop('hdatnumthreads', dtype='int',
valid_expr='value >= 0'),
hdfsblocksize=prop('hdfsblocksize', dtype='int',
valid_expr='value >= 0'),
hdfsnumreplicas=prop('hdfsnumreplicas', dtype='int',
valid_expr='value >= 0'),
header=prop('header', dtype=('bool', 'string'),
valid_values=[True, False, 'full']),
maxfilesize=prop('maxfilesize', dtype='int',
valid_expr='value >= 0'),
periodicity=prop('periodicity', dtype='int',
valid_expr='value >= 0'),
rate=prop('rate', dtype='bool'),
rmretdel=prop('rmretdel', dtype='bool'),
unbufferedoutputstreams=prop('unbufferedoutputstreams', dtype='bool'),
)
def __init__(self, fsname=None, fstype=None, name=None, is_active=None,
snapshot=None, collapse=None, configfilesection=None,
dateformat=None, hdatcashostport=None,
hdatcaspassword=None, hdatcasusername=None,
hdatfilename=None, hdatlasrhostport=None, hdatlasrkey=None,
hdatmaxdatanodes=None, hdatmaxstringlength=None,
hdatnumthreads=None, hdfsblocksize=None,
hdfsnumreplicas=None, header=None, maxfilesize=None,
periodicity=None, rate=None, rmretdel=None,
unbufferedoutputstreams=None):
params = dict(**locals())
params.pop('self')
params.pop('is_active')
name = params.pop('name')
Connector.__init__(self, 'fs', name=name, type='subscribe', is_active=is_active,
properties=params)
@classmethod
def from_parameters(cls, conncls, type=None, name=None, is_active=None,
properties=None):
req, properties = map_properties(cls, properties, required='fsname',
delete='type')
return cls(req[0], name=name, is_active=is_active, **properties)
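# Hypothetical construction sketch (the file name and parameter values are
# assumptions):
#   sub = FileSubscriber(fsname='output-events.csv', fstype='csv',
#                        snapshot=True, header=True, periodicity=3600)
# The resulting connector writes subscribed events to a new CSV file each hour.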
class SocketPublisher(FilePublisher):
'''
Publish events from a socket
Parameters
----------
port : int
The port number to publish on
fstype : string, optional
The data file type.
Valid values: 'binary', 'csv', 'xml', 'json', 'syslog', 'hdat', 'cef'
name : string, optional
Name of the connector object
snapshot : boolean, optional
Specifies whether to send snapshot data
addcsvflags : string, optional
Specifies the event type to insert into input CSV events.
Valid values: 'normal' or 'partialupdate'
addcsvopcode : string, optional
Prepends an opcode and comma to input CSV events.
blocksize : int, optional
Specifies the number of events to include in a published event block
cefsyslogprefix : string, optional
When fstype=cef, specifies that CEF events contain the syslog prefix
configfilesection : string, optional
Specifies the name of the section in the ESP connector config
file for parameters.
csvfielddelimiter : string, optional
Specifies the character delimiter for field data in input CSV events
dateformat | |
# Source: petcomputacaoufrgs/EMS
import logging
import pandas as pd
import music21
from bisect import bisect_left
SETTINGS = {
'RESOLUTION': 16,
'KEYBOARD_SIZE': 88,
'KEYBOARD_OFFSET': 21
}
def interactive_debug_serial(serialized):
"""Interactive debugger of the serialized dataframe"""
serial_instruments = list(enumerate(set(serialized.index)))
stop = False
while not stop:
print('\nInstruments detected:')
print('\t.(ID, INSTRUMENT)')
print('\t.---------------)')
for instrument in serial_instruments:
print(f'\t.{instrument}')
sel_inst = input('\n\nEnter ID of instrument of interest: #')
if sel_inst.upper() == '':
break
else:
sel_inst = int(sel_inst)
sel_inst_name = serial_instruments[sel_inst][1]
target_instrument = serialized.loc[serialized.index == sel_inst_name]
measure_view = True
while measure_view:
# single measure
measure_s = input('\nEnter number of measure to show: #')
if measure_s.upper() == '':
measure_view = False
stop = False
else:
measure_s = int(measure_s)
measures = target_instrument[target_instrument['MEASURE'] == measure_s]
print(measures.to_markdown())
measure_view = True
stop = False
# https://stackoverflow.com/questions/12141150/from-list-of-integers-get-number-closest-to-a-given-value
def take_closest(myList, myNumber):
"""
Assumes myList is sorted. Returns closest value to myNumber.
If two numbers are equally close, return the smallest number.
"""
pos = bisect_left(myList, myNumber)
if pos == 0:
return myList[0]
if pos == len(myList):
return myList[-1]
before = myList[pos - 1]
after = myList[pos]
if after - myNumber < myNumber - before:
return after
else:
return before
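# Illustrative behaviour of take_closest:
#   take_closest([1, 4, 10], 5)  -> 4
#   take_closest([1, 4, 10], 2)  -> 1
#   take_closest([1, 3], 2)      -> 1   (ties return the smaller value)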
def key_index2note(i, midi_offset):
""" Receives the key index and the midi offset of the keyboard and returns a M21 Note"""
index = i + midi_offset
n = music21.note.Note(midi=index)
return n
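# Illustrative, assuming the default KEYBOARD_OFFSET of 21 (MIDI note A0):
#   key_index2note(0, 21)   -> A0
#   key_index2note(39, 21)  -> C4 (middle C, MIDI 60)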
def transpose_stream_to_C(stream, force_eval=False):
"""Transpose a stream to C major, if it's in a major key, or to A minor, if it's in a minor key
Returns a tuple in the format (key, transposed_stream)"""
# trying to capture a M21 Key object in the stream
stream_key = stream.getElementsByClass(music21.key.Key)
if len(stream_key) != 0:
stream_key = stream_key[0]
else:
stream_key = None
# if we failed to get a M21 Key and 'forceEval' is set to True
# we will try to use M21 key analyzer.
# but this analyzer sometimes fails and breaks the code
# so this flag should be used carefully
if force_eval and stream_key is None:
stream_key = stream.analyze('key')
    # if we still have no key at this point, return the input unchanged
    # (i.e. we reject the input)
if stream_key is None:
# logging.warning('Transposing measures containing empty KeySignatures can cause errors. Returning key as None '
# 'type.')
return None, stream
# copy for initialization
transposed_stream = stream
# at this point we should have a key
# so it's safe to compare
if stream_key != 'C' and stream_key != 'a':
# transpose song to C major/A minor
if stream_key.mode == 'major':
transpose_int = music21.interval.Interval(stream_key.tonic, music21.pitch.Pitch('C'))
transposed_stream = stream.transpose(transpose_int)
elif stream_key.mode == 'minor':
transpose_int = music21.interval.Interval(stream_key.tonic, music21.pitch.Pitch('a'))
transposed_stream = stream.transpose(transpose_int)
return stream_key.tonicPitchNameWithCase, transposed_stream
def measure_data(measure):
"""Receives a measure, and returns all notes from that measure in a list"""
items = measure.flat.notes
data = []
for item in items:
if isinstance(item, music21.note.Note) or isinstance(item, music21.note.Rest):
data.append(item)
elif isinstance(item, music21.chord.Chord):
for p in item.pitches:
n = music21.note.Note(pitch=p)
n.offset = item.offset
n.duration.quarterLength = item.duration.quarterLength
n.volume.velocityScalar = item.volume.velocityScalar
data.append(n)
return data
def measure2performance(measure, settings, ts_numerator, to_bins=False):
"""Receives a measure and returns it in a multi hot encoding form"""
if not isinstance(settings, pd.Series):
settings = pd.Series(settings)
data = measure_data(measure)
volume_flag = 1e-8
keyboard_range = settings.KEYBOARD_SIZE + settings.KEYBOARD_OFFSET
frames = [[False for i in range(settings.KEYBOARD_SIZE)] for j in range(ts_numerator * settings.RESOLUTION)]
for item in data:
# if item is a Rest, we can skip
# since no key must be turned on
if isinstance(item, music21.note.Rest):
continue
# if the item is a Note that is above
# or below our keyboard range, we can skip
# cause it will not be represented
        if item.pitch.midi >= keyboard_range or item.pitch.midi < settings.KEYBOARD_OFFSET:
            continue
# # # # # # # #
# ITEM IS VALID
# # # # # # # #
#
# here we only have
# individual notes
# that are inside our
# keyboard range
#
# now we must discover
# what frames must be set
        # to True at what note
# index to get the
# One Hot Encoding of
# the measure
# start and end frames
frame_s = int(item.offset * settings.RESOLUTION)
frame_e = int(frame_s + (item.duration.quarterLength * settings.RESOLUTION))
# note index on our keyboard
i_key = item.pitch.midi - settings.KEYBOARD_OFFSET
# velocity of the note
interval_list = [i / 128 for i in range(16, 128, 16)]
velocity = item.volume.velocityScalar
if to_bins:
velocity = take_closest(interval_list, velocity)
# if it's the first note of the bar, you don't need to check it
if frame_s > 0:
# if consecutive notes have the same speed, add a flag to differentiate them
if frames[frame_s-1][i_key] == velocity:
velocity += volume_flag
# turn them on captain!
for frame in range(frame_s, frame_e):
if velocity is not None:
# print(frame, i_key, velocity)
frames[frame][i_key] = velocity
else:
# no notes
frames[frame][i_key] = False
# create Pandas dataframe
note_names = [key_index2note(i, settings.KEYBOARD_OFFSET).nameWithOctave for i in range(0, settings.KEYBOARD_SIZE)]
frame_counter = [int(i) for i in range(0, ts_numerator * settings.RESOLUTION)]
stackframe = pd.DataFrame(frames, index=frame_counter, columns=note_names)
return stackframe
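# Shape note: the DataFrame returned above has ts_numerator * RESOLUTION rows
# (one per frame) and KEYBOARD_SIZE columns (one per key), e.g. 64 x 88 for a
# 4/4 measure with the default SETTINGS.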
# M21 Measure -> Pandas DataFrame
def measure(m_number, m, settings, INSTRUMENT_BLOCK, ENVIRONMENT_BLOCK, to_bins=False):
"""Serialise a single measure"""
if not isinstance(settings, pd.Series):
settings = pd.Series(settings)
# check for key changes
m_ks, transposed_measure = transpose_stream_to_C(m, force_eval=False)
if m_ks is None:
m_ks = ENVIRONMENT_BLOCK.ORIGINAL_KS
# check for tempo changes
m_bpm = m.getElementsByClass(music21.tempo.TempoIndication)
if len(m_bpm) != 0:
m_bpm = m_bpm[0].getQuarterBPM()
else:
m_bpm = ENVIRONMENT_BLOCK.TEMPO
m_bpm = int(m_bpm)
# check for time sign changes
m_ts = m.getTimeSignatures()
if len(m_ts) != 0:
m_ts = m_ts[0]
else:
m_ts = ENVIRONMENT_BLOCK.TS
# Update Env according to this measure
ENVIRONMENT_BLOCK.ORIGINAL_KS = m_ks
ENVIRONMENT_BLOCK.TS = '{}/{}'.format(m_ts.numerator, m_ts.denominator)
ENVIRONMENT_BLOCK.TEMPO = m_bpm
# METRIC BLOCK
# ======||||======
measure_counter = [int(m_number) for i in range(settings.RESOLUTION * m_ts.numerator)]
beat_counter = [(int(i // settings.RESOLUTION) + 1) for i in range(settings.RESOLUTION * m_ts.numerator)]
frame_counter = [(int(i % settings.RESOLUTION) + 1) for i in range(settings.RESOLUTION * m_ts.numerator)]
metric_bl = pd.DataFrame(
{
'MEASURE': measure_counter,
'BEAT': beat_counter,
'FRAME': frame_counter
}
)
perf_bl = measure2performance(transposed_measure,
settings,
m_ts.numerator,
to_bins)
inst_bl = pd.concat([INSTRUMENT_BLOCK] * (m_ts.numerator * settings.RESOLUTION), axis=1).T
env_bl = pd.concat([ENVIRONMENT_BLOCK] * (m_ts.numerator * settings.RESOLUTION), axis=1).T
encoded_measure = pd.concat([inst_bl, metric_bl, env_bl, perf_bl], axis=1)
return encoded_measure
# M21 Part -> Pandas DataFrame
def instrument(part, settings, part_list=None, to_bins=False):
"""Serialise a single instrument/part"""
#
# INSTRUMENT BLOCK
#
if not isinstance(settings, pd.Series):
settings = pd.Series(settings)
# flat the stream
part = part.semiFlat
# ========================
# DEFINING BLOCKS
# ===============
# INSTRUMENT BLOCK
# ======||||======
part_name = part.partName
inst_specs = part.getElementsByClass(music21.instrument.Instrument)[0]
m21_inst = part.getElementsByClass(music21.instrument.Instrument)[-1]
inst_name = m21_inst.instrumentName
# This is a terminal case.
# Without the instrument name a lot of problems show up.
# So, we will avoid this case for now
if inst_name is None:
return None
inst_sound = inst_specs.instrumentSound
    # to avoid the problem of having parts with the same name
    if part_list is None:
        part_list = []
    while part_name in part_list:
part_name += "'"
part_list.append(part_name)
try:
midi_program = m21_inst.midiProgram
except:
midi_program = 0
logging.warning('Could not retrieve Midi Program from instrument, setting it to default value 0 ({})'
.format(music21.instrument.instrumentFromMidiProgram(midi_program).instrumentName))
INSTRUMENT_BLOCK = pd.Series(
{
'NAME': part_name,
'INSTRUMENT': inst_name,
'MIDI_PROGRAM': midi_program,
'SOUND': inst_sound
}
)
#
# ENVIRONMENT BLOCK
# ======||||======
# get part tempo
metronome = part.getElementsByClass(music21.tempo.TempoIndication)
if len(metronome) == 0:
bpm = 120
logging.warning('Could not retrieve Metronome object from Part, setting BPM to default value ({})'
.format(bpm))
else:
bpm = metronome[0].getQuarterBPM()
bpm = int(bpm)
# filter parts that are not in 4/4
time_signature = part.getElementsByClass(music21.meter.TimeSignature)
if len(time_signature) == 0:
ts = music21.meter.TimeSignature('4/4')
logging.warning('Could not retrieve Time Signature object from Part, setting TS to default value ({})'
.format(ts))
else:
ts = time_signature[0]
# transpose song to C major/A minor
original_ks, transposed_part = transpose_stream_to_C(part, force_eval=True)
n_measures = len(part) + 1
ENVIRONMENT_BLOCK = pd.Series(
{
'ORIGINAL_KS': original_ks,
'TS': '{}/{}'.format(ts.numerator, ts.denominator),
'TEMPO': bpm
}
)
# a vector containing the measures
part_df = []
first_measure = True
for i, m in enumerate(transposed_part.measures(1, n_measures)):
serialised_measure = pd.DataFrame(
measure(i+1, m,
settings,
INSTRUMENT_BLOCK,
ENVIRONMENT_BLOCK,
to_bins
)
)
if first_measure:
part_df = serialised_measure
first_measure = False
else:
part_df = pd.concat([part_df, serialised_measure], axis=0, ignore_index=True)
part_df.index = part_df.index | |
#!/usr/bin/env python
#-*- coding: utf-8 -*-
#pylint: disable=R0915,R0912,R0913,R0914,R0902
"""
File : generator.py
Author : <NAME> <<EMAIL>>
Generator module defines various data generator for MLaaS4HEP framework.
"""
from __future__ import print_function, division, absolute_import
# system modules
import os
import json
import time
import random
# numpy modules
import numpy as np
# MLaaS4HEP modules
from MLaaS4HEP.reader import RootDataReader, JsonReader, CsvReader, AvroReader, ParquetReader
from MLaaS4HEP.utils import file_type, timestamp
class MetaDataGenerator(object):
"""
MetaDataGenerator class provides interface to read files.
"""
def __init__(self, fin, labels, params=None, preproc=None, dtype=None):
"Initialization function for Data Generator"
time0 = time.time()
self.dtype = str(dtype).lower()
self.preproc = preproc
if not params:
params = {}
# parse given parameters
batch_size = params.get('batch_size', 256)
self.verbose = params.get('verbose', 0)
chunk_size = params.get('chunk_size', 1000)
self.evts = params.get('nevts', -1)
self.shuffle = params.get('shuffle', False)
# convert input fin parameter into file list if necessary
if isinstance(fin, str):
self.files = [fin]
elif isinstance(fin, list):
self.files = fin
else:
raise Exception("Unsupported data-type '%s' for fin parameter" % type(fin))
if isinstance(labels, str):
self.labels = [labels for _ in range(len(self.files))]
elif isinstance(labels, list):
self.labels = labels
else:
raise Exception("Unsupported data-type '%s' for labels parameter" % type(labels))
self.file_label_dict = dict(zip(self.files, self.labels))
self.reader = {} # global reader will handle all files readers
self.reader_counter = {} # reader counter keeps track of nevts read by readers
if self.verbose:
print(timestamp('Generator: {}'.format(self)))
print("model parameters: {}".format(json.dumps(params)))
self.start_idx = 0
self.chunk_size = chunk_size
self.stop_idx = chunk_size
self.batch_size = batch_size
# loop over files and create individual readers for them, then put them in a global reader
for fname, label in self.file_label_dict.items():
if self.dtype == 'json' or file_type(fname) == 'json':
reader = JsonReader(fname, label, chunk_size=chunk_size, nevts=self.evts, \
preproc=self.preproc, verbose=self.verbose)
elif self.dtype == 'csv' or file_type(fname) == 'csv':
reader = CsvReader(fname, label, chunk_size=chunk_size, nevts=self.evts, \
preproc=self.preproc, verbose=self.verbose)
elif self.dtype == 'avro' or file_type(fname) == 'avro':
reader = AvroReader(fname, label, chunk_size=chunk_size, nevts=self.evts, \
preproc=self.preproc, verbose=self.verbose)
elif self.dtype == 'parquet' or file_type(fname) == 'parquet':
reader = ParquetReader(fname, label, chunk_size=chunk_size, nevts=self.evts, \
preproc=self.preproc, verbose=self.verbose)
            else:
                raise Exception("Unsupported data-type '%s' for file '%s'" % (self.dtype, fname))
            self.reader[fname] = reader
self.reader_counter[fname] = 0
self.current_file = self.files[0]
print("init MetaDataGenerator in {} sec".format(time.time()-time0))
print("available readers")
for fname, reader in self.reader.items():
print("{} {}".format(fname, reader))
@property
def nevts(self):
"Return number of events of current file"
return self.evts if self.evts != -1 else self.reader[self.current_file].nrows
def __len__(self):
"Return total number of batches this generator can deliver"
return int(np.floor(self.nevts / self.batch_size))
def next(self):
"Return next batch of events"
msg = "\nread chunk [{}:{}] from {} label {}"\
.format(self.start_idx, self.stop_idx, self.current_file, \
self.file_label_dict[self.current_file])
gen = self.read_data(self.start_idx, self.stop_idx)
        # advance start and stop indices
self.start_idx = self.stop_idx
self.stop_idx = self.start_idx+self.chunk_size
if self.nevts != -1 and \
(self.start_idx > self.nevts or \
(self.reader[self.current_file].nrows and \
self.start_idx > self.reader[self.current_file].nrows)):
# we reached the limit of the reader
self.start_idx = 0
self.stop_idx = self.chunk_size
raise StopIteration
if self.verbose:
print(msg)
data = []
labels = []
for xdf, ldf in gen:
data.append(xdf)
labels.append(ldf)
if not data:
raise StopIteration
data = np.array(data)
labels = np.array(labels)
if self.verbose:
print("return shapes: data=%s labels=%s" % (np.shape(data), np.shape(labels)))
return data, labels
def __iter__(self):
"Provide iterator capabilities to the class"
return self
def __next__(self):
"Provide generator capabilities to the class"
return self.next()
def read_data(self, start=0, stop=100):
"Helper function to read data via reader"
# if we exceed number of events in a file we discard it
if self.nevts < self.reader_counter[self.current_file]:
if self.verbose:
msg = "# discard {} since we read {} out of {} events"\
.format(self.current_file, \
self.reader_counter[self.current_file], self.nevts)
print(msg)
self.files.remove(self.current_file)
if self.files:
self.current_file = self.files[0]
else:
print("# no more files to read from")
raise StopIteration
if self.shuffle:
idx = random.randint(0, len(self.files)-1)
self.current_file = self.files[idx]
current_file = self.current_file
reader = self.reader[current_file]
for data in reader.next():
yield data
if stop == -1:
read_evts = reader.nrows
else:
read_evts = stop-start
# update how many events we read from current file
self.reader_counter[self.current_file] += read_evts
if self.verbose:
nevts = self.reader_counter[self.current_file]
msg = "\ntotal read {} evts from {}".format(nevts, current_file)
print(msg)
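# Hypothetical usage sketch (file name, label and parameter values are assumptions):
#   gen = MetaDataGenerator(['events.csv'], labels='target',
#                           params={'chunk_size': 500, 'verbose': 1})
#   for data, labels in gen:
#       pass  # each iteration yields one chunk as numpy arrays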
class RootDataGenerator(object):
"""
RootDataGenerator class provides interface to read HEP ROOT files.
"""
def __init__(self, fin, labels, params=None, specs=None):
"Initialization function for Data Generator"
time0 = time.time()
if not params:
params = {}
# parse given parameters
nan = params.get('nan', np.nan)
batch_size = params.get('batch_size', 256)
verbose = params.get('verbose', 0)
branch = params.get('branch', 'Events')
identifier = params.get('identifier', [])
branches = params.get('selected_branches', [])
chunk_size = params.get('chunk_size', 1000)
exclude_branches = params.get('exclude_branches', [])
redirector = params.get('redirector', 'root://cms-xrd-global.cern.ch')
self.evts = params.get('nevts', -1)
self.shuffle = params.get('shuffle', False)
# convert input fin parameter into file list if necessary
if isinstance(fin, str):
self.files = [fin]
elif isinstance(fin, list):
self.files = fin
else:
raise Exception("Unsupported data-type '%s' for fin parameter" % type(fin))
if isinstance(labels, str):
self.labels = labels
elif isinstance(labels, list):
self.labels = labels
self.file_label_dict = dict(zip(self.files, self.labels))
else:
raise Exception("Unsupported data-type '%s' for labels parameter" % type(labels))
self.reader = {} # global reader will handle all files readers
self.reader_counter = {} # reader counter keeps track of nevts read by readers
if verbose:
print(timestamp('DataGenerator: {}'.format(self)))
print("model parameters: {}".format(json.dumps(params)))
if exclude_branches and not isinstance(exclude_branches, list):
if os.path.isfile(exclude_branches):
exclude_branches = \
[r.replace('\n', '') for r in open(exclude_branches).readlines()]
else:
exclude_branches = exclude_branches.split(',')
if verbose:
print("exclude branches", exclude_branches)
self.start_idx = 0
self.chunk_size = chunk_size
self.stop_idx = chunk_size
self.batch_size = batch_size
self.verbose = verbose
self.jdim = {}
self.minv = {}
self.maxv = {}
self.fkeys = []
self.jkeys = []
self.nans = {}
self.gname = "global-specs.json"
self.finish_label = False
self.finish_file = False
self.events = {'total': 0}
self.evts_toread = {}
# loop over files and create individual readers for them, then put them in a global reader
for fname in self.files:
# if no specs is given try to read them from local area
fbase = fname.split('/')[-1].replace('.root', '')
sname = 'specs-{}.json'.format(fbase)
if not specs:
if os.path.isfile(self.gname):
if verbose:
print("loading specs {}".format(self.gname))
specs = json.load(open(self.gname))
reader = RootDataReader(fname, branch=branch, identifier=identifier, label=self.labels,\
selected_branches=branches, exclude_branches=exclude_branches, \
nan=nan, chunk_size=chunk_size, nevts=self.evts, specs=specs, \
redirector=redirector, verbose=verbose)
# build specs for the whole set of root files
self.global_specs(fname, reader)
if not os.path.isfile(sname):
if verbose:
print("writing specs {}".format(sname))
reader.write_specs(sname)
self.reader[fname] = reader
self.reader_counter[fname] = 0
for fname in self.files:
self.reader[fname].load_specs(self.gname)
if self.evts != -1:
self.events[fname] = round((float(self.events[fname])/self.events['total'])*self.evts)
if self.events[fname] == 0:
self.events[fname] = 1
self.evts_toread[fname] = round((float(self.events[fname])/self.evts) * self.chunk_size)
else:
self.evts_toread[fname] = round((float(self.events[fname])/self.events['total']) * self.chunk_size)
self.current_file = self.files[0]
print("init RootDataGenerator in {} sec\n\n".format(time.time()-time0))
@property
def nevts(self):
"Return number of events of current file"
return self.evts if self.evts != -1 else self.reader[self.current_file].nrows
def __len__(self):
"Return total number of batches this generator can deliver"
return int(np.floor(self.nevts / self.batch_size))
def next(self):
"Return next batch of events in form of data and mask vectors"
data = []
mask = []
index_label = 0
if self.shuffle:
idx = random.randint(0, len(self.files)-1)
self.current_file = self.files[idx]
while self.check_file():
pass
gen = self.read_data(self.start_idx, self.stop_idx)
for (xdf, mdf, idx_label) in gen:
data.append(xdf)
mask.append(mdf)
index_label = idx_label
if isinstance(self.labels, list):
label = self.file_label_dict[self.current_file]
data = np.array(data)
mask = np.array(mask)
else:
# one branch contains the label
if data:
label = []
c = list(zip(data,mask))
random.shuffle(c)
data, mask = zip(*c)
label.append(np.array(data)[:,index_label])
data = np.delete(np.array(data),index_label,1)
mask = np.delete(np.array(mask),index_label,1)
                labels = np.full(shape=len(data), fill_value=label, dtype=int)
return data, mask, labels
def next_mix_files(self):
'''Return next batch of events in form of data and mask vectors.
Use it to equally mix events from different files'''
if self.finish_file == True:
raise StopIteration
time_start = time.time()
data = []
mask = []
for fname in self.files:
if fname == self.files[0]:
start = self.start_idx
self.current_file = fname
evts = self.evts_toread[fname]
if evts + self.reader_counter[fname] >= self.events[fname]:
self.finish_file = True
if self.finish_file == False:
if evts == 0: continue
if fname == self.files[-1] and (self.start_idx + evts) % self.chunk_size != 0:
self.stop_idx = start + self.chunk_size
evts = self.stop_idx - self.start_idx
else:
self.stop_idx = self.start_idx + evts
else:
evts = self.events[fname] - self.reader_counter[fname]
self.stop_idx = self.start_idx + evts
print(f"label {self.file_label_dict[self.current_file]}, "
f"file <{self.current_file.split('/')[-1]}>, going to read {evts} events")
gen = self.read_data_mix_files(self.start_idx, self.stop_idx)
for (xdf, mdf, idx_label) in gen:
data.append(xdf)
mask.append(mdf)
label = self.file_label_dict[self.current_file]
if fname == self.files[0]:
                labels = np.full(shape=evts, fill_value=label, dtype=int)
            else:
                labels = np.append(labels, np.full(shape=evts, fill_value=label, dtype=int))
data = np.array(data)
| |
unique identifier of the cluster.
- **NodeType** *(string) --*
The node type for the nodes in the cluster.
- **ClusterStatus** *(string) --*
The current state of the cluster. Possible values are the following:
* ``available``
* ``available, prep-for-resize``
* ``available, resize-cleanup``
* ``cancelling-resize``
* ``creating``
* ``deleting``
* ``final-snapshot``
* ``hardware-failure``
* ``incompatible-hsm``
* ``incompatible-network``
* ``incompatible-parameters``
* ``incompatible-restore``
* ``modifying``
* ``rebooting``
* ``renaming``
* ``resizing``
* ``rotating-keys``
* ``storage-full``
* ``updating-hsm``
- **ModifyStatus** *(string) --*
The status of a modify operation, if any, initiated for the cluster.
- **MasterUsername** *(string) --*
The master user name for the cluster. This name is used to connect to the database that is specified in the **DBName** parameter.
- **DBName** *(string) --*
The name of the initial database that was created when the cluster was created. This same name is returned for the life of the cluster. If an initial database was not specified, a database named ``dev`` dev was created by default.
- **Endpoint** *(dict) --*
The connection endpoint.
- **Address** *(string) --*
The DNS address of the Cluster.
- **Port** *(integer) --*
The port that the database engine is listening on.
- **ClusterCreateTime** *(datetime) --*
The date and time that the cluster was created.
- **AutomatedSnapshotRetentionPeriod** *(integer) --*
The number of days that automatic cluster snapshots are retained.
- **ManualSnapshotRetentionPeriod** *(integer) --*
The default number of days to retain a manual snapshot. If the value is -1, the snapshot is retained indefinitely. This setting doesn't change the retention period of existing snapshots.
The value must be either -1 or an integer between 1 and 3,653.
- **ClusterSecurityGroups** *(list) --*
A list of cluster security group that are associated with the cluster. Each security group is represented by an element that contains ``ClusterSecurityGroup.Name`` and ``ClusterSecurityGroup.Status`` subelements.
Cluster security groups are used when the cluster is not created in an Amazon Virtual Private Cloud (VPC). Clusters that are created in a VPC use VPC security groups, which are listed by the **VpcSecurityGroups** parameter.
- *(dict) --*
Describes a cluster security group.
- **ClusterSecurityGroupName** *(string) --*
The name of the cluster security group.
- **Status** *(string) --*
The status of the cluster security group.
- **VpcSecurityGroups** *(list) --*
A list of Amazon Virtual Private Cloud (Amazon VPC) security groups that are associated with the cluster. This parameter is returned only if the cluster is in a VPC.
- *(dict) --*
Describes the members of a VPC security group.
- **VpcSecurityGroupId** *(string) --*
The identifier of the VPC security group.
- **Status** *(string) --*
The status of the VPC security group.
- **ClusterParameterGroups** *(list) --*
The list of cluster parameter groups that are associated with this cluster. Each parameter group in the list is returned with its status.
- *(dict) --*
Describes the status of a parameter group.
- **ParameterGroupName** *(string) --*
The name of the cluster parameter group.
- **ParameterApplyStatus** *(string) --*
The status of parameter updates.
- **ClusterParameterStatusList** *(list) --*
The list of parameter statuses.
For more information about parameters and parameter groups, go to `Amazon Redshift Parameter Groups <https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html>`__ in the *Amazon Redshift Cluster Management Guide* .
- *(dict) --*
Describes the status of a parameter group.
- **ParameterName** *(string) --*
The name of the parameter.
- **ParameterApplyStatus** *(string) --*
The status of the parameter that indicates whether the parameter is in sync with the database, waiting for a cluster reboot, or encountered an error when being applied.
The following are possible statuses and descriptions.
* ``in-sync`` : The parameter value is in sync with the database.
* ``pending-reboot`` : The parameter value will be applied after the cluster reboots.
* ``applying`` : The parameter value is being applied to the database.
* ``invalid-parameter`` : Cannot apply the parameter value because it has an invalid value or syntax.
* ``apply-deferred`` : The parameter contains static property changes. The changes are deferred until the cluster reboots.
* ``apply-error`` : Cannot connect to the cluster. The parameter change will be applied after the cluster reboots.
* ``unknown-error`` : Cannot apply the parameter change right now. The change will be applied after the cluster reboots.
- **ParameterApplyErrorDescription** *(string) --*
The error that prevented the parameter from being applied to the database.
- **ClusterSubnetGroupName** *(string) --*
The name of the subnet group that is associated with the cluster. This parameter is valid only when the cluster is in a VPC.
- **VpcId** *(string) --*
The identifier of the VPC the cluster is in, if the cluster is in a VPC.
- **AvailabilityZone** *(string) --*
The name of the Availability Zone in which the cluster is located.
- **PreferredMaintenanceWindow** *(string) --*
The weekly time range, in Universal Coordinated Time (UTC), during which system maintenance can occur.
- **PendingModifiedValues** *(dict) --*
A value that, if present, indicates that changes to the cluster are pending. Specific pending changes are identified by subelements.
- **MasterUserPassword** *(string) --*
The pending or in-progress change of the master user password for the cluster.
- **NodeType** *(string) --*
The pending or in-progress change of the cluster's node type.
- **NumberOfNodes** *(integer) --*
The pending or in-progress change of the number of nodes in the cluster.
- **ClusterType** *(string) --*
The pending or in-progress change of the cluster type.
- **ClusterVersion** *(string) --*
The pending or in-progress change of the service version.
- **AutomatedSnapshotRetentionPeriod** *(integer) --*
The pending or in-progress change of the automated snapshot retention period.
- **ClusterIdentifier** *(string) --*
The pending or in-progress change of the new identifier for the cluster.
- **PubliclyAccessible** *(boolean) --*
The pending or in-progress change of the ability to connect to the cluster from the public network.
- **EnhancedVpcRouting** *(boolean) --*
An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see `Enhanced VPC Routing <https://docs.aws.amazon.com/redshift/latest/mgmt/enhanced-vpc-routing.html>`__ in the Amazon Redshift Cluster Management Guide.
If this option is ``true`` , enhanced VPC routing is enabled.
Default: false
- **MaintenanceTrackName** *(string) --*
The name of the maintenance track that the cluster will change to during the next maintenance window.
- **EncryptionType** *(string) --*
The encryption type for a cluster. Possible values are: KMS and None. For the China region the possible values are None, and Legacy.
- **ClusterVersion** *(string) --*
The version ID of the Amazon Redshift engine that is running on the cluster.
- **AllowVersionUpgrade** *(boolean) --*
A boolean value that, if ``true`` , indicates that major version upgrades will be applied automatically to the cluster during the maintenance window.
- **NumberOfNodes** *(integer) --*
The number of compute nodes in the cluster.
- **PubliclyAccessible** *(boolean) --*
A boolean value that, if ``true`` , indicates that the cluster can be accessed from a public network.
- **Encrypted** *(boolean) --*
A boolean value that, if ``true`` , indicates that data in the cluster is encrypted at rest.
- **RestoreStatus** *(dict) --*
A value that describes the status of a cluster restore action. This parameter returns null if the cluster was not created by restoring a snapshot.
- **Status** *(string) --*
The status of the restore action. Returns starting, restoring, completed, or failed.
- **CurrentRestoreRateInMegaBytesPerSecond** *(float) --*
The number of megabytes per second | |
CDP, Maine",1113),
("Standish CDP, Maine",255),
("Steep Falls CDP, Maine",1720),
("Thomaston CDP, Maine",2018),
("Topsham CDP, Maine",6103),
("Unity CDP, Maine",321),
("Van Buren CDP, Maine",1687),
("Waldoboro CDP, Maine",1260),
("Washburn CDP, Maine",1065),
("Waterville city, Maine",16515),
("Westbrook city, Maine",18417),
("West Kennebunk CDP, Maine",1023),
("Wilton CDP, Maine",1982),
("Winslow CDP, Maine",5004),
("Winter Harbor CDP, Maine",284),
("Winterport CDP, Maine",1195),
("Winthrop CDP, Maine",2594),
("Wiscasset CDP, Maine",1042),
("Woodland CDP, Maine",948),
("Yarmouth CDP, Maine",5608),
("York Harbor CDP, Maine",2542),
("Aberdeen city, Maryland",15701),
("Aberdeen Proving Ground CDP, Maryland",2723),
("Accident town, Maryland",244),
("Accokeek CDP, Maryland",11895),
("Adamstown CDP, Maryland",2263),
("Adelphi CDP, Maryland",15751),
("Algonquin CDP, Maryland",1257),
("Allen CDP, Maryland",384),
("Andrews AFB CDP, Maryland",2964),
("Annapolis city, Maryland",39147),
("Annapolis Neck CDP, Maryland",11590),
("Antietam CDP, Maryland",76),
("Aquasco CDP, Maryland",825),
("Arbutus CDP, Maryland",20259),
("Arden on the Severn CDP, Maryland",1953),
("Arnold CDP, Maryland",23518),
("Ashton-Sandy Spring CDP, Maryland",6136),
("Aspen Hill CDP, Maryland",52807),
("Baden CDP, Maryland",1712),
("Bagtown CDP, Maryland",279),
("Bakersville CDP, Maryland",52),
("Ballenger Creek CDP, Maryland",19889),
("Baltimore city, Maryland",614700),
("Baltimore Highlands CDP, Maryland",7959),
("Barclay town, Maryland",230),
("Barnesville town, Maryland",140),
("Barrelville CDP, Maryland",47),
("Barton town, Maryland",433),
("Bartonsville CDP, Maryland",1712),
("Beaver Creek CDP, Maryland",356),
("Bel Air CDP, Maryland",1524),
("Bel Air town, Maryland",10068),
("Bel Air North CDP, Maryland",31582),
("Bel Air South CDP, Maryland",48269),
("Beltsville CDP, Maryland",17589),
("Benedict CDP, Maryland",358),
("Bensville CDP, Maryland",13247),
("Berlin town, Maryland",4606),
("Berwyn Heights town, Maryland",3257),
("Bethesda CDP, Maryland",62448),
("Betterton town, Maryland",385),
("Bier CDP, Maryland",149),
("Big Pool CDP, Maryland",56),
("Big Spring CDP, Maryland",90),
("Bishopville CDP, Maryland",766),
("Bivalve CDP, Maryland",387),
("Bladensburg town, Maryland",9397),
("Bloomington CDP, Maryland",216),
("Boonsboro town, Maryland",3500),
("Bowie city, Maryland",58368),
("Bowleys Quarters CDP, Maryland",6519),
("Bowling Green CDP, Maryland",1198),
("Bowmans Addition CDP, Maryland",568),
("Braddock Heights CDP, Maryland",3258),
("Brandywine CDP, Maryland",9761),
("Breathedsville CDP, Maryland",103),
("Brentwood town, Maryland",3468),
("Brock Hall CDP, Maryland",11664),
("Brookeville town, Maryland",165),
("Brooklyn Park CDP, Maryland",15036),
("Brookmont CDP, Maryland",3633),
("Brookview town, Maryland",45),
("Broomes Island CDP, Maryland",329),
("Brownsville CDP, Maryland",41),
("Brunswick city, Maryland",6193),
("Bryans Road CDP, Maryland",7325),
("Bryantown CDP, Maryland",644),
("Buckeystown CDP, Maryland",1318),
("Burkittsville town, Maryland",140),
("Burtonsville CDP, Maryland",10604),
("Butlertown CDP, Maryland",362),
("Cabin John CDP, Maryland",2085),
("California CDP, Maryland",14298),
("Calvert Beach CDP, Maryland",972),
("Calverton CDP, Maryland",18076),
("Cambridge city, Maryland",12401),
("Camp Springs CDP, Maryland",21602),
("Cape St. Claire CDP, Maryland",8928),
("Capitol Heights town, Maryland",4512),
("Carlos CDP, Maryland",80),
("Carney CDP, Maryland",29743),
("Catonsville CDP, Maryland",41515),
("Cavetown CDP, Maryland",1117),
("Cearfoss CDP, Maryland",106),
("Cecilton town, Maryland",884),
("Cedarville CDP, Maryland",546),
("Centreville town, Maryland",4678),
("Chance CDP, Maryland",482),
("Charlestown town, Maryland",1391),
("Charlotte Hall CDP, Maryland",2026),
("Charlton CDP, Maryland",245),
("Chesapeake Beach town, Maryland",5922),
("Chesapeake City town, Maryland",763),
("Chesapeake Ranch Estates CDP, Maryland",9732),
("Chester CDP, Maryland",4573),
("Chestertown town, Maryland",5113),
("Cheverly town, Maryland",6452),
("Chevy Chase town, Maryland",2965),
("Chevy Chase CDP, Maryland",9611),
("Chevy Chase Section Five village, Maryland",668),
("Chevy Chase Section Three village, Maryland",722),
("Chevy Chase View town, Maryland",997),
("Chevy Chase Village town, Maryland",1943),
("Chewsville CDP, Maryland",394),
("Chillum CDP, Maryland",35798),
("Choptank CDP, Maryland",201),
("Church Creek town, Maryland",105),
("Church Hill town, Maryland",838),
("Clarksburg CDP, Maryland",23548),
("Clarysville CDP, Maryland",43),
("Clear Spring town, Maryland",460),
("Clinton CDP, Maryland",40027),
("Cloverly CDP, Maryland",15062),
("Cobb Island CDP, Maryland",831),
("Cockeysville CDP, Maryland",22071),
("Colesville CDP, Maryland",13894),
("College Park city, Maryland",32183),
("Colmar Manor town, Maryland",1398),
("Columbia CDP, Maryland",103663),
("Coral Hills CDP, Maryland",9460),
("Cordova CDP, Maryland",655),
("Corriganville CDP, Maryland",525),
("Cottage City town, Maryland",1269),
("Crellin CDP, Maryland",86),
("Cresaptown CDP, Maryland",5977),
("Crisfield city, Maryland",2593),
("Crofton CDP, Maryland",28379),
("Croom CDP, Maryland",2490),
("Crownsville CDP, Maryland",1569),
("Cumberland city, Maryland",19845),
("Damascus CDP, Maryland",15094),
("Dames Quarter CDP, Maryland",71),
("Danville CDP, Maryland",283),
("Dargan CDP, Maryland",166),
("Darlington CDP, Maryland",442),
("Darnestown CDP, Maryland",6424),
("Dawson CDP, Maryland",68),
("Deale CDP, Maryland",4905),
("Deal Island CDP, Maryland",371),
("Deer Park town, Maryland",459),
("Delmar town, Maryland",3172),
("Denton town, Maryland",4406),
("Derwood CDP, Maryland",1992),
("Detmold CDP, Maryland",192),
("District Heights city, Maryland",5975),
("Downsville CDP, Maryland",331),
("Drum Point CDP, Maryland",2692),
("Dundalk CDP, Maryland",62768),
("Dunkirk CDP, Maryland",2449),
("Eagle Harbor town, Maryland",32),
("Eakles Mill CDP, Maryland",0),
("East New Market town, Maryland",453),
("Easton town, Maryland",16551),
("East Riverdale CDP, Maryland",15467),
("Eckhart Mines CDP, Maryland",801),
("Eden CDP, Maryland",826),
("Edesville CDP, Maryland",195),
("Edgemere CDP, Maryland",8633),
("Edgemont CDP, Maryland",255),
("Edgewater CDP, Maryland",9689),
("Edgewood CDP, Maryland",26219),
("Edmonston town, Maryland",1354),
("Eldersburg CDP, Maryland",31454),
("Eldorado town, Maryland",41),
("Elkridge CDP, Maryland",20519),
("Elkton town, Maryland",15675),
("Ellerslie CDP, Maryland",405),
("Ellicott City CDP, Maryland",72247),
("Elliott CDP, Maryland",37),
("Emmitsburg town, Maryland",3058),
("Ernstville CDP, Maryland",14),
("Essex CDP, Maryland",40480),
("Fairland CDP, Maryland",24831),
("Fairlee CDP, Maryland",470),
("Fairmount CDP, Maryland",318),
("Fairmount Heights town, Maryland",1789),
("Fairplay CDP, Maryland",558),
("Fairview CDP, Maryland",27),
("Fairwood CDP, Maryland",6084),
("Fallston CDP, Maryland",8836),
("Federalsburg town, Maryland",2665),
("Ferndale CDP, Maryland",17455),
("Finzel CDP, Maryland",659),
("Fishing Creek CDP, Maryland",248),
("Flintstone CDP, Maryland",152),
("Forest Glen CDP, Maryland",6589),
("Forest Heights town, Maryland",2573),
("Forestville CDP, Maryland",11719),
("Fort Meade CDP, Maryland",10297),
("Fort Ritchie CDP, Maryland",123),
("Fort Washington CDP, Maryland",24183),
("Fountainhead-Orchard Hills CDP, Maryland",5823),
("Four Corners CDP, Maryland",8566),
("Franklin CDP, Maryland",201),
("Frederick city, Maryland",70166),
("Frenchtown-Rumbly CDP, Maryland",121),
("Friendly CDP, Maryland",9793),
("Friendship CDP, Maryland",232),
("Friendship Heights Village CDP, Maryland",5225),
("Friendsville town, Maryland",573),
("Frostburg city, Maryland",8626),
("Fruitland city, Maryland",5204),
("Fulton CDP, Maryland",4429),
("Funkstown town, Maryland",811),
("Gaithersburg city, Maryland",67529),
("Galena town, Maryland",708),
("Galestown town, Maryland",82),
("Galesville CDP, Maryland",558),
("Gambrills CDP, Maryland",2829),
("Gapland CDP, Maryland",128),
("Garrett Park town, Maryland",1010),
("Garretts Mill CDP, Maryland",468),
("Garrison CDP, Maryland",8388),
("Georgetown CDP, Maryland",79),
("Germantown CDP, Maryland",90844),
("Gilmore CDP, Maryland",227),
("Girdletree CDP, Maryland",100),
("Glassmanor CDP, Maryland",17628),
("Glenarden city, Maryland",6170),
("Glen Burnie CDP, Maryland",69813),
("Glen Echo town, Maryland",325),
("Glenmont CDP, Maryland",16389),
("Glenn Dale CDP, Maryland",14191),
("Golden Beach CDP, Maryland",2875),
("Goldsboro town, Maryland",246),
("Gorman CDP, Maryland",40),
("Grahamtown CDP, Maryland",285),
("Grantsville town, Maryland",664),
("Grasonville CDP, Maryland",3556),
("Greenbelt city, Maryland",23203),
("Greensboro town, Maryland",2476),
("Greensburg CDP, Maryland",340),
("Hagerstown city, Maryland",40186),
("Halfway CDP, Maryland",10784),
("Hampstead town, Maryland",6340),
("Hampton CDP, Maryland",4725),
("Hancock town, Maryland",1481),
("Havre de Grace city, Maryland",13555),
("Hebron town, Maryland",1218),
("Henderson town, Maryland",153),
("Herald Harbor CDP, Maryland",2615),
("Highfield-Cascade CDP, Maryland",805),
("Highland CDP, Maryland",1066),
("Highland Beach town, Maryland",87),
("Hillandale CDP, Maryland",6223),
("Hillcrest Heights CDP, Maryland",16185),
("Hillsboro town, Maryland",130),
("Hughesville CDP, Maryland",2105),
("Huntingtown CDP, Maryland",3609),
("Hurlock town, Maryland",2327),
("Hutton CDP, Maryland",183),
("Hyattsville city, Maryland",18209),
("Ilchester CDP, Maryland",26647),
("Indian Head town, Maryland",3819),
("Indian Springs CDP, Maryland",38),
("Jarrettsville CDP, Maryland",2992),
("Jefferson CDP, Maryland",2155),
("Jennings CDP, Maryland",281),
("Jessup CDP, Maryland",8359),
("Jesterville CDP, Maryland",216),
("Joppatowne CDP, Maryland",12560),
("Jugtown CDP, Maryland",302),
("Keedysville town, Maryland",1154),
("Kemp Mill CDP, Maryland",13838),
("Kemps Mill CDP, Maryland",32),
("Kennedyville CDP, Maryland",445),
("Kensington town, Maryland",2269),
("Kent Narrows CDP, Maryland",475),
("Kettering CDP, Maryland",13637),
("Kingstown CDP, Maryland",1944),
("Kingsville CDP, Maryland",4404),
("Kitzmiller town, Maryland",265),
("Klondike CDP, Maryland",114),
("Konterra CDP, Maryland",2853),
("Lake Arbor CDP, Maryland",10456),
("Lake Shore CDP, Maryland",20260),
("Landover CDP, Maryland",22041),
("Landover Hills town, Maryland",1833),
("Langley Park CDP, Maryland",19278),
("Lanham CDP, Maryland",10301),
("Lansdowne CDP, Maryland",8571),
("La Plata town, Maryland",9252),
("Largo CDP, Maryland",11604),
("Laurel city, Maryland",25834),
("La Vale CDP, Maryland",3421),
("Layhill CDP, Maryland",4836),
("Laytonsville town, Maryland",367),
("Leisure World CDP, Maryland",9321),
("Leitersburg CDP, Maryland",434),
("Leonardtown town, Maryland",3678),
("Lexington Park CDP, Maryland",11848),
("Libertytown CDP, Maryland",983),
("Linganore CDP, Maryland",8978),
("Linthicum CDP, Maryland",10895),
("Little Orleans CDP, Maryland",28),
("Lochearn CDP, Maryland",26271),
("Loch Lynn Heights town, Maryland",499),
("Lonaconing town, Maryland",1194),
("Long Beach CDP, Maryland",2015),
("Luke town, Maryland",109),
("Lusby CDP, Maryland",1994),
("Lutherville CDP, Maryland",6773),
("McCoole CDP, Maryland",700),
("Madison CDP, Maryland",195),
("Manchester town, Maryland",4836),
("Mapleville CDP, Maryland",254),
("Mardela Springs town, Maryland",461),
("Marlboro Meadows CDP, Maryland",3487),
("Marlboro Village CDP, Maryland",9803),
("Marlow Heights CDP, Maryland",5869),
("Marlton CDP, Maryland",9216),
("Martin's Additions village, Maryland",983),
("Marydel town, Maryland",337),
("Maryland City CDP, Maryland",17736),
("Maugansville CDP, Maryland",3122),
("Mayo CDP, Maryland",8171),
("Mays Chapel CDP, Maryland",12303),
("Mechanicsville CDP, Maryland",1765),
("Melwood CDP, Maryland",3656),
("Mercersville CDP, Maryland",115),
("Middleburg CDP, Maryland",19),
("Middle River CDP, Maryland",25346),
("Middletown town, Maryland",4553),
("Midland town, Maryland",688),
("Midlothian CDP, Maryland",371),
("Milford Mill CDP, Maryland",29974),
("Millington town, Maryland",600),
("Mitchellville CDP, Maryland",11215),
("Monrovia CDP, Maryland",379),
("Montgomery Village CDP, Maryland",33798),
("Morningside town, Maryland",1238),
("Moscow CDP, Maryland",232),
("Mount Aetna CDP, Maryland",675),
("Mountain Lake Park town, Maryland",2148),
("Mount Airy town, Maryland",9395),
("Mount Briar CDP, Maryland",241),
("Mount Lena CDP, Maryland",640),
("Mount Rainier city, Maryland",8110),
("Mount Savage CDP, Maryland",798),
("Mount Vernon CDP, Maryland",917),
("Myersville town, Maryland",1838),
("Nanticoke CDP, Maryland",311),
("Nanticoke Acres CDP, Maryland",20),
("National CDP, Maryland",125),
("National Harbor CDP, Maryland",4322),
("Naval Academy CDP, Maryland",5834),
("Newark CDP, Maryland",297),
("New Carrollton city, Maryland",12932),
("New Market town, Maryland",1295),
("New Windsor town, Maryland",1341),
("Nikep CDP, Maryland",19),
("North Beach town, Maryland",2594),
("North Bethesda CDP, Maryland",50262),
("North Brentwood town, Maryland",755),
("North Chevy Chase village, Maryland",558),
("North East town, Maryland",3624),
("North Kensington CDP, Maryland",9347),
("North Laurel CDP, Maryland",24621),
("North Potomac CDP, Maryland",24148),
("Oakland town, Maryland",1823),
("Ocean CDP, Maryland",0),
("Ocean City town, Maryland",6990),
("Ocean Pines CDP, Maryland",12246),
("Odenton CDP, Maryland",41846),
("Oldtown CDP, Maryland",41),
("Olney CDP, Maryland",35280),
("Overlea CDP, Maryland",12384),
("Owings CDP, Maryland",2569),
("Owings Mills CDP, Maryland",34477),
("Oxford town, Maryland",586),
("Oxon Hill CDP, Maryland",17720),
("Paramount-Long Meadow CDP, Maryland",2981),
("Parkville CDP, Maryland",31633),
("Parole CDP, Maryland",16232),
("Parsonsburg CDP, Maryland",421),
("Pasadena CDP, Maryland",28338),
("Pecktonville CDP, Maryland",344),
("Peppermill Village CDP, Maryland",4628),
("Perry Hall CDP, Maryland",28129),
("Perryman CDP, Maryland",2712),
("Perryville town, Maryland",4395),
("Pikesville CDP, Maryland",33387),
("Pinesburg CDP, Maryland",261),
("Piney Point CDP, Maryland",838),
("Pittsville town, Maryland",1433),
("Pleasant Grove CDP, Maryland",438),
("Pleasant Hills CDP, Maryland",3765),
("Pocomoke City city, Maryland",4082),
("Point of Rocks CDP, Maryland",1940),
("Pomfret CDP, Maryland",683),
("Pondsville CDP, Maryland",73),
("Poolesville town, Maryland",5170),
("Port Deposit town, Maryland",618),
("Port Tobacco Village town, Maryland",4),
("Potomac CDP, Maryland",45824),
("Potomac Heights CDP, Maryland",1190),
("Potomac Park CDP, Maryland",848),
("Powellville CDP, Maryland",78),
("Preston town, Maryland",847),
("Pr<NAME> CDP, Maryland",3189),
("Princess Anne town, Maryland",3444),
("Pylesville CDP, Maryland",1178),
("Quantico CDP, Maryland",192),
("Queen Anne CDP, Maryland",870),
("Queen Anne town, Maryland",253),
("Queensland CDP, Maryland",1823),
("Queenstown town, Maryland",553),
("Randallstown CDP, Maryland",34897),
("Rawlings CDP, Maryland",464),
("Redland CDP, Maryland",16978),
("Reid CDP, Maryland",44),
("Reisterstown CDP, Maryland",26933),
("Ridgely town, Maryland",1558),
("Ringgold CDP, Maryland",168),
("Rising Sun town, Maryland",2812),
("Riva CDP, Maryland",4027),
("Riverdale Park town, Maryland",7219),
("Riverside CDP, Maryland",5839),
("Riviera Beach CDP, Maryland",12577),
("Robinwood CDP, Maryland",8123),
("Rock Hall town, Maryland",1361),
("Rock Point CDP, Maryland",103),
("Rockville city, Maryland",67062),
("Rohrersville CDP, Maryland",150),
("Rosaryville CDP, Maryland",10705),
("Rosedale CDP, Maryland",19336),
("Rosemont village, Maryland",336),
("Rossville CDP, Maryland",15392),
("Sabillasville CDP, Maryland",312),
("St. George Island CDP, Maryland",584),
("St. James CDP, Maryland",3100),
("St. Leonard CDP, Maryland",460),
("St. Michaels town, Maryland",1057),
("Salisbury city, Maryland",32536),
("Sandy Hook CDP, Maryland",101),
("San Mar CDP, Maryland",168),
("Savage CDP, Maryland",6671),
("Scaggsville CDP, Maryland",9430),
("Seabrook CDP, Maryland",18345),
("Seat Pleasant city, Maryland",4759),
("Secretary town, Maryland",439),
("Severn CDP, Maryland",50052),
("Severna Park CDP, Maryland",38576),
("Shady Side CDP, Maryland",5839),
("Shaft CDP, Maryland",361),
("Sharpsburg town, Maryland",806),
("Sharptown town, Maryland",815),
("Silver Hill CDP, Maryland",4921),
("Silver Spring CDP, Maryland",79750),
("Smith Island CDP, Maryland",193),
("Smithsburg town, Maryland",2964),
("Snow Hill town, Maryland",2143),
("Solomons CDP, Maryland",2113),
("Somerset town, Maryland",1136),
("South Kensington CDP, Maryland",8769),
("South Laurel CDP, Maryland",28048),
("Spencerville CDP, Maryland",2218),
("Springdale CDP, Maryland",2947),
("Spring Gap CDP, Maryland",45),
("Spring Ridge CDP, Maryland",5859),
("Stevensville CDP, Maryland",6973),
("Stockton CDP, Maryland",139),
("Sudlersville town, Maryland",415),
("Suitland CDP, Maryland",24444),
("Summerfield CDP, Maryland",13781),
("Swanton CDP, Maryland",68),
("Sykesville town, Maryland",3944),
("Takoma Park city, Maryland",17622),
("Tall Timbers CDP, Maryland",390),
("Taneytown city, Maryland",6774),
("Taylors Island CDP, Maryland",196),
("Temple Hills CDP, Maryland",7864),
("Templeville town, Maryland",161),
("Thurmont town, Maryland",6563),
("Tilghman Island CDP, Maryland",819),
("Tilghmanton CDP, Maryland",814),
("Timonium CDP, Maryland",10140),
("Tolchester CDP, Maryland",400),
("Towson CDP, Maryland",58347),
("Trappe town, Maryland",1210),
("Travilah CDP, Maryland",11633),
("Trego-Rohrersville Station CDP, Maryland",121),
("Tyaskin CDP, Maryland",170),
("Union Bridge town, Maryland",828),
("University Park town, Maryland",2641),
("Upper Marlboro town, Maryland",689),
("Urbana CDP, Maryland",11788),
("Vale Summit CDP, Maryland",125),
("Vienna town, Maryland",351),
("Waldorf CDP, Maryland",74587),
("Walker Mill CDP, Maryland",11780),
("Walkersville town, Maryland",6095),
("Washington Grove town, Maryland",568),
("Waterview CDP, Maryland",18),
("West Denton CDP, Maryland",116),
("Westernport town, Maryland",2018),
("West Laurel CDP, Maryland",4292),
("Westminster city, Maryland",18553),
("West Ocean City CDP, Maryland",4319),
("Westphalia CDP, Maryland",9040),
("West Pocomoke CDP, Maryland",301),
("Whaleyville CDP, Maryland",108),
("Wheaton CDP, Maryland",50459),
("Whitehaven CDP, Maryland",83),
("White Marsh CDP, Maryland",9455),
("White Oak CDP, Maryland",19010),
("Willards town, Maryland",856),
("Williamsport town, Maryland",2264),
("Williston CDP, Maryland",110),
("Wilson-Conococheague CDP, Maryland",2330),
("Woodland CDP, Maryland",22),
("Woodlawn CDP (Baltimore County), | |
leftFitx = leftFit[0]*ploty**2 + leftFit[1]*ploty + leftFit[2]
rightFitx = rightFit[0]*ploty**2 + rightFit[1]*ploty + rightFit[2]
for r in rect:
cv2.rectangle(outImg2,(r[2], r[0]),(r[3],r[1]),(0,255,0), 2)
cv2.rectangle(outImg2,(r[4], r[0]),(r[5],r[1]),(0,255,0), 2)
# Identify the x and y positions of all nonzero pixels in the image
# nonzero = tImg.nonzero()
# nonzeroy = np.array(nonzero[0])
# nonzerox = np.array(nonzero[1])
outImg2[nonzeroy[leftLaneInds], nonzerox[leftLaneInds]] = [255, 0, 0]
outImg2[nonzeroy[rightLaneInds], nonzerox[rightLaneInds]] = [100, 200, 255]
plt.imshow(outImg2)
plt.plot(leftFitx, ploty, color='orange', linewidth = 4)
plt.plot(rightFitx, ploty, color='orange', linewidth =4)
plt.title('Line Fit', fontsize=20)
def continueLineFit(binary_warped, left_fit, right_fit):
"""
Given line fits from a previous frame, quickly locate the lane lines by searching only within a margin around those fits
binary_warped: new binary warped image in which to detect the lanes
left_fit, right_fit: polynomial fits from the previous warped image
"""
# Assume you now have a new warped binary image from the next frame of video (also called "binary_warped")
# It's now much easier to find line pixels!
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
margin = 100
left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + left_fit[2] + margin)))
right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + right_fit[2] + margin)))
# Again, extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
# If we don't find enough relevant points, return all None (this means error)
min_inds = 10
if lefty.shape[0] < min_inds or righty.shape[0] < min_inds:
return None
# Fit a second order polynomial to each
left_fit = np.polyfit(lefty, leftx, 2)
right_fit = np.polyfit(righty, rightx, 2)
# Generate x and y values for plotting
ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
# Return a dict of relevant variables
ret = {}
ret['left_fit'] = left_fit
ret['right_fit'] = right_fit
ret['nonzerox'] = nonzerox
ret['nonzeroy'] = nonzeroy
ret['left_lane_inds'] = left_lane_inds
ret['right_lane_inds'] = right_lane_inds
return ret
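# Usage sketch (illustrative helper, not part of the original pipeline): on a video
# stream, run the full sliding-window search (`lineFit`, defined elsewhere in this
# module) on the first frame only, then reuse the previous fits with `continueLineFit`
# and fall back to a blind search whenever it returns None.
def _track_lanes(binary_frames):
    """Hypothetical example of alternating between lineFit and continueLineFit."""
    prev_left, prev_right = None, None
    results = []
    for frame in binary_frames:
        ret = None
        if prev_left is not None and prev_right is not None:
            ret = continueLineFit(frame, prev_left, prev_right)  # fast margin search
        if ret is None:
            ret = lineFit(frame)  # full sliding-window search
        prev_left, prev_right = ret['left_fit'], ret['right_fit']
        results.append(ret)
    return results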
## Calculate curvature
def curveRadius_CenterDist(binImg, ret):
"""
Calculate radius of curvature in meters
Define y-value where we want radius of curvature. I'll choose the maximum y-value, corresponding to the bottom of the image
y_eval = np.max(ploty)
Formula for left and right curvature radius:
left_curverad = ((1 + (2*left_fit[0]*y_eval + left_fit[1])**2)**1.5) / np.absolute(2*left_fit[0])
right_curverad = ((1 + (2*right_fit[0]*y_eval + right_fit[1])**2)**1.5) / np.absolute(2*right_fit[0])
"""
# Grab variables from ret dictionary
leftFit0 = ret['left_fit']
rightFit0 = ret['right_fit']
nonzerox = ret['nonzerox']
nonzeroy = ret['nonzeroy']
leftLaneInds = ret['left_lane_inds']
rightLaneInds = ret['right_lane_inds']
# leftCurveRad, rightCurveRad, centerDist = (0, 0, 0)
# Define y-value where we want radius of curvature
# I'll choose the maximum y-value, corresponding to the bottom of the image
h = binImg.shape[0]
ploty = np.linspace(0, h-1, h)
y_eval = np.max(ploty)
# y_eval = 719 # correspond to the lowest y coordinate of a 720p image
# Define conversions in x and y from pixels space to meters
ym_per_pix = 30/720 # meters per pixel in y dimension
xm_per_pix = 3.7/700 # meters per pixel in x dimension
# Identify the x and y positions of all nonzero pixels in the image
# nonzero = binImg.nonzero()
# nonzeroy = np.array(nonzero[0])
# nonzerox = np.array(nonzero[1])
# Again, extract left and right line pixel positions
leftx = nonzerox[leftLaneInds]
lefty = nonzeroy[leftLaneInds]
rightx = nonzerox[rightLaneInds]
righty = nonzeroy[rightLaneInds]
# Fit new polynomials to x,y under real meters measure
leftFit = np.polyfit(lefty*ym_per_pix, leftx*xm_per_pix, 2)
rightFit = np.polyfit(righty*ym_per_pix, rightx*xm_per_pix, 2)
# Calculate the new curvature radius in meters
leftCurveRad = ((1 + (2*leftFit[0]*y_eval*ym_per_pix + leftFit[1])**2)**1.5) / np.absolute(2*leftFit[0])
rightCurveRad = ((1 + (2*rightFit[0]*y_eval*ym_per_pix + rightFit[1])**2)**1.5) / np.absolute(2*rightFit[0])
# Distance from center is image x midpoint - mean of leftFit and rightFit intercepts
if rightFit is not None and leftFit is not None:
tposition = binImg.shape[1]/2 # horizontal midpoint
lFitx = leftFit0[0]*h**2 + leftFit0[1]*h + leftFit0[2]
rFitx = rightFit0[0]*h**2 + rightFit0[1]*h + rightFit0[2]
laneCenter = (rFitx + lFitx) /2
centerDist = (tposition - laneCenter) * xm_per_pix
return leftCurveRad, rightCurveRad, centerDist
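# Sanity note on the curvature formula used above: for a fit x = A*y**2 + B*y + C,
# the radius of curvature evaluated at y is
#     R = (1 + (2*A*y + B)**2)**1.5 / abs(2*A)
# With illustrative values A ~ 1e-4 [1/m], B ~ 0.01 and y_eval ~ 30 m this gives R on
# the order of a few kilometres (a nearly straight lane); a sharper bend (larger |A|)
# yields a much smaller radius.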
def visualizeLaneOverlay(origImg, warpImg, leftFit, rightFit, mInv, lRad, rRad, cDist):
"""
Final lane line prediction visualized and overlayed on top of original image
origImage: original image of the lane (h0, w0, 3)
warpImage: binary warped image (h,w)
"""
# Generate x and y values for plotting
ploty = np.linspace(0, origImg.shape[0]-1, origImg.shape[0])
leftFit2 = leftFit[0]*ploty**2 + leftFit[1]*ploty + leftFit[2]
rightFit2 = rightFit[0]*ploty**2 + rightFit[1]*ploty + rightFit[2]
h,w = warpImg.shape
# Create an image to draw the lines on
warpZero = np.zeros_like(warpImg).astype(np.uint8)
colorWarp = np.dstack((warpZero, warpZero, warpZero))
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([leftFit2, ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([rightFit2, ploty])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image
cv2.fillPoly(colorWarp, np.int_([pts]), (0,255, 0))
cv2.polylines(colorWarp, np.int32([pts_left]), isClosed=False, color=(255,0,255), thickness=15)
cv2.polylines(colorWarp, np.int32([pts_right]), isClosed=False, color=(0,255,255), thickness=15)
# Warp the blank back to original image space using inverse perspective matrix (Minv)
newWarp = cv2.warpPerspective(colorWarp, mInv, (w, h))
# Combine the result with the original image
result = cv2.addWeighted(origImg, 1, newWarp, 0.3, 0)
# Annotate lane curvature values and vehicle offset from center
avgRadCurve = (lRad + rRad)/2
labRad = 'Radius of curvature: %.1f m' % avgRadCurve
result = cv2.putText(result, labRad, (20,50), 0, 1, (0,0,0), 2, cv2.LINE_AA)
labDist = 'Distance from lane center: %.1f m' % cDist
result = cv2.putText(result, labDist, (20,100), 0, 1, (0,0,0), 2, cv2.LINE_AA)
return result
def processImage0(imgPath, mtx, dist, fitPlot=True):
"""
Pipeline for advanced lane detection. Import images using openCV
imgPath: path of the image
mtx: camera calibration for distortion
dist: camera calibration for distortion
"""
img1 = cv2.imread(imgPath)
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
# Undistort
undistImg = cv2.undistort(img1, mtx, dist, None, mtx)
# Gradient filter
comboGr, absGr, magGr, dirGr, hlsGr = combinedThresh(undistImg)
# Perspective (warping)
m, mInv, warpImg, unwarpImg = perspectiveTransform(comboGr)
# Polynomial fit
ret = lineFit(warpImg)
if fitPlot == True:
# Visualize lane fit plot
fPlot = visualizeFit(warpImg, ret)
# Calculate curvature radius and distance from center
lRad, rRad, cDist = curveRadius_CenterDist(warpImg, ret)
fImg = visualizeLaneOverlay(undistImg, warpImg, ret['left_fit'], ret['right_fit'], mInv, lRad, rRad, cDist)
return ret, img1, fImg
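# Example call (sketch; the image path is a placeholder and `mtx`/`dist` are assumed to
# come from a prior camera-calibration step not shown in this fragment):
# ret, original, annotated = processImage0('test_images/test1.jpg', mtx, dist, fitPlot=False)
# plt.imshow(annotated)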
# Define a class to receive the characteristics of each line detection
class Line():
def __init__(self, n):
"""
n is the window size of the moving average
"""
self.n = n
# was the line detected in the last iteration?
self.detected = False
# Polynomial coefficients: x = A*y^2 + B*y + C
# Each of A, B, C is a "list-queue" with max length n
self.A = []
self.B = []
self.C = []
# Average of above
self.A_avg = 0.
self.B_avg = 0.
self.C_avg = 0.
def getFit(self):
return (self.A_avg, self.B_avg, self.C_avg)
def addFit(self, fit_coeffs):
"""
Gets most recent line fit coefficients and updates internal smoothed coefficients
fit_coeffs is a 3-element list of 2nd-order polynomial coefficients
"""
# Coefficient queue full?
q_full = len(self.A) >= self.n
# Append line fit coefficients
self.A.append(fit_coeffs[0])
self.B.append(fit_coeffs[1])
self.C.append(fit_coeffs[2])
# Pop from index 0 if full
if q_full:
_ = self.A.pop(0)
_ = self.B.pop(0)
_ = self.C.pop(0)
# Simple average of line coefficients
self.A_avg = np.mean(self.A)
self.B_avg = np.mean(self.B)
self.C_avg = np.mean(self.C)
return (self.A_avg, self.B_avg, self.C_avg)
# MoviePy video annotation will call this function
def processImage(img):
"""
Apply pipeline for video images
detected True: previously calculated leftLine and rightLine
"""
global mtx, dist, leftLine, rightLine, detected, lRad, rRad, cDist
# Undistort
undistImg = cv2.undistort(img, mtx, dist, None, mtx)
# Gradient filter
comboGr, absGr, magGr, dirGr, hlsGr = combinedThresh(undistImg)
# Perspective (warping)
m, mInv, warpImg, unwarpImg = perspectiveTransform(comboGr)
if not detected:
# Polynomial fit
ret = lineFit(warpImg)
tLeftFit = ret['left_fit']
tRightFit = ret['right_fit']
# Get moving average of line fit coefficients
leftFit = leftLine.addFit(tLeftFit)
rightFit = rightLine.addFit(tRightFit)
# Calculate curvature radius and distance from center
lRad, rRad, cDist = curveRadius_CenterDist(warpImg, ret)
detected = True
else:
# | |
content of the sandbox folder copying all that are not in `provenance_exclude_list`. Note
# that directories are not created explicitly. The `node.put_object_from_filelike` call will create intermediate
# directories for nested files automatically when needed. This means though that empty folders in the sandbox or
# folders that would be empty when considering the `provenance_exclude_list` will *not* be copied to the repo. The
# advantage of this explicit copying instead of deleting the files from `provenance_exclude_list` from the sandbox
# first before moving the entire remaining content to the node's repository, is that in this way we are guaranteed
# not to accidentally move files to the repository that should not go there at all cost. Note that all entries in
# the provenance exclude list are normalized first, just as the paths that are in the sandbox folder, otherwise the
# direct equality test may fail, e.g.: './path/file.txt' != 'path/file.txt' even though they reference the same file
provenance_exclude_list = [os.path.normpath(entry) for entry in provenance_exclude_list]
for root, _, filenames in os.walk(folder.abspath):
for filename in filenames:
filepath = os.path.join(root, filename)
relpath = os.path.normpath(os.path.relpath(filepath, folder.abspath))
if relpath not in provenance_exclude_list:
with open(filepath, 'rb') as handle:
node._repository.put_object_from_filelike(handle, relpath, 'wb', force=True) # pylint: disable=protected-access
if not dry_run:
# Make sure that attaching the `remote_folder` with a link is the last thing we do. This gives the biggest
# chance of making this method idempotent. That is to say, if a runner gets interrupted during this action, it
# will simply retry the upload, unless we got here and managed to link it up, in which case we move to the next
# task. Because in that case, the check for the existence of this link at the top of this function will exit
# early from this command.
remotedata = RemoteData(computer=computer, remote_path=workdir)
remotedata.add_incoming(node, link_type=LinkType.CREATE, link_label='remote_folder')
remotedata.store()
def submit_calculation(calculation, transport):
"""Submit a previously uploaded `CalcJob` to the scheduler.
:param calculation: the instance of CalcJobNode to submit.
:param transport: an already opened transport to use to submit the calculation.
:return: the job id as returned by the scheduler `submit_from_script` call
"""
job_id = calculation.get_job_id()
# If the `job_id` attribute is already set, that means this function was already executed once and the scheduler
# submit command was successful as the job id it returned was set on the node. This scenario can happen when the
# daemon runner gets shutdown right after accomplishing the submission task, but before it gets the chance to
# finalize the state transition of the `CalcJob` to the `UPDATE` transport task. Since the job is already submitted
# we do not want to submit it a second time, so we simply return the existing job id here.
if job_id is not None:
return job_id
scheduler = calculation.computer.get_scheduler()
scheduler.set_transport(transport)
submit_script_filename = calculation.get_option('submit_script_filename')
workdir = calculation.get_remote_workdir()
job_id = scheduler.submit_from_script(workdir, submit_script_filename)
calculation.set_job_id(job_id)
return job_id
def retrieve_calculation(calculation, transport, retrieved_temporary_folder):
"""Retrieve all the files of a completed job calculation using the given transport.
If the job defined anything in the `retrieve_temporary_list`, those entries will be stored in the
`retrieved_temporary_folder`. The caller is responsible for creating and destroying this folder.
:param calculation: the instance of CalcJobNode to update.
:param transport: an already opened transport to use for the retrieval.
:param retrieved_temporary_folder: the absolute path to a directory in which to store the files
listed, if any, in the `retrieved_temporary_folder` of the jobs CalcInfo
"""
logger_extra = get_dblogger_extra(calculation)
workdir = calculation.get_remote_workdir()
EXEC_LOGGER.debug(f'Retrieving calc {calculation.pk}', extra=logger_extra)
EXEC_LOGGER.debug(f'[retrieval of calc {calculation.pk}] chdir {workdir}', extra=logger_extra)
# If the calculation already has a `retrieved` folder, simply return. The retrieval was apparently already completed
# before, which can happen if the daemon is restarted and it shuts down after retrieving but before getting the
# chance to perform the state transition. Upon reloading this calculation, it will re-attempt the retrieval.
link_label = calculation.link_label_retrieved
if calculation.get_outgoing(FolderData, link_label_filter=link_label).first():
EXEC_LOGGER.warning(
f'CalcJobNode<{calculation.pk}> already has a `{link_label}` output folder: skipping retrieval'
)
return
# Create the FolderData node into which to store the files that are to be retrieved
retrieved_files = FolderData()
with transport:
transport.chdir(workdir)
# First, retrieve the files of folderdata
retrieve_list = calculation.get_retrieve_list()
retrieve_temporary_list = calculation.get_retrieve_temporary_list()
retrieve_singlefile_list = calculation.get_retrieve_singlefile_list()
with SandboxFolder() as folder:
retrieve_files_from_list(calculation, transport, folder.abspath, retrieve_list)
# Here I retrieved everything; now I store them inside the calculation
retrieved_files.put_object_from_tree(folder.abspath)
# Second, retrieve the singlefiles, if any files were specified in the 'retrieve_temporary_list' key
if retrieve_singlefile_list:
with SandboxFolder() as folder:
_retrieve_singlefiles(calculation, transport, folder, retrieve_singlefile_list, logger_extra)
# Retrieve the temporary files in the retrieved_temporary_folder if any files were
# specified in the 'retrieve_temporary_list' key
if retrieve_temporary_list:
retrieve_files_from_list(calculation, transport, retrieved_temporary_folder, retrieve_temporary_list)
# Log the files that were retrieved in the temporary folder
for filename in os.listdir(retrieved_temporary_folder):
EXEC_LOGGER.debug(
f"[retrieval of calc {calculation.pk}] Retrieved temporary file or folder '{filename}'",
extra=logger_extra
)
# Store everything
EXEC_LOGGER.debug(
f'[retrieval of calc {calculation.pk}] Storing retrieved_files={retrieved_files.pk}', extra=logger_extra
)
retrieved_files.store()
# Make sure that attaching the `retrieved` folder with a link is the last thing we do. This gives the biggest chance
# of making this method idempotent. That is to say, if a runner gets interrupted during this action, it will simply
# retry the retrieval, unless we got here and managed to link it up, in which case we move to the next task.
retrieved_files.add_incoming(calculation, link_type=LinkType.CREATE, link_label=calculation.link_label_retrieved)
def kill_calculation(calculation, transport):
"""
Kill the calculation through the scheduler
:param calculation: the instance of CalcJobNode to kill.
:param transport: an already opened transport to use to address the scheduler
"""
job_id = calculation.get_job_id()
# Get the scheduler plugin class and initialize it with the correct transport
scheduler = calculation.computer.get_scheduler()
scheduler.set_transport(transport)
# Call the proper kill method for the job ID of this calculation
result = scheduler.kill(job_id)
if result is not True:
# Failed to kill because the job might have already been completed
running_jobs = scheduler.get_jobs(jobs=[job_id], as_dict=True)
job = running_jobs.get(job_id, None)
# If the job is returned it is still running and the kill really failed, so we raise
if job is not None and job.job_state != JobState.DONE:
raise exceptions.RemoteOperationError(f'scheduler.kill({job_id}) was unsuccessful')
else:
EXEC_LOGGER.warning(
'scheduler.kill() failed but job<%s> no longer seems to be running regardless', job_id
)
return True
def _retrieve_singlefiles(job, transport, folder, retrieve_file_list, logger_extra=None):
"""Retrieve files specified through the singlefile list mechanism."""
singlefile_list = []
for (linkname, subclassname, filename) in retrieve_file_list:
EXEC_LOGGER.debug(
'[retrieval of calc {}] Trying '
"to retrieve remote singlefile '{}'".format(job.pk, filename),
extra=logger_extra
)
localfilename = os.path.join(folder.abspath, os.path.split(filename)[1])
transport.get(filename, localfilename, ignore_nonexisting=True)
singlefile_list.append((linkname, subclassname, localfilename))
# ignore files that have not been retrieved
singlefile_list = [i for i in singlefile_list if os.path.exists(i[2])]
# after retrieving from the cluster, I create the objects
singlefiles = []
for (linkname, subclassname, filename) in singlefile_list:
cls = DataFactory(subclassname)
singlefile = cls(file=filename)
singlefile.add_incoming(job, link_type=LinkType.CREATE, link_label=linkname)
singlefiles.append(singlefile)
for fil in singlefiles:
EXEC_LOGGER.debug(f'[retrieval of calc {job.pk}] Storing retrieved_singlefile={fil.pk}', extra=logger_extra)
fil.store()
def retrieve_files_from_list(calculation, transport, folder, retrieve_list):
"""
Retrieve all the files in the retrieve_list from the remote into the
local folder instance through the transport. The entries in the retrieve_list
can be of two types:
* a string
* a list
If it is a string, it represents the remote absolute filepath of the file.
If the item is a list, the elements will correspond to the following:
* remotepath
* localpath
* depth
If the remotepath contains file patterns with wildcards, the localpath will be
treated as the work directory of the folder and the depth integer determines
up to what level of the original remotepath nesting the files will be copied.
:param calculation: the CalcJobNode whose files are being retrieved.
:param transport: the Transport instance to use for the retrieval.
:param folder: an absolute path to a local folder into which the retrieved files will be copied.
:param retrieve_list: the list of files to retrieve.
"""
for item in retrieve_list:
if isinstance(item, (list, tuple)):
tmp_rname, tmp_lname, depth = item
# if there are more than one file I do something differently
if transport.has_magic(tmp_rname):
remote_names = transport.glob(tmp_rname)
local_names = []
for rem in remote_names:
to_append | |
dict_iso={'afghanistan': 'Afghanistan',
'albania': 'Albania',
'algeria': 'Algeria',
'andorra': 'Andorra',
'angola': 'Angola',
'antigua-and-barbuda': 'Antigua and Barbuda',
'argentina': 'Argentina',
'armenia': 'Armenia',
'aruba': 'Aruba',
'australia': 'Australia',
'austria': 'Austria',
'azerbaijan': 'Azerbaijan',
'bahamas': 'Bahamas',
'bahrain': 'Bahrain',
'bangladesh': 'Bangladesh',
'Barbados': 'Barbados',
'belarus': 'Belarus',
'belgium': 'Belgium',
'belize': 'Belize',
'benin': 'Benin',
'bermuda': 'Bermuda',
'bhutan': 'Bhutan',
'bolivia': 'Bolivia, Plurinational State of',
'bosnia-and-herzegovina': 'Bosnia and Herzegovina',
'botswana': 'Botswana',
'brazil': 'Brazil',
'bulgaria': 'Bulgaria',
'burkina-faso': 'Burkina Faso',
'burundi': 'Burundi',
'cabo-verde': 'Cape Verde',
'cambodia': 'Cambodia',
'cameroon': 'Cameroon',
'canada': 'Canada',
'cayman-islands': 'Cayman Islands',
'central-african-republic': 'Central African Republic',
'chad': 'Chad',
'chile': 'Chile',
'china': 'China',
'china-hong-kong-sar': 'Hong Kong,China',
'china-macao-sar': 'Macao, China',
'colombia': 'Colombia',
'comoros': 'Comoros',
'congo': 'Congo',
'costa-rica': 'Costa Rica',
'cote-d-ivoire': "Côte d'Ivoire",
'croatia': 'Croatia',
'cuba': 'Cuba',
'cyprus': 'Cyprus',
'czech-republic': 'Czech Republic',
'democratic-republic-of-the-congo': 'Congo, the Democratic Republic of the',
'denmark': 'Denmark',
'djibouti': 'Djibouti',
'dominican-republic': 'Dominican Republic',
'ecuador': 'Ecuador',
'egypt': 'Egypt',
'el-salvador': 'El Salvador',
'equatorial-guinea': 'Equatorial Guinea',
'eritrea': 'Eritrea',
'estonia': 'Estonia',
'ethiopia': 'Ethiopia',
'faeroe-islands': 'Faroe Islands',
'fiji': 'Fiji',
'finland': 'Finland',
'france': 'France',
'french-guiana': 'French Guiana',
'french-polynesia': 'French Polynesia',
'gabon': 'Gabon',
'gambia': 'Gambia',
'georgia': 'Georgia',
'germany': 'Germany',
'ghana': 'Ghana',
'gibraltar': 'Gibraltar',
'greece': 'Greece',
'grenada': 'Grenada',
'guadeloupe': 'Guadeloupe',
'guatemala': 'Guatemala',
'guinea': 'Guinea',
'guinea-bissau': 'Guinea-Bissau',
'guyana': 'Guyana',
'haiti': 'Haiti',
'honduras': 'Honduras',
'hungary': 'Hungary',
'iceland': 'Iceland',
'india': 'India',
'indonesia': 'Indonesia',
'iran': 'Iran, Islamic Republic of',
'iraq': 'Iraq',
'ireland': 'Ireland',
'israel': 'Israel',
'italy': 'Italy',
'jamaica': 'Jamaica',
'japan': 'Japan',
'jordan': 'Jordan',
'kazakhstan': 'Kazakhstan',
'kenya': 'Kenya',
'kuwait': 'Kuwait',
'kyrgyzstan': 'Kyrgyzstan',
'latvia': 'Latvia',
'lebanon': 'Lebanon',
'lesotho': 'Lesotho',
'liberia': 'Liberia',
'libya': 'Libya',
'liechtenstein': 'Liechtenstein',
'lithuania': 'Lithuania',
'luxembourg': 'Luxembourg',
'macedonia': 'North Macedonia',
'madagascar': 'Madagascar',
'malawi': 'Malawi',
'malaysia': 'Malaysia',
'maldives': 'Maldives',
'mali': 'Mali',
'malta': 'Malta',
'martinique': 'Martinique',
'mauritania': 'Mauritania',
'mauritius': 'Mauritius',
'mayotte': 'Mayotte',
'mexico': 'Mexico',
'moldova': 'Moldova, Republic of',
'monaco': 'Monaco',
'mongolia': 'Mongolia',
'montenegro': 'Montenegro',
'morocco': 'Morocco',
'mozambique': 'Mozambique',
'myanmar': 'Myanmar',
'namibia': 'Namibia',
'nepal': 'Nepal',
'netherlands': 'Netherlands',
'new-zealand': 'New Zealand',
'nicaragua': 'Nicaragua',
'niger': 'Niger',
'nigeria': 'Nigeria',
'norway': 'Norway',
'oman': 'Oman',
'pakistan': 'Pakistan',
'panama': 'Panama',
'papua-new-guinea': 'Papua New Guinea',
'paraguay': 'Paraguay',
'peru': 'Peru',
'philippines': 'Philippines',
'poland': 'Poland',
'portugal': 'Portugal',
'qatar': 'Qatar',
'reunion': 'Réunion',
'romania': 'Romania',
'russia': 'Russia',
'rwanda': 'Rwanda',
'saint-kitts-and-nevis': 'Saint Kitts and Nevis',
'saint-lucia': 'Saint Lucia',
'sao-tome-and-principe': 'Sao Tome and Principe',
'saudi-arabia': 'Saudi Arabia',
'senegal': 'Senegal',
'serbia': 'Serbia',
'seychelles': 'Seychelles',
'sierra-leone': 'Sierra Leone',
'singapore': 'Singapore',
'slovakia': 'Slovakia',
'slovenia': 'Slovenia',
'somalia': 'Somalia',
'south-africa': 'South Africa',
'south-korea': 'South Korea',
'spain': 'Spain',
'sri-lanka': 'Sri Lanka',
'state-of-palestine': 'Palestinian Territory, Occupied',
'sudan': 'Sudan',
'suriname': 'Suriname',
'swaziland': 'Swaziland',
'sweden': 'Sweden',
'switzerland': 'Switzerland',
'syria': 'Syrian Arab Republic',
'taiwan': 'Taiwan,China',
'tajikistan': 'Tajikistan',
'tanzania': 'Tanzania, United Republic of',
'thailand': 'Thailand',
'togo': 'Togo',
'trinidad-and-tobago': 'Trinidad and Tobago',
'tunisia': 'Tunisia',
'turkey': 'Turkey',
'turks-and-caicos-islands': 'Turks and Caicos Islands',
'uganda': 'Uganda',
'uk': 'United Kingdom',
'ukraine': 'Ukraine',
'united-arab-emirates': 'United Arab Emirates',
'uruguay': 'Uruguay',
'us': 'United States',
'uzbekistan': 'Uzbekistan',
'venezuela': 'Venezuela, Bolivarian Republic of',
'viet-nam': 'Viet Nam',
'western-sahara': 'Western Sahara',
'yemen': 'Yemen',
'zambia': 'Zambia',
'zimbabwe': 'Zimbabwe',
'saint-vincent-and-the-grenadines':'Saint Vincent & the Grenadines',
'timor-leste':'Timor-Leste',
'new-caledonia':'New Caledonia',
'laos':'Lao People\'s Democratic Republic',
'dominica':'Dominica',
'falkland-islands-malvinas':'Falkland Islands',
'greenland':'Greenland',
'holy-see':'Holy See (Vatican City State)',
'anguilla':'Anguilla',
'south-sudan':'South Sudan'
}
cate={'china':'east asia',
'us':'north america',
'brazil':'south america',
'russia':'eastern europe',
'india':'south asia',
'uk':'western europe',
'spain':'western europe',
'peru':'south america',
'chile':'south america',
'italy':'western europe',
'iran':'west asia',
'mexico':'central america and mexico',
'pakistan':'west asia',
'turkey':'west asia',
'germany':'western europe',
'saudi-arabia':'west asia',
'france':'western europe',
'south-africa':'southern africa',
'bangladesh':'south asia',
'canada':'north america',
'qatar':'west asia',
'democratic-republic-of-the-congo':'central africa',
'colombia':'south america',
'egypt':'south-east mediterranean',
'sweden':'western europe',
'belarus':'eastern europe',
'belgium':'western europe',
'argentina':'south america',
'ecuador':'south america',
'indonesia':'southeast asia',
'netherlands':'western europe',
'united-arab-emirates':'west asia',
'iraq':'west asia',
'kuwait':'west asia',
'singapore':'southeast asia',
'ukraine':'eastern europe',
'portugal':'western europe',
'oman':'west asia',
'philippines':'southeast asia',
'poland':'eastern europe',
'panama':'central america and mexico',
'switzerland':'western europe',
'dominican-republic':'caribbean',
'afghanistan':'west asia',
'bolivia':'south america',
'romania':'eastern europe',
'bahrain':'west asia',
'ireland':'western europe',
'armenia':'eastern europe',
'nigeria':'west africa',
'israel':'south-east mediterranean',
'kazakhstan':'central asia',
'japan':'east asia',
'austria':'western europe',
'honduras':'central america and mexico',
'sao-tome-and-principe':'southeast asia',
'central-african-republic':'central africa',
'gabon':'central africa',
'ghana':'west africa',
'azerbaijan':'central asia',
'guatemala':'central america and mexico',
'moldova':'eastern europe',
'serbia':'eastern europe',
'algeria':'south-east mediterranean',
'nepal':'south asia',
'south-korea':'east asia',
'denmark':'western europe',
'cameroon':'central africa',
'morocco':'south-east mediterranean',
'czech-republic':'eastern europe',
'sudan':'east africa',
'cote-d-ivoire':'west africa',
'norway':'western europe',
'malaysia':'southeast asia',
'uzbekistan':'central asia',
'australia':'pacific region',
'finland':'western europe',
'saint-martin':'caribbean',
'senegal':'west africa',
'macedonia':'eastern europe',
'kenya':'east africa',
'el-salvador':'central america and mexico',
'guyana':'caribbean',
'tajikistan':'central asia',
'ethiopia':'east africa',
'guinea':'west africa',
'venezuela':'south america',
'jamaica':'caribbean',
'kyrgyzstan':'central asia',
'bulgaria':'eastern europe',
'djibouti':'east africa',
'luxembourg':'western europe',
'mauritania':'west africa',
'hungary':'eastern europe',
'bosnia-and-herzegovina':'eastern europe',
'french-guiana':'south america',
'grenada':'caribbean',
'greece':'western europe',
'thailand':'southeast asia',
'costa-rica':'central america and mexico',
'suriname':'caribbean',
'somalia':'east africa',
'croatia':'eastern europe',
'mayotte':'east africa',
'albania':'eastern europe',
'cuba':'caribbean',
'maldives':'south asia',
'nicaragua':'central america and mexico',
'equatorial-guinea':'central africa',
'mali':'west africa',
'paraguay':'south america',
'madagascar':'indian ocean islands',
'sri-lanka':'south asia',
'haiti':'caribbean',
'state-of-palestine':'missing',
'south-sudan':'east africa',
'estonia':'eastern europe',
'iceland':'western europe',
'lithuania':'eastern europe',
'lebanon':'south-east mediterranean',
'slovakia':'eastern europe',
'guinea-bissau':'west africa',
'slovenia':'eastern europe',
'zambia':'southern africa',
'new-zealand':'pacific region',
'sierra-leone':'west africa',
'china-hong-kong-sar':'east asia',
'tunisia':'south-east mediterranean',
'cabo-verde':'west africa',
'benin':'west africa',
'malawi':'southern africa',
'jordan':'south-east mediterranean',
'yemen':'west asia',
'latvia':'eastern europe',
'niger':'west africa',
'cyprus':'south-east mediterranean',
'burkina-faso':'west africa',
'uruguay':'south america',
'georgia':'eastern europe',
'rwanda':'east africa',
'chad':'west africa',
'mozambique':'southern africa',
'uganda':'east africa',
'andorra':'western europe',
'swaziland':'southern africa',
'liberia':'west africa',
'libya':'south-east mediterranean',
'malta':'south-east mediterranean',
'togo':'west africa',
'channel-islands':'western europe',
'zimbabwe':'southern africa',
'reunion':'indian ocean islands',
'tanzania':'southern africa',
'montenegro':'eastern europe',
'taiwan':'east asia',
'viet-nam':'southeast asia',
'mauritius':'west africa',
'myanmar':'southeast asia',
'comoros':'indian ocean islands',
'angola':'southern africa',
'syria':'south-east mediterranean',
'martinique':'eastern europe',
'mongolia':'east asia',
'cayman-islands':'north america',
'eritrea':'east africa',
'namibia':'southern africa',
'guadeloupe':'caribbean',
'gibraltar':'north africa',
'burundi':'east africa',
'bermuda':'north america',
'cambodia':'southeast asia',
'bahamas':'caribbean',
'monaco':'eastern europe',
'botswana':'southern africa',
'bhutan':'south asia',
'seychelles':'indian ocean islands',
'antigua-and-barbuda':'caribbean',
'french-polynesia':'pacific region',
'china-macao-sar':'east asia',
'gambia':'west africa',
'turks-and-caicos-islands':'southern africa',
'lesotho':'southern africa',
'belize':'caribbean',
'curacao':'north america',
'papua-new-guinea':'pacific region',
'western-sahara':'west africa',
'fiji':'pacific region',
'saint-kitts-and-nevis':'caribbean',
'saint-lucia':'caribbean',
'congo':'west africa',
'trinidad-and-tobago':'caribbean',
'faeroe-islands':'western europe',
'Barbados':'caribbean',
'liechtenstein':'western europe',
'aruba':'western europe',
'saint-vincent-and-the-grenadines':'caribbean',
'timor-leste':'pacific region',
'new-caledonia':'pacific region',
'laos':'southeast asia',
'dominica':'caribbean',
'falkland-islands-malvinas':'south america',
'greenland':'north america',
'holy-see':'western europe',
'anguilla':'caribbean',
}
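# dict_iso maps worldometers URL slugs (e.g. 'us', 'china-hong-kong-sar') to display
# country names, and cate maps the same slugs to a coarse region label; both are looked
# up per country in the scraping loop below.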
from tqdm import tqdm
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
import requests
import json
import time
import random
import html5lib
import re
import scipy.stats as st
from pandas.core.frame import DataFrame
import copy
import math
import datetime
headers = { 'Connection': 'close'}
# proxies={'http':'http://127.0.0.1:10080','https':'http://127.0.0.1:10080'}
url='https://www.worldometers.info/coronavirus/#countries'
# url='https://www.worldometers.info/coronavirus/country/us/'
a=requests.get(url,headers=headers)
soup = BeautifulSoup(a.content,'html5lib')
x=soup.body.find_all('tr', attrs={'style': ['','background-color:#F0F0F0','background-color:#EAF7D5']})
# 190 210
def find_start_yesterday(i,j):
for start in range(i,j):
one=x[start]
two=x[start+1]
l1=one.find_all('a',attrs={'class':'mt_a'})
l2=two.find_all('a',attrs={'class':'mt_a'})
if l1==[] or l2==[]:
continue
s1=str(l1[0])
s2=str(l2[0])
coun1=s1.split('/')
coun2=s2.split('/')
if coun1[1]=='china' and coun2[1]=='us':
return start
#385 410
def find_end_yesterday(i,j):
for end in range(i,j):
# final_pre=x[end-1]
final=x[end]
# l1=final_pre.find_all('a',attrs={'class':'mt_a'})
l2=final.find_all('a',attrs={'class':'mt_a'})
if l2==[]:
continue
# s1=str(l1[0])
s2=str(l2[0])
# coun1=s1.split('/')
coun2=s2.split('/')
if coun2[1]=='anguilla':
return end+1
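# The worldometers page repeats the country table for different days (today/yesterday/...),
# so these helpers locate the boundaries of a repeated block within the parsed <tr> rows:
# the start is where a 'china' row is immediately followed by a 'us' row, and the end is
# the row just after 'anguilla', the last country link in that block.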
end=find_end_yesterday(370,440)
end2=find_end_yesterday(630,700)
start=find_start_yesterday(190,240)
start2=find_start_yesterday(440,470)
print('start:{}\tend:{}\tstart2:{}\tend2:{}'.format(start,end,start2,end2))
col_name=['0','#','Country,Other','TotalCases',
'NewCases','TotalDeaths','NewDeaths','TotalRecovered',
'NewRecovered','ActiveCases','Serious,Critical','Tot Cases/1M pop',
'Deaths/1M pop','TotalTests','Tests/1M pop','Population',
'Continent','17',' 1 Caseevery X', 'ppl1 Deathevery',' X ppl1 Testevery ','X ppl','22',
'Cases Per 100K Population','Tests Per 100K Population','Active Cases Per 100k Population','Total Test:Positive Ratio',
'New Positive%','Case Fatality Rate%','New Confirmed Case Growth Rate','New Death Case Growth Rate','Average daily cases per 100,000 people in the past week',
'New Test','NPI','Region','key-id','Country/District','field','7 days inc cases','7 days inc deaths']
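# Note: after splitting a table row's text on '\n', the positional indices used below are
# aligned with col_name (e.g. bo[3] = TotalCases, bo[9] = ActiveCases, bo[13] = TotalTests,
# bo[15] = Population).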
#export https_proxy=http://127.0.0.1:10080;export http_proxy=http://127.0.0.1:10080;export all_proxy=socks5://127.0.0.1:10081
raw_data=[]
for i in tqdm(range(start,end)):
# time.sleep(2)
text_source=x[i]
l=text_source.find_all('a',attrs={'class':'mt_a'})
if l==[]:
continue
s=str(l[0])
coun=s.split('/')
try:
region=cate[coun[1]]
iso=dict_iso[coun[1]]
except:
region='missing'
url='https://www.worldometers.info/coronavirus/country/'+coun[1]+'/'
# a=requests.get(url,proxies=proxies,headers =headers)
a=''
while a=='':
try:
a=requests.get(url,headers=headers)
except:
a=''
soup = BeautifulSoup(a.content,'html5lib')
r=soup.body.find_all('script',attrs={'type':'text/javascript'})
p=re.compile(r'categories: \[(.*?)\]',re.S)
rs=re.findall(p,r[0].text)
d=rs[0]
str_pat = re.compile(r'\"(.*?)\"')
d = str_pat.findall(d)
date=d
p1=re.compile(r'name: \'Cases\'.*?\[(.*?)\]',re.S)
for j in range(10):
try:
rs=re.findall(p1,r[j].text)
d=rs[0]
d=re.sub(r'\"','',d)
case=d.split(',')
except:
# print('{} cases is not{}'.format(coun[1],j))
continue
p1=re.compile(r'name: \'Deaths\'.*?\[(.*?)\]',re.S)
for j in range(10):
try:
rs=re.findall(p1,r[j].text)
d=rs[0]
d=re.sub(r'\"','',d)
TD=d.split(',')
except:
continue
j={'Date':date,'Total Cases':case,'Total Deaths':TD}
print("Date {} TC {} TD {}".format(len(date),len(case),len(TD)))
if not len(set([len(date),len(case),len(TD)])) == 1:
continue
hist_data_of_coun_i=pd.DataFrame(j)
hist_data_of_coun_i['Total Deaths'][0]=0
for k in range(len(hist_data_of_coun_i['Total Deaths'])):
if hist_data_of_coun_i['Total Deaths'][k]=='null':
hist_data_of_coun_i['Total Deaths'][k]=0
hist_data_of_coun_i['Total Cases']=hist_data_of_coun_i['Total Cases'].astype(int)
hist_data_of_coun_i['Total Deaths']=hist_data_of_coun_i['Total Deaths'].astype(int)
hist_data_of_coun_i['case inc']=hist_data_of_coun_i['Total Cases'].diff()
hist_data_of_coun_i['death inc']=hist_data_of_coun_i['Total Deaths'].diff()
# 7-day new deaths and cases
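# Derived growth metrics, as computed below:
#   seven_cases / seven_deaths - total new cases / deaths over the last 7 days
#   inc1 / inc2   - most recent day's new cases (deaths) divided by 7x the value a week earlier
#   inc_1 / inc_2 - new cases (deaths) in the last 7 days divided by the preceding 7 days
#   adcp          - average daily new cases over the last 7 days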
seven_cases=sum([hist_data_of_coun_i.loc[len(date)-i,'case inc'] for i in range(1,8)])
seven_deaths=sum([hist_data_of_coun_i.loc[len(date)-i,'death inc'] for i in range(1,8)])
inc1=hist_data_of_coun_i.loc[len(date)-1,'case inc']/(7*hist_data_of_coun_i.loc[len(date)-8,'case inc'])
inc2=hist_data_of_coun_i.loc[len(date)-1,'death inc']/(7*hist_data_of_coun_i.loc[len(date)-8,'death inc'])
inc_1=sum([hist_data_of_coun_i.loc[len(date)-i,'case inc'] for i in range(1,8)])/sum([hist_data_of_coun_i.loc[len(date)-i,'case inc'] for i in range(8,15)])
inc_2=sum([hist_data_of_coun_i.loc[len(date)-i,'death inc'] for i in range(1,8)])/sum([hist_data_of_coun_i.loc[len(date)-i,'death inc'] for i in range(8,15)])
adcp=sum([hist_data_of_coun_i.loc[len(date)-i,'case inc'] for i in range(1,8)])/7
p=1
while inc1 ==0 and hist_data_of_coun_i.loc[len(date)-1,'Total Cases']>=10000:
p+=1
inc1=hist_data_of_coun_i.loc[len(date)-p,'case inc']/(7*hist_data_of_coun_i.loc[len(date)-1-p,'case inc'])
dd=hist_data_of_coun_i.shift(5)
hist_data_of_coun_i['inc_p']=np.log(hist_data_of_coun_i['case inc']/dd['case inc'])/5
hist_data_of_coun_i=hist_data_of_coun_i[~hist_data_of_coun_i.isin([np.nan, np.inf, -np.inf]).any(1)]
da=hist_data_of_coun_i['inc_p'].values
try:
slope,intercept, r_value, p_value, std_err=st.linregress(list(range(30)), da[:30])
except:
slope=None
bo=x[i].text.split('\n')
if bo[6]=='' and bo[7]=='':
del bo[7]
if bo[17]=='' and bo[18]=='':
del bo[18]
for o in range(start2,end2):
s1=x[o]
l1=s1.find_all('a',attrs={'class':'mt_a'})
if l1==[]:
continue
s1=str(l1[0])
coun1=s1.split('/')
if coun1[1]==coun[1]:
bo1=x[o].text.split('\n')
break
for h in range(len(bo)):
bo[h]=bo[h].replace(',','')
bo[h]=bo[h].replace('+','')
for h in range(len(bo1)):
bo1[h]=bo1[h].replace(',','')
bo1[h]=bo1[h].replace('+','')
#Cases Per 100K Population
try:
bo.append(100000*int(bo[3])/int(bo[15]))
except:
continue
# bo.append(np.nan)
# print('lack one')
#Tests Per 100K Population
try:
bo.append(100000*int(bo[13])/int(bo[15]))
except:
continue
# bo.append(np.nan)
# print('lack one')
#'Active Cases Per 100k Population'
try:
bo.append(int(bo[9])*100000/int(bo[15]))
except:
| |
:param row: Passed to grid, defaults to +1 of last item in col
:param col: Passed to grid, defaults to 0
:param padx: Passed to grid
:param pady: Passed to grid
:param sticky: Passed to grid
:param rowspan: Passed to grid
:param colspan: Passed to grid
:param widgetkwargs: Passed to widget creation
:param gridkwargs: Passed to grid placement
"""
widgetkwargs, gridkwargs = noneDict(widgetkwargs, gridkwargs)
row, col = self.getRow(row, col, rowspan, colspan)
widget = ttk.Menubutton(self.master, text=defaulttext, menu=menu, **widgetkwargs)
widget.grid(row=row, column=col, padx=padx, pady=pady, sticky=sticky, rowspan=rowspan, columnspan=colspan, **gridkwargs)
self.widgets.append(Widget(widget, "MenuButton", row, col, rowspan, colspan, defaulttext))
return widget
def Notebook(self, name, row: int = None, col: int = None, padx=10, pady=10, sticky="nsew", rowspan: int = 1,
colspan: int = 1, widgetkwargs: dict = None, gridkwargs: dict = None):
"""
Creates a ttk.Notebook
:param name: Name for debugging
:param row: Passed to grid, defaults to +1 of last item in col
:param col: Passed to grid, defaults to 0
:param padx: Passed to grid
:param pady: Passed to grid
:param sticky: Passed to grid
:param rowspan: Passed to grid
:param colspan: Passed to grid
:param widgetkwargs: Passed to widget creation
:param gridkwargs: Passed to grid placement
"""
widgetkwargs, gridkwargs = noneDict(widgetkwargs, gridkwargs)
row, col = self.getRow(row, col, rowspan, colspan)
widget = Notebook(self.master, name, **widgetkwargs)
widget.notebook.grid(row = row, column=col, padx=padx, pady=pady, sticky=sticky,
columnspan=colspan, rowspan=rowspan, **gridkwargs)
self.widgets.append(Widget(widget, "Notebook", row, col, rowspan, colspan, name))
return widget
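    # Usage sketch (assumes `frame` is an instance of this widget-frame class):
    #   nb = frame.Notebook("main tabs")
    # The return value is the wrapper Notebook object, not the raw ttk.Notebook;
    # the underlying ttk widget is the one gridded above via widget.notebook.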
    def PanedWindow(self, name, orient: Literal["vertical", 'horizontal'] = 'vertical', row: int = None,
col: int = None, padx=10, pady=10, sticky="nsew",
rowspan: int = 1, colspan: int = 1, widgetkwargs: dict = None, gridkwargs: dict = None):
"""
        Creates a ttk.PanedWindow with widgetframe compatibility
:param name: Name for debugging
:param orient: "vertical" or "horizontal"
:param row: Passed to grid, defaults to +1 of last item in col
:param col: Passed to grid, defaults to 0
:param padx: Passed to grid
:param pady: Passed to grid
:param sticky: Passed to grid
:param rowspan: Passed to grid
:param colspan: Passed to grid
:param widgetkwargs: Passed to widget creation
:param gridkwargs: Passed to grid placement
"""
widgetkwargs, gridkwargs = noneDict(widgetkwargs, gridkwargs)
row, col = self.getRow(row, col, rowspan, colspan)
widget = PanedWindow(self.master, name, orient, **widgetkwargs)
widget.panedwindow.grid(row=row, column=col, padx=padx, pady=pady, sticky=sticky, rowspan = rowspan,
columnspan = colspan, **gridkwargs)
self.widgets.append(Widget(widget, "Paned Window", row, col, rowspan, colspan, name))
return widget
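    # Usage sketch (hypothetical frame instance): frame.PanedWindow("split view", orient="horizontal")
    # returns the wrapper PanedWindow; panes are then added through that wrapper,
    # since only widget.panedwindow is gridded here.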
def Blank(self, name = "Blank", row: int = None, col: int = None, rowspan=1, colspan=1):
"""
Adds a blank space to prevent widget placement
:param name: for debugging
:param row: Passed to widget, defaults to +1 of last item in col
:param col: Passed to widget, defaults to 0
:param rowspan: Passed to widget
:param colspan: Passed to widget
"""
row, col = self.getRow(row, col, rowspan, colspan)
self.widgets.append(Widget(None, name, row, col, rowspan, colspan))
def Label(self, text: str, size=15, weight='bold', fontargs=(), row: int = None, col: int = None,
padx=10, pady=10, sticky=None, rowspan: int = 1, colspan: int = 1,
widgetkwargs: dict = None, gridkwargs: dict = None):
"""
Creates a ttk.label with a large font and bold text
:param text: text to be displayed
:param size: font size
:param weight: font weight
:param fontargs: additional font args
:param row: Passed to widget, defaults to +1 of last item in col
:param col: Passed to widget, defaults to 0
:param padx: Passed to grid
:param pady: Passed to grid
:param sticky: Passed to grid - use to align text
:param rowspan: Passed to grid
:param colspan: Passed to grid
:param widgetkwargs: Passed to widget creation
:param gridkwargs: Passed to grid placement
"""
widgetkwargs, gridkwargs = noneDict(widgetkwargs, gridkwargs)
row, col = self.getRow(row, col, rowspan, colspan)
font = ('-size', size, '-weight', weight) + fontargs
widget = ttk.Label(self.master, text=text, font=font, **widgetkwargs)
widget.grid(row=row, column=col, padx=padx, pady=pady, sticky=sticky, rowspan=rowspan, columnspan=colspan, **gridkwargs)
self.widgets.append(Widget(widget, "Label: ", row, col, rowspan, colspan, text))
return widget
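    # Example (sketch): frame.Label("Results", size=18) renders an 18-pt bold header;
    # the font is handed to ttk.Label as a Tk option tuple, here ('-size', 18, '-weight', 'bold').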
def Text(self, text: str, fontargs=(), row: int = None, col: int = None,
padx=10, pady=10, sticky="nsw", rowspan: int = 1, colspan: int = 1,
widgetkwargs: dict = None, gridkwargs: dict = None):
"""
Creates a ttk.label with normal text
:param text: text to be displayed
:param fontargs: additional font args
:param row: Passed to widget, defaults to +1 of last item in col
:param col: Passed to widget, defaults to 0
:param padx: Passed to grid
:param pady: Passed to grid
:param sticky: Passed to grid - use to align text
:param rowspan: Passed to grid
:param colspan: Passed to grid
:param widgetkwargs: Passed to widget creation
:param gridkwargs: Passed to grid placement
"""
widgetkwargs, gridkwargs = noneDict(widgetkwargs, gridkwargs)
row, col = self.getRow(row, col, rowspan, colspan)
widget = ttk.Label(self.master, text=text, font=fontargs, **widgetkwargs)
widget.grid(row=row, column=col, padx=padx, pady=pady, sticky=sticky, rowspan=rowspan, columnspan=colspan,
**gridkwargs)
self.widgets.append(Widget(widget, "Text: ", row, col, rowspan, colspan, text))
return widget
def Scale(self, lower: float, upper: float, variable: Union[tk.IntVar, tk.DoubleVar], row: int = None,
col: int = None, padx=10, pady=10, sticky="ew", rowspan: int = 1, colspan: int = 1,
widgetkwargs: dict = None, gridkwargs: dict = None):
"""
Creates a ttk.Scale (horizontal only, vertical is not supported with these themes)
:param lower: Min value
:param upper: Max value
:param variable: Variable to hold value
:param row: Passed to grid, defaults to +1 of last item in col
:param col: Passed to grid, defaults to 0
:param padx: Passed to grid
:param pady: Passed to grid
:param sticky: Passed to grid
:param rowspan: Passed to grid
:param colspan: Passed to grid
:param widgetkwargs: Passed to widget creation
:param gridkwargs: Passed to grid placement
"""
widgetkwargs, gridkwargs = noneDict(widgetkwargs, gridkwargs)
widget = ttk.Scale(self.master, variable=variable, from_=lower, to=upper, **widgetkwargs)
row, col = self.getRow(row, col, rowspan, colspan)
widget.grid(row=row, column=col, padx=padx, pady=pady, sticky=sticky, rowspan=rowspan, columnspan=colspan, **gridkwargs)
self.widgets.append(Widget(widget, "Scale" + str((lower, upper)), row, col, rowspan, colspan))
return widget
def Progressbar(self, variable: tk.Variable, mode: Literal["determinate", "indeterminate"] ="determinate", lower=0,
upper=100, row: int = None, col: int = None, padx=10, pady=10, sticky="ew", rowspan: int = 1,
colspan: int = 1, widgetkwargs: dict = None, gridkwargs: dict = None):
"""
Creates a ttk.Progressbar. If lower is not 0, it will link a second variable for processing
:param variable: tk.Variable to control progressbar
:param mode: "determinate" for variable control, "indeterminate" for animation
:param lower: min value (0 is highly recommended)
:param upper: max value
:param row: passed to grid
:param col: passed to grid
:param padx: passed to grid
:param pady: passed to grid
:param sticky: passed to grid
:param rowspan: Passed to grid
:param colspan: Passed to grid
:param widgetkwargs: passed to widget
:param gridkwargs: passed to grid
"""
widgetkwargs, gridkwargs = noneDict(widgetkwargs, gridkwargs)
if lower != 0:
var = tk.DoubleVar(value=variable.get() - lower)
variable.trace_add("write", partial(adjust, lower, variable, var))
else:
var = variable
widget = ttk.Progressbar(self.master, mode=mode, variable=var, maximum = upper - lower, **widgetkwargs)
row, col = self.getRow(row, col, rowspan, colspan)
widget.grid(row=row, column=col, padx=padx, pady=pady, sticky=sticky, rowspan=rowspan, columnspan=colspan, **gridkwargs)
self.widgets.append(Widget(widget, "Progressbar" + str((lower, upper)), row, col, rowspan, colspan))
return widget
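    # Example (sketch): with lower=50, upper=100 and progress = tk.IntVar(value=50),
    #   frame.Progressbar(progress, lower=50, upper=100)
    # creates a shadow tk.DoubleVar holding progress.get() - 50 and keeps it in sync
    # through the trace_add callback above, so the bar still spans 0..(upper - lower).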
def matplotlibFrame(self, name, projection = None, toolbar = True, figsize=(4,4), figpadx: int = None,
figpady: int = None, row: int = None, col: int = None, padx=10, pady=10, sticky="ew",
rowspan: int = 1, colspan: int = 1, widgetkwargs: dict = None, gridkwargs: dict = None):
"""
Creates a frame and drops in a matplotlib figure and axes.
Returns canvas, fig, axes, backgroundcolor, accentcolor.
:param name: Name for debugging
:param projection: Figure projection, use '3d' for 3D
:param toolbar: Shows a matplotlib toolbar
:param figpadx: Extra padding around graph to stop rendering issues, default to 5 in 2d, 30 in 3d
:param figpady: Extra padding around graph to stop rendering issues, default to 5 in 2d, (5,20) in 3d
:param figsize: Sets figure creation size. Fig will not resize to below this point
:param row: passed to grid
:param col: passed to grid
:param padx: passed to grid
:param pady: passed to grid
:param sticky: passed to grid
:param rowspan: passed to grid
:param colspan: passed to grid
        :param widgetkwargs: passed to widget
        :param gridkwargs: passed to grid
        """
    def cf_manager_rest_api_post_app_logic(self, image_details, image_file, sig_file, pr_verify_rpt, username, password, **kwargs):  # noqa: E501
        """Upload an image of type `app logic`  # noqa: E501
        This uploads a new image (aka FPGA **bin**file). **This method is for the app logic in case of partial reconfiguration building on platform logics**. The `id` of the uploaded Image can then be used to create *Instances* or *Clusters*. It **must** contain the corresponding .sig file that was produced by the build. The resulting image can be viewed and deleted like other images. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.cf_manager_rest_api_post_app_logic(image_details, image_file, sig_file, pr_verify_rpt, username, password, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str image_details: (required)
:param str image_file: (required)
:param str sig_file: (required)
:param str pr_verify_rpt: (required)
:param str username: OpenStack username (required)
        :param str password: OpenStack password (required)
:return: Image
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.cf_manager_rest_api_post_app_logic_with_http_info(image_details, image_file, sig_file, pr_verify_rpt, username, password, **kwargs) # noqa: E501
else:
(data) = self.cf_manager_rest_api_post_app_logic_with_http_info(image_details, image_file, sig_file, pr_verify_rpt, username, password, **kwargs) # noqa: E501
return data
def cf_manager_rest_api_post_app_logic_with_http_info(self, image_details, image_file, sig_file, pr_verify_rpt, username, password, **kwargs): # noqa: E501
"""Upload an image of type `app logic` # noqa: E501
        This uploads a new image (aka FPGA **bin**file). **This method is for the app logic in case of partial reconfiguration building on platform logics**. The `id` of the uploaded Image can then be used to create *Instances* or *Clusters*. It **must** contain the corresponding .sig file that was produced by the build. The resulting image can be viewed and deleted like other images. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.cf_manager_rest_api_post_app_logic_with_http_info(image_details, image_file, sig_file, pr_verify_rpt, username, password, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str image_details: (required)
:param str image_file: (required)
:param str sig_file: (required)
:param str pr_verify_rpt: (required)
:param str username: OpenStack username (required)
        :param str password: OpenStack password (required)
:return: Image
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['image_details', 'image_file', 'sig_file', 'pr_verify_rpt', 'username', 'password'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method cf_manager_rest_api_post_app_logic" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'image_details' is set
if ('image_details' not in params or
params['image_details'] is None):
raise ValueError("Missing the required parameter `image_details` when calling `cf_manager_rest_api_post_app_logic`") # noqa: E501
# verify the required parameter 'image_file' is set
if ('image_file' not in params or
params['image_file'] is None):
raise ValueError("Missing the required parameter `image_file` when calling `cf_manager_rest_api_post_app_logic`") # noqa: E501
# verify the required parameter 'sig_file' is set
if ('sig_file' not in params or
params['sig_file'] is None):
raise ValueError("Missing the required parameter `sig_file` when calling `cf_manager_rest_api_post_app_logic`") # noqa: E501
# verify the required parameter 'pr_verify_rpt' is set
if ('pr_verify_rpt' not in params or
params['pr_verify_rpt'] is None):
raise ValueError("Missing the required parameter `pr_verify_rpt` when calling `cf_manager_rest_api_post_app_logic`") # noqa: E501
# verify the required parameter 'username' is set
if ('username' not in params or
params['username'] is None):
raise ValueError("Missing the required parameter `username` when calling `cf_manager_rest_api_post_app_logic`") # noqa: E501
# verify the required parameter 'password' is set
if ('password' not in params or
params['password'] is None):
raise ValueError("Missing the required parameter `password` when calling `cf_manager_rest_api_post_app_logic`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'username' in params:
query_params.append(('username', params['username'])) # noqa: E501
if 'password' in params:
query_params.append(('password', params['password'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
if 'image_details' in params:
form_params.append(('image_details', params['image_details'])) # noqa: E501
if 'image_file' in params:
local_var_files['image_file'] = params['image_file'] # noqa: E501
if 'sig_file' in params:
local_var_files['sig_file'] = params['sig_file'] # noqa: E501
if 'pr_verify_rpt' in params:
local_var_files['pr_verify_rpt'] = params['pr_verify_rpt'] # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/images/app_logic', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Image', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
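    # Request layout note (from the parameter handling above): `image_details` is sent
    # as an ordinary multipart form field, the three file arguments go out as file parts,
    # and username/password always travel as query parameters. A minimal sketch with
    # placeholder values:
    #   api.cf_manager_rest_api_post_app_logic(
    #       image_details='{...}',              # JSON string describing the image
    #       image_file='/path/to/app.bin',
    #       sig_file='/path/to/app.sig',
    #       pr_verify_rpt='/path/to/pr_verify.rpt',
    #       username='myuser', password='...')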
def cf_manager_rest_api_post_images(self, image_details, image_file, pr_verify_rpt, username, password, **kwargs): # noqa: E501
"""Upload an image # noqa: E501
        This uploads a new Image (aka FPGA bitfile). The `id` of the uploaded Image can then be used to create *Instances* or *Clusters*. If the bitfile **is not a partial bitfile**, the *image_detail* **property `breed` must be `\"SHELL\"`**. The *image_detail* property `shell_type` is only relevant for the partial reconfiguration flow, but cannot be empty (e.g. enter `\"NO_PR\"`). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.cf_manager_rest_api_post_images(image_details, image_file, pr_verify_rpt, username, password, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str image_details: (required)
:param str image_file: (required)
:param str pr_verify_rpt: (required)
:param str username: OpenStack username (required)
        :param str password: OpenStack password (required)
:return: Image
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.cf_manager_rest_api_post_images_with_http_info(image_details, image_file, pr_verify_rpt, username, password, **kwargs) # noqa: E501
else:
(data) = self.cf_manager_rest_api_post_images_with_http_info(image_details, image_file, pr_verify_rpt, username, password, **kwargs) # noqa: E501
return data
def cf_manager_rest_api_post_images_with_http_info(self, image_details, image_file, pr_verify_rpt, username, password, **kwargs): # noqa: E501
"""Upload an image # noqa: E501
        This uploads a new Image (aka FPGA bitfile). The `id` of the uploaded Image can then be used to create *Instances* or *Clusters*. If the bitfile **is not a partial bitfile**, the *image_detail* **property `breed` must be `\"SHELL\"`**. The *image_detail* property `shell_type` is only relevant for the partial reconfiguration flow, but cannot be empty (e.g. enter `\"NO_PR\"`). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.cf_manager_rest_api_post_images_with_http_info(image_details, image_file, pr_verify_rpt, username, password, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str image_details: (required)
:param str image_file: (required)
:param str pr_verify_rpt: (required)
:param str username: OpenStack username (required)
:param str password: OpenStack password (required)
:return: Image
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['image_details', 'image_file', 'pr_verify_rpt', 'username', 'password'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method cf_manager_rest_api_post_images" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'image_details' is set
if ('image_details' not in params or
params['image_details'] is None):
raise ValueError("Missing the required parameter `image_details` when calling `cf_manager_rest_api_post_images`") # noqa: E501
# verify the required parameter 'image_file' is set
if ('image_file' not in params or
params['image_file'] is None):
raise ValueError("Missing the required parameter `image_file` when calling `cf_manager_rest_api_post_images`") # noqa: E501
# verify the required parameter 'pr_verify_rpt' is set
if ('pr_verify_rpt' not in params or
params['pr_verify_rpt'] is None):
raise ValueError("Missing the required parameter `pr_verify_rpt` when calling `cf_manager_rest_api_post_images`") # noqa: E501
# verify the required parameter 'username' is set
if ('username' not in params or
params['username'] is None):
raise ValueError("Missing the required parameter `username` when calling `cf_manager_rest_api_post_images`") # noqa: E501
# verify the required parameter 'password' is set
if ('password' not in params or
params['password'] is None):
raise ValueError("Missing the required parameter `password` when calling `cf_manager_rest_api_post_images`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'username' in params:
query_params.append(('username', params['username'])) # noqa: E501
if 'password' in params:
query_params.append(('password', params['password'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
if 'image_details' in params:
form_params.append(('image_details', params['image_details'])) # noqa: E501
if 'image_file' in params:
local_var_files['image_file'] = params['image_file'] # noqa: E501
if 'pr_verify_rpt' in params:
local_var_files['pr_verify_rpt'] = params['pr_verify_rpt'] # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
            ['multipart/form-data'])  # noqa: E501
def generate_clusters(cluster_center_list, cluster_pt_number_list, cluster_radius_list):
    """
    Generate k clusters of random sample points around the given cluster centers.
    :param cluster_center_list: list of coordinates of the cluster centers
    :param cluster_pt_number_list: number of points to generate for each cluster center
:param cluster_radius_list: list of size of each cluster
:return: list of sample points that belong to various clusters
"""
k = len(cluster_pt_number_list) # number of clusters to generate clusters for
    if k != len(cluster_center_list) or k != len(cluster_radius_list):
        assert False, "Length of list cluster_center_list, cluster_pt_number_list, cluster_radius_list must be the same!"
training_sets = []
for k_ind in range(k):
new_cluster_data = generate_one_cluster(cluster_center_list[k_ind], cluster_pt_number_list[k_ind],
cluster_radius_list[k_ind])
if k_ind > 0:
training_sets = np.concatenate((training_sets, new_cluster_data), axis=0)
else:
training_sets = new_cluster_data
    # shuffle the rows so that samples from the different clusters are mixed together
    np.random.shuffle(training_sets)
return training_sets
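# Usage sketch for the cluster generator above (illustrative values; assumes the helper
# is named generate_clusters as reconstructed here):
#   centers = [[0.0, 0.0], [10.0, 10.0]]
#   counts = [100, 50]
#   radii = [1.0, 2.5]
#   samples = generate_clusters(centers, counts, radii)   # -> ndarray of shape (150, 2)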
def generate_one_cluster(cluster_center, cluster_number, cluster_size):
"""
    This function will generate a full cluster with cluster_number points centered on cluster_center
with maximum radius cluster_size
:param cluster_center: python list denoting coordinates of cluster center
:param cluster_number: integer denoting number of points to generate for this cluster
:param cluster_size: float denoting radius of cluster
:return: np matrix denoting a cluster
"""
pt_dists = np.random.uniform(0, cluster_size, [cluster_number, 1])
coord_pts = len(cluster_center) # dimension of each cluster point
    one_cluster_data = np.zeros((cluster_number, coord_pts), dtype=np.float64)
for p_ind in range(cluster_number):
coord_indices = list(range(coord_pts))
random.shuffle(coord_indices) # randomly determine which coordinate to generate
left_radius = pt_dists[p_ind]
for c_ind in range(coord_pts):
coord_index = coord_indices[c_ind]
one_cluster_data[p_ind, coord_index] = random.uniform(-1*left_radius+cluster_center[coord_index],
left_radius+cluster_center[coord_index])
left_radius = math.sqrt(pow(left_radius, 2)-pow((one_cluster_data[p_ind, coord_index]-
cluster_center[coord_index]), 2))
return one_cluster_data
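# Worked example of the radius bookkeeping above: each point starts with a distance
# budget left_radius drawn uniformly in [0, cluster_size]; with a budget of 5 and a
# first coordinate drawn 3 away from the centre, the budget shrinks to
# sqrt(5**2 - 3**2) = 4, so the remaining coordinates can move at most 4 from the
# centre and the finished point stays within the requested radius of cluster_center.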
def remove_negative_response(x_mat, response_y):
"""
Recall that when the user chooses to generate a data set for multinomial or binomial using the 'threshold' method,
response y is set to the class with the maximum class probability if the maximum class probability
exceeds the second highest class probability by the value set in margin. If the maximum class probability fails
to be greater by margin than the second highest class probability, the data sample is discarded. However, when we
generate the data set, we keep all samples. For data sample with maximum class probability that fails to be
greater by margin than the second highest class probability, the response is set to be -1. This function will
remove all data samples (predictors and responses) with response set to -1.
:param x_mat: predictor matrix containing all predictor values
:param response_y: response that can be negative if that data sample is to be removed
:return: tuple containing x_mat, response_y with negative data samples removed.
"""
    y_response_negative = np.where(response_y < 0)  # indices of rows with negative response
    x_mat = np.delete(x_mat, y_response_negative[0].transpose(), axis=0)  # remove predictor rows with negative response
# remove rows with negative response
response_y = response_y[response_y >= 0]
return x_mat,response_y.transpose()
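# Example of the filtering above (sketch):
#   x = np.array([[1., 2.], [3., 4.], [5., 6.]])
#   y = np.array([1, -1, 0])
#   remove_negative_response(x, y)   # -> (rows 0 and 2 of x, array([1, 0]))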
def generate_training_set_mixed_glm(csv_filename, csv_filename_true_one_hot, row_count, col_count, min_p_value,
max_p_value, family_type, noise_std, weight, enum_col, enum_level_vec,
class_number=2, class_method='probability', class_margin=0.0, weightChange=False):
"""
Generate supervised data set given weights for the GLM algo with mixed categorical and real value
predictors. First randomly generate the predictors, then call function generate_response_glm to generate the
corresponding response y using the formula: y = w^T x+b+e where T is transpose, e is a random Gaussian noise
added. For the Binomial family, the relationship between the response Y and predictor vector X is assumed to
be Prob(Y = 1|X) = exp(W^T * X + e)/(1+exp(W^T * X + e)). For the Multinomial family, the relationship between
the response Y (K possible classes) and predictor vector X is assumed to be
    Prob(Y = c|X) = exp(Wc^T * X + e)/(sum k=0 to K-1 of exp(Wk^T * X + e)). e is the random Gaussian noise added to the
response. The predictors and responses are saved in a file specified by csv_filename.
:param csv_filename: string representing full path filename to store supervised data set
:param csv_filename_true_one_hot: string representing full path filename to store data set with true one-hot
encoding.
:param row_count: integer representing the number of training samples in the data set
:param col_count: integer representing the number of predictors in the data set
:param max_p_value: integer representing maximum predictor values
:param min_p_value: integer representing minimum predictor values
:param family_type: string represents the various distribution families (gaussian, multinomial, binomial)
supported by our GLM algo
:param noise_std: Gaussian noise standard deviation used to generate noise e to add to response
:param weight: vector representing w in our formula to generate the response.
:param enum_col: integer representing actual number of categorical columns in data set
:param enum_level_vec: vector containing maximum integer value for each categorical column
:param class_number: integer, optional, representing number classes for binomial and multinomial
:param class_method: string, optional, describing how we derive the final response from the class probabilities
generated for binomial and multinomial family_type. If set to 'probability', response y is generated randomly
according to the class probabilities calculated. If set to 'threshold', response y is set to the class with
the maximum class probability if the maximum class probability exceeds the second highest class probability by
the value set in margin. If the maximum class probability fails to be greater by margin than the second highest
class probability, the data sample is discarded.
:param class_margin: float, optional, denotes the threshold by how much the maximum class probability has to
exceed the second highest class probability by in order for us to keep the data set sample. This field is only
meaningful if class_method is set to 'threshold'
:return: None
"""
# generate the random training data sets
    enum_dataset = np.zeros((row_count, enum_col), dtype=np.int64)  # generate the categorical predictors
# generate categorical data columns
for indc in range(enum_col):
        enum_dataset[:, indc] = np.random.randint(0, enum_level_vec[indc] + 1, row_count)  # inclusive upper bound
# generate real data columns
x_mat = np.random.uniform(min_p_value, max_p_value, [row_count, col_count-enum_col])
x_mat = np.concatenate((enum_dataset, x_mat), axis=1) # concatenate categorical and real predictor columns
if len(csv_filename_true_one_hot) > 0:
generate_and_save_mixed_glm(csv_filename_true_one_hot, x_mat, enum_level_vec, enum_col, True, weight, noise_std,
family_type, class_method=class_method, class_margin=class_margin, weightChange=weightChange)
if len(csv_filename) > 0:
generate_and_save_mixed_glm(csv_filename, x_mat, enum_level_vec, enum_col, False, weight, noise_std,
family_type, class_method=class_method, class_margin=class_margin, weightChange=False)
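# Usage sketch (file names and the weight vector are placeholders; `weight` must match
# the encoded predictor dimension expected by generate_and_save_mixed_glm):
#   generate_training_set_mixed_glm('train.csv', 'train_one_hot.csv', row_count=1000,
#                                   col_count=10, min_p_value=-1, max_p_value=1,
#                                   family_type='binomial', noise_std=0.1, weight=w,
#                                   enum_col=3, enum_level_vec=np.array([2, 3, 4]))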
def generate_and_save_mixed_glm(csv_filename, x_mat, enum_level_vec, enum_col, true_one_hot, weight, noise_std,
family_type, class_method='probability', class_margin=0.0, weightChange=False):
"""
Given the weights and input data matrix with mixed categorical and real value predictors, this function will
generate a supervised data set and save the input data and response in a csv format file specified by
csv_filename. It will first encode the enums without using one hot encoding with or without a reference
level first before generating a response Y.
:param csv_filename: string representing full path filename to store supervised data set with reference level
plus true one-hot encoding.
:param x_mat: predictor matrix with mixed columns (categorical/real values)
:param enum_level_vec: vector containing maximum integer value for each categorical column
:param enum_col: integer representing actual number of categorical columns in data set
:param true_one_hot: bool indicating whether we are using true one hot encoding or reference level plus
one hot encoding
:param weight: vector representing w in our formula to generate the response
:param noise_std: Gaussian noise standard deviation used to generate noise e to add to response
:param family_type: string represents the various distribution families (gaussian, multinomial, binomial) supported
by our GLM algo
:param class_method: string, optional, describing how we derive the final response from the class probabilities
generated for binomial and multinomial family_type. If set to 'probability', response y is generated randomly
according to the class probabilities calculated. If set to 'threshold', response y is set to the class with the
maximum class probability if the maximum class probability exceeds the second highest class probability by the
value set in the margin. If the maximum class probability fails to be greater by margin than the second highest
class probability, the data sample is discarded.
:param class_margin: float, optional, denotes the threshold by how much the maximum class probability has to exceed
the second highest class probability in order for us to keep the data sample. This field is only meaningful if
class_method is set to 'threshold'
:return: None
"""
# encode the enums
x_mat_encoded = encode_enum_dataset(x_mat, enum_level_vec, enum_col, true_one_hot, False)
# extract the correct weight dimension for the data set
if not true_one_hot:
@pulumi.input_type
class DeploymentArgs:
    def __init__(__self__, *,
                 build: Optional[pulumi.Input['BuildInfoArgs']] = None,
cloud_build_options: Optional[pulumi.Input['CloudBuildOptionsArgs']] = None,
container: Optional[pulumi.Input['ContainerInfoArgs']] = None,
files: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
zip: Optional[pulumi.Input['ZipInfoArgs']] = None):
"""
Code and application artifacts used to deploy a version to App Engine.
:param pulumi.Input['BuildInfoArgs'] build: Google Cloud Build build information. Only applicable for instances running in the App Engine flexible environment.
:param pulumi.Input['CloudBuildOptionsArgs'] cloud_build_options: Options for any Google Cloud Build builds created as a part of this deployment.These options will only be used if a new build is created, such as when deploying to the App Engine flexible environment using files or zip.
:param pulumi.Input['ContainerInfoArgs'] container: The Docker image for the container that runs the version. Only applicable for instances running in the App Engine flexible environment.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] files: Manifest of the files stored in Google Cloud Storage that are included as part of this version. All files must be readable using the credentials supplied with this call.
:param pulumi.Input['ZipInfoArgs'] zip: The zip file for this deployment, if this is a zip deployment.
"""
if build is not None:
pulumi.set(__self__, "build", build)
if cloud_build_options is not None:
pulumi.set(__self__, "cloud_build_options", cloud_build_options)
if container is not None:
pulumi.set(__self__, "container", container)
if files is not None:
pulumi.set(__self__, "files", files)
if zip is not None:
pulumi.set(__self__, "zip", zip)
@property
@pulumi.getter
def build(self) -> Optional[pulumi.Input['BuildInfoArgs']]:
"""
Google Cloud Build build information. Only applicable for instances running in the App Engine flexible environment.
"""
return pulumi.get(self, "build")
@build.setter
def build(self, value: Optional[pulumi.Input['BuildInfoArgs']]):
pulumi.set(self, "build", value)
@property
@pulumi.getter(name="cloudBuildOptions")
def cloud_build_options(self) -> Optional[pulumi.Input['CloudBuildOptionsArgs']]:
"""
Options for any Google Cloud Build builds created as a part of this deployment.These options will only be used if a new build is created, such as when deploying to the App Engine flexible environment using files or zip.
"""
return pulumi.get(self, "cloud_build_options")
@cloud_build_options.setter
def cloud_build_options(self, value: Optional[pulumi.Input['CloudBuildOptionsArgs']]):
pulumi.set(self, "cloud_build_options", value)
@property
@pulumi.getter
def container(self) -> Optional[pulumi.Input['ContainerInfoArgs']]:
"""
The Docker image for the container that runs the version. Only applicable for instances running in the App Engine flexible environment.
"""
return pulumi.get(self, "container")
@container.setter
def container(self, value: Optional[pulumi.Input['ContainerInfoArgs']]):
pulumi.set(self, "container", value)
@property
@pulumi.getter
def files(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Manifest of the files stored in Google Cloud Storage that are included as part of this version. All files must be readable using the credentials supplied with this call.
"""
return pulumi.get(self, "files")
@files.setter
def files(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "files", value)
@property
@pulumi.getter
def zip(self) -> Optional[pulumi.Input['ZipInfoArgs']]:
"""
The zip file for this deployment, if this is a zip deployment.
"""
return pulumi.get(self, "zip")
@zip.setter
def zip(self, value: Optional[pulumi.Input['ZipInfoArgs']]):
pulumi.set(self, "zip", value)
@pulumi.input_type
class DiskUtilizationArgs:
def __init__(__self__, *,
target_read_bytes_per_second: Optional[pulumi.Input[int]] = None,
target_read_ops_per_second: Optional[pulumi.Input[int]] = None,
target_write_bytes_per_second: Optional[pulumi.Input[int]] = None,
target_write_ops_per_second: Optional[pulumi.Input[int]] = None):
"""
Target scaling by disk usage. Only applicable in the App Engine flexible environment.
:param pulumi.Input[int] target_read_bytes_per_second: Target bytes read per second.
        :param pulumi.Input[int] target_read_ops_per_second: Target ops read per second.
:param pulumi.Input[int] target_write_bytes_per_second: Target bytes written per second.
:param pulumi.Input[int] target_write_ops_per_second: Target ops written per second.
"""
if target_read_bytes_per_second is not None:
pulumi.set(__self__, "target_read_bytes_per_second", target_read_bytes_per_second)
if target_read_ops_per_second is not None:
pulumi.set(__self__, "target_read_ops_per_second", target_read_ops_per_second)
if target_write_bytes_per_second is not None:
pulumi.set(__self__, "target_write_bytes_per_second", target_write_bytes_per_second)
if target_write_ops_per_second is not None:
pulumi.set(__self__, "target_write_ops_per_second", target_write_ops_per_second)
@property
@pulumi.getter(name="targetReadBytesPerSecond")
def target_read_bytes_per_second(self) -> Optional[pulumi.Input[int]]:
"""
Target bytes read per second.
"""
return pulumi.get(self, "target_read_bytes_per_second")
@target_read_bytes_per_second.setter
def target_read_bytes_per_second(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "target_read_bytes_per_second", value)
@property
@pulumi.getter(name="targetReadOpsPerSecond")
def target_read_ops_per_second(self) -> Optional[pulumi.Input[int]]:
"""
        Target ops read per second.
"""
return pulumi.get(self, "target_read_ops_per_second")
@target_read_ops_per_second.setter
def target_read_ops_per_second(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "target_read_ops_per_second", value)
@property
@pulumi.getter(name="targetWriteBytesPerSecond")
def target_write_bytes_per_second(self) -> Optional[pulumi.Input[int]]:
"""
Target bytes written per second.
"""
return pulumi.get(self, "target_write_bytes_per_second")
@target_write_bytes_per_second.setter
def target_write_bytes_per_second(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "target_write_bytes_per_second", value)
@property
@pulumi.getter(name="targetWriteOpsPerSecond")
def target_write_ops_per_second(self) -> Optional[pulumi.Input[int]]:
"""
Target ops written per second.
"""
return pulumi.get(self, "target_write_ops_per_second")
@target_write_ops_per_second.setter
def target_write_ops_per_second(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "target_write_ops_per_second", value)
@pulumi.input_type
class EndpointsApiServiceArgs:
def __init__(__self__, *,
config_id: Optional[pulumi.Input[str]] = None,
disable_trace_sampling: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
rollout_strategy: Optional[pulumi.Input['EndpointsApiServiceRolloutStrategy']] = None):
"""
Cloud Endpoints (https://cloud.google.com/endpoints) configuration. The Endpoints API Service provides tooling for serving Open API and gRPC endpoints via an NGINX proxy. Only valid for App Engine Flexible environment deployments.The fields here refer to the name and configuration ID of a "service" resource in the Service Management API (https://cloud.google.com/service-management/overview).
:param pulumi.Input[str] config_id: Endpoints service configuration ID as specified by the Service Management API. For example "2016-09-19r1".By default, the rollout strategy for Endpoints is RolloutStrategy.FIXED. This means that Endpoints starts up with a particular configuration ID. When a new configuration is rolled out, Endpoints must be given the new configuration ID. The config_id field is used to give the configuration ID and is required in this case.Endpoints also has a rollout strategy called RolloutStrategy.MANAGED. When using this, Endpoints fetches the latest configuration and does not need the configuration ID. In this case, config_id must be omitted.
:param pulumi.Input[bool] disable_trace_sampling: Enable or disable trace sampling. By default, this is set to false for enabled.
:param pulumi.Input[str] name: Endpoints service name which is the name of the "service" resource in the Service Management API. For example "myapi.endpoints.myproject.cloud.goog"
:param pulumi.Input['EndpointsApiServiceRolloutStrategy'] rollout_strategy: Endpoints rollout strategy. If FIXED, config_id must be specified. If MANAGED, config_id must be omitted.
"""
if config_id is not None:
pulumi.set(__self__, "config_id", config_id)
if disable_trace_sampling is not None:
pulumi.set(__self__, "disable_trace_sampling", disable_trace_sampling)
if name is not None:
pulumi.set(__self__, "name", name)
if rollout_strategy is not None:
pulumi.set(__self__, "rollout_strategy", rollout_strategy)
@property
@pulumi.getter(name="configId")
def config_id(self) -> Optional[pulumi.Input[str]]:
"""
Endpoints service configuration ID as specified by the Service Management API. For example "2016-09-19r1".By default, the rollout strategy for Endpoints is RolloutStrategy.FIXED. This means that Endpoints starts up with a particular configuration ID. When a new configuration is rolled out, Endpoints must be given the new configuration ID. The config_id field is used to give the configuration ID and is required in this case.Endpoints also has a rollout strategy called RolloutStrategy.MANAGED. When using this, Endpoints fetches the latest configuration and does not need the configuration ID. In this case, config_id must be omitted.
"""
return pulumi.get(self, "config_id")
@config_id.setter
def config_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "config_id", value)
@property
@pulumi.getter(name="disableTraceSampling")
def disable_trace_sampling(self) -> Optional[pulumi.Input[bool]]:
"""
Enable or disable trace sampling. By default, this is set to false for enabled.
"""
return pulumi.get(self, "disable_trace_sampling")
@disable_trace_sampling.setter
def disable_trace_sampling(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "disable_trace_sampling", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Endpoints service name which is the name of the "service" resource in the Service Management API. For example "myapi.endpoints.myproject.cloud.goog"
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="rolloutStrategy")
def rollout_strategy(self) -> Optional[pulumi.Input['EndpointsApiServiceRolloutStrategy']]:
"""
Endpoints rollout strategy. If FIXED, config_id must be specified. If MANAGED, config_id must be omitted.
"""
return pulumi.get(self, "rollout_strategy")
@rollout_strategy.setter
def rollout_strategy(self, value: Optional[pulumi.Input['EndpointsApiServiceRolloutStrategy']]):
pulumi.set(self, "rollout_strategy", value)
@pulumi.input_type
class EntrypointArgs:
def __init__(__self__, *,
shell: Optional[pulumi.Input[str]] = None):
"""
The entrypoint for the application.
:param pulumi.Input[str] shell: The format should be a shell command that can be fed to bash -c.
"""
if shell is not None:
pulumi.set(__self__, "shell", shell)
@property
@pulumi.getter
def shell(self) -> Optional[pulumi.Input[str]]:
"""
The format should be a shell command that can be fed to bash -c.
"""
return pulumi.get(self, "shell")
@shell.setter
def shell(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "shell", value)
@pulumi.input_type
class ErrorHandlerArgs:
def __init__(__self__, *,
error_code: Optional[pulumi.Input['ErrorHandlerErrorCode']] = None,
mime_type: Optional[pulumi.Input[str]] = None,
static_file: Optional[pulumi.Input[str]] = None):
"""
Custom static error page to be served when an error occurs.
:param pulumi.Input['ErrorHandlerErrorCode'] error_code: Error condition this handler applies to.
:param pulumi.Input[str] mime_type: MIME type of file. Defaults to text/html.
:param pulumi.Input[str] static_file: Static file content to be served for this error.
"""
if error_code is not None:
pulumi.set(__self__, "error_code", error_code)
if mime_type is not None:
pulumi.set(__self__, "mime_type", mime_type)
if static_file is not None:
pulumi.set(__self__, "static_file", static_file)
@property
@pulumi.getter(name="errorCode")
def error_code(self) -> Optional[pulumi.Input['ErrorHandlerErrorCode']]:
"""
Error condition this handler applies to.
"""
return pulumi.get(self, "error_code")
@error_code.setter
def error_code(self, value: Optional[pulumi.Input['ErrorHandlerErrorCode']]):
pulumi.set(self, "error_code", value)
@property
@pulumi.getter(name="mimeType")
def mime_type(self) -> Optional[pulumi.Input[str]]:
"""
        MIME type of file. Defaults to text/html.
        """
        return pulumi.get(self, "mime_type")
        filters = self.filter_parser.parse_filters([ ( 'name', 'like', 'history%' ),
])
histories = self.history_manager.list( filters=filters )
# for h in histories:
# print h.name
self.assertEqual( histories, [ history1, history2, history3 ])
filters = self.filter_parser.parse_filters([ ( 'name', 'like', '%2' ), ])
self.assertEqual( self.history_manager.list( filters=filters ), [ history2 ])
filters = self.filter_parser.parse_filters([ ( 'name', 'eq', 'history2' ), ])
self.assertEqual( self.history_manager.list( filters=filters ), [ history2 ])
self.history_manager.update( history1, dict( deleted=True ) )
filters = self.filter_parser.parse_filters([ ( 'deleted', 'eq', 'True' ), ])
self.assertEqual( self.history_manager.list( filters=filters ), [ history1 ])
filters = self.filter_parser.parse_filters([ ( 'deleted', 'eq', 'False' ), ])
self.assertEqual( self.history_manager.list( filters=filters ), [ history2, history3 ])
self.assertEqual( self.history_manager.list(), [ history1, history2, history3 ])
self.history_manager.update( history3, dict( deleted=True ) )
self.history_manager.update( history1, dict( importable=True ) )
self.history_manager.update( history2, dict( importable=True ) )
filters = self.filter_parser.parse_filters([
( 'deleted', 'eq', 'True' ),
( 'importable', 'eq', 'True' ),
])
self.assertEqual( self.history_manager.list( filters=filters ), [ history1 ])
self.assertEqual( self.history_manager.list(), [ history1, history2, history3 ])
def test_fn_filter_parsing( self ):
user2 = self.user_manager.create( **user2_data )
history1 = self.history_manager.create( name='history1', user=user2 )
history2 = self.history_manager.create( name='history2', user=user2 )
history3 = self.history_manager.create( name='history3', user=user2 )
filters = self.filter_parser.parse_filters([ ( 'annotation', 'has', 'no play' ), ])
anno_filter = filters[0]
history3.add_item_annotation( self.trans.sa_session, user2, history3, "All work and no play" )
self.trans.sa_session.flush()
self.assertTrue( anno_filter( history3 ) )
self.assertFalse( anno_filter( history2 ) )
self.assertEqual( self.history_manager.list( filters=filters ), [ history3 ])
self.log( 'should allow combinations of orm and fn filters' )
self.history_manager.update( history3, dict( importable=True ) )
self.history_manager.update( history2, dict( importable=True ) )
history1.add_item_annotation( self.trans.sa_session, user2, history1, "All work and no play" )
self.trans.sa_session.flush()
shining_examples = self.history_manager.list( filters=self.filter_parser.parse_filters([
( 'importable', 'eq', 'True' ),
( 'annotation', 'has', 'no play' ),
]))
self.assertEqual( shining_examples, [ history3 ])
def test_fn_filter_currying( self ):
self.filter_parser.fn_filter_parsers = {
'name_len' : { 'op': { 'lt' : lambda i, v: len( i.name ) < v }, 'val': int }
}
        self.log( 'should be 1 fn filter parser now' )
self.assertEqual( len( self.filter_parser.fn_filter_parsers ), 1 )
filters = self.filter_parser.parse_filters([
( 'name_len', 'lt', '4' )
])
self.log( 'should have parsed out a single filter' )
self.assertEqual( len( filters ), 1 )
filter_ = filters[0]
fake = galaxy_mock.OpenObject()
fake.name = '123'
self.log( '123 should return true through the filter' )
self.assertTrue( filter_( fake ) )
fake.name = '1234'
self.log( '1234 should return false through the filter' )
self.assertFalse( filter_( fake ) )
def test_list( self ):
"""
Test limit and offset in conjunction with both orm and fn filtering.
"""
user2 = self.user_manager.create( **user2_data )
history1 = self.history_manager.create( name='history1', user=user2 )
history2 = self.history_manager.create( name='history2', user=user2 )
history3 = self.history_manager.create( name='history3', user=user2 )
history4 = self.history_manager.create( name='history4', user=user2 )
self.history_manager.delete( history1 )
self.history_manager.delete( history2 )
self.history_manager.delete( history3 )
test_annotation = "testing"
history2.add_item_annotation( self.trans.sa_session, user2, history2, test_annotation )
self.trans.sa_session.flush()
history3.add_item_annotation( self.trans.sa_session, user2, history3, test_annotation )
self.trans.sa_session.flush()
history3.add_item_annotation( self.trans.sa_session, user2, history4, test_annotation )
self.trans.sa_session.flush()
all_histories = [ history1, history2, history3, history4 ]
deleted_and_annotated = [ history2, history3 ]
self.log( "no offset, no limit should work" )
self.assertEqual( self.history_manager.list( offset=None, limit=None ), all_histories )
self.assertEqual( self.history_manager.list(), all_histories )
self.log( "no offset, limit should work" )
self.assertEqual( self.history_manager.list( limit=2 ), [ history1, history2 ] )
self.log( "offset, no limit should work" )
self.assertEqual( self.history_manager.list( offset=1 ), [ history2, history3, history4 ] )
self.log( "offset, limit should work" )
self.assertEqual( self.history_manager.list( offset=1, limit=1 ), [ history2 ] )
self.log( "zero limit should return empty list" )
self.assertEqual( self.history_manager.list( limit=0 ), [] )
self.log( "past len offset should return empty list" )
self.assertEqual( self.history_manager.list( offset=len( all_histories ) ), [] )
self.log( "negative limit should return full list" )
self.assertEqual( self.history_manager.list( limit=-1 ), all_histories )
self.log( "negative offset should return full list" )
self.assertEqual( self.history_manager.list( offset=-1 ), all_histories )
filters = [ model.History.deleted == true() ]
self.log( "orm filtered, no offset, no limit should work" )
found = self.history_manager.list( filters=filters )
self.assertEqual( found, [ history1, history2, history3 ] )
self.log( "orm filtered, no offset, limit should work" )
found = self.history_manager.list( filters=filters, limit=2 )
self.assertEqual( found, [ history1, history2 ] )
self.log( "orm filtered, offset, no limit should work" )
found = self.history_manager.list( filters=filters, offset=1 )
self.assertEqual( found, [ history2, history3 ] )
self.log( "orm filtered, offset, limit should work" )
found = self.history_manager.list( filters=filters, offset=1, limit=1 )
self.assertEqual( found, [ history2 ] )
filters = self.filter_parser.parse_filters([ ( 'annotation', 'has', test_annotation ) ])
self.log( "fn filtered, no offset, no limit should work" )
found = self.history_manager.list( filters=filters )
self.assertEqual( found, [ history2, history3, history4 ] )
self.log( "fn filtered, no offset, limit should work" )
found = self.history_manager.list( filters=filters, limit=2 )
self.assertEqual( found, [ history2, history3 ] )
self.log( "fn filtered, offset, no limit should work" )
found = self.history_manager.list( filters=filters, offset=1 )
self.assertEqual( found, [ history3, history4 ] )
self.log( "fn filtered, offset, limit should work" )
found = self.history_manager.list( filters=filters, offset=1, limit=1 )
self.assertEqual( found, [ history3 ] )
filters = self.filter_parser.parse_filters([
( 'deleted', 'eq', 'True' ),
( 'annotation', 'has', test_annotation )
])
self.log( "orm and fn filtered, no offset, no limit should work" )
found = self.history_manager.list( filters=filters )
self.assertEqual( found, [ history2, history3 ] )
self.log( "orm and fn filtered, no offset, limit should work" )
found = self.history_manager.list( filters=filters, limit=1 )
self.assertEqual( found, [ history2 ] )
self.log( "orm and fn filtered, offset, no limit should work" )
found = self.history_manager.list( filters=filters, offset=1 )
self.assertEqual( found, [ history3 ] )
self.log( "orm and fn filtered, offset, limit should work" )
found = self.history_manager.list( filters=filters, offset=1, limit=1 )
self.assertEqual( found, [ history3 ] )
self.log( "orm and fn filtered, zero limit should return empty list" )
found = self.history_manager.list( filters=filters, limit=0 )
self.assertEqual( found, [] )
self.log( "orm and fn filtered, past len offset should return empty list" )
found = self.history_manager.list( filters=filters, offset=len( deleted_and_annotated ) )
self.assertEqual( found, [] )
self.log( "orm and fn filtered, negative limit should return full list" )
found = self.history_manager.list( filters=filters, limit=-1 )
self.assertEqual( found, deleted_and_annotated )
self.log( "orm and fn filtered, negative offset should return full list" )
found = self.history_manager.list( filters=filters, offset=-1 )
self.assertEqual( found, deleted_and_annotated )
# =============================================================================
class HistoryAsContainerTestCase( BaseTestCase, CreatesCollectionsMixin ):
def set_up_managers( self ):
super( HistoryAsContainerTestCase, self ).set_up_managers()
self.history_manager = HistoryManager( self.app )
self.hda_manager = hdas.HDAManager( self.app )
self.collection_manager = collections.DatasetCollectionManager( self.app )
def add_hda_to_history( self, history, **kwargs ):
dataset = self.hda_manager.dataset_manager.create()
hda = self.hda_manager.create( history=history, dataset=dataset, **kwargs )
return hda
def add_list_collection_to_history( self, history, hdas, name='test collection', **kwargs ):
hdca = self.collection_manager.create( self.trans, history, name, 'list',
element_identifiers=self.build_element_identifiers( hdas ) )
return hdca
def test_contents( self ):
user2 = self.user_manager.create( **user2_data )
history = self.history_manager.create( name='history', user=user2 )
self.log( "calling contents on an empty history should return an empty list" )
self.assertEqual( [], list( self.history_manager.contents( history ) ) )
self.log( "calling contents on an history with hdas should return those in order of their hids" )
hdas = [ self.add_hda_to_history( history, name=( 'hda-' + str( x ) ) ) for x in xrange( 3 ) ]
random.shuffle( hdas )
ordered_hda_contents = list( self.history_manager.contents( history ) )
self.assertEqual( map( lambda hda: hda.hid, ordered_hda_contents ), [ 1, 2, 3 ] )
self.log( "calling contents on an history with both hdas and collections should return both" )
hdca = self.add_list_collection_to_history( history, hdas )
all_contents = list( self.history_manager.contents( history ) )
self.assertEqual( all_contents, list( ordered_hda_contents ) + [ hdca ] )
def test_contained( self ):
user2 = self.user_manager.create( **user2_data )
history = self.history_manager.create( name='history', user=user2 )
self.log( "calling contained on an empty history should return an empty list" )
self.assertEqual( [], list( self.history_manager.contained( history ) ) )
self.log( "calling contained on an history with both hdas and collections should return only hdas" )
hdas = [ self.add_hda_to_history( history, name=( 'hda-' + str( x ) ) ) for x in xrange( 3 ) ]
self.add_list_collection_to_history( history, hdas )
self.assertEqual( list( self.history_manager.contained( history ) ), hdas )
def test_subcontainers( self ):
user2 = self.user_manager.create( **user2_data )
        history = self.history_manager.create( name='history', user=user2 )
# License: BSD 3 clause
# -*- coding: utf8 -*-
import unittest
import numpy as np
from scipy.sparse import csr_matrix
from tick.array_test.build import array_test as test
class Test(unittest.TestCase):
def setUp(self):
self.correspondence_dict = {
'Double': {
'python_type': float,
'cpp_type': 'double',
},
'Float': {
'python_type': np.float32,
'cpp_type': 'float',
},
'Int': {
'python_type': np.int32,
'cpp_type': 'std::int32_t',
},
'UInt': {
'python_type': np.uint32,
'cpp_type': 'std::uint32_t',
},
'Short': {
'python_type': np.int16,
'cpp_type': 'std::int16_t',
},
'UShort': {
'python_type': np.uint16,
'cpp_type': 'std::uint16_t',
},
'Long': {
'python_type': np.int64,
'cpp_type': 'std::int64_t',
},
'ULong': {
'python_type': np.uint64,
'cpp_type': 'std::uint64_t',
}
}
for array_type, info in self.correspondence_dict.items():
# fixed number of the correct type
info['number'] = 148 # info['python_type'](np.exp(5))
# The dense array of the corresponding type
python_array = np.array([1, 2, 5, 0, 4,
1]).astype(info['python_type'])
# The dense array 2D of the corresponding type
python_array_2d = np.array([[1, 2, 5],
[0, 4, 1]]).astype(info['python_type'])
# The list of dense array of the corresponding type
python_array_list_1d = [
np.array([1, 2, 5]).astype(info['python_type']),
np.array([1, 2, 9]).astype(info['python_type']),
np.array([0, 4]).astype(info['python_type'])
]
python_array2d_list_1d = [
np.array([[1, 2, 5], [0, 4, 1]]).astype(info['python_type']),
np.array([[1, 2, 9], [1, 2, 5],
[0, 4, 1]]).astype(info['python_type']),
np.array([[0]]).astype(info['python_type'])
]
python_array_list_2d = [[
np.array([1, 2, 5]).astype(info['python_type'])
], [
np.array([1, 2, 9, 5]).astype(info['python_type']),
np.array([0, 4, 1]).astype(info['python_type'])
], []]
python_array2d_list_2d = [[
np.array([[1, 2, 5], [0, 4, 1]]).astype(info['python_type']),
], [
np.array([[1, 2, 9], [1, 2, 5],
[0, 4, 1]]).astype(info['python_type']),
np.array([[0]]).astype(info['python_type'])
], []]
# The sparse array of the corresponding type
python_sparse_array = csr_matrix(
(np.array([1.5, 2, 3, 1]), np.array([3, 5, 7, 9]),
np.array([0, 4]))).astype(info['python_type'])
# The sparse array 2D of the corresponding type
python_sparse_array_2d = csr_matrix(
(np.array([1.5, 2, 3, 1]), np.array([3, 5, 7, 4]),
np.array([0, 3, 4]))).astype(info['python_type'])
python_sparse_array_list_1d = [
csr_matrix((np.array([1.5, 2, 3, 1]), np.array([3, 5, 7, 9]),
np.array([0, 4]))).astype(info['python_type']),
csr_matrix((np.array([1.5, 2, 3]), np.array([3, 5, 11]),
np.array([0, 3]))).astype(info['python_type']),
csr_matrix((np.array([1.5, 2, 3, 1, 2]),
np.array([3, 5, 7, 4, 1]),
np.array([0, 5]))).astype(info['python_type'])
]
# TODO: add mixed list
python_sparse_array2d_list_1d = [
csr_matrix((np.array([1.5, 2, 3, 1]), np.array([3, 5, 7, 9]),
np.array([0, 3, 4]))).astype(info['python_type']),
csr_matrix((np.array([1.5, 2, 3]), np.array([3, 5, 11]),
np.array([0, 1, 3]))).astype(info['python_type']),
csr_matrix(
(np.array([1.5, 2, 3, 1, 2]), np.array([3, 5, 7, 4, 1]),
np.array([0, 2, 3, 5]))).astype(info['python_type'])
]
python_sparse_array_list_2d = [[
csr_matrix((np.array([1.5, 2, 3, 1]), np.array([3, 5, 7, 9]),
np.array([0, 4]))).astype(info['python_type'])
], [
csr_matrix((np.array([1.5, 2, 3]), np.array([3, 5, 11]),
np.array([0, 3]))).astype(info['python_type']),
csr_matrix((np.array([1.5, 2, 3, 1, 2]),
np.array([3, 5, 7, 4, 1]),
np.array([0, 5]))).astype(info['python_type'])
], []]
python_sparse_array2d_list_2d = [[
csr_matrix((np.array([1.5, 2, 3, 1]), np.array([3, 5, 7, 9]),
np.array([0, 3, 4]))).astype(info['python_type'])
], [
csr_matrix((np.array([1.5, 2, 3]), np.array([3, 5, 11]),
np.array([0, 1, 3]))).astype(info['python_type']),
csr_matrix(
(np.array([1.5, 2, 3, 1, 2]), np.array([3, 5, 7, 4, 1]),
np.array([0, 2, 3, 5]))).astype(info['python_type'])
], []]
info['python_array'] = python_array
info['python_array_2d'] = python_array_2d
info['python_array_list_1d'] = python_array_list_1d
info['python_array2d_list_1d'] = python_array2d_list_1d
info['python_array2d_list_2d'] = python_array2d_list_2d
info['python_array_list_2d'] = python_array_list_2d
info['python_sparse_array'] = python_sparse_array
info['python_sparse_array_2d'] = python_sparse_array_2d
info['python_sparse_array_list_1d'] = python_sparse_array_list_1d
info['python_sparse_array2d_list_1d'] = \
python_sparse_array2d_list_1d
info['python_sparse_array_list_2d'] = python_sparse_array_list_2d
info['python_sparse_array2d_list_2d'] = \
python_sparse_array2d_list_2d
# corresponding test functions
# for typemap in
info['typemap_in_array'] = \
getattr(test, 'test_typemap_in_Array%s' % array_type)
info['typemap_in_array_2d'] = \
getattr(test, 'test_typemap_in_Array%s2d' % array_type)
info['typemap_in_array_list_1d'] = \
getattr(test, 'test_typemap_in_Array%sList1D' % array_type)
info['typemap_in_array_list_2d'] = \
getattr(test, 'test_typemap_in_Array%sList2D' % array_type)
info['typemap_in_sparse_array'] = \
getattr(test, 'test_typemap_in_SparseArray%s' % array_type)
info['typemap_in_sparse_array_2d'] = \
getattr(test, 'test_typemap_in_SparseArray%s2d' % array_type)
info['typemap_in_sarray_ptr'] = \
getattr(test, 'test_typemap_in_SArray%sPtr' % array_type)
info['typemap_in_sarray_ptr_2d'] = \
getattr(test, 'test_typemap_in_SArray%s2dPtr' % array_type)
info['typemap_in_sarray_ptr_list_1d'] = \
getattr(test, 'test_typemap_in_SArray%sPtrList1D' % array_type)
info['typemap_in_sarray_ptr_list_2d'] = \
getattr(test, 'test_typemap_in_SArray%sPtrList2D' % array_type)
info['typemap_in_sarray2d_ptr_list_1d'] = \
getattr(test, 'test_typemap_in_SArray%s2dPtrList1D' %
array_type)
info['typemap_in_sarray2d_ptr_list_2d'] = \
getattr(test, 'test_typemap_in_SArray%s2dPtrList2D' %
array_type)
info['typemap_in_varray_ptr'] = \
getattr(test, 'test_typemap_in_VArray%sPtr' % array_type)
info['typemap_in_varray_ptr_list_1d'] = \
getattr(test, 'test_typemap_in_VArray%sPtrList1D' % array_type)
info['typemap_in_varray_ptr_list_2d'] = \
getattr(test, 'test_typemap_in_VArray%sPtrList2D' % array_type)
info['typemap_in_base_array'] = \
getattr(test, 'test_typemap_in_BaseArray%s' % array_type)
info['typemap_in_base_array_2d'] = \
getattr(test, 'test_typemap_in_BaseArray%s2d' % array_type)
info['typemap_in_sparse_array_ptr'] = \
getattr(test, 'test_typemap_in_SSparseArray%sPtr' % array_type)
info['typemap_in_sparse_array_2d_ptr'] = \
getattr(test, 'test_typemap_in_SSparseArray%s2dPtr' %
array_type)
info['typemap_in_base_array_ptr'] = \
getattr(test, 'test_typemap_in_SBaseArray%sPtr' % array_type)
info['typemap_in_base_array_2d_ptr'] = \
getattr(test, 'test_typemap_in_SBaseArray%s2dPtr' %
array_type)
info['typemap_in_base_array_list_1d'] = \
getattr(test, 'test_typemap_in_BaseArray%sList1D' % array_type)
info['typemap_in_base_array_list_2d'] = \
getattr(test, 'test_typemap_in_BaseArray%sList2D' % array_type)
info['typemap_in_base_array2d_list_1d'] = \
getattr(test, 'test_typemap_in_BaseArray%s2dList1D' %
array_type)
info['typemap_in_base_array2d_list_2d'] = \
getattr(test, 'test_typemap_in_BaseArray%s2dList2D' %
array_type)
info['typemap_in_base_array_ptr_list_1d'] = \
getattr(test, 'test_typemap_in_SBaseArray%sPtrList1D' %
array_type)
info['typemap_in_base_array_ptr_list_2d'] = \
getattr(test, 'test_typemap_in_SBaseArray%sPtrList2D' %
array_type)
info['typemap_in_base_array2d_ptr_list_1d'] = \
getattr(test, 'test_typemap_in_SBaseArray%s2dPtrList1D' %
array_type)
info['typemap_in_base_array2d_ptr_list_2d'] = \
getattr(test, 'test_typemap_in_SBaseArray%s2dPtrList2D' %
array_type)
# Functions that are not overloaded to test error messages
info['typemap_in_array_not_ol'] = \
getattr(test, 'test_typemap_in_not_ol_Array%s' % array_type)
info['typemap_in_array_2d_not_ol'] = \
getattr(test, 'test_typemap_in_not_ol_Array%s2d' % array_type)
info['typemap_in_sparse_array_not_ol'] = \
getattr(test,
'test_typemap_in_not_ol_SparseArray%s' % array_type)
info['typemap_in_base_array_not_ol'] = \
getattr(test,
'test_typemap_in_not_ol_BaseArray%s' % array_type)
info['typemap_in_array_list_1d_not_ol'] = \
getattr(test,
'test_typemap_in_not_ol_Array%sList1D' % array_type)
info['typemap_in_array_list_2d_not_ol'] = \
getattr(test,
'test_typemap_in_not_ol_Array%sList2D' % array_type)
# for typemap out
info['typemap_out_sarray_ptr'] = \
getattr(test, 'test_typemap_out_SArray%sPtr' % array_type)
info['typemap_out_sarray_ptr_list_1d'] = \
getattr(test, 'test_typemap_out_SArray%sPtrList1D' % array_type)
info['typemap_out_sarray_ptr_list_2d'] = \
getattr(test, 'test_typemap_out_SArray%sPtrList2D' % array_type)
info['typemap_out_sarray_2d_ptr'] = \
getattr(test, 'test_typemap_out_SArray%s2dPtr' % array_type)
def test_array_typemap_in(self):
"""...Test we can pass an Array as argument
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array']
extract_function = info['typemap_in_array']
self.assertEqual(python_array.sum(),
extract_function(python_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_array2d_typemap_in(self):
"""...Test we can pass an Array2d as argument
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array_2d']
extract_function = info['typemap_in_array_2d']
self.assertEqual(python_array.sum(),
extract_function(python_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_array_list_1d_typemap_in(self):
"""...Test we can pass a list of Arrays as argument
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array_list_1d']
extract_function = info['typemap_in_array_list_1d']
self.assertEqual(python_array[0].sum(),
extract_function(python_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_array_list_2d_typemap_in(self):
"""...Test we can pass a list of list of Arrays as argument
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array_list_2d']
extract_function = info['typemap_in_array_list_2d']
self.assertEqual(python_array[0][0].sum(),
extract_function(python_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_sparsearray_typemap_in(self):
"""...Test we pass a SparseArray as argument
"""
for array_type, info in self.correspondence_dict.items():
python_sparse_array = info['python_sparse_array']
extract_function = info['typemap_in_sparse_array']
self.assertEqual(python_sparse_array.sum(),
extract_function(python_sparse_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_sparsearray2d_typemap_in(self):
"""...Test we can pass a SparseArray2d as argument
"""
for array_type, info in self.correspondence_dict.items():
python_sparse_array_2d = info['python_sparse_array_2d']
extract_function = info['typemap_in_sparse_array_2d']
self.assertEqual(python_sparse_array_2d.sum(),
extract_function(python_sparse_array_2d))
self.assertEqual(info['number'], extract_function(info['number']))
def test_sarray_ptr_typemap_in(self):
"""...Test we can pass an SArray shared pointer as argument
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array']
extract_function = info['typemap_in_sarray_ptr']
self.assertEqual(python_array.sum(),
extract_function(python_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_sarray2dptr_typemap_in(self):
"""...Test we can pass an SArray2d shared pointer as argument
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array_2d']
extract_function = info['typemap_in_sarray_ptr_2d']
self.assertEqual(python_array.sum(),
extract_function(python_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_sarray_ptr_list_1d_typemap_in(self):
"""...Test we can pass a list of SArray shared pointers as argument
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array_list_1d']
extract_function = info['typemap_in_sarray_ptr_list_1d']
self.assertEqual(python_array[0].sum(),
extract_function(python_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_sarray_ptr_list_2d_typemap_in(self):
"""...Test we can pass a list of list of SArray shared pointers as
argument
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array_list_2d']
extract_function = info['typemap_in_sarray_ptr_list_2d']
self.assertEqual(python_array[0][0].sum(),
extract_function(python_array))
self.assertEqual(info['number'], extract_function(info['number']))
    def test_sarray2d_ptr_list_1d_typemap_in(self):
"""...Test we can pass a list of SArray2d shared pointers as
argument
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array2d_list_1d']
extract_function = info['typemap_in_sarray2d_ptr_list_1d']
self.assertEqual(python_array[0].sum(),
extract_function(python_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_sarray2d_ptr_list_2d_typemap_in(self):
"""...Test we can pass a list of list of SArray2d shared pointers as
argument
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array2d_list_2d']
extract_function = info['typemap_in_sarray2d_ptr_list_2d']
self.assertEqual(python_array[0][0].sum(),
extract_function(python_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_varray_ptr_typemap_in(self):
"""...Test we can pass an VArray shared pointer as argument
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array']
extract_function = info['typemap_in_varray_ptr']
self.assertEqual(python_array.sum(),
extract_function(python_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_varray_ptr_list_1d_typemap_in(self):
"""...Test we can pass a list of VArray shared pointers as argument
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array_list_1d']
extract_function = info['typemap_in_varray_ptr_list_1d']
self.assertEqual(python_array[0].sum(),
extract_function(python_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_varray_ptr_list_2d_typemap_in(self):
"""...Test we can pass a list of list of VArray shared pointers as
argument
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array_list_2d']
extract_function = info['typemap_in_varray_ptr_list_2d']
self.assertEqual(python_array[0][0].sum(),
extract_function(python_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_basearray_typemap_in(self):
"""...Test we can pass an BaseArray as argument for sparse and dense
arrays
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array']
python_sparse_array = info['python_sparse_array']
extract_function = info['typemap_in_base_array']
# Test dense
self.assertEqual(python_array.sum(),
extract_function(python_array))
# Test sparse
self.assertEqual(python_sparse_array.data.sum(),
extract_function(python_sparse_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_basearray2d_typemap_in(self):
"""...Test we can pass an BaseArray2d as argument for sparse and dense
arrays
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array_2d']
python_sparse_array = info['python_sparse_array_2d']
extract_function = info['typemap_in_base_array_2d']
# Test dense
self.assertEqual(python_array.sum(),
extract_function(python_array))
# Test sparse
self.assertEqual(python_sparse_array.data.sum(),
extract_function(python_sparse_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_ssparsearrayptr_typemap_in(self):
"""...Test we can pass a SSparseArray shared pointer as argument
"""
        for array_type, info in self.correspondence_dict.items():
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Module to manage the autogrouping functionality by ``verdi run``."""
import re
import warnings
from aiida.common import exceptions, timezone
from aiida.common.escaping import escape_for_sql_like, get_regex_pattern_from_sql
from aiida.common.warnings import AiidaDeprecationWarning
from aiida.orm import AutoGroup
from aiida.plugins.entry_point import get_entry_point_string_from_class
CURRENT_AUTOGROUP = None
class Autogroup:
"""Class to create a new `AutoGroup` instance that will, while active, automatically contain all nodes being stored.
    The autogrouping is checked by the `Node.store()` method: if `CURRENT_AUTOGROUP is not None`, the method
    `Autogroup.is_to_be_grouped` is called to decide whether to put the node being stored in the current
    `AutoGroup` instance.
The exclude/include lists are lists of strings like:
    ``aiida.data:int``, ``aiida.calculations:quantumespresso.pw``,
``aiida.data:array.%``, ...
    i.e.: a string identifying the base class, followed by a colon and the path to the class
as accepted by CalculationFactory/DataFactory.
Each string can contain one or more wildcard characters ``%``;
in this case this is used in a ``like`` comparison with the QueryBuilder.
Note that in this case you have to remember that ``_`` means "any character"
in the QueryBuilder, and you need to escape it if you mean a literal underscore.
Only one of the two (between exclude and include) can be set.
If none of the two is set, everything is included.
"""
def __init__(self):
"""Initialize with defaults."""
self._exclude = None
self._include = None
now = timezone.now()
default_label_prefix = f"Verdi autogroup on {now.strftime('%Y-%m-%d %H:%M:%S')}"
self._group_label_prefix = default_label_prefix
self._group_label = None # Actual group label, set by `get_or_create_group`
@staticmethod
def validate(strings):
"""Validate the list of strings passed to set_include and set_exclude."""
if strings is None:
return
valid_prefixes = set(['aiida.node', 'aiida.calculations', 'aiida.workflows', 'aiida.data'])
for string in strings:
pieces = string.split(':')
if len(pieces) != 2:
raise exceptions.ValidationError(
f"'{string}' is not a valid include/exclude filter, must contain two parts split by a colon"
)
if pieces[0] not in valid_prefixes:
raise exceptions.ValidationError(
f"'{string}' has an invalid prefix, must be among: {sorted(valid_prefixes)}"
)
def get_exclude(self):
"""Return the list of classes to exclude from autogrouping.
Returns ``None`` if no exclusion list has been set."""
return self._exclude
def get_include(self):
"""Return the list of classes to include in the autogrouping.
Returns ``None`` if no inclusion list has been set."""
return self._include
def get_group_label_prefix(self):
"""Get the prefix of the label of the group.
If no group label prefix was set, it will set a default one by itself."""
return self._group_label_prefix
def get_group_name(self):
"""Get the label of the group.
If no group label was set, it will set a default one by itself.
.. deprecated:: 1.2.0
Will be removed in `v2.0.0`, use :py:meth:`.get_group_label_prefix` instead.
"""
warnings.warn('function is deprecated, use `get_group_label_prefix` instead', AiidaDeprecationWarning) # pylint: disable=no-member
return self.get_group_label_prefix()
def set_exclude(self, exclude):
"""Set the list of classes to exclude in the autogrouping.
:param exclude: a list of valid entry point strings (might contain '%' to be used as
string to be matched using SQL's ``LIKE`` pattern-making logic), or ``None``
          to specify no exclude list.
"""
if isinstance(exclude, str):
exclude = [exclude]
self.validate(exclude)
if exclude is not None and self.get_include() is not None:
# It's ok to set None, both as a default, or to 'undo' the exclude list
raise exceptions.ValidationError('Cannot both specify exclude and include')
self._exclude = exclude
def set_include(self, include):
"""Set the list of classes to include in the autogrouping.
:param include: a list of valid entry point strings (might contain '%' to be used as
string to be matched using SQL's ``LIKE`` pattern-making logic), or ``None``
to specify no include list.
"""
if isinstance(include, str):
include = [include]
self.validate(include)
if include is not None and self.get_exclude() is not None:
# It's ok to set None, both as a default, or to 'undo' the include list
raise exceptions.ValidationError('Cannot both specify exclude and include')
self._include = include
def set_group_label_prefix(self, label_prefix):
"""
Set the label of the group to be created
"""
if not isinstance(label_prefix, str):
raise exceptions.ValidationError('group label must be a string')
self._group_label_prefix = label_prefix
def set_group_name(self, gname):
"""Set the name of the group.
.. deprecated:: 1.2.0
Will be removed in `v2.0.0`, use :py:meth:`.set_group_label_prefix` instead.
"""
warnings.warn('function is deprecated, use `set_group_label_prefix` instead', AiidaDeprecationWarning) # pylint: disable=no-member
return self.set_group_label_prefix(label_prefix=gname)
@staticmethod
def _matches(string, filter_string):
"""Check if 'string' matches the 'filter_string' (used for include and exclude filters).
If 'filter_string' does not contain any % sign, perform an exact match.
Otherwise, match with a SQL-like query, where % means any character sequence,
        and _ means a single character (these characters can be escaped with a backslash).
:param string: the string to match.
:param filter_string: the filter string.
"""
if '%' in filter_string:
regex_filter = get_regex_pattern_from_sql(filter_string)
return re.match(regex_filter, string) is not None
return string == filter_string
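    # Illustrative examples (added) of the matching rules implemented above, using
    # example entry point strings:
    #   Autogroup._matches('aiida.data:int', 'aiida.data:int')  -> True  (exact match)
    #   Autogroup._matches('aiida.data:int', 'aiida.data:%')    -> True  (SQL-like wildcard)
    #   Autogroup._matches('aiida.calculations:arithmetic.add', 'aiida.data:%') -> False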
def is_to_be_grouped(self, node):
"""
Return whether the given node has to be included in the autogroup according to include/exclude list
:return (bool): True if ``node`` is to be included in the autogroup
"""
# strings, including possibly 'all'
include = self.get_include()
exclude = self.get_exclude()
if include is None and exclude is None:
# Include all classes by default if nothing is explicitly specified.
return True
        # We should never be here, anyway - this should be caught by the `set_include/exclude` methods
assert include is None or exclude is None, "You cannot specify both an 'include' and an 'exclude' list"
entry_point_string = node.process_type
# If there is no `process_type` we are dealing with a `Data` node so we get the entry point from the class
if not entry_point_string:
entry_point_string = get_entry_point_string_from_class(node.__class__.__module__, node.__class__.__name__)
if include is not None:
# As soon as a filter string matches, we include the class
return any(self._matches(entry_point_string, filter_string) for filter_string in include)
# If we are here, exclude is not None
        # include *only* if *none* of the filters match (that is, exclude as
# soon as any of the filters matches)
return not any(self._matches(entry_point_string, filter_string) for filter_string in exclude)
def clear_group_cache(self):
"""Clear the cache of the group name.
This is mostly used by tests when they reset the database.
"""
self._group_label = None
def get_or_create_group(self):
"""Return the current `AutoGroup`, or create one if None has been set yet.
This function implements a somewhat complex logic that is however needed
to make sure that, even if `verdi run` is called at the same time multiple
times, e.g. in a for loop in bash, there is never the risk that two ``verdi run``
Unix processes try to create the same group, with the same label, ending
up in a crash of the code (see PR #3650).
Here, instead, we make sure that if this concurrency issue happens,
        one of the two will get an IntegrityError from the DB, and then recover
trying to create a group with a different label (with a numeric suffix appended),
until it manages to create it.
"""
from aiida.orm import QueryBuilder
# When this function is called, if it is the first time, just generate
        # a new group name (later on, after this ``if`` block).
# In that case, we will later cache in ``self._group_label`` the group label,
# So the group with the same name can be returned quickly in future
# calls of this method.
if self._group_label is not None:
builder = QueryBuilder().append(AutoGroup, filters={'label': self._group_label})
results = [res[0] for res in builder.iterall()]
if results:
# If it is not empty, it should have only one result due to the uniqueness constraints
assert len(results) == 1, 'I got more than one autogroup with the same label!'
return results[0]
# There are no results: probably the group has been deleted.
# I continue as if it was not cached
self._group_label = None
label_prefix = self.get_group_label_prefix()
        # Try to do a preliminary QB query to avoid to do
<filename>src/python/grapl_analyzerlib/tests/test_process_node.py
import time
import unittest
from typing import cast, Dict
import hypothesis
import hypothesis.strategies as st
import pytest
from hypothesis import given
from grapl_analyzerlib.prelude import *
from grapl_analyzerlib.test_utils.dgraph_utils import upsert, create_edge
from grapl_analyzerlib.test_utils.strategies.asset_view_strategy import (
asset_props_strategy,
get_or_create_asset,
AssetProps,
)
from grapl_analyzerlib.test_utils.strategies.misc import text_dgraph_compat
from grapl_analyzerlib.test_utils.strategies.process_view_strategy import (
process_props_strategy,
get_or_create_process,
ProcessProps,
)
Property = str
def assert_equal_props(a: Viewable, b: Viewable) -> None:
"""
NOTE: Doesn't look at edges at all.
You may need to fetch more properties from the queried one.
"""
for k, v in a.predicates.items():
assert v == b.predicates[k]
def assert_equal_identity(a: Viewable, b: Viewable) -> None:
""" Assert these nodes have the same type and uuid """
assert a.node_key == b.node_key
def get_or_create_process_node_deprecated(
graph_client: GraphClient,
node_key: str,
# properties
process_id: str,
arguments: str,
created_timestamp: str,
terminate_time: str,
image_name: str,
process_name: str,
) -> ProcessView:
"""
Deprecated in favor of property_view_strategy.py
"""
node_props: Dict[str, Property] = {
"process_id": process_id,
"arguments": arguments,
"created_timestamp": created_timestamp,
"terminate_time": terminate_time,
"image_name": image_name,
"process_name": process_name,
}
return cast(
ProcessView, upsert(graph_client, "Process", ProcessView, node_key, node_props)
)
@pytest.mark.integration_test
class TestProcessQuery(unittest.TestCase):
@hypothesis.settings(deadline=None)
@given(process_props=process_props_strategy())
def test_single_process_contains_key(self, process_props: ProcessProps) -> None:
graph_client = GraphClient()
created_proc = get_or_create_process(self, graph_client, process_props)
# Setup complete, do some queries
queried_proc = ProcessQuery().query_first(
graph_client, contains_node_key=created_proc.node_key
)
assert queried_proc
assert created_proc.get_process_id() == queried_proc.get_process_id()
assert created_proc.node_key == queried_proc.node_key
assert "Process" == queried_proc.get_node_type()
assert created_proc.get_arguments() == queried_proc.get_arguments()
assert (
created_proc.get_created_timestamp() == queried_proc.get_created_timestamp()
)
assert created_proc.get_terminate_time() == queried_proc.get_terminate_time()
assert created_proc.get_image_name() == queried_proc.get_image_name()
assert created_proc.get_process_name() == queried_proc.get_process_name()
assert not queried_proc.get_asset()
@hypothesis.settings(deadline=None)
@given(
asset_props=asset_props_strategy(),
process_props=process_props_strategy(),
)
def test_single_process_connected_to_asset_node(
self,
asset_props: AssetProps,
process_props: ProcessProps,
):
graph_client = GraphClient()
created_asset = get_or_create_asset(self, graph_client, asset_props)
created_proc = get_or_create_process(self, graph_client, process_props)
create_edge(
graph_client,
created_asset.uid,
"asset_processes",
created_proc.uid,
)
create_edge(graph_client, created_proc.uid, "process_asset", created_asset.uid)
# Setup complete, do some queries
queried_proc = (
ProcessQuery()
.with_asset(AssetQuery().with_hostname(eq=created_asset.get_hostname()))
.query_first(graph_client, contains_node_key=created_proc.node_key)
)
assert queried_proc
queried_proc._expand()
assert_equal_props(created_proc, queried_proc)
queried_asset = queried_proc.get_asset()
assert_equal_identity(created_asset, queried_asset)
# Given that the code that generates timestamps only uses unsized types we can make some
# assumptions about the data
@hypothesis.settings(deadline=None)
@given(process_props=process_props_strategy())
def test_process_query_view_parity(self, process_props: ProcessProps):
graph_client = GraphClient()
created_proc = get_or_create_process(
self,
graph_client,
process_props,
)
queried_proc = (
ProcessQuery()
.with_node_key(eq=created_proc.node_key)
.query_first(graph_client)
)
assert queried_proc
assert process_props["node_key"] == queried_proc.node_key
assert "Process" == queried_proc.get_node_type()
assert process_props["process_id"] == queried_proc.get_process_id()
assert process_props["arguments"] == queried_proc.get_arguments()
assert (
process_props["created_timestamp"] == queried_proc.get_created_timestamp()
)
assert None == queried_proc.get_asset()
assert process_props["terminate_time"] == queried_proc.get_terminate_time()
assert process_props["image_name"] == queried_proc.get_image_name()
assert process_props["process_name"] == queried_proc.get_process_name()
@hypothesis.settings(deadline=None)
@given(
node_key=st.uuids(),
process_id=st.integers(min_value=1, max_value=2 ** 32),
created_timestamp=st.integers(min_value=0, max_value=2 ** 48),
terminate_time=st.integers(min_value=0, max_value=2 ** 48),
image_name=st.text(min_size=1, max_size=64),
process_name=st.text(min_size=1, max_size=64),
arguments=st.text(min_size=1, max_size=64),
)
def test_process_query_view_parity_eq(
self,
node_key,
process_id,
created_timestamp,
terminate_time,
image_name,
process_name,
arguments,
):
node_key = "test_process_query_view_parity_eq" + str(node_key)
graph_client = GraphClient()
get_or_create_process_node_deprecated(
graph_client,
node_key,
process_id,
arguments,
created_timestamp,
terminate_time,
image_name,
process_name,
)
queried_proc = (
ProcessQuery()
.with_node_key(eq=node_key)
.with_process_id(eq=process_id)
.with_arguments(eq=arguments)
.with_created_timestamp(eq=created_timestamp)
.with_terminate_time(eq=terminate_time)
.with_image_name(eq=image_name)
.with_process_name(eq=process_name)
.query_first(graph_client)
)
assert node_key == queried_proc.node_key
assert "Process" == queried_proc.get_node_type()
assert process_id == queried_proc.get_process_id()
assert arguments == queried_proc.get_arguments()
assert created_timestamp == queried_proc.get_created_timestamp()
assert terminate_time == queried_proc.get_terminate_time()
assert image_name == queried_proc.get_image_name()
assert process_name == queried_proc.get_process_name()
@hypothesis.settings(deadline=None)
@given(process_props=process_props_strategy())
def test_process_query_view_miss(self, process_props: ProcessProps) -> None:
graph_client = GraphClient()
created_proc = get_or_create_process(self, graph_client, process_props)
assert (
created_proc.process_id is not None
and created_proc.arguments is not None
and created_proc.created_timestamp is not None
and created_proc.terminate_time is not None
and created_proc.image_name is not None
and created_proc.process_name is not None
)
queried_proc = (
ProcessQuery()
.with_node_key(eq=created_proc.node_key)
.with_process_id(eq=Not(created_proc.process_id))
.with_arguments(eq=Not(created_proc.arguments))
.with_created_timestamp(eq=Not(created_proc.created_timestamp))
.with_terminate_time(eq=Not(created_proc.terminate_time))
.with_image_name(eq=Not(created_proc.image_name))
.with_process_name(eq=Not(created_proc.process_name))
.query_first(graph_client)
)
assert not queried_proc
# Given that the code that generates timestamps only uses unsized types we can make some
# assumptions about the data
@hypothesis.settings(deadline=None)
@given(
node_key=st.uuids(),
process_id=st.integers(min_value=1, max_value=2 ** 32),
created_timestamp=st.integers(min_value=0, max_value=2 ** 48),
terminate_time=st.integers(min_value=0, max_value=2 ** 48),
image_name=text_dgraph_compat(),
process_name=text_dgraph_compat(),
arguments=text_dgraph_compat(),
)
def test_process_query_view_parity_contains(
self,
node_key,
process_id,
created_timestamp,
terminate_time,
image_name,
process_name,
arguments,
):
node_key = "test_process_query_view_parity_contains" + str(node_key)
graph_client = GraphClient()
get_or_create_process_node_deprecated(
graph_client,
node_key,
process_id=process_id,
arguments=arguments,
created_timestamp=created_timestamp,
terminate_time=terminate_time,
image_name=image_name,
process_name=process_name,
)
query = ProcessQuery().with_node_key(eq=node_key)
# Don't fuck with newlines due to a dgraph bug
# https://github.com/dgraph-io/dgraph/issues/4694
for prop in [arguments, image_name, process_name]:
hypothesis.assume(len(prop) > 3)
hypothesis.assume("\n" not in prop)
hypothesis.assume("\\" not in prop)
# These fail because dgraph doesn't like the query
# (regexp(process_name, /00\\//))
query.with_arguments(contains=arguments[: len(arguments) - 1])
query.with_image_name(contains=image_name[: len(image_name) - 1])
query.with_process_name(contains=process_name[: len(process_name) - 1])
queried_proc = query.query_first(graph_client)
assert queried_proc
assert "Process" == queried_proc.get_node_type()
assert process_id == queried_proc.get_process_id()
assert node_key == queried_proc.node_key
assert arguments == queried_proc.get_arguments()
assert created_timestamp == queried_proc.get_created_timestamp()
assert terminate_time == queried_proc.get_terminate_time()
assert image_name == queried_proc.get_image_name()
assert process_name == queried_proc.get_process_name()
def test_parent_children_edge(self) -> None:
# Given: a process with a pid 100 & process_name word.exe,
graph_client = GraphClient()
created_timestamp = int(time.time())
parent_process = {
"process_id": 100,
"process_name": "word.exe",
"created_timestamp": created_timestamp,
} # type: Dict[str, Property]
parent_process_view = upsert(
graph_client,
"Process",
ProcessView,
"0e84f2ce-f711-46ce-bc9e-1b13c9ba6d6c",
parent_process,
)
child_process = {
"process_id": 110,
"process_name": "malware.exe",
"created_timestamp": created_timestamp + 1000,
} # type: Dict[str, Property]
child_process_view = upsert(
graph_client,
"Process",
ProcessView,
"46d2862f-cb58-4062-b35e-bb310b8d5b0d",
child_process,
)
create_edge(
graph_client,
parent_process_view.uid,
"children",
child_process_view.uid,
)
queried_process = (
ProcessQuery()
.with_node_key(eq="0e84f2ce-f711-46ce-bc9e-1b13c9ba6d6c")
.with_process_id(eq=100)
.with_process_name(contains="word")
.with_created_timestamp(eq=created_timestamp)
.with_children(
ProcessQuery()
.with_node_key(eq="<KEY>")
.with_process_id(eq=110)
.with_process_name(eq="malware.exe")
.with_created_timestamp(eq=created_timestamp + 1000)
)
.query_first(graph_client)
)
assert queried_process
assert queried_process.node_key == "0e84f2ce-f711-46ce-bc9e-1b13c9ba6d6c"
assert queried_process.process_id == 100
assert queried_process.process_name == "word.exe"
assert queried_process.created_timestamp == created_timestamp
assert len(queried_process.children) == 1
child = queried_process.children[0]
assert child.node_key == "<KEY>"
assert child.process_id == 110
assert child.process_name == "malware.exe"
assert child.created_timestamp == created_timestamp + 1000
def test_with_bin_file(self) -> None:
# Given: a process with a pid 100 & process_name word.exe,
graph_client = GraphClient()
created_timestamp = int(time.time())
parent_process = {
"process_id": 100,
"process_name": "word.exe",
"created_timestamp": created_timestamp,
} # type: Dict[str, Property]
parent_process_view = upsert(
graph_client,
"Process",
ProcessView,
"635952af-87f3-4a2a-a65d-3f1859db9525",
parent_process,
)
bin_file = {
"file_path": "/folder/file.txt",
"created_timestamp": created_timestamp + 1000,
} # type: Dict[str, Property]
bin_file_view = upsert(
graph_client,
"File",
FileView,
"9f16e0c9-33c0-4d18-9878-ef686373570b",
bin_file,
)
create_edge(
graph_client,
parent_process_view.uid,
"bin_file",
bin_file_view.uid,
)
queried_process = (
ProcessQuery()
.with_node_key(eq="635952af-87f3-4a2a-a65d-3f1859db9525")
.with_process_id(eq=100)
.with_process_name(contains="word")
.with_created_timestamp(eq=created_timestamp)
.with_bin_file(
FileQuery()
.with_node_key(eq="9f16e0c9-33c0-4d18-9878-ef686373570b")
.with_file_path(eq="/folder/file.txt")
)
.query_first(graph_client)
)
assert queried_process
assert "635952af-87f3-4a2a-a65d-3f1859db9525"
assert queried_process.process_id == 100
assert queried_process.process_name == "word.exe"
assert queried_process.created_timestamp == created_timestamp
bin_file = queried_process.bin_file
assert bin_file.node_key == "9f16e0c9-33c0-4d18-9878-ef686373570b"
assert bin_file.file_path == "/folder/file.txt"
def test_process_with_created_files(self) -> None:
# Given: a process with a pid 100 & process_name word.exe,
graph_client = GraphClient()
created_timestamp = int(time.time())
parent_process = {
"process_id": 100,
"process_name": "word.exe",
"created_timestamp": created_timestamp,
} # type: Dict[str, Property]
parent_process_view = upsert(
graph_client,
"Process",
ProcessView,
"763ddbda-8812-4a07-acfe-83402b92379d",
parent_process,
)
created_file = {
"file_path": "/folder/file.txt",
"created_timestamp": created_timestamp + 1000,
} # type: Dict[str, Property]
created_file_view = upsert(
graph_client,
"File",
FileView,
"575f103e-1a11-4650-9f1b-5b72e44dfec3",
created_file,
)
create_edge(
graph_client,
parent_process_view.uid,
"created_files",
created_file_view.uid,
)
queried_process = (
ProcessQuery()
.with_node_key(eq="<KEY>")
.with_process_id(eq=100)
.with_process_name(contains="word")
.with_created_timestamp(eq=created_timestamp)
.with_created_files(
FileQuery()
.with_node_key(eq="575f103e-1a11-4650-9f1b-5b72e44dfec3")
.with_file_path(eq="/folder/file.txt")
)
.query_first(graph_client)
)
assert queried_process
assert queried_process.process_id == 100
assert len(queried_process.created_files) == 1
created_file = queried_process.created_files[0]
assert created_file.file_path == "/folder/file.txt"
def test_with_deleted_files(self) -> None:
# Given: a process with a pid 100 & process_name word.exe,
graph_client = GraphClient()
created_timestamp = int(time.time())
parent_process = {
"process_id": 100,
"process_name": "word.exe",
"created_timestamp": created_timestamp,
} # type: Dict[str, Property]
parent_process_view = upsert(
graph_client,
"Process",
ProcessView,
"test_with_deleted_files-47527d73-22c4-4e0f-bf7d-184bf1f206e2",
parent_process,
)
deleted_file = {
"file_path": "/folder/file.txt",
"created_timestamp": created_timestamp + 1000,
} # type: Dict[str, Property]
deleted_file_view = upsert(
graph_client,
"File",
FileView,
"test_with_deleted_files8b8364ea-9b47-476b-8cf0-0f724adff10f",
deleted_file,
)
create_edge(
graph_client,
parent_process_view.uid,
"deleted_files",
deleted_file_view.uid,
)
queried_process = (
ProcessQuery()
.with_process_id(eq=100)
.with_process_name(contains="word")
.with_created_timestamp(eq=created_timestamp)
.with_deleted_files(FileQuery().with_file_path(eq="/folder/file.txt"))
.query_first(graph_client)
)
assert queried_process
assert queried_process.process_id == 100
def test_with_read_files(self) -> None:
# Given: a process with a pid 100 & process_name word.exe,
graph_client = GraphClient()
created_timestamp = int(time.time())
parent_process = {
"process_id": 100,
"process_name": "word.exe",
"created_timestamp": created_timestamp,
} # type: Dict[str, Property]
parent_process_view = upsert(
graph_client,
"Process",
ProcessView,
"test_with_read_files-669a3693-d960-401c-8d29-5d669ffcd660",
parent_process,
)
read_file = {
"file_path": "/folder/file.txt",
"created_timestamp": created_timestamp + 1000,
} # type: Dict[str, Property]
read_file_view = upsert(
graph_client,
"File",
FileView,
"test_with_read_files-aa9248ec-36ee-4177-ba1a-999de735e682",
read_file,
)
create_edge(
graph_client,
parent_process_view.uid,
"read_files",
read_file_view.uid,
)
queried_process = (
ProcessQuery()
.with_process_id(eq=100)
.with_process_name(contains="word")
.with_created_timestamp(eq=created_timestamp)
.with_read_files(FileQuery().with_file_path(eq="/folder/file.txt"))
.query_first(graph_client)
)
assert queried_process
assert (
queried_process.node_key
== "test_with_read_files-669a3693-d960-401c-8d29-5d669ffcd660"
)
assert queried_process.process_id == 100
assert queried_process.process_name == "word.exe"
assert len(queried_process.read_files) == 1
assert (
queried_process.read_files[0].node_key
== "test_with_read_files-aa9248ec-36ee-4177-ba1a-999de735e682"
)
assert queried_process.read_files[0].file_path == "/folder/file.txt"
def test_with_wrote_files(self) -> None:
# Given: a process with a pid 100 & process_name word.exe,
graph_client = GraphClient()
created_timestamp = int(time.time())
parent_process = {
"process_id": 100,
"process_name": "word.exe",
"created_timestamp": created_timestamp,
} # type: Dict[str, Property]
        parent_process_view =
<filename>casa_imaging/casa_utils.py
"""
Utility functions for operating on CASA-exported FITS files.
"""
import astropy.io.fits as fits
from astropy.wcs import WCS
import numpy as np
import os
import shutil
import scipy.stats as stats
import traceback
import yaml
from collections import OrderedDict as odict
import datetime
import sys
def get_hdu_info(hdu):
"""
Get info from a CASA-exported FITS header-unit list.
Args:
hdu : FITS header unit list
Output from astropy.io.fits.open(<fname>)
    Returns: (ra, dec, pols, freqs, stokax, freqax)
ra : ndarray of right ascension (degrees)
dec : ndarray of declination (degrees)
pols : ndarray containing polarization integers
freqs : ndarray containing frequencies in Hz
stokax : integer of polarization (stokes) axis in data cube
freqax : integer of frequency axis in data cube
"""
# get header
head = hdu[0].header
npix1 = head["NAXIS1"]
npix2 = head["NAXIS2"]
# get ra and dec arrays
w = WCS(hdu[0])
# convert pixel to equatorial coordinates
lon_arr, lat_arr = np.meshgrid(np.arange(npix1), np.arange(npix2))
lon, lat, s, f = w.all_pix2world(lon_arr.ravel(), lat_arr.ravel(), 0, 0, 0)
ra = lon.reshape(npix2, npix1)
dec = lat.reshape(npix2, npix1)
# get frequencies and polarizations
if head["CTYPE3"] == "FREQ":
freqax = 3
stokax = 4
elif head["CTYPE4"] == "FREQ":
freqax = 4
stokax = 3
else:
raise ValueError("Couldn't find freq and stokes axes in FITS file {}".format(hdu))
# get pols
pols = np.arange(head["NAXIS{}".format(stokax)]) * head["CDELT{}".format(stokax)] + head["CRVAL{}".format(stokax)]
    pols = np.asarray(pols, dtype=int)
# get cube frequencies
freqs = np.arange(head["NAXIS{}".format(freqax)]) * head["CDELT{}".format(freqax)] + head["CRVAL{}".format(freqax)]
return ra, dec, pols, freqs, stokax, freqax
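# Illustrative usage sketch (added; the file name is a placeholder):
#
#   hdu = fits.open('image.fits')
#   ra, dec, pols, freqs, stokax, freqax = get_hdu_info(hdu)
#   # ra and dec are 2D maps in degrees, freqs is in Hz, pols holds stokes integers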
def get_beam_info(hdu, pol_ind=0, pxunits=False):
"""
Takes a CASA-exported FITS HDU and gets the synthesized beam info
in degrees. If pxunits, assumes CDELT1 and CDELT2 are equivalent.
Args:
hdu : FITS header unit list
Output from astropy.io.fits.open(<fname>)
pol_ind : integer
Polarization index to query from beam information
pxunits : boolean
If True, return bmaj and bmin in pixel units
Returns: (bmaj, bmin, bpa)
bmaj : beam major axis in degrees (unless pxunits)
bmin : beam minor axis in degrees (unless pxunits)
bpa : beam position angle in degrees
"""
# test for old clean output where its in the header in degrees
if 'BMAJ' in hdu[0].header:
bmaj = hdu[0].header['BMAJ']
bmin = hdu[0].header['BMIN']
bpa = hdu[0].header['BPA']
else:
# new tclean output where its in the data in arcsecs
try:
bmaj = hdu[1].data['BMAJ'][pol_ind] / 3600.
bmin = hdu[1].data['BMIN'][pol_ind] / 3600.
bpa = hdu[1].data['BPA'][pol_ind]
except:
raise ValueError("Couldn't get access to synthesized beam in HDU.")
# convert bmaj and bmin to pixel units
if pxunits:
if not np.isclose(np.abs(hdu[0].header['CDELT1']), np.abs(hdu[0].header['CDELT2'])):
raise ValueError("Can't convert to pixel units b/c CDELT1 != CDELT2, which this conversion assumes")
bmaj = bmaj / np.abs(hdu[0].header['CDELT1'])
bmin = bmin / np.abs(hdu[0].header['CDELT1'])
return bmaj, bmin, bpa
def make_restoring_beam(bmaj, bmin, bpa, size=31):
"""
Make a model of the restoring (clean) beam.
Args:
bmaj : beam major axis in pixel units
bmin : beam minor axis in pixel units
bpa : beam position angle in degrees
size : integer side length of model in pixels. Must be odd.
Returns:
rest_beam : 2D ndarray of peak-normalized restoring beam
"""
assert size % 2 == 1, "size must be odd-valued."
# make a meshgrid
x, y = np.meshgrid(np.linspace(size//2+1, -size//2, size), np.linspace(-size//2, size//2+1, size))
P = np.array([x, y]).T
# get bpa in radians and rotate meshgrid
beam_theta = bpa * np.pi / 180
Prot = P.dot(np.array([[np.cos(beam_theta), -np.sin(beam_theta)], [np.sin(beam_theta), np.cos(beam_theta)]]))
# evaluate gaussian PDF, recall that its std is the major or minor axis / 2.0
gauss_cov = np.array([[(bmaj/2.)**2, 0.0], [0.0, (bmin/2.)**2]])
rest_beam = stats.multivariate_normal.pdf(Prot, mean=np.array([0, 0]), cov=gauss_cov)
rest_beam /= rest_beam.max()
return rest_beam
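# Illustrative usage sketch (added): build a pixel-space model of the clean beam
# from a CASA-exported FITS file (the file name is a placeholder):
#
#   hdu = fits.open('image.fits')
#   bmaj_px, bmin_px, bpa = get_beam_info(hdu, pxunits=True)
#   rest_beam = make_restoring_beam(bmaj_px, bmin_px, bpa, size=31)  # peak-normalized 2D model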
def subtract_beam(image, beam, px, search_frac=0.5, subtract=True, inplace=True):
"""
Subtract a postage cutout of a synthesized beam from
an image 2D array centered at image pixel values px, and
read-off the peak flux in the cutout.
Args:
image : an nD image array with RA and Dec as 0th and 1st axes
        beam : an nD beam array with RA and Dec as 0th and 1st axes.
Must have the same CDELT as the image array.
px : pixel coordinates of image to center subtraction at.
Doesn't need to be within the image bounds.
search_frac : beam fraction within which to look for peak flux
subtract : bool, if True subtract the beam else add it.
inplace : edit input array in memory, else make a copy
Returns:
diff_image : image with beam subtracted at px location
peak : peak flux within search_frac
im_cutout : cutout of image before subtraction
bm_cutout : cutout of beam before subtraction
im_s1, im_s2 : slice objects
"""
# get slices
beamNpx = beam.shape
assert beamNpx[0] % 2 == 1 and beamNpx[1] % 2 == 1, "Beam must have odd-valued side-lengths"
im_s1 = slice(px[0]-beamNpx[0]//2, px[0]+beamNpx[0]//2+1)
im_s2 = slice(px[1]-beamNpx[1]//2, px[1]+beamNpx[1]//2+1)
bm_s1 = slice(0, beamNpx[0])
bm_s2 = slice(0, beamNpx[1])
# confirm boundary values
imNpx = image.shape
if im_s1.start < 0:
bm_s1 = slice(-im_s1.start, beamNpx[0])
im_s1 = slice(0, im_s1.stop)
if im_s1.stop > imNpx[0]:
bm_s1 = slice(0, imNpx[0]-im_s1.stop)
im_s1 = slice(im_s1.start, imNpx[0])
if im_s2.start < 0:
bm_s2 = slice(-im_s2.start, beamNpx[1])
im_s2 = slice(0, im_s2.stop)
if im_s2.stop > imNpx[1]:
bm_s2 = slice(0, imNpx[1]-im_s2.stop)
im_s2 = slice(im_s2.start, imNpx[1])
# inplace
if inplace:
diff_image = image
else:
diff_image = image.copy()
# get cutouts
im_cutout = image[im_s1, im_s2].copy()
bm_cutout = beam[bm_s1, bm_s2]
def loop_peak(im, bm, plvl):
if im.ndim > 2:
pks = []
sel = []
for i in range(im.shape[2]):
p, s = loop_peak(im[:, :, i], bm[:, :, i], plvl)
pks.append(p)
sel.append(s)
return pks, sel
else:
s = bm > plvl
if not s.max():
                return np.nan, s
return np.nanmax(im[s]), s
# look for peak flux within area defined by search_frac
peak, select = loop_peak(im_cutout, bm_cutout, search_frac)
if isinstance(peak, list):
peak = np.array(peak)
select = np.moveaxis(select, (0, 1), (-2, -1))
# reformat bm_cutout given image dimensions
if image.ndim > beam.ndim:
bm_cutout = bm_cutout.reshape(bm_cutout.shape + tuple([1]*(image.ndim-beam.ndim)))
# add peak value if beam is a float
    if np.issubdtype(bm_cutout.dtype, np.floating):
bm_cutout = bm_cutout * peak * 0.99999
# difference
if subtract:
diff_image[im_s1, im_s2] -= bm_cutout
else:
diff_image[im_s1, im_s2] += bm_cutout
return diff_image, peak, im_cutout, select, bm_cutout, im_s1, im_s2
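# Illustrative usage sketch (added; the pixel coordinates and input arrays are placeholders):
#
#   diff, peak, im_cut, sel, bm_cut, s1, s2 = subtract_beam(
#       image, rest_beam, px=(128, 256), search_frac=0.5, inplace=False)
#   # 'peak' is the peak flux found where the beam model exceeds search_frac of its maximum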
def load_config(config_file):
"""
Load configuration details from a YAML file.
All entries of 'None' --> None and all lists
of lists become lists of tuples.
"""
# define recursive replace function
def replace(d):
if isinstance(d, (dict, odict)):
for k in d.keys():
# 'None' and '' turn into None
if d[k] == 'None': d[k] = None
# list of lists turn into lists of tuples
if isinstance(d[k], list) and np.all([isinstance(i, list) for i in d[k]]):
d[k] = [tuple(i) for i in d[k]]
elif isinstance(d[k], (dict, odict)): replace(d[k])
# Open and read config file
with open(config_file, 'r') as cfile:
try:
            cfg = yaml.safe_load(cfile)
except yaml.YAMLError as exc:
raise(exc)
# Replace entries
replace(cfg)
return cfg
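# Illustrative example (added) of the replacements performed by load_config: a YAML
# entry such as "threshold: None" comes back as {'threshold': None} (the string 'None'
# is turned into None), and a list of lists such as "ranges: [[0, 10], [20, 30]]"
# comes back as {'ranges': [(0, 10), (20, 30)]} (inner lists become tuples).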
def log(msg, f=None, lvl=0, tb=None, verbose=True):
"""
Add a message to the log.
Parameters
----------
msg : str
Message string to print.
f : file descriptor
file descriptor to write message to.
lvl : int, optional
Indent level of the message. Each level adds two extra spaces.
Default: 0.
tb : traceback tuple, optional
Output of sys.exc_info()
verbose : bool, optional
if True, print msg. Even if False, still writes to file
if f is provided.
"""
# catch for traceback if provided
if tb is not None:
msg += "\n{}".format('\n'.join(traceback.format_exception(*tb)))
# form output
output = "%s%s\n" % (" "*lvl, msg)
# write
if f is not None:
f.write(output)
f.flush()
# print
if verbose and sys.stdout != f:
print(output)
def get_direction(ra, dec):
"""Turn ra and dec in degrees into a CASA J2000 string"""
_ra = ra / 15.0
ra_h = int(np.floor(_ra))
ra_m = int(np.floor((_ra - ra_h) * 60))
ra_s = int(np.around(((_ra - ra_h) * 60 - ra_m) * 60))
dec_d = int(np.floor(np.abs(dec)) * dec / np.abs(dec))
dec_m = int(np.floor(np.abs(dec - dec_d) * 60.))
dec_s = int(np.abs(dec - dec_d) * 3600 - dec_m * 60)
direction = "{:02d}:{:02d}:{:02.0f}\t{:03d}:{:02d}:{:02.0f}:".format(ra_h, ra_m, ra_s, dec_d, dec_m, dec_s)
return direction
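# Worked example (added, for illustration): get_direction(30.0, -30.5) converts
# RA 30 deg (= 2h) and Dec -30.5 deg to "02:00:00\t-30:30:00:" (the trailing colon
# comes from the format string above).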
def get_elapsed_time(time1, time2):
"""Get elapsed time in seconds given two datetime
    objects within the
        logging.debug("Executing import_config.resetSqoopStatistics() - Finished")
def saveSqoopStatistics(self, sqoopStartUTS, sqoopSize=None, sqoopRows=None, sqoopIncrMaxvaluePending=None, sqoopMappers=None):
logging.debug("Executing import_config.saveSqoopStatistics()")
logging.info("Saving sqoop statistics")
self.sqoopStartUTS = sqoopStartUTS
try:
self.sqoop_last_execution_timestamp = datetime.utcfromtimestamp(sqoopStartUTS).strftime('%Y-%m-%d %H:%M:%S.000')
except TypeError:
self.sqoop_last_execution_timestamp = None
queryParam = []
query = "update import_tables set "
query += " sqoop_last_execution = %s "
queryParam.append(sqoopStartUTS)
if sqoopSize != None:
query += " ,sqoop_last_size = %s "
self.sqoop_last_size = sqoopSize
queryParam.append(sqoopSize)
if sqoopRows != None:
query += " ,sqoop_last_rows = %s "
self.sqoop_last_rows = sqoopRows
queryParam.append(sqoopRows)
if sqoopIncrMaxvaluePending != None:
query += " ,incr_maxvalue_pending = %s "
self.sqoopIncrMaxvaluePending = sqoopIncrMaxvaluePending
queryParam.append(sqoopIncrMaxvaluePending)
if sqoopMappers != None:
query += " ,sqoop_last_mappers = %s "
self.sqoop_last_mappers = sqoopMappers
queryParam.append(sqoopMappers)
if self.validate_source == "sqoop" and self.validationMethod == "rowCount":
logging.info("Saving the imported row count as the number of rows in the source system.")
if self.import_is_incremental == True:
query += " ,source_rowcount = NULL "
query += " ,source_rowcount_incr = %s "
else:
query += " ,source_rowcount = %s "
query += " ,source_rowcount_incr = NULL "
queryParam.append(sqoopRows)
query += "where table_id = %s "
queryParam.append(self.table_id)
self.mysql_cursor01.execute(query, queryParam)
logging.debug("SQL Statement executed: %s" % (self.mysql_cursor01.statement) )
self.mysql_conn.commit()
logging.debug("Executing import_config.saveSqoopStatistics() - Finished")
def checkMSSQLChangeTrackingMinValidVersion(self):
""" Checks the Minimum valid version from MSSQL Change Tracking. If the min valid version is higher than what we want to read, we will force a full import instead of an incremental import """
logging.debug("Executing import_config.checkMSSQLChangeTrackingMinValidVersion()")
query = "select CHANGE_TRACKING_MIN_VALID_VERSION(OBJECT_ID('%s')) AS MINVERSION"%(self.common_config.getJDBCsqlFromTable(schema=self.source_schema, table=self.source_table))
try:
minVersion = int(self.common_config.executeJDBCquery(query).iloc[0]['MINVERSION'])
except TypeError:
# if minVersion == None:
            # Can't read the value. It means a permission error or that Change Tracking isn't available on the table
raise invalidConfiguration("Unable to read Change Tracking version. Verify that Change Tracking is enabled on the table and that the user that is accessing it have the correct permission")
if self.incr_maxvalue == None:
# No need to check as it will be a full load anyway
return
if minVersion > int(self.incr_maxvalue):
            # If we hit this part of the code, it means that the minimal version on the MSSQL Server is higher than what we require.
# Only way to get out of this situation is to do a full load
logging.warning("The minimal version in MSSQL is larger than what is required. This means that we need to force a full initial load")
self.resetSqoopStatistics(maxValue=None)
self.incr_maxvalue = None
# self.remove_temporary_files()
# sys.exit(1)
logging.debug("Executing import_config.checkMSSQLChangeTrackingMinValidVersion() - Finished")
def getSQLtoReadFromSourceWithMSSQLChangeTracking(self):
""" Creates and return the SQL needed to read the rows from the source database with the help of MSSQL Change Tracking function """
logging.debug("Executing import_config.getSQLtoReadFromSourceWithMSSQLChangeTracking()")
query = self.getSQLtoReadFromSource(sourceTableAsName="ST")
# Previous MaxValue or the pending one will be used as the minimum version to pull
# query = ("select COALESCE(incr_maxvalue_pending, incr_maxvalue) from import_tables where table_id = %s")
# self.mysql_cursor01.execute(query, (self.table_id, ))
# logging.debug("SQL Statement executed: %s" % (self.mysql_cursor01.statement) )
# row = self.mysql_cursor01.fetchone()
# self.sqoopIncrMaxvaluePending = row[0]
# self.sqoopIncrMinvaluePending = self.sqoop_incr_lastvalue
query = re.sub('^select', 'select \"CT\".\"SYS_CHANGE_VERSION\" as \"datalake_mssql_changetrack_version\", \"CT\".\"SYS_CHANGE_OPERATION\" as \"datalake_mssql_changetrack_operation\",', query)
if self.incr_maxvalue == None:
# This means that there is no previous version. So we need to do an initial load
query += " LEFT JOIN CHANGETABLE(CHANGES %s, 0 ) as CT"%(self.common_config.getJDBCsqlFromTable(schema=self.source_schema, table=self.source_table))
PKColumns = self.getPKcolumns()
joinColumns = ""
for i, targetColumn in enumerate(PKColumns.split(",")):
if joinColumns == "":
joinColumns = " ON "
else:
joinColumns += " AND "
joinColumns += "\"CT\".\"%s\" = \"ST\".\"%s\""%(targetColumn, targetColumn)
query += joinColumns
else:
# This is an incremental load
query += " RIGHT OUTER JOIN CHANGETABLE(CHANGES %s, %s ) as CT"%(self.common_config.getJDBCsqlFromTable(schema=self.source_schema, table=self.source_table), self.incr_maxvalue)
PKColumns = self.getPKcolumns()
joinColumns = ""
# PK must come from the CHANGETABLE function. This is needed in case of deletes, as the column would be NULL otherwise and the merge
# in Hive would not be able to match and remove the row
for i, targetColumn in enumerate(PKColumns.split(",")):
query = re.sub("\"ST\".\"%s\""%(targetColumn), "\"CT\".\"%s\""%(targetColumn), query)
for i, targetColumn in enumerate(PKColumns.split(",")):
if joinColumns == "":
joinColumns = " ON "
else:
joinColumns += " AND "
joinColumns += "\"CT\".\"%s\" = \"ST\".\"%s\""%(targetColumn, targetColumn)
query += joinColumns
        logging.debug("Executing import_config.getSQLtoReadFromSourceWithMSSQLChangeTracking() - Finished")
        return query
def getSQLtoReadFromSource(self, sourceTableAsName=None):
""" Creates and return the SQL needed to read all rows from the source database """
logging.debug("Executing import_config.getSQLtoReadFromSource()")
quote = self.common_config.getQuoteAroundColumn()
query = "select "
query += " c.source_column_name, "
query += " c.column_name "
query += "from import_tables t "
query += "join import_columns c on t.table_id = c.table_id "
query += "where t.table_id = %s and t.last_update_from_source = c.last_update_from_source "
query += "and c.include_in_import = 1 "
query += "order by c.column_order"
self.mysql_cursor01.execute(query, (self.table_id, ))
logging.debug("SQL Statement executed: %s" % (self.mysql_cursor01.statement) )
if self.mysql_cursor01.rowcount == 0:
logging.error("Error: Zero rows returned from query on 'import_table'")
logging.error("SQL Statement that generated the error: %s" % (self.mysql_cursor01.statement) )
raise Exception
result_df = pd.DataFrame(self.mysql_cursor01.fetchall())
result_df.columns = ['source_name', 'name' ]
sparkQuery = ""
for index, row in result_df.iterrows():
if len(sparkQuery) == 0:
sparkQuery = "select "
else:
sparkQuery += ", "
if sourceTableAsName == None:
if row['source_name'] != row['name']:
sparkQuery += quote + row['source_name'] + quote + " as " + quote + row['name'] + quote
else:
sparkQuery += quote + row['source_name'] + quote
else:
if row['source_name'] != row['name']:
sparkQuery += quote + sourceTableAsName + quote + "." + quote + row['source_name'] + quote + " as " + quote + row['name'] + quote
else:
sparkQuery += quote + sourceTableAsName + quote + "." + quote + row['source_name'] + quote
# Add the source the to the generated sql query
sparkQuery += " from %s"%(self.common_config.getJDBCsqlFromTable(schema=self.source_schema, table=self.source_table))
if sourceTableAsName != None:
sparkQuery += " as %s"%(sourceTableAsName)
logging.debug("Executing import_config.getSQLtoReadFromSource() - Finished")
return sparkQuery
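    # Illustrative sketch (added): for a source with columns COL_A and COL_B, where COL_B
    # is renamed to B in the import configuration, the generated query has the shape
    #   select "COL_A", "COL_B" as "B" from <schema>.<table>
    # The actual quote character and the from-clause come from
    # common_config.getQuoteAroundColumn() and getJDBCsqlFromTable(), so they depend on
    # the source database type; the column names above are placeholders.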
def getColumnsFromConfigDatabase(self, restrictColumns=None, sourceIsParquetFile=False, includeAllColumns=True):
""" Reads the columns from the configuration database and returns the information in a Pandas DF with the columns name, type and comment """
logging.debug("Executing import_config.getColumnsFromConfigDatabase()")
hiveColumnDefinition = ""
restrictColumnsList = []
if restrictColumns != None:
restrictColumnsList = restrictColumns.split(",")
query = "select "
query += " COALESCE(c.column_name_override, c.column_name) as name, "
query += " c.column_type as type, "
query += " c.comment "
query += "from import_tables t "
query += "join import_columns c on t.table_id = c.table_id "
query += "where t.table_id = %s and t.last_update_from_source = c.last_update_from_source "
if includeAllColumns == False:
query += " and c.include_in_import = 1 "
if restrictColumnsList != []:
query += " and c.column_name in ('"
query += "', '".join(restrictColumnsList)
query += "') "
query += "order by c.column_order"
self.mysql_cursor01.execute(query, (self.table_id, ))
logging.debug("SQL Statement executed: %s" % (self.mysql_cursor01.statement) )
if self.mysql_cursor01.rowcount == 0:
logging.error("Error: Zero rows returned from query on 'import_table'")
logging.error("SQL Statement that generated the error: %s" % (self.mysql_cursor01.statement) )
raise Exception
result_df = pd.DataFrame(self.mysql_cursor01.fetchall())
        # Set the correct column names in the DataFrame
result_df_columns = []
for columns in self.mysql_cursor01.description:
result_df_columns.append(columns[0]) # Name of the column is in the first position
result_df.columns = result_df_columns
if sourceIsParquetFile == True:
result_df['name'] = result_df['name'].apply(lambda x: self.getParquetColumnName(x))
logging.debug("Executing import_config.getColumnsFromConfigDatabase() - Finished")
return result_df
def getSQLWhereAddition(self,):
""" Returns the SQL Where addition used by the sqoop and spark """
if self.sqoop_sql_where_addition == None or self.sqoop_sql_where_addition.strip() == "":
return None
sqlWhere = self.sqoop_sql_where_addition.replace('"', '\'')
printCustomSQL = False
if "${MIN_VALUE}" in sqlWhere:
printCustomSQL = True
sqlWhere = sqlWhere.replace("${MIN_VALUE}", str(self.sqoop_incr_lastvalue))
if "${MAX_VALUE}" in sqlWhere:
printCustomSQL = True
sqlWhere = sqlWhere.replace("${MAX_VALUE}", str(self.sqoopIncrMaxvaluePending))
if printCustomSQL == True and self.printSQLWhereAddition == True:
logging.info("Values have been replaced in the SQL where addition. New value is:")
self.printSQLWhereAddition = False
print(sqlWhere)
return sqlWhere
def getHiveTableComment(self,):
""" Returns the table comment stored in import_tables.comment """
logging.debug("Executing import_config.getHiveTableComment()")
hiveColumnDefinition = ""
query = "select comment from import_tables where table_id = %s "
self.mysql_cursor01.execute(query, (self.table_id, ))
logging.debug("SQL Statement executed: %s" % (self.mysql_cursor01.statement) )
if self.mysql_cursor01.rowcount == 0:
logging.error("Error: Zero rows returned from query on 'import_table'")
logging.error("SQL Statement that generated the error: %s" % (self.mysql_cursor01.statement) )
raise Exception
row = self.mysql_cursor01.fetchone()
logging.debug("Executing import_config.getHiveTableComment() - Finished")
return row[0]
def validateCustomQuery(self):
""" Validates the custom queries """
if self.validationCustomQuerySourceValue == None or self.validationCustomQueryHiveValue == None:
logging.error("Validation failed! One of the custom queries did not return a result")
logging.info("Result from source query: %s" % ( self.validationCustomQuerySourceValue ))
logging.info("Result from Hive query: %s" % ( self.validationCustomQueryHiveValue ))
raise validationError()
if self.validationCustomQuerySourceValue != self.validationCustomQueryHiveValue:
logging.error("Validation failed! The custom queries did not return the same result")
logging.info("Result from source query: %s" % ( self.validationCustomQuerySourceValue ))
logging.info("Result from Hive query: %s" % ( self.validationCustomQueryHiveValue ))
raise validationError()
return True
def validateRowCount(self, validateSqoop=False, incremental=False):
""" Validates the rows based on values stored in import_tables -> source_columns and target_columns. Returns True or False """
# incremental=True is used for normal validations but only on the incremented rows, regardless of whether we are going to validate full or incremental.
# This is used to validate the import table when the validation method is 'full' for an incremental import.
logging.debug("Executing import_config.validateRowCount()")
logging.debug("validateSqoop = %s"%(validateSqoop))
logging.debug("incremental = %s"%(incremental))
returnValue = None
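# Which saved row counts are compared depends on whether the sqoop stage or the Hive table is being
# validated, and on whether the import (and the configured validation method) is incremental.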
if self.validate_import == True:
# Read the saved row counts from the configuration database
if validateSqoop == False:
if self.import_is_incremental == False:
logging.debug("validateSqoop == False & self.import_is_incremental == False")
# Standard full validation
query = "select source_rowcount, hive_rowcount from import_tables where table_id = %s "
validateTextTarget = "Hive table"
validateTextSource = "Source table"
validateText = validateTextTarget
elif self.sqoop_incr_validation_method == "full" and incremental == False:
logging.debug("validateSqoop == False & self.sqoop_incr_validation_method == 'full' and incremental == False")
# We are not validating the sqoop import, but the validation is an incremental import and
# we are going to validate all the data
query = "select source_rowcount, hive_rowcount from import_tables where table_id = | |
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9k5XPoBoDcK3Cm7MzBAmazTWa63TGURaNv",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9aS7RUHee7JsELTwtce2yuUPpH7LD5m4Aw",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9jSH2wen5N5TSeEsB4StZeCSoDWcyHLEVV",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9dpYAzyLDUGw7yQ63eEtHmAn3eM2ESStEA",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9iNBtrpGgaH8rDGoo5SR9T2CTrHa4rqsWR",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9mFmTZvA8faxChFabENamEHSfUfLa6paDK",
"<KEY>",
"9rRoN3yBs5YsQ2U935yMTUR3euFPfqsu5N",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9dptiD2EnZYCEvCR61sngMpaLrTKqR2HFe",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9f7ukc7t4WY49yD3EvPES2MaTUa2Hse62o",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9XviDgQyV1RV3oYRYoLu4nSehoWyp7LdED",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9mhRfueLTnteugA9KC6S1oaCHRQRKWyary",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9iEAr7e6eNWRgBniofbaBF1KAsA1JUXC8w",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9gC5aieive7rmMuLBMWhkSj4GEunYst4ou",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9nZCApPApaS24erZ9nodreEDCujA1nScez",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9kTh3hC8LUGEAa2Ppt3UrAVy4nbE2QVMr1",
"<KEY>",
"<KEY>",
"9jEV9ZQubmEofwviMaNFuNTwsdtfweeaTr",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9VeDLaobYGvAWV9eToUCVND3wkTkbywf23",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9c6ySLLa5nTZ9arLLt54CvxcH6f9UCqwrn",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9duSpEw1wDufFcWnXpcy8B49yutdNStFFr",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9TovYPT<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9m7eRfW39zjEAisoP5cttuFx2tKa1kiZuz",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9isdBJaDsnTDF37Phu3PAwrBL4mYaNTVZs",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9mPoBuLzHx5GiN5H9yFdzXz5CGvuH1VtQy",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9YTNi85eiLzzYHCadSA5oMApuSPJAMdmm8",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9Z98fu9LkyuP7Gg8T9fPdyTByeGrosEFd1",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9USiB55SfGdpne5mtHkDTYBnoS2i7DJt4o",
"<KEY>",
"<KEY>",
"<KEY>",
"9nvWAZEeysu6Me9GE8vUbSGSvUaF4aseE5",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9meyRtiZRyh7H3GkHS5Dy7cEqumU8dUswT",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9eRGqQ9BJodmeabWimi48C8s3gwsDbv2Se",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"9annCJzeRDhSyZE5iRPDw8L9idUwd6eoPu",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>", | |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import contextlib
import io
import os
import subprocess
import sys
import tempfile
import unittest
from argparse import Namespace
from datetime import datetime, time, timedelta
from time import sleep
from unittest.mock import MagicMock, Mock, patch
import psutil
import pytz
import airflow.bin.cli as cli
from airflow import AirflowException, models, settings
from airflow.bin.cli import get_dag, get_num_ready_workers_running, run
from airflow.models import DagModel, TaskInstance, Variable
from airflow.settings import Session
from airflow.utils import timezone
from airflow.utils.state import State
from tests.compat import mock
dag_folder_path = '/'.join(os.path.realpath(__file__).split('/')[:-1])
DEV_NULL = "/dev/null"
DEFAULT_DATE = timezone.make_aware(datetime(2015, 1, 1))
TEST_DAG_FOLDER = os.path.join(
os.path.dirname(dag_folder_path), 'dags')
TEST_DAG_ID = 'unit_tests'
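# Helper that removes all TaskInstance rows for the given DAG so each test starts from a clean state.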
def reset(dag_id):
session = Session()
tis = session.query(models.TaskInstance).filter_by(dag_id=dag_id)
tis.delete()
session.commit()
session.close()
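# Builds a Namespace-like mock carrying the attributes the task-level CLI handlers (run/test) expect.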
def create_mock_args( # pylint: disable=too-many-arguments
task_id,
dag_id,
subdir,
execution_date,
task_params=None,
dry_run=False,
queue=None,
pool=None,
priority_weight_total=None,
retries=0,
local=True,
mark_success=False,
ignore_all_dependencies=False,
ignore_depends_on_past=False,
ignore_dependencies=False,
force=False,
run_as_user=None,
executor_config=None,
cfg_path=None,
pickle=None,
raw=None,
interactive=None,
):
if executor_config is None:
executor_config = {}
args = MagicMock(spec=Namespace)
args.task_id = task_id
args.dag_id = dag_id
args.subdir = subdir
args.task_params = task_params
args.execution_date = execution_date
args.dry_run = dry_run
args.queue = queue
args.pool = pool
args.priority_weight_total = priority_weight_total
args.retries = retries
args.local = local
args.run_as_user = run_as_user
args.executor_config = executor_config
args.cfg_path = cfg_path
args.pickle = pickle
args.raw = raw
args.mark_success = mark_success
args.ignore_all_dependencies = ignore_all_dependencies
args.ignore_depends_on_past = ignore_depends_on_past
args.ignore_dependencies = ignore_dependencies
args.force = force
args.interactive = interactive
return args
EXAMPLE_DAGS_FOLDER = os.path.join(
os.path.dirname(
os.path.dirname(
os.path.dirname(os.path.realpath(__file__))
)
),
"airflow/example_dags"
)
class TestCLI(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.dagbag = models.DagBag(include_examples=True)
cls.parser = cli.CLIFactory.get_parser()
def setUp(self):
self.gunicorn_master_proc = Mock(pid=None)
self.children = MagicMock()
self.child = MagicMock()
self.process = MagicMock()
def test_ready_prefix_on_cmdline(self):
self.child.cmdline.return_value = [settings.GUNICORN_WORKER_READY_PREFIX]
self.process.children.return_value = [self.child]
with patch('psutil.Process', return_value=self.process):
self.assertEqual(get_num_ready_workers_running(self.gunicorn_master_proc), 1)
def test_ready_prefix_on_cmdline_no_children(self):
self.process.children.return_value = []
with patch('psutil.Process', return_value=self.process):
self.assertEqual(get_num_ready_workers_running(self.gunicorn_master_proc), 0)
def test_ready_prefix_on_cmdline_zombie(self):
self.child.cmdline.return_value = []
self.process.children.return_value = [self.child]
with patch('psutil.Process', return_value=self.process):
self.assertEqual(get_num_ready_workers_running(self.gunicorn_master_proc), 0)
def test_ready_prefix_on_cmdline_dead_process(self):
self.child.cmdline.side_effect = psutil.NoSuchProcess(11347)
self.process.children.return_value = [self.child]
with patch('psutil.Process', return_value=self.process):
self.assertEqual(get_num_ready_workers_running(self.gunicorn_master_proc), 0)
def test_cli_webserver_debug(self):
env = os.environ.copy()
proc = psutil.Popen(["airflow", "webserver", "-d"], env=env)
sleep(3) # wait for webserver to start
return_code = proc.poll()
self.assertEqual(
None,
return_code,
"webserver terminated with return code {} in debug mode".format(return_code))
proc.terminate()
proc.wait()
def test_local_run(self):
args = create_mock_args(
task_id='print_the_context',
dag_id='example_python_operator',
subdir='/root/dags/example_python_operator.py',
interactive=True,
execution_date=timezone.parse('2018-04-27T08:39:51.298439+00:00')
)
reset(args.dag_id)
with patch('argparse.Namespace', args) as mock_args:
run(mock_args)
dag = get_dag(mock_args)
task = dag.get_task(task_id=args.task_id)
ti = TaskInstance(task, args.execution_date)
ti.refresh_from_db()
state = ti.current_state()
self.assertEqual(state, State.SUCCESS)
class TestCliDags(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.dagbag = models.DagBag(include_examples=True)
cls.parser = cli.CLIFactory.get_parser()
@mock.patch("airflow.bin.cli.DAG.run")
def test_backfill(self, mock_run):
cli.backfill(self.parser.parse_args([
'dags', 'backfill', 'example_bash_operator',
'-s', DEFAULT_DATE.isoformat()]))
mock_run.assert_called_once_with(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
conf=None,
delay_on_limit_secs=1.0,
donot_pickle=False,
ignore_first_depends_on_past=False,
ignore_task_deps=False,
local=False,
mark_success=False,
pool=None,
rerun_failed_tasks=False,
run_backwards=False,
verbose=False,
)
mock_run.reset_mock()
dag = self.dagbag.get_dag('example_bash_operator')
with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:
cli.backfill(self.parser.parse_args([
'dags', 'backfill', 'example_bash_operator', '-t', 'runme_0', '--dry_run',
'-s', DEFAULT_DATE.isoformat()]), dag=dag)
mock_stdout.seek(0, 0)
output = mock_stdout.read()
self.assertIn("Dry run of DAG example_bash_operator on {}\n".format(DEFAULT_DATE.isoformat()), output)
self.assertIn("Task runme_0\n", output)
mock_run.assert_not_called() # Dry run shouldn't run the backfill
cli.backfill(self.parser.parse_args([
'dags', 'backfill', 'example_bash_operator', '--dry_run',
'-s', DEFAULT_DATE.isoformat()]), dag=dag)
mock_run.assert_not_called() # Dry run shouldn't run the backfill
cli.backfill(self.parser.parse_args([
'dags', 'backfill', 'example_bash_operator', '-l',
'-s', DEFAULT_DATE.isoformat()]), dag=dag)
mock_run.assert_called_once_with(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
conf=None,
delay_on_limit_secs=1.0,
donot_pickle=False,
ignore_first_depends_on_past=False,
ignore_task_deps=False,
local=True,
mark_success=False,
pool=None,
rerun_failed_tasks=False,
run_backwards=False,
verbose=False,
)
mock_run.reset_mock()
def test_show_dag_print(self):
temp_stdout = io.StringIO()
with contextlib.redirect_stdout(temp_stdout):
cli.show_dag(self.parser.parse_args([
'dags', 'show', 'example_bash_operator']))
out = temp_stdout.getvalue()
self.assertIn("label=example_bash_operator", out)
self.assertIn("graph [label=example_bash_operator labelloc=t rankdir=LR]", out)
self.assertIn("runme_2 -> run_after_loop", out)
@mock.patch("airflow.bin.cli.render_dag")
def test_show_dag_save(self, mock_render_dag):
temp_stdout = io.StringIO()
with contextlib.redirect_stdout(temp_stdout):
cli.show_dag(self.parser.parse_args([
'dags', 'show', 'example_bash_operator', '--save', 'awesome.png']
))
out = temp_stdout.getvalue()
mock_render_dag.return_value.render.assert_called_once_with(
cleanup=True, filename='awesome', format='png'
)
self.assertIn("File awesome.png saved", out)
@mock.patch("airflow.bin.cli.subprocess.Popen")
@mock.patch("airflow.bin.cli.render_dag")
def test_show_dag_imgcat(self, mock_render_dag, mock_popen):
mock_render_dag.return_value.pipe.return_value = b"DOT_DATA"
mock_popen.return_value.communicate.return_value = (b"OUT", b"ERR")
temp_stdout = io.StringIO()
with contextlib.redirect_stdout(temp_stdout):
cli.show_dag(self.parser.parse_args([
'dags', 'show', 'example_bash_operator', '--imgcat']
))
out = temp_stdout.getvalue()
mock_render_dag.return_value.pipe.assert_called_once_with(format='png')
mock_popen.return_value.communicate.assert_called_once_with(b'DOT_DATA')
self.assertIn("OUT", out)
self.assertIn("ERR", out)
@mock.patch("airflow.bin.cli.DAG.run")
def test_cli_backfill_depends_on_past(self, mock_run):
"""
Test that CLI respects -I argument
We just check we call dag.run() right. The behaviour of that kwarg is
tested in test_jobs
"""
dag_id = 'test_dagrun_states_deadlock'
run_date = DEFAULT_DATE + timedelta(days=1)
args = [
'dags',
'backfill',
dag_id,
'-l',
'-s',
run_date.isoformat(),
'-I',
]
dag = self.dagbag.get_dag(dag_id)
cli.backfill(self.parser.parse_args(args), dag=dag)
mock_run.assert_called_once_with(
start_date=run_date,
end_date=run_date,
conf=None,
delay_on_limit_secs=1.0,
donot_pickle=False,
ignore_first_depends_on_past=True,
ignore_task_deps=False,
local=True,
mark_success=False,
pool=None,
rerun_failed_tasks=False,
run_backwards=False,
verbose=False,
)
@mock.patch("airflow.bin.cli.DAG.run")
def test_cli_backfill_depends_on_past_backwards(self, mock_run):
"""
Test that CLI respects -B argument and raises on interaction with depends_on_past
"""
dag_id = 'test_depends_on_past'
start_date = DEFAULT_DATE + timedelta(days=1)
end_date = start_date + timedelta(days=1)
args = [
'dags',
'backfill',
dag_id,
'-l',
'-s',
start_date.isoformat(),
'-e',
end_date.isoformat(),
'-I',
'-B',
]
dag = self.dagbag.get_dag(dag_id)
cli.backfill(self.parser.parse_args(args), dag=dag)
mock_run.assert_called_once_with(
start_date=start_date,
end_date=end_date,
conf=None,
delay_on_limit_secs=1.0,
donot_pickle=False,
ignore_first_depends_on_past=True,
ignore_task_deps=False,
local=True,
mark_success=False,
pool=None,
rerun_failed_tasks=False,
run_backwards=True,
verbose=False,
)
def test_next_execution(self):
# A scaffolding function
def reset_dr_db(dag_id):
session = Session()
dr = session.query(models.DagRun).filter_by(dag_id=dag_id)
dr.delete()
session.commit()
session.close()
dag_ids = ['example_bash_operator', # schedule_interval is '0 0 * * *'
'latest_only', # schedule_interval is timedelta(hours=4)
'example_python_operator', # schedule_interval=None
'example_xcom'] # schedule_interval="@once"
# The details below is determined by the schedule_interval of example DAGs
now = timezone.utcnow()
next_execution_time_for_dag1 = pytz.utc.localize(
datetime.combine(
now.date() + timedelta(days=1),
time(0)
)
)
next_execution_time_for_dag2 = now + timedelta(hours=4)
expected_output = [str(next_execution_time_for_dag1),
str(next_execution_time_for_dag2),
"None",
"None"]
for i in range(len(dag_ids)): # pylint: disable=consider-using-enumerate
dag_id = dag_ids[i]
# Clear dag runs so there is no execution history for each DAG
reset_dr_db(dag_id)
proc = subprocess.Popen(["airflow", "dags", "next_execution", dag_id,
"--subdir", EXAMPLE_DAGS_FOLDER],
stdout=subprocess.PIPE)
proc.wait()
stdout = []
for line in proc.stdout:
stdout.append(str(line.decode("utf-8").rstrip()))
# `next_execution` function is inapplicable if no execution record found
# It prints `None` in such cases
self.assertEqual(stdout[-1], "None")
dag = self.dagbag.dags[dag_id]
# Create a DagRun for each DAG, to prepare for next step
dag.create_dagrun(
run_id='manual__' + now.isoformat(),
execution_date=now,
start_date=now,
state=State.FAILED
)
proc = subprocess.Popen(["airflow", "dags", "next_execution", dag_id,
"--subdir", EXAMPLE_DAGS_FOLDER],
stdout=subprocess.PIPE)
proc.wait()
stdout = []
for line in proc.stdout:
stdout.append(str(line.decode("utf-8").rstrip()))
self.assertEqual(stdout[-1], expected_output[i])
reset_dr_db(dag_id)
def test_cli_list_dags(self):
args = self.parser.parse_args(['dags', 'list', '--report'])
cli.list_dags(args)
def test_cli_list_dag_runs(self):
cli.trigger_dag(self.parser.parse_args([
'dags', 'trigger', 'example_bash_operator', ]))
args = self.parser.parse_args(['dags', 'list_runs',
'example_bash_operator',
'--no_backfill'])
cli.list_dag_runs(args)
def test_cli_list_jobs_with_args(self):
args = self.parser.parse_args(['dags', 'list_jobs', '--dag_id',
'example_bash_operator',
'--state', 'success',
'--limit', '100',
'--output', 'tsv'])
cli.list_jobs(args)
def test_pause(self):
args = self.parser.parse_args([
'dags', 'pause', 'example_bash_operator'])
cli.pause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [True, 1])
args = self.parser.parse_args([
'dags', 'unpause', 'example_bash_operator'])
cli.unpause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [False, 0])
def test_trigger_dag(self):
cli.trigger_dag(self.parser.parse_args([
'dags', 'trigger', 'example_bash_operator',
'-c', '{"foo": "bar"}']))
self.assertRaises(
ValueError,
cli.trigger_dag,
self.parser.parse_args([
'dags', 'trigger', 'example_bash_operator',
'--run_id', 'trigger_dag_xxx',
'-c', 'NOT JSON'])
)
def test_delete_dag(self):
DM = DagModel
key = "my_dag_id"
session = settings.Session()
session.add(DM(dag_id=key))
session.commit()
cli.delete_dag(self.parser.parse_args([
'dags', 'delete', key, '--yes']))
self.assertEqual(session.query(DM).filter_by(dag_id=key).count(), 0)
self.assertRaises(
AirflowException,
cli.delete_dag,
self.parser.parse_args([
'dags', 'delete',
'does_not_exist_dag',
'--yes'])
)
def test_delete_dag_existing_file(self):
# Test to check that the DAG should be deleted even if
# the file containing it is not deleted
DM = DagModel
key = "my_dag_id"
session = settings.Session()
with tempfile.NamedTemporaryFile() as f:
session.add(DM(dag_id=key, fileloc=f.name))
session.commit()
cli.delete_dag(self.parser.parse_args([
'dags', 'delete', key, '--yes']))
self.assertEqual(session.query(DM).filter_by(dag_id=key).count(), 0)
def test_cli_list_jobs(self):
args = self.parser.parse_args(['dags', 'list_jobs'])
cli.list_jobs(args)
def test_dag_state(self):
self.assertEqual(None, cli.dag_state(self.parser.parse_args([
'dags', 'state', 'example_bash_operator', DEFAULT_DATE.isoformat()])))
class TestCliTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.dagbag = models.DagBag(include_examples=True)
cls.parser = cli.CLIFactory.get_parser()
def test_cli_list_tasks(self):
for dag_id in self.dagbag.dags:
args = self.parser.parse_args(['tasks', 'list', dag_id])
cli.list_tasks(args)
args = self.parser.parse_args([
'tasks', 'list', 'example_bash_operator', '--tree'])
cli.list_tasks(args)
def test_test(self):
"""Test the `airflow test` command"""
args = create_mock_args(
task_id='print_the_context',
dag_id='example_python_operator',
subdir=None,
execution_date=timezone.parse('2018-01-01')
)
saved_stdout = sys.stdout
try:
sys.stdout = out = io.StringIO()
cli.test(args)
output = out.getvalue()
# Check that prints, and log messages, are shown
self.assertIn("'example_python_operator__print_the_context__20180101'", output)
finally:
sys.stdout = saved_stdout
@mock.patch("airflow.bin.cli.jobs.LocalTaskJob")
def test_run_naive_taskinstance(self, mock_local_job):
"""
Test that we can run naive (non-localized) task instances
"""
naive_date = datetime(2016, 1, 1)
dag_id = 'test_run_ignores_all_dependencies'
dag = self.dagbag.get_dag('test_run_ignores_all_dependencies')
task0_id = 'test_run_dependent_task'
args0 = ['tasks',
'run',
'-A',
'--local',
dag_id,
task0_id,
naive_date.isoformat()]
cli.run(self.parser.parse_args(args0), dag=dag)
mock_local_job.assert_called_once_with(
task_instance=mock.ANY,
mark_success=False,
ignore_all_deps=True,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
pickle_id=None,
pool=None,
)
def test_cli_test(self):
cli.test(self.parser.parse_args([
'tasks', 'test', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'tasks', 'test', 'example_bash_operator', 'runme_0', '--dry_run',
DEFAULT_DATE.isoformat()]))
def test_cli_test_with_params(self):
cli.test(self.parser.parse_args([
'tasks', 'test', 'example_passing_params_via_test_command', 'run_this',
'-tp', | |
strides_node = inputs_vals[ConvParamIdx.stride_idx].node()
strides_vals = list(strides_node.inputs())
mace_strides = [strides_vals[i].node()['value'] for i in range(2)]
strides_arg = op.arg.add()
strides_arg.name = MaceKeyword.mace_strides_str
strides_arg.ints.extend(mace_strides)
pads_node = inputs_vals[ConvParamIdx.pad_idx].node()
pads_vals = list(pads_node.inputs())
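# PyTorch stores padding per side; MACE expects the total padding, hence the factor of 2.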
mace_pads = [2 * pads_vals[i].node()['value'] for i in range(2)]
pads_arg = op.arg.add()
pads_arg.name = MaceKeyword.mace_padding_values_str
pads_arg.ints.extend(mace_pads)
dilations_node = inputs_vals[ConvParamIdx.dilation_idx].node()
dilations_vals = list(dilations_node.inputs())
mace_dilations = [dilations_vals[i].node()['value'] for i in range(2)]
dilation_arg = op.arg.add()
dilation_arg.name = MaceKeyword.mace_dilations_str
dilation_arg.ints.extend(mace_dilations)
filter_tensor_name = inputs_vals[ConvParamIdx.weight_idx].debugName()
filter_data = self._params_dict[filter_tensor_name]
if is_depthwise:
# C1HW => 1CHW
filter_data = filter_data.permute((1, 0, 2, 3))
filter_data = filter_data.numpy()
self.add_tensor_and_shape(filter_tensor_name, filter_data.shape,
mace_pb2.DT_FLOAT, filter_data)
bias_val = inputs_vals[ConvParamIdx.bias_idx]
has_bias = (not isinstance(bias_val.type(), ValueType.NoneType))
if has_bias:
bias_tensor_name = inputs_vals[ConvParamIdx.bias_idx].debugName()
bias_data = self._params_dict[bias_tensor_name]
bias_data = bias_data.numpy()
self.add_tensor_and_shape(bias_tensor_name, bias_data.shape,
mace_pb2.DT_FLOAT, bias_data)
op.input.extend([bias_tensor_name])
self.infer_shape_conv2d_pool(op)
def convert_batch_norm(self, node, inputs_vals, outputs_vals):
op = self.convert_general_op(outputs_vals)
op.input.extend([inputs_vals[0].debugName()])
op.output.extend([outputs_vals[0].debugName()])
op.type = MaceOp.BatchNorm.name
is_training = int(inputs_vals[BNParamIdx.training_idx].node()['value'])
mace_check(is_training == 0,
"Only support batch normalization with is_training = 0,"
" but got {}".format(is_training))
state_dict = self._params_dict
gamma_key = inputs_vals[BNParamIdx.weight_idx].debugName()
gamma_value = state_dict[gamma_key].numpy().astype(np.float32)
beta_key = inputs_vals[BNParamIdx.bias_idx].debugName()
beta_value = state_dict[beta_key].numpy().astype(np.float32)
mean_name = inputs_vals[BNParamIdx.running_mean_idx].debugName()
mean_value = state_dict[mean_name].numpy().astype(np.float32)
var_name = inputs_vals[BNParamIdx.running_var_idx].debugName()
var_value = state_dict[var_name].numpy().astype(np.float32)
epsilon_value = inputs_vals[BNParamIdx.eps_idx].node()['value']
scale_name = gamma_key + '_scale'
offset_name = beta_key + '_offset'
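# Fold the frozen batch norm into an affine transform: scale = gamma / sqrt(var + eps), offset = beta - mean * scale.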
scale_value = (
(1.0 / np.vectorize(math.sqrt)(
var_value + epsilon_value)) * gamma_value)
offset_value = (-mean_value * scale_value) + beta_value
self.add_tensor_and_shape(scale_name, scale_value.shape,
mace_pb2.DT_FLOAT, scale_value)
self.add_tensor_and_shape(offset_name, offset_value.shape,
mace_pb2.DT_FLOAT, offset_value)
op.input.extend([scale_name, offset_name])
self.infer_shape_general(op)
def convert_hardtanh(self, node, inputs_vals, outputs_vals):
op = self.convert_general_op(outputs_vals)
op.type = MaceOp.Activation.name
min_val = inputs_vals[1].node()['value']
max_val = inputs_vals[2].node()['value']
mace_check(abs(min_val) < 1e-8, "MACE only supports min == 0 Clip op")
op.input.extend([inputs_vals[0].debugName()])
op.output.extend([outputs_vals[0].debugName()])
type_arg = op.arg.add()
type_arg.name = MaceKeyword.mace_activation_type_str
mace_check(abs(max_val - 6.) < 1e-8,
'only support converting hardtanh_ to ReLU6 yet')
type_arg.s = six.b(self.activation_type['ReLU6'].name)
limit_arg = op.arg.add()
limit_arg.name = MaceKeyword.mace_activation_max_limit_str
limit_arg.f = 6.0
self.infer_shape_general(op)
def convert_add(self, node, inputs_vals, outputs_vals):
op = self.convert_general_op(outputs_vals)
op.type = MaceOp.Eltwise.name
type_arg = op.arg.add()
type_arg.name = MaceKeyword.mace_element_type_str
node_kind = node.kind()
type_arg.i = self.eltwise_type[node_kind].value
alpha = inputs_vals[2].node()['value']
mace_check(alpha == 1,
'MACE only support alpha value of 1 for Add Op,'
' {} found'.format(alpha))
op.input.extend([inputs_vals[i].debugName() for i in range(2)])
op.output.extend([outputs_vals[0].debugName()])
lhs_kind = inputs_vals[0].node().kind()
rhs_kind = inputs_vals[1].node().kind()
if lhs_kind != NodeKind.Constant and rhs_kind == NodeKind.Constant:
const_value = inputs_vals[1].node()['value']
value_arg = op.arg.add()
value_arg.name = MaceKeyword.mace_scalar_input_str
value_arg.f = float(const_value)
value_index_arg = op.arg.add()
value_index_arg.name = MaceKeyword.mace_scalar_input_index_str
value_index_arg.i = 1
del op.input[1]
elif lhs_kind == NodeKind.Constant and rhs_kind != NodeKind.Constant:
const_value = inputs_vals[0].node()['value']
value_arg = op.arg.add()
value_arg.name = MaceKeyword.mace_scalar_input_str
value_arg.f = float(const_value)
value_index_arg = op.arg.add()
value_index_arg.name = MaceKeyword.mace_scalar_input_index_str
value_index_arg.i = 0
del op.input[0]
self.infer_shape_general(op)
def convert_relu(self, node, inputs_vals, outputs_vals):
op = self.convert_general_op(outputs_vals)
op.type = MaceOp.Activation.name
op.input.extend([inputs_vals[0].debugName()])
op.output.extend([outputs_vals[0].debugName()])
type_arg = op.arg.add()
type_arg.name = MaceKeyword.mace_activation_type_str
type_arg.s = six.b(self.activation_type['ReLU'].name)
self.infer_shape_general(op)
def infer_shape_cat(self, op):
output_shape = list(self._output_shape_cache[op.input[0]])
axis = ConverterUtil.get_arg(op, MaceKeyword.mace_axis_str).i
if axis < 0:
axis = len(output_shape) + axis
output_shape[axis] = 0
for input_node in op.input:
input_shape = list(self._output_shape_cache[input_node])
output_shape[axis] = output_shape[axis] + input_shape[axis]
self.add_output_shape(op, [output_shape])
def convert_cat(self, node, inputs_vals, outputs_vals):
op = self.convert_general_op(outputs_vals)
op.type = MaceOp.Concat.name
in_vals = list(inputs_vals[0].node().inputs())
in_names = [in_vals[i].debugName() for i in range(len(in_vals))]
op.input.extend(in_names)
op.output.extend([outputs_vals[0].debugName()])
axis_int = inputs_vals[1].node()['value']
axis_arg = op.arg.add()
axis_arg.name = MaceKeyword.mace_axis_str
axis_arg.i = axis_int
self.infer_shape_cat(op)
def convert_flatten(self, node, inputs_vals, outputs_vals):
op = self.convert_general_op(outputs_vals)
op.type = MaceOp.Reshape.name
op.input.extend([inputs_vals[0].debugName()])
op.output.extend([outputs_vals[0].debugName()])
input_shape = list(self._output_shape_cache[op.input[0]])
ndim = len(input_shape)
start_dim = inputs_vals[1].node()['value']
if start_dim < 0:
start_dim += ndim
end_dim = inputs_vals[2].node()['value']
if end_dim < 0:
end_dim += ndim
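# Keep the dims before start_dim and after end_dim; collapse everything in [start_dim, end_dim] into a single dim.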
reshape_dims = []
for i in range(0, start_dim):
reshape_dims.append(input_shape[i])
mid_shape = 1
for i in range(start_dim, end_dim+1):
mid_shape *= input_shape[i]
reshape_dims.append(mid_shape)
for i in range(end_dim + 1, ndim):
reshape_dims.append(input_shape[i])
dim_arg = op.arg.add()
dim_arg.name = MaceKeyword.mace_dim_str
dim_arg.ints.extend(reshape_dims)
self.infer_shape_reshape(op)
def get_weight_from_node(self, node):
input_list = list(node.inputs())
key = input_list[0].debugName()
return self._params_dict[key]
def is_trans_fc_w(self, node):
in_vals = list(node.inputs())
mace_check(len(in_vals) == 1, 't() must have 1 input')
in_name = in_vals[0].debugName()
if in_name in self._params_dict and \
len(self._params_dict[in_name].shape) == 2:
return True
return False
def infer_shape_fully_connected(self, op):
input_shape = self._output_shape_cache[op.input[0]]
weight_shape = self._output_shape_cache[op.input[1]]
data_format = ConverterUtil.data_format(op)
mace_check(data_format == DataFormat.NCHW,
"format {} is not supported".format(data_format))
output_shape = [input_shape[0], weight_shape[0], 1, 1]
self.add_output_shape(op, [output_shape])
def convert_addmm(self, node, inputs_vals, outputs_vals):
op = self.convert_general_op(outputs_vals)
weight_in_node = inputs_vals[AddmmParamIdx.weight_idx].node()
is_mat2_w = weight_in_node.kind() == NodeKind.T and self.is_trans_fc_w(
weight_in_node)
alpha = inputs_vals[AddmmParamIdx.alpha_idx].node()['value']
alpha_type = inputs_vals[AddmmParamIdx.alpha_idx].type()
is_alpha_fc = isinstance(alpha_type, ValueType.IntType) and alpha == 1
is_bias_w = inputs_vals[AddmmParamIdx.bias_idx].debugName() in \
self._params_dict
beta = inputs_vals[AddmmParamIdx.beta_idx].node()['value']
beta_type = inputs_vals[AddmmParamIdx.beta_idx].type()
is_beta_fc = isinstance(beta_type, ValueType.IntType) and beta == 1
# when mat2 is from state_dict and alpha=1 and bias is from state_dict
# and beta =1, it is fc
is_fc = is_mat2_w and is_alpha_fc and is_bias_w and is_beta_fc
mace_check(is_fc, 'addmm can only be converted into FC yet')
# pytorch usually prepend a reshape/flatten before fc, convert fc
# into matmul followed by biasadd, thus reshape/flatten and matmul
# will be merged. see transform_matmul_to_fc for detail.
name_back = op.name
matmul_op_name = op.name + '_matmul'
op.name = matmul_op_name
op.type = MaceOp.MatMul.name
fc_upstream_name = inputs_vals[AddmmParamIdx.input_idx].debugName()
op.input.extend([fc_upstream_name])
op.output.extend([matmul_op_name])
weight_tensor_name = op.name + '_weight'
weight_tensor = self.get_weight_from_node(weight_in_node)
weight_data = weight_tensor.numpy()
self.add_tensor_and_shape(weight_tensor_name, weight_data.shape,
mace_pb2.DT_FLOAT, weight_data)
op.input.extend([weight_tensor_name])
transpose_a_arg = op.arg.add()
transpose_a_arg.name = MaceKeyword.mace_transpose_a_str
transpose_a_arg.i = 0
transpose_b_arg = op.arg.add()
transpose_b_arg.name = MaceKeyword.mace_transpose_b_str
transpose_b_arg.i = 1 # OxI, trans_b needed
self.infer_shape_matmul(op)
opb = self.convert_general_op(outputs_vals)
opb.type = MaceOp.BiasAdd.name
bias_tensor_name = opb.name + '_bias'
key = inputs_vals[AddmmParamIdx.bias_idx].debugName()
bias_data = self._params_dict[key]
bias_data = bias_data.numpy()
self.add_tensor_and_shape(bias_tensor_name, bias_data.reshape(
-1).shape, mace_pb2.DT_FLOAT, bias_data)
opb.input.extend([matmul_op_name, bias_tensor_name])
opb.output.extend([name_back])
self.infer_shape_general(opb)
def infer_shape_matmul(self, op):
lhs_shape = self._output_shape_cache[op.input[0]]
lhs_rank = len(lhs_shape)
lhs_rows = lhs_shape[-2]
lhs_cols = lhs_shape[-1]
rhs_shape = self._output_shape_cache[op.input[1]]
rhs_rank = len(rhs_shape)
rhs_rows = rhs_shape[-2]
rhs_cols = rhs_shape[-1]
transpose_a_ = ConverterUtil.get_arg(
op, MaceKeyword.mace_transpose_a_str).i
transpose_b_ = ConverterUtil.get_arg(
op, MaceKeyword.mace_transpose_b_str).i
rows = lhs_cols if transpose_a_ else lhs_rows
cols = rhs_rows if transpose_b_ else rhs_cols
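# The operand with the larger rank (lhs on ties) supplies the output's batch dims; its last two dims become (rows, cols).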
if lhs_rank >= rhs_rank:
if lhs_rank > rhs_rank:
mace_check(rhs_rank == 2,
'The rhs rank of non-batched MatMul must be 2') # noqa
output_shape = lhs_shape.copy()
output_shape[lhs_rank - 2] = rows
output_shape[lhs_rank - 1] = cols
else:
output_shape = rhs_shape.copy()
output_shape[rhs_rank - 2] = rows
output_shape[rhs_rank - 1] = cols
self.add_output_shape(op, [output_shape])
def convert_matmul(self, node, inputs_vals, outputs_vals):
op = self.convert_general_op(outputs_vals)
weight_in_node = inputs_vals[1].node()
is_weight = weight_in_node.kind() == NodeKind.T and self.is_trans_fc_w(
weight_in_node)
op.type = MaceOp.MatMul.name
op.input.extend([inputs_vals[i].debugName() for i in range(2)])
op.output.extend([outputs_vals[0].debugName()])
if is_weight:
weight_tensor_name = op.input[1]
weight_val = inputs_vals[1]
weight_tensor = self.get_weight_from_node(weight_in_node)
weight_data = weight_tensor.numpy()
self.add_tensor_and_shape(weight_tensor_name, weight_data.shape,
mace_pb2.DT_FLOAT, weight_data)
lhs_shape = self._output_shape_cache[op.input[0]]
rhs_shape = self._output_shape_cache[op.input[1]]
lhs_rank = len(lhs_shape)
rhs_rank = len(rhs_shape)
mace_check(lhs_rank >= 2 and rhs_rank >= 2,
"The rank of MatMul must be >= 2,"
" but lhs_rank = {} and rhs_rank = {} found".format(
lhs_rank, rhs_rank))
transpose_a_arg = op.arg.add()
transpose_a_arg.name = MaceKeyword.mace_transpose_a_str
transpose_a_arg.i = 0
transpose_b_arg = op.arg.add()
transpose_b_arg.name = MaceKeyword.mace_transpose_b_str
if is_weight:
transpose_b_arg.i = 1
else:
transpose_b_arg.i = 0
self.infer_shape_matmul(op)
def convert_pool(self, node, inputs_vals, outputs_vals):
op = self.convert_general_op(outputs_vals)
op.type = MaceOp.Pooling.name
op.input.extend([inputs_vals[0].debugName()])
op.output.extend([outputs_vals[0].debugName()])
node_kind = node.kind()
idx_map = {NodeKind.AvgPool2D: AvgPool2DParamIdx,
NodeKind.MaxPool2D: MaxPool2DParamIdx}
if node_kind == NodeKind.AdaptiveAvgPool2D:
output_shape_node = inputs_vals[1].node()
output_shape_vals = list(output_shape_node.inputs())
target_output_shape = [
output_shape_vals[i].node()['value'] for i in range(2)]
mace_check(target_output_shape[0] == 1 and
target_output_shape[1] == 1,
'only support output shape of [1, 1] for AdaptiveAvgPool2D') # noqa
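# An output size of [1, 1] maps to MACE global pooling: dummy kernel/stride/pad values are written and the global_pooling flag is set.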
strides_arg = op.arg.add()
strides_arg.name = MaceKeyword.mace_strides_str
strides_arg.ints.extend([1, 1])
pads_arg = op.arg.add()
pads_arg.name = MaceKeyword.mace_padding_values_str
pads_arg.ints.extend([0, 0])
kernels_arg = op.arg.add()
kernels_arg.name = MaceKeyword.mace_kernel_str
kernels_arg.ints.extend([0, 0])
global_pooling_arg = op.arg.add()
global_pooling_arg.name = MaceKeyword.mace_global_pooling_str
global_pooling_arg.i = 1
else:
pad_node = inputs_vals[idx_map[node_kind].pad_idx].node()
pad_vals = list(pad_node.inputs())
mace_check(len(pad_vals) == 2,
"only support 2D pooling,"
" but {}D padding value found".format(len(pad_vals)))
# MACE pads include both sides, pytorch pads include single side
pads = [2 * pad_vals[i].node()['value'] for i in range(2)]
pads_arg = op.arg.add()
pads_arg.name = MaceKeyword.mace_padding_values_str
pads_arg.ints.extend(pads)
if node_kind == NodeKind.MaxPool2D:
dilation_node = inputs_vals[
idx_map[node_kind].dilation_idx].node()
dilation_vals = list(dilation_node.inputs())
dilations = [dilation_vals[i].node()['value']
for i in range(2)]
mace_check(dilations[0] == 1 and dilations[1] == 1,
"MACE pooling does not support dilation")
kernel_node = inputs_vals[
idx_map[node_kind].kernel_size_idx].node()
kernel_vals = list(kernel_node.inputs())
kernels = [kernel_vals[i].node()['value'] for i in range(2)]
kernels_arg = op.arg.add()
kernels_arg.name = MaceKeyword.mace_kernel_str
kernels_arg.ints.extend(kernels)
stride_node = inputs_vals[idx_map[node_kind].stride_idx].node()
stride_vals = list(stride_node.inputs())
strides = [stride_vals[i].node()['value'] for i in range(2)]
strides_arg = op.arg.add()
strides_arg.name = MaceKeyword.mace_strides_str
strides_arg.ints.extend(strides)
ceil_node = inputs_vals[idx_map[node_kind].ceil_mode_idx].node()
ceil_mode = bool(ceil_node['value'])
round_mode_arg = op.arg.add()
round_mode_arg.name = MaceKeyword.mace_round_mode_str
round_mode_arg.i = RoundMode.FLOOR.value
if ceil_mode:
round_mode_arg.i = RoundMode.CEIL.value
if node_kind | |
<filename>baseline/vectorizers.py
import collections
import copy
import tempfile
import unicodedata
import re
import json
from typing import Tuple, List, Iterable, Set, Dict
import numpy as np
from functools import lru_cache
from eight_mile.downloads import open_file_or_url, get_file_or_url
from eight_mile.utils import exporter, optional_params, listify, register, Offsets, is_sequence, pads
from baseline.utils import import_user_module
try:
import regex
except:
# If this doesn't work, no GPT2
pass
try:
# If this doesn't work, no XLM-R
import sentencepiece as spm
except:
pass
__all__ = []
export = exporter(__all__)
@export
class Vectorizer:
def __init__(self):
pass
def run(self, tokens, vocab):
pass
def count(self, tokens):
pass
def get_dims(self) -> Tuple[int]:
pass
def valid_label_indices(self, tokens: Iterable) -> List[int]:
pass
def iterable(self, tokens):
pass
def reset(self):
pass
MEAD_VECTORIZERS = {}
@export
@optional_params
def register_vectorizer(cls, name=None):
"""Register a function as a plug-in"""
return register(cls, MEAD_VECTORIZERS, name, 'vectorizer')
@export
def identity_trans_fn(x):
return x
@export
class AbstractVectorizer(Vectorizer):
def __init__(self, transform_fn=None, emit_begin_tok=[], emit_end_tok=[]):
super().__init__()
self.transform_fn = identity_trans_fn if transform_fn is None else transform_fn
self.emit_begin_tok = listify(emit_begin_tok)
self.emit_end_tok = listify(emit_end_tok)
def iterable(self, tokens):
"""Produce an iterable of segmented tokens from an iterable input
The tokens could be subwords, the identity, or some other transform; that is up to the implementation.
The intent is that the items yielded are the underlying atoms, so the only processing left to convert
them to integer values is to look them up in a word-to-index table.
:param tokens: An iterable of tokens
:return: Generator for atoms
"""
for t in self.emit_begin_tok:
yield t
for tok in tokens:
yield self.transform_fn(tok)
for t in self.emit_end_tok:
yield t
def _next_element(self, tokens, vocab):
"""This function transforms non "atomic" input to its elements and yields integer values
Because this function requires a vocab, it cannot be used during counting (which is responsible for producing
the text atomic words (or subwords) that may be used for vocabulary tabulation
:param tokens: An iterable of tokens
:param vocab:
:return: Generator for integer values that can be directly used in Embeddings
"""
for atom in self.iterable(tokens):
value = vocab.get(atom)
if value is None:
value = vocab.get('<UNK>', -1)
if value == -1:
break
yield value
def valid_label_indices(self, tokens: Iterable) -> List[int]:
"""Produce the indices in an iterable containing valid labels only
For instance, if the vectorizer deals with sub-words, this function will return
the leader token indices
:param tokens:
:return:
"""
try:
return list(range(len(tokens)))
except TypeError:
return [i for i, _ in enumerate(tokens)]
@export
@register_vectorizer(name='token1d')
class Token1DVectorizer(AbstractVectorizer):
def __init__(self, **kwargs):
super().__init__(kwargs.get('transform_fn'), kwargs.get('emit_begin_tok', []), kwargs.get('emit_end_tok', []))
self.time_reverse = kwargs.get('rev', False)
self.mxlen = kwargs.get('mxlen', -1)
self.max_seen = 0
def count(self, tokens):
"""Count (tabulate) the "atoms" in this tokens stream
This method converts each token to its atoms (e.g. subwords, or transformed case tokens), and gives back
a frequency table tabulated from the input
:param tokens: An iterable of string tokens
:return: A frequency table of atoms
"""
seen = 0
counter = collections.Counter()
for tok in self.iterable(tokens):
counter[tok] += 1
seen += 1
self.max_seen = max(self.max_seen, seen)
return counter
def reset(self):
"""Reset allows the vectorizer to reset any critical information from scratch
In this implementation, the only critical items are the max length of the temporal stream allowable and the
maximum attested temporal stream length
:return: None
"""
self.mxlen = -1
self.max_seen = -1
def run(self, tokens, vocab):
"""Convert an iterable token stream to an integer padded up to the maximum length `mxlen)
:param tokens: An iterable token stream
:param vocab: A word-to-integer index
:return: A (padded) vector and the valid (unpadded length)
"""
if self.mxlen < 0:
self.mxlen = self.max_seen
vec1d = pads(self.mxlen, dtype=int)
i = 0
for i, atom in enumerate(self._next_element(tokens, vocab)):
if i == self.mxlen:
i -= 1
break
vec1d[i] = atom
valid_length = i + 1
if self.time_reverse:
vec1d = vec1d[::-1]
return vec1d, None
return vec1d, valid_length
def get_dims(self):
return self.mxlen,
def _token_iterator(vectorizer, tokens):
for tok in tokens:
token = []
for field in vectorizer.fields:
if isinstance(tok, dict):
token += [vectorizer.transform_fn(tok[field])]
else:
token += [vectorizer.transform_fn(tok)]
yield vectorizer.delim.join(token)
@export
@register_vectorizer(name='dict1d')
class Dict1DVectorizer(Token1DVectorizer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.fields = listify(kwargs.get('fields', 'text'))
self.delim = kwargs.get('token_delim', '@@')
def iterable(self, tokens):
for t in self.emit_begin_tok:
yield t
for t in _token_iterator(self, tokens):
yield t
for t in self.emit_end_tok:
yield t
@export
@register_vectorizer(name='single-item-dict1d')
class SingleItemDict1DVectorizer(Token1DVectorizer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.field = kwargs.get('field', kwargs.get('fields', 'text'))
def iterable(self, tokens):
for t in self.emit_begin_tok:
yield t
for tok in tokens:
yield tok[self.field]
for t in self.emit_end_tok:
yield t
@export
@register_vectorizer(name='int-identity-dict1d')
class IntIdentityDict1DVectorizer(Token1DVectorizer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.field = kwargs.get('field', kwargs.get('fields', 'text'))
def iterable(self, tokens):
for t in self.emit_begin_tok:
yield t
for tok in tokens:
yield tok[self.field]
for t in self.emit_end_tok:
yield t
def _next_element(self, tokens, vocab):
"""This function transforms non "atomic" input to its elements and yields integer values
Because this function requires a vocab, it cannot be used during counting (which is responsible for producing
the text atomic words (or subwords) that may be used for vocabulary tabulation
:param tokens: An iterable of tokens
:param vocab:
:return: Generator for integer values that can be directly used in Embeddings
"""
for value in self.iterable(tokens):
if value == -1:
break
yield int(value)
@export
class AbstractCharVectorizer(AbstractVectorizer):
def _next_element(self, tokens, vocab):
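# Yield one integer id per character, followed by an end-of-word marker after every token.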
OOV = vocab['<UNK>']
EOW = vocab.get('<EOW>', vocab.get(' ', Offsets.PAD))
for token in self.iterable(tokens):
for ch in token:
yield vocab.get(ch, OOV)
yield EOW
def valid_label_indices(self, tokens: Iterable) -> List[int]:
try:
return list(range(len(tokens)))
except TypeError:
return [i for i, _ in enumerate(tokens)]
@export
@register_vectorizer(name='char2d')
class Char2DVectorizer(AbstractCharVectorizer):
def __init__(self, **kwargs):
super().__init__(kwargs.get('transform_fn'), kwargs.get('emit_begin_tok', []), kwargs.get('emit_end_tok', []))
self.mxlen = kwargs.get('mxlen', -1)
self.mxwlen = kwargs.get('mxwlen', -1)
self.max_seen_tok = 0
self.max_seen_char = 0
def count(self, tokens):
seen_tok = 0
counter = collections.Counter()
for token in self.iterable(tokens):
self.max_seen_char = max(self.max_seen_char, len(token))
seen_tok += 1
for ch in token:
counter[ch] += 1
counter['<EOW>'] += 1
self.max_seen_tok = max(self.max_seen_tok, seen_tok)
return counter
def reset(self):
self.mxlen = -1
self.mxwlen = -1
self.max_seen_tok = 0
self.max_seen_char = 0
def run(self, tokens, vocab):
if self.mxlen < 0:
self.mxlen = self.max_seen_tok
if self.mxwlen < 0:
self.mxwlen = self.max_seen_char
EOW = vocab.get('<EOW>', vocab.get(' ', Offsets.PAD))
vec2d = pads((self.mxlen, self.mxwlen), dtype=int)
i = 0
j = 0
over = False
for atom in self._next_element(tokens, vocab):
if over:
# If we have gone past mxwlen, burn tokens until we hit the end of the word
if atom == EOW:
over = False
continue
if i == self.mxlen:
break
if atom == EOW:
i += 1
j = 0
continue
elif j == self.mxwlen:
over = True
i += 1
j = 0
continue
else:
vec2d[i, j] = atom
j += 1
valid_length = i
return vec2d, valid_length
def get_dims(self):
return self.mxlen, self.mxwlen
@export
@register_vectorizer(name='dict2d')
class Dict2DVectorizer(Char2DVectorizer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.fields = listify(kwargs.get('fields', 'text'))
self.delim = kwargs.get('token_delim', '@@')
def iterable(self, tokens):
for t in self.emit_begin_tok:
yield t
for t in _token_iterator(self, tokens):
yield t
for t in self.emit_end_tok:
yield t
@export
@register_vectorizer(name='char1d')
class Char1DVectorizer(AbstractCharVectorizer):
def __init__(self, **kwargs):
super().__init__(kwargs.get('transform_fn'))
self.mxlen = kwargs.get('mxlen', -1)
self.time_reverse = kwargs.get('rev', False)
self.max_seen_tok = 0
def count(self, tokens):
seen_tok = 0
counter = collections.Counter()
for token in self.iterable(tokens):
seen_tok += 1
for ch in token:
counter[ch] += 1
seen_tok += 1
counter['<EOW>'] += 1
seen_tok += 1
self.max_seen_tok = max(self.max_seen_tok, seen_tok)
return counter
def reset(self):
self.mxlen = -1
self.max_seen_tok = 0
def run(self, tokens, vocab):
if self.mxlen < 0:
self.mxlen = self.max_seen_tok
vec1d = pads(self.mxlen, dtype=int)
for i, atom in enumerate(self._next_element(tokens, vocab)):
if i == self.mxlen:
i -= 1
break
vec1d[i] = atom
if self.time_reverse:
vec1d = vec1d[::-1]
return vec1d, None
return vec1d, i + 1
def get_dims(self):
return self.mxlen,
@register_vectorizer(name='ngram')
class TextNGramVectorizer(Token1DVectorizer):
def __init__(self, filtsz=3, joiner='@@', transform_fn=None, pad='<PAD>', **kwargs):
super().__init__(**kwargs)
self.filtsz = filtsz
self.pad = pad
self.joiner = joiner
self.transform_fn = identity_trans_fn if transform_fn is None else transform_fn
def iterable(self, tokens):
nt = len(tokens)
valid_range = nt - self.filtsz + 1
for i in range(valid_range):
chunk = tokens[i:i+self.filtsz]
yield self.joiner.join(chunk)
def get_padding(self):
return [self.pad] * (self.filtsz // 2)
def run(self, tokens, vocab):
if self.mxlen < 0:
self.mxlen = self.max_seen
zp = self.get_padding()
vec2d = pads(self.mxlen, dtype=int)
padded_tokens = zp + tokens + zp
for i, atom in enumerate(self._next_element(padded_tokens, vocab)):
if i == self.mxlen:
break
vec2d[i] = atom
        lengths = min(self.mxlen, len(tokens))  # assumed completion: the source was cut off here; mirrors the (vector, valid length) return used by the other vectorizers
        return vec2d, lengths
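# Illustrative sketch (made-up tokens): iterable() slides a window of size filtsz over the
# tokens and joins each window with the joiner string, while get_padding() supplies the '<PAD>'
# context so that every original token becomes the centre of one n-gram.
def _demo_ngram_windows():
    vectorizer = TextNGramVectorizer(filtsz=3)
    tokens = vectorizer.get_padding() + ['the', 'cat', 'sat'] + vectorizer.get_padding()
    # -> ['<PAD>@@the@@cat', 'the@@cat@@sat', 'cat@@sat@@<PAD>']
    return list(vectorizer.iterable(tokens))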
import sys
import unittest
from tf.fabric import Fabric
# LOAD THE TEST CORPUS
TF = Fabric('tf')
api = TF.load('sign name')
F = api.F
S = api.S
# MAKE CUSTOM SETS OF NODES
Sign = set(range(1, F.otype.maxSlot + 1))
Node = set(range(1, F.otype.maxNode + 1))
sets = dict(Sign=Sign, Node=Node)
# RUN A QUERY, OPTIONALLY WITH CUSTOM SETS
def query(template, sets=None):
return (
tuple(S.search(template))
if sets is None else
tuple(S.search(template, sets=sets))
)
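# Usage sketch for the helper above (the node types 'sign' and 'part' come from the toy corpus,
# as exercised by the tests below): plain indentation in a Text-Fabric search template expresses
# embedding, and the custom sets defined above can stand in for node types via `sets=`.
def demo_query():
    embedded = query('''
part
  sign
''')
    with_custom_sets = query('''
Node
  Sign
''', sets=sets)
    return len(embedded), len(with_custom_sets)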
# DEFINE THE TESTS
relationKey = {
'=': 'equal',
'#': 'unequal',
'<': 'canonicalBefore',
'>': 'canonicalAfter',
'==': 'sameSlots',
'&&': 'overlap',
'##': 'differentSlots',
'||': 'disjointSlots',
'[[': 'has',
']]': 'in',
'<<': 'slotBefore',
'>>': 'slotAfter',
'=:': 'startSame',
'=0:': 'startSame0',
'=1:': 'startSame1',
'=2:': 'startSame2',
':=': 'endSame',
':0=': 'endSame0',
':1=': 'endSame1',
':2=': 'endSame2',
'::': 'sameBoundary',
':0:': 'sameBoundary0',
':1:': 'sameBoundary1',
':2:': 'sameBoundary2',
'<:': 'adjacentBefore',
'<0:': 'adjacentBefore0',
'<1:': 'adjacentBefore1',
'<2:': 'adjacentBefore2',
':>': 'adjacentAfter',
':0>': 'adjacentAfter0',
':1>': 'adjacentAfter1',
':2>': 'adjacentAfter2',
'.namesign=namepart.': 'featureEqualSP',
'.namesign=namesign.': 'featureEqualSS',
'.namesign#namesign.': 'featureUnequalSS',
'.namesign.': 'featureEqualSS',
'.number.': 'featureEqualN',
'.number#number.': 'featureUnEqualN',
'.number>number.': 'featureGreaterN',
'.number<number.': 'featureLesserN',
'.namesign~(^[sp]{2}-)|(-[sp]{2}$)~namepart.': 'featureFuzzySP1',
'.namesign~(^[sp]{2}-)|(-[sp]{2}$)~namesign.': 'featureFuzzySS1',
'.namepart~(^[sp]{2}-)|(-[sp]{2}$)~namepart.': 'featureFuzzyPP1',
'.namesign~([sp]-)|(-[sp])~namepart.': 'featureFuzzySP2',
'.namesign~([sp]-)|(-[sp])~namesign.': 'featureFuzzySS2',
'.namepart~([sp]-)|(-[sp])~namepart.': 'featureFuzzyPP2',
}
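# Sketch of how one operator key above could be exercised as a template (assuming the usual
# Text-Fabric syntax for named atoms, feature conditions and trailing relation lines); the rows
# of `comparisons` below supply the two actors and the expected outcome.
def demo_relation(op='<<', otype_a='sign', name_a='a', otype_b='sign', name_b='b'):
    template = f'''
x:{otype_a} name={name_a}
y:{otype_b} name={name_b}
x {op} y
'''
    return len(query(template)) > 0  # True when the relation holds for this pair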
# DEFINE THE PARAMETERS FOR EACH TEST
comparisons = {
'=': (
('sign', 'a', 'sign', 'a', True),
('sign', 'a', 'sign', 'b', False),
('part', 's1', 'part', 's1', True),
('part', 's1', 'part', 's2', False),
('part', 'lower', 'part', 'lower', True),
('part', 'lower', 'part', 'upper', False),
),
'#': (
('sign', 'a', 'sign', 'a', False),
('sign', 'a', 'sign', 'b', True),
('part', 's1', 'part', 's1', False),
('part', 's1', 'part', 's2', True),
('part', 'lower', 'part', 'lower', False),
('part', 'lower', 'part', 'upper', True),
),
'<': (
('sign', 'a', 'sign', 'a', False),
('sign', 'a', 'sign', 'b', True),
('sign', 'b', 'sign', 'a', False),
('sign', 'd', 'sign', 'g', True),
('sign', 'g', 'sign', 'd', False),
('part', 'lower', 'part', 'upper', True),
('part', 'upper', 'part', 'lower', False),
('part', 'big', 'part', 'small1', True),
('part', 'big', 'part', 'small2', True),
('part', 'big', 'part', 'small3', True),
('part', 'big', 'part', 'small4', True),
('part', 'small1', 'part', 'big', False),
('part', 'small2', 'part', 'big', False),
('part', 'small3', 'part', 'big', False),
('part', 'small4', 'part', 'big', False),
('part', 'small5', 'part', 'big', False),
('part', 'small6', 'part', 'big', False),
('part', 'small7', 'part', 'big', False),
('part', 'small8', 'part', 'big', True),
('sign', 'a', 'part', 'big', True),
('sign', 'b', 'part', 'big', True),
('sign', 'c', 'part', 'big', False),
('sign', 'd', 'part', 'big', False),
('sign', 'i', 'part', 'big', False),
('sign', 'j', 'part', 'big', False),
),
'>': (
('sign', 'a', 'sign', 'a', False),
('sign', 'a', 'sign', 'b', False),
('sign', 'b', 'sign', 'a', True),
('sign', 'd', 'sign', 'g', False),
('sign', 'g', 'sign', 'd', True),
('part', 'lower', 'part', 'upper', False),
('part', 'upper', 'part', 'lower', True),
('part', 'big', 'part', 'small1', False),
('part', 'big', 'part', 'small2', False),
('part', 'big', 'part', 'small3', False),
('part', 'big', 'part', 'small4', False),
('part', 'small1', 'part', 'big', True),
('part', 'small2', 'part', 'big', True),
('part', 'small3', 'part', 'big', True),
('part', 'small4', 'part', 'big', True),
('part', 'small5', 'part', 'big', True),
('part', 'small6', 'part', 'big', True),
('part', 'small7', 'part', 'big', True),
('part', 'small8', 'part', 'big', False),
('sign', 'a', 'part', 'big', False),
('sign', 'b', 'part', 'big', False),
('sign', 'c', 'part', 'big', True),
('sign', 'd', 'part', 'big', True),
('sign', 'i', 'part', 'big', True),
('sign', 'j', 'part', 'big', True),
),
'==': (
('sign', 'a', 'sign', 'a', True),
('sign', 'a', 'sign', 'b', False),
('part', 'john', 'part', 'mary', True),
('part', 'john', 'part', 'small4', False),
('sign', 'a', 'part', 's1', True),
('part', 's1', 'sign', 'a', True),
('part', 's1', 'part', 'ss1', True),
('part', 'small1', 'part', 'big', False),
('part', 'big', 'part', 'small1', False),
),
'&&': (
('sign', 'a', 'sign', 'a', True),
('sign', 'a', 'sign', 'b', False),
('part', 'john', 'part', 'mary', True),
('part', 'john', 'part', 'john', True),
('part', 'john', 'part', 'small4', True),
('part', 'lower', 'part', 'upper', False),
('part', 'odd', 'part', 'even', False),
('sign', 'c', 'part', 'odd', True),
('sign', 'd', 'part', 'odd', False),
('sign', 'c', 'part', 'even', False),
('sign', 'd', 'part', 'even', True),
),
'##': (
('sign', 'a', 'sign', 'a', False),
('sign', 'a', 'sign', 'b', True),
('part', 'john', 'part', 'mary', False),
('part', 'john', 'part', 'john', False),
('part', 'john', 'part', 'big', True),
('sign', 'c', 'part', 'odd', True),
('sign', 'd', 'part', 'odd', True),
('part', 'even', 'sign', 'c', True),
('part', 'even', 'sign', 'd', True),
),
'||': (
('sign', 'a', 'sign', 'a', False),
('sign', 'a', 'sign', 'b', True),
('part', 'john', 'part', 'mary', False),
('part', 'john', 'part', 'john', False),
('part', 'john', 'part', 'small4', False),
('part', 'lower', 'part', 'upper', True),
('part', 'odd', 'part', 'even', True),
('sign', 'c', 'part', 'odd', False),
('sign', 'd', 'part', 'odd', True),
('sign', 'c', 'part', 'even', True),
('sign', 'd', 'part', 'even', False),
),
'[[': (
('sign', 'a', 'sign', 'a', False),
('sign', 'a', 'sign', 'b', False),
('part', 's1', 'sign', 'a', True),
('sign', 'a', 'part', 's1', False),
('part', 's1', 'part', 's1', False),
('part', 's1', 'part', 'ss1', True),
('part', 'ss1', 'part', 's1', True),
('part', 'john', 'part', 'john', False),
('part', 'john', 'part', 'mary', True),
('part', 'mary', 'part', 'john', True),
('part', 'big', 'part', 'small1', True),
('part', 'big', 'part', 'small2', True),
('part', 'big', 'part', 'small3', True),
('part', 'big', 'part', 'small4', True),
('part', 'big', 'part', 'small5', False),
('part', 'big', 'part', 'small6', False),
('part', 'big', 'part', 'small7', False),
('part', 'big', 'part', 'small8', False),
('part', 'small1', 'part', 'big', False),
('part', 'small2', 'part', 'big', False),
('part', 'small3', 'part', 'big', False),
('part', 'small4', 'part', 'big', False),
('part', 'small5', 'part', 'big', False),
('part', 'small6', 'part', 'big', False),
('part', 'small7', 'part', 'big', False),
('part', 'small8', 'part', 'big', False),
),
']]': (
('sign', 'a', 'sign', 'a', False),
('sign', 'a', 'sign', 'b', False),
('part', 's1', 'sign', 'a', False),
('sign', 'a', 'part', 's1', True),
('part', 's1', 'part', 's1', False),
('part', 's1', 'part', 'ss1', True),
('part', 'ss1', 'part', 's1', True),
('part', 'john', 'part', 'john', False),
('part', 'john', 'part', 'mary', True),
('part', 'mary', 'part', 'john', True),
('part', 'big', 'part', 'small1', False),
('part', 'big', 'part', 'small2', False),
('part', 'big', 'part', 'small3', False),
('part', 'big', 'part', 'small4', False),
('part', 'big', 'part', 'small5', False),
('part', 'big', 'part', 'small6', False),
('part', 'big', 'part', 'small7', False),
('part', 'big', 'part', 'small8', False),
('part', 'small1', 'part', 'big', True),
('part', 'small2', 'part', 'big', True),
('part', 'small3', 'part', 'big', True),
('part', 'small4', 'part', 'big', True),
('part', 'small5', 'part', 'big', False),
('part', 'small6', 'part', 'big', False),
('part', 'small7', 'part', 'big', False),
('part', 'small8', 'part', 'big', False),
),
'<<': (
('sign', 'a', 'sign', 'a', False),
('sign', 'a', 'sign', 'b', True),
('sign', 'b', 'sign', 'a', False),
('sign', 'c', 'sign', 'g', True),
('sign', 'g', 'sign', 'c', False),
('sign', 'c', 'part', 's2', False),
('sign', 'c', 'part', 's3', False),
('sign', 'c', 'part', 's4', True),
('part', 's2', 'sign', 'c', True),
('part', 's3', 'sign', 'c', False),
('part', 's4', 'sign', 'c', False),
('part', 's4', 'part', 's3', False),
('part', 's4', 'part', 's4', False),
('part', 's4', 'part', 's5', True),
('part', 's3', 'part', 's4', True),
('part', 's5', 'part', 's4', False),
('part', 's2', 'part', 'big', True),
('part', 's3', 'part', 'big', False),
('part', 's4', 'part', 'big', False),
('part', 'big', 'part', 's2', False),
('part', 'big', 'part', 's3', False),
('part', 'big', 'part', 's4', False),
('part', 'lower', 'part', 'upper', True),
('part', 'upper', 'part', 'lower', False),
('part', 'odd', 'part', 'even', False),
('part', 'even', 'part', 'odd', False),
),
'>>': (
('sign', 'a', 'sign', 'a', False),
('sign', 'a', 'sign', 'b', False),
('sign', 'b', 'sign', 'a', True),
('sign', 'c', 'sign', 'g', False),
('sign', 'g', 'sign', 'c', True),
('sign', 'c', 'part', 's2', True),
('sign', 'c', 'part', 's3', False),
('sign', 'c', 'part', 's4', False),
('part', 's2', 'sign', 'c', False),
('part', 's3', 'sign', 'c', False),
('part', 's4', 'sign', 'c', True),
('part', 's4', 'part', 's3', True),
('part', 's4', 'part', 's4', False),
('part', 's4', 'part', 's5', False),
('part', 's3', 'part', 's4', False),
('part', 's5', 'part', 's4', True),
('part', 's2', 'part', 'big', False),
('part', 's3', 'part', 'big', False),
('part', 's4', 'part', 'big', False),
('part', 'big', 'part', 's2', True),
('part', 'big', 'part', 's3', False),
('part', 'big', 'part', 's4', False),
('part', 'lower', 'part', 'upper', False),
('part', 'upper', 'part', 'lower', True),
('part', 'odd', 'part', 'even', False),
('part', 'even', 'part', 'odd', False),
),
'=:': (
('sign', 'a', 'sign', 'a', True),
('sign', 'a', 'sign', 'b', False),
('sign', 'a', 'sign', 'c', False),
('sign', 'a', 'sign', 'd', False),
('sign', 'a', 'part', 's1', True),
('sign', 'a', 'part', 's2', False),
('sign', 'a', 'part', 's3', False),
('sign', 'a', 'part', 's4', False),
('part', 's1', 'sign', 'a', True),
('part', 's1', 'sign', 'b', False),
for (nw, kv) in zip(self.nweights(), kvalues))
def aggregate_lfunction(self, r, edge_correction=None):
"""
Compute the aggregate of the empirical L-function over all patterns in
the collection
Parameters
----------
r : array-like
            Array of values at which to evaluate the empirical aggregate
L-function.
edge_correction : str {'stationary', 'finite', 'isotropic', 'periodic',
'plus'}, optional
String to select the edge handling to apply in computations. See
the documentation for `PointPattern` for details. If None, the
edge correction falls back to the default value (set at instance
initialization).
Returns
-------
array
Values of the empirical aggregate L-function evaluated at `r`.
"""
if edge_correction is None:
edge_correction = self.edge_correction
return numpy.sqrt(self.aggregate_kfunction(
r, edge_correction=edge_correction) / _PI)
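    # Background for the formula above: Besag's L-function is defined as L(r) = sqrt(K(r) / pi),
    # so under complete spatial randomness in the plane, where K(r) = pi * r ** 2, the aggregate
    # L-function reduces to the straight line L(r) = r.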
def _pp_attr_r_frame(self, attr, r, edge_correction, **kwargs):
"""
Compute a DataFrame containing values of some PointPattern attribute
which is a function of a distance.
Parameters
----------
attr : string
Name of `PointPattern` attribute to use.
r : array-like
Array of values at which to evaluate the `PointPattern` attribute.
edge_correction : str {'stationary', 'finite', 'isotropic', 'periodic',
'plus'}, optional
String to select the edge handling to apply in computations. See
the documentation for `PointPattern` for details. If None, the
edge correction falls back to the default value (set at instance
initialization).
**kwargs : dict, optional
Other arguments to pass to the `PointPattern` attribute.
Returns
-------
DataFrame
DataFrame where each row contains values of the
`PointPattern` attribute from one pattern, evaluated at `r`.
"""
if edge_correction is None:
edge_correction = self.edge_correction
return pandas.DataFrame(
[getattr(pp, attr)(r, edge_correction=edge_correction, **kwargs)
for pp in self.patterns])
def _pp_attr_r_critical(self, attr, alpha, r, edge_correction, **kwargs):
"""
Compute critical values of some PointPattern attribute
Parameters
----------
attr : string
            Name of `PointPattern` attribute to use.
alpha : scalar between 0.0 and 1.0
Percentile defining the critical values.
r : array-like
Array of values at which to evaluate the critical values.
edge_correction : str {'stationary', 'finite', 'isotropic', 'periodic',
'plus'}, optional
String to select the edge handling to apply in computations. See
the documentation for `PointPattern` for details. If None, the
edge correction falls back to the default value (set at instance
initialization).
**kwargs : dict, optional
Other arguments to pass to the `PointPattern` attribute.
Returns
-------
array
Critical values of the `PointPattern` attribute evaluated at `r`.
"""
if edge_correction is None:
edge_correction = self.edge_correction
attr_frame = self._pp_attr_r_frame(attr, r,
edge_correction=edge_correction,
**kwargs)
return attr_frame.quantile(q=alpha, axis=0)
def _pp_attr_r_mean(self, attr, r, edge_correction, **kwargs):
"""
Compute the mean of some PointPattern attribute
Parameters
----------
attr : string
            Name of `PointPattern` attribute to use.
r : array-like
Array of values at which to evaluate the mean values.
edge_correction : str {'stationary', 'finite', 'isotropic', 'periodic',
'plus'}, optional
String to select the edge handling to apply in computations. See
the documentation for `PointPattern` for details. If None, the
edge correction falls back to the default value (set at instance
initialization).
Returns
-------
array
Mean of the `PointPattern` attribute evaluated at `r`.
"""
if edge_correction is None:
edge_correction = self.edge_correction
attr_frame = self._pp_attr_r_frame(attr, r,
edge_correction=edge_correction,
**kwargs)
return attr_frame.mean(axis=0, skipna=True)
def _pp_attr_r_var(self, attr, r, edge_correction, **kwargs):
"""
Compute the variance of some PointPattern attribute
Parameters
----------
attr : string
            Name of `PointPattern` attribute to use.
r : array-like
Array of values at which to evaluate the variance.
edge_correction : str {'stationary', 'finite', 'isotropic', 'periodic',
'plus'}, optional
String to select the edge handling to apply in computations. See
the documentation for `PointPattern` for details. If None, the
edge correction falls back to the default value (set at instance
initialization).
Returns
-------
array
Variance of the `PointPattern` attribute evaluated at `r`.
"""
if edge_correction is None:
edge_correction = self.edge_correction
attr_frame = self._pp_attr_r_frame(attr, r,
edge_correction=edge_correction,
**kwargs)
return attr_frame.var(axis=0, skipna=True)
def _pp_attr_r_std(self, attr, r, edge_correction, **kwargs):
"""
Compute the standard deviation of some PointPattern attribute
Parameters
----------
attr : string
            Name of `PointPattern` attribute to use.
r : array-like
Array of values at which to evaluate the standard deviation.
edge_correction : str {'stationary', 'finite', 'isotropic', 'periodic',
'plus'}, optional
String to select the edge handling to apply in computations. See
the documentation for `PointPattern` for details. If None, the
edge correction falls back to the default value (set at instance
initialization).
Returns
-------
array
Standard deviation of the `PointPattern` attribute evaluated at
`r`.
"""
if edge_correction is None:
edge_correction = self.edge_correction
attr_frame = self._pp_attr_r_frame(attr, r,
edge_correction=edge_correction,
**kwargs)
return attr_frame.std(axis=0, skipna=True)
def kframe(self, r, edge_correction=None):
"""
Compute a DataFrame containing values of the empirical K-functions of
the patterns
Parameters
----------
r : array-like
            Array of values at which to evaluate the empirical K-functions.
edge_correction : str {'stationary', 'finite', 'isotropic', 'periodic',
'plus'}, optional
String to select the edge handling to apply in computations. See
the documentation for `PointPattern` for details. If None, the
edge correction falls back to the default value (set at instance
initialization).
Returns
-------
DataFrame
DataFrame where each row contains values of the empirical
K-function from one pattern, evaluated at `r`.
"""
return self._pp_attr_r_frame('kfunction', r,
edge_correction=edge_correction)
def kcritical(self, alpha, r, edge_correction=None):
"""
Compute critical values of the empirical K-functions of the patterns
Parameters
----------
alpha : scalar between 0.0 and 1.0
Percentile defining the critical values.
r : array-like
Array of values at which to evaluate the critical values of the
empirical K-functions.
edge_correction : str {'stationary', 'finite', 'isotropic', 'periodic',
'plus'}, optional
String to select the edge handling to apply in computations. See
the documentation for `PointPattern` for details. If None, the
edge correction falls back to the default value (set at instance
initialization).
Returns
-------
array
Critical values of the empirical K-functions evaluated at `r`.
"""
return self._pp_attr_r_critical('kfunction', alpha, r,
edge_correction=edge_correction)
def kmean(self, r, edge_correction=None):
"""
Compute the mean of the empirical K-functions of the patterns
Parameters
----------
r : array-like
Array of values at which to evaluate the mean values of the
empirical K-functions.
edge_correction : str {'stationary', 'finite', 'isotropic', 'periodic',
'plus'}, optional
String to select the edge handling to apply in computations. See
the documentation for `PointPattern` for details. If None, the
edge correction falls back to the default value (set at instance
initialization).
Returns
-------
array
Mean of the empirical K-functions evaluated at `r`.
"""
return self._pp_attr_r_mean('kfunction', r,
edge_correction=edge_correction)
def kvar(self, r, edge_correction=None):
"""
Compute the variance of the empirical K-functions of the patterns
Parameters
----------
r : array-like
Array of values at which to evaluate the variance of the empirical
K-functions.
edge_correction : str {'stationary', 'finite', 'isotropic', 'periodic',
'plus'}, optional
String to select the edge handling to apply in computations. See
the documentation for `PointPattern` for details. If None, the
edge correction falls back to the default value (set at instance
initialization).
Returns
-------
array
Variance of the empirical K-functions evaluated at `r`.
"""
return self._pp_attr_r_var('kfunction', r,
edge_correction=edge_correction)
def kstd(self, r, edge_correction=None):
"""
        Compute the standard deviation of the empirical K-functions of the
patterns
Parameters
----------
r : array-like
Array of values at which to evaluate the standard deviation of the
empirical K-functions.
edge_correction : str {'stationary', 'finite', 'isotropic', 'periodic',
'plus'}, optional
String to select the edge handling to apply in computations. See
the documentation for `PointPattern` for details. If None, the
edge correction falls back to the default value (set at instance
initialization).
Returns
-------
array
Standard deviation of the empirical K-functions evaluated at `r`.
"""
return self._pp_attr_r_std('kfunction', r,
edge_correction=edge_correction)
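    # Usage sketch for the summaries above (with `pc` a hypothetical instance of this class):
    # a simple pointwise envelope check against complete spatial randomness could look like
    #   r = numpy.linspace(0.0, rmax, 50)
    #   low, high = pc.kcritical(0.025, r), pc.kcritical(0.975, r)
    #   csr = _PI * r ** 2          # theoretical K-function under CSR
    #   outside = (csr < low) | (csr > high)
    # flagging the distances r at which the collection of patterns deviates from CSR.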
def lframe(self, r, edge_correction=None):
"""
Compute a DataFrame containing values of the empirical L-functions of
the patterns
Parameters
----------
r : array-like
            Array of values at which to evaluate the empirical L-functions.
edge_correction : str {'stationary', 'finite', 'isotropic', 'periodic',
'plus'}, optional
String to select the edge handling to apply in computations. See
the documentation for `PointPattern` for details. If None, the
edge correction falls back to the default value (set at instance
initialization).
Returns
-------
DataFrame
DataFrame where each row contains values of the empirical
L-function from one pattern, evaluated at `r`.
"""
return self._pp_attr_r_frame('lfunction', r,
edge_correction=edge_correction)
def lcritical(self, alpha, r, edge_correction=None):
"""
        Compute critical values of the empirical L-functions of the patterns
[0, 1, 2]
1:α(t) = 2.00 + 1.00 t; π = [1, 2]
2:α(t) = 2.00 + 1.00 t; π = [0]
3:α(t) = 2.00 + 1.00 t; π = [2]
4:α(t) = 2.00 + 1.00 t; π = [0, 1]
Servers:
0:β(t) = 6.00 . (t - 4.00)+
1:β(t) = 6.00 . (t - 4.00)+
2:β(t) = 6.00 . (t - 4.00)+>, [0, 1, 3])
"""
flow_list = []
list_prems = []
pre = 0
for flow in self.network.flows:
i = 0
list_prems += [pre]
p = [flow.path[i]]
while i < len(flow.path) - 1:
if flow.path[i + 1] == self.succ_forest[flow.path[i]]:
p += [flow.path[i + 1]]
else:
pre += 1
flow_list += [Flow(flow.acurve, p)]
p = [flow.path[i + 1]]
i += 1
pre += 1
flow_list += [Flow(flow.acurve, p)]
return Network(flow_list, self.network.servers), list_prems
@property
def fixpoint_matrix(self) -> tuple:
r"""
        Compute the fix-point matrix to solve with the exact method, represented by the tuple (mat_a, vec_b).
        This makes use of the matrix computing the :math:`\xi` coefficients. The unknowns are the :math:`\sigma` of
        the flows in the network transformed into a forest.
:return: the matrix and the vector such that :math:`mat_a \sigma = vec_b`
:rtype: tuple
>>> toy = ExactFixPointAnalyzer(Ring(3, ArrivalCurve(2., 1.), Server(ServiceCurve(6, 4))))
>>> toy.fixpoint_matrix
(array([[ 1. , 0. , 0. , 0. , 0. ],
[ 0. , 1. , 0. , 0. , 0. ],
[-0.25, -1. , 0.9 , -0.25, -0.25],
[ 0. , 0. , 0. , 1. , 0. ],
[-0.25, -0.25, -0.07, -1. , 0.9 ]]), array([ 2. , 2. , 14.4 , 2. , 10.08]))
"""
forest, list_prems = self.nk2forest
s = len(forest.flows)
mat_a = np.zeros((s, s))
vec_b = np.zeros(s)
list_prems += [forest.num_flows]
i = 0
for h in range(s):
if h == list_prems[i]:
vec_b[h] = forest.flows[i].acurve.sigma
mat_a[h, h] = 1.
i += 1
else:
ftrim = forest.trim(forest.flows[h-1].path[-1])
# print (ftrim)
ffa = ExactFeedForwardAnalyzer(ftrim)
xi = ffa.exact_xi([h-1], forest.flows[h-1].path[-1])
# print(xi)
mat_a[h, h] = 1.
mat_a[h, h - 1] = -1
for h1 in range(s):
if not h - 1 == h1 and not ftrim.flows[h1].path == []:
mat_a[h, h1] -= xi[ftrim.flows[h1].path[0], ftrim.flows[h1].path[-1]]
vec_b[h] = ffa.latency_term([h-1], forest.flows[h-1].path[-1], xi)
return mat_a, vec_b
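    # The fix point itself is the solution sigma of the linear system mat_a @ sigma = vec_b
    # returned above; ff_equiv below obtains it via resoud(), which is assumed here to act as a
    # linear solver (comparable to np.linalg.solve(mat_a, vec_b)), and the entries of sigma
    # become the burst terms of the split flows.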
@property
def ff_equiv(self) -> Network:
"""
        Transform a non-feed-forward network into a feed-forward network by splitting the flows and computing the
        arrival curve of every split flow by the fix-point method, using the exact method.
:return: The equivalent network
:rtype: Network
>>> toy = ExactFixPointAnalyzer(Ring(3, ArrivalCurve(2., 1.), Server(ServiceCurve(6, 4))))
>>> toy.ff_equiv
<Network:
Flows:
0:α(t) = 2.00 + 1.00 t; π = [0, 1, 2]
1:α(t) = 2.00 + 1.00 t; π = [1, 2]
2:α(t) = 23.89 + 1.00 t; π = [0]
3:α(t) = 2.00 + 1.00 t; π = [2]
4:α(t) = 16.39 + 1.00 t; π = [0, 1]
Servers:
0:β(t) = 6.00 . (t - 4.00)+
1:β(t) = 6.00 . (t - 4.00)+
2:β(t) = 6.00 . (t - 4.00)+>
"""
tab_sigma = resoud(self.fixpoint_matrix[0], self.fixpoint_matrix[1])
forest = self.nk2forest[0]
s = forest.num_flows
list_flows = []
for i in range(s):
flow = forest.flows[i]
if tab_sigma[i] >= 0:
list_flows += [Flow(ArrivalCurve(tab_sigma[i], flow.acurve.rho), flow.path)]
else:
list_flows += [Flow(ArrivalCurve(np.inf, flow.acurve.rho), flow.path)]
return Network(list_flows, self.network.servers)
def _flow_decomp(self, flow, server):
ff_net, list_prems = self.nk2forest
f = list_prems[flow]
if flow == self.network.num_flows - 1:
b = ff_net.num_flows
else:
b = list_prems[flow + 1]
while f < b and server not in ff_net.flows[f].path:
f += 1
if f == b:
raise NameError("flow does not cross the server")
return f
def backlog(self, flow, server):
"""
Computes a backlog bound of a flow at a server based on the exact analysis.
:param flow: flow for which the backlog is computed
:type flow: int
:param server: server at which the backlog is computed
:type server: int
:return: the backlog of flow and server
:rtype: float
>>> toy = ExactFixPointAnalyzer(Ring(3, ArrivalCurve(2., 1.), Server(ServiceCurve(6, 4))))
>>> toy.backlog(0, 2)
31.069400630914828
"""
f = self._flow_decomp(flow, server)
# print(f)
return ExactFeedForwardAnalyzer(self.ff_equiv).backlog(f, server)
def delay(self, flow):
"""
Computes a delay bound of a flow based on the exact analysis.
WARNING: only for flows not cut into several subflows -> TODO
:param flow: flow for which the delay is computed
:type flow: int
:return: the delay of flow
:rtype: float
"""
server = self.network.flows[flow].path[-1]
f = self._flow_decomp(flow, server)
# print(f)
# print(ExactFeedForwardAnalyzer(self.ff_equiv).network)
# print(f)
return ExactFeedForwardAnalyzer(self.ff_equiv).delay(f)
class GroupFixPointAnalyzer(ExactFixPointAnalyzer):
@property
def _removed_edges(self) -> list:
"""
Compute the set of edges that are removed when transforming the network into a forest.
:return: the list of removed edges
:rtype: list
>>> toy = GroupFixPointAnalyzer(Ring(3, ArrivalCurve(2., 1.), Server(ServiceCurve(6, 4))))
>>> toy._removed_edges
[(2, 0)]
"""
lre = set([])
for i in range(self.network.num_flows):
for h in range(self.network.flows[i].length - 1):
if not self.network.flows[i].path[h + 1] == self.succ_forest[self.network.flows[i].path[h]]:
lre.add((self.network.flows[i].path[h], self.network.flows[i].path[h + 1]))
return list(lre)
@property
def foi_group(self):
"""
        For each removed edge, construct the set of flows of interest for the analysis, that is, the set of flows
        that were going through that edge. These are the flows of interest for further analysis (we want the
        global worst-case backlog of these flows).
:return: the list of flows of interests for each removed edge
:rtype: list
>>> toy = GroupFixPointAnalyzer(Ring(3, ArrivalCurve(2., 1.), Server(ServiceCurve(6, 4))))
>>> toy.foi_group
[[1, 3]]
"""
forest, list_prems = self.nk2forest
list_prems += [forest.num_flows]
list_per_edge = len(self._removed_edges) * [[]]
for f in range(len(self._removed_edges)):
(i, j) = self._removed_edges[f]
s = 1
for h in range(forest.num_flows - 1):
if h + 1 == list_prems[s]:
s += 1
elif (i, j) == (forest.flows[h].path[-1], forest.flows[h + 1].path[0]):
list_per_edge[f] = list_per_edge[f] + [h]
return list_per_edge
@property
def fixpoint_matrix(self):
r"""
        Compute the fix-point matrix to solve with the exact method, represented by the tuple (mat_a, vec_b).
        This makes use of the matrix computing the :math:`\xi` coefficients. The unknowns are the :math:`\sigma` of
        the groups of flows, one per removed edge in the network transformed into a forest.
:return: the matrix and the vector such that :math:`mat_a \sigma = vec_b`
:rtype: tuple
>>> toy = GroupFixPointAnalyzer(Ring(3, ArrivalCurve(2., 1.), Server(ServiceCurve(6, 4))))
>>> toy.fixpoint_matrix
(array([[0.72]]), array([18.]))
"""
forest, list_prems = self.nk2forest
redges = self._removed_edges
rlist = self.foi_group
#print(rlist)
#print(list_prems)
s = len(redges)
mat_a = np.zeros((s, s))
vec_b = np.zeros(s)
for h in range(s):
tforest = forest.trim(redges[h][0])
ffa = ExactFeedForwardAnalyzer(tforest)
xi = ffa.exact_xi(rlist[h], redges[h][0])
# print(xi)
mat_a[h, h] = 1
for e in range(s):
mat_a[h, e] -= max([0] + [xi[tforest.flows[f + 1].path[0],
tforest.flows[f + 1].path[-1]]
for f in rlist[e]])
vec_b[h] = sum([xi[tforest.flows[f].path[0], tforest.flows[f].path[-1]]
* tforest.flows[f].acurve.sigma
for f in list_prems if not tforest.flows[f].path == []
and f not in rlist[h]])
vec_b[h] += sum([tforest.flows[f].acurve.sigma
for f in list_prems if not tforest.flows[f].path == []
and f in rlist[h]])
vec_b += ffa.latency_term(rlist[h], redges[h][0], xi)
#print(mat_a, vec_b)
return mat_a, vec_b
@property
def ff_equiv(self) -> Network:
"""
        Transform a non-feed-forward network into a feed-forward network by splitting the flows and computing the
        arrival curve of every split flow by the fix-point method, using the exact method and grouping of flows.
:return: The equivalent network
:rtype: Network
>>> toy = GroupFixPointAnalyzer(Ring(3, ArrivalCurve(2., 1.), Server(ServiceCurve(6, 4))))
>>> toy.ff_equiv
<Network:
Flows:
0:α(t) = 2.00 + 1.00 t; π = [0, 1, 2]
1:α(t) = 2.00 + 1.00 t; π = [1, 2]
2:α(t) = 30.53 + 1.00 t; π = [0]
3:α(t) = 2.00 + 1.00 t; π = [2]
4:α(t) = 30.53 + 1.00 t; π = [0, 1]
Servers:
0:β(t) = 6.00 . (t - 4.00)+
1:β(t) = 6.00 . (t - 4.00)+
2:β(t) = 6.00 . (t - 4.00)+>
"""
tab_sigma = resoud(self.fixpoint_matrix[0], self.fixpoint_matrix[1])
forest, list_prems = self.nk2forest
s = forest.num_flows
r = len(self._removed_edges)
list_sigma = np.zeros(s)
for i in range(self.network.num_flows):
list_sigma[list_prems[i]] = self.network.flows[i].acurve.sigma
for i in range(r):
for f in self.foi_group[i]:
if tab_sigma[i] >= 0:
list_sigma[f + 1] = tab_sigma[i]
else:
list_sigma[f + 1] = np.inf
list_flows = []
for i in range(s):
flow = forest.flows[i]
list_flows += [Flow(ArrivalCurve(list_sigma[i], flow.acurve.rho), flow.path)]
        return Network(list_flows, self.network.servers)  # same construction as ExactFixPointAnalyzer.ff_equiv above
import numpy as np
import h5py
from typing import List, BinaryIO
from mlagents_envs.exception import UnityException
class BufferException(UnityException):
"""
Related to errors with the Buffer.
"""
pass
class AgentBuffer(dict):
"""
    AgentBuffer contains a dictionary of AgentBufferFields. Each agent has its own AgentBuffer.
The keys correspond to the name of the field. Example: state, action
"""
class AgentBufferField(list):
"""
        AgentBufferField is a list of numpy arrays. When an agent collects a field, you can add it to its
AgentBufferField with the append method.
"""
def __init__(self):
self.padding_value = 0
super().__init__()
def __str__(self):
return str(np.array(self).shape)
def append(self, element: np.ndarray, padding_value: float = 0.0) -> None:
"""
Adds an element to this list. Also lets you change the padding
type, so that it can be set on append (e.g. action_masks should
be padded with 1.)
:param element: The element to append to the list.
:param padding_value: The value used to pad when get_batch is called.
"""
super().append(element)
self.padding_value = padding_value
def extend(self, data: np.ndarray) -> None:
"""
Adds a list of np.arrays to the end of the list of np.arrays.
:param data: The np.array list to append.
"""
self += list(np.array(data))
def set(self, data):
"""
Sets the list of np.array to the input data
:param data: The np.array list to be set.
"""
# Make sure we convert incoming data to float32 if it's a float
dtype = None
if data is not None and len(data) and isinstance(data[0], float):
dtype = np.float32
self[:] = []
self[:] = list(np.array(data, dtype=dtype))
def get_batch(
self,
batch_size: int = None,
training_length: int = 1,
sequential: bool = True,
) -> np.ndarray:
"""
Retrieve the last batch_size elements of length training_length
from the list of np.array
:param batch_size: The number of elements to retrieve. If None:
All elements will be retrieved.
:param training_length: The length of the sequence to be retrieved. If
None: only takes one element.
:param sequential: If true and training_length is not None: the elements
will not repeat in the sequence. [a,b,c,d,e] with training_length = 2 and
sequential=True gives [[0,a],[b,c],[d,e]]. If sequential=False gives
[[a,b],[b,c],[c,d],[d,e]]
"""
if sequential:
# The sequences will not have overlapping elements (this involves padding)
leftover = len(self) % training_length
# leftover is the number of elements in the first sequence (this sequence might need 0 padding)
if batch_size is None:
# retrieve the maximum number of elements
batch_size = len(self) // training_length + 1 * (leftover != 0)
# The maximum number of sequences taken from a list of length len(self) without overlapping
# with padding is equal to batch_size
if batch_size > (len(self) // training_length + 1 * (leftover != 0)):
                    raise BufferException(
                        "The batch size and training length requested for get_batch were"
" too large given the current number of data points."
)
if batch_size * training_length > len(self):
padding = np.array(self[-1], dtype=np.float32) * self.padding_value
return np.array(
[padding] * (training_length - leftover) + self[:],
dtype=np.float32,
)
else:
return np.array(
self[len(self) - batch_size * training_length :],
dtype=np.float32,
)
else:
# The sequences will have overlapping elements
if batch_size is None:
# retrieve the maximum number of elements
batch_size = len(self) - training_length + 1
# The number of sequences of length training_length taken from a list of len(self) elements
# with overlapping is equal to batch_size
if (len(self) - training_length + 1) < batch_size:
                    raise BufferException(
                        "The batch size and training length requested for get_batch were"
" too large given the current number of data points."
)
tmp_list: List[np.ndarray] = []
for end in range(len(self) - batch_size + 1, len(self) + 1):
tmp_list += self[end - training_length : end]
return np.array(tmp_list, dtype=np.float32)
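        # Worked example of the docstring above: with elements [a, b, c, d, e] and
        # training_length=2, sequential=True gives leftover=1, so one padding element
        # (last element * padding_value) is prepended -> [[pad, a], [b, c], [d, e]];
        # sequential=False keeps every overlapping window -> [[a, b], [b, c], [c, d], [d, e]].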
def reset_field(self) -> None:
"""
Resets the AgentBufferField
"""
self[:] = []
def __init__(self):
self.last_brain_info = None
self.last_take_action_outputs = None
super().__init__()
def __str__(self):
return ", ".join(["'{0}' : {1}".format(k, str(self[k])) for k in self.keys()])
def reset_agent(self) -> None:
"""
Resets the AgentBuffer
"""
for k in self.keys():
self[k].reset_field()
self.last_brain_info = None
self.last_take_action_outputs = None
def __getitem__(self, key):
if key not in self.keys():
self[key] = self.AgentBufferField()
return super().__getitem__(key)
def check_length(self, key_list: List[str]) -> bool:
"""
Some methods will require that some fields have the same length.
check_length will return true if the fields in key_list
have the same length.
:param key_list: The fields which length will be compared
"""
if len(key_list) < 2:
return True
length = None
for key in key_list:
if key not in self.keys():
return False
if (length is not None) and (length != len(self[key])):
return False
length = len(self[key])
return True
def shuffle(self, sequence_length: int, key_list: List[str] = None) -> None:
"""
Shuffles the fields in key_list in a consistent way: The reordering will
be the same across fields.
:param key_list: The fields that must be shuffled.
"""
if key_list is None:
key_list = list(self.keys())
if not self.check_length(key_list):
raise BufferException(
"Unable to shuffle if the fields are not of same length"
)
s = np.arange(len(self[key_list[0]]) // sequence_length)
np.random.shuffle(s)
for key in key_list:
tmp: List[np.ndarray] = []
for i in s:
tmp += self[key][i * sequence_length : (i + 1) * sequence_length]
self[key][:] = tmp
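    # Example of the consistency guarantee above: with sequence_length=2 and six experiences per
    # field, the three blocks [0:2], [2:4], [4:6] are permuted with the same random order `s` in
    # every field, so observations, actions, rewards etc. stay aligned after shuffling.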
def make_mini_batch(self, start: int, end: int) -> "AgentBuffer":
"""
Creates a mini-batch from buffer.
:param start: Starting index of buffer.
:param end: Ending index of buffer.
:return: Dict of mini batch.
"""
mini_batch = AgentBuffer()
for key in self:
mini_batch[key] = self[key][start:end]
return mini_batch
def sample_mini_batch(
self, batch_size: int, sequence_length: int = 1
) -> "AgentBuffer":
"""
Creates a mini-batch from a random start and end.
:param batch_size: number of elements to withdraw.
:param sequence_length: Length of sequences to sample.
Number of sequences to sample will be batch_size/sequence_length.
"""
num_seq_to_sample = batch_size // sequence_length
mini_batch = AgentBuffer()
buff_len = self.num_experiences
num_sequences_in_buffer = buff_len // sequence_length
start_idxes = (
np.random.randint(num_sequences_in_buffer, size=num_seq_to_sample)
* sequence_length
) # Sample random sequence starts
for i in start_idxes:
for key in self:
mini_batch[key].extend(self[key][i : i + sequence_length])
return mini_batch
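    # For instance, batch_size=64 with sequence_length=8 draws 64 // 8 = 8 random start indices
    # aligned to sequence boundaries and copies 8 consecutive experiences from each, so the
    # resulting mini batch again holds 64 experiences per field.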
def save_to_file(self, file_object: BinaryIO) -> None:
"""
Saves the AgentBuffer to a file-like object.
"""
with h5py.File(file_object, "w") as write_file:
for key, data in self.items():
write_file.create_dataset(key, data=data, dtype="f", compression="gzip")
def load_from_file(self, file_object: BinaryIO) -> None:
"""
Loads the AgentBuffer from a file-like object.
"""
with h5py.File(file_object, "r") as read_file:
for key in list(read_file.keys()):
self[key] = AgentBuffer.AgentBufferField()
# extend() will convert the numpy array's first dimension into list
self[key].extend(read_file[key][()])
def truncate(self, max_length: int, sequence_length: int = 1) -> None:
"""
Truncates the buffer to a certain length.
This can be slow for large buffers. We compensate by cutting further than we need to, so that
we're not truncating at each update. Note that we must truncate an integer number of sequence_lengths
        :param max_length: The length at which to truncate the buffer.
"""
current_length = self.num_experiences
# make max_length an integer number of sequence_lengths
max_length -= max_length % sequence_length
if current_length > max_length:
for _key in self.keys():
self[_key][:] = self[_key][current_length - max_length :]
def resequence_and_append(
self,
target_buffer: "AgentBuffer",
key_list: List[str] = None,
batch_size: int = None,
training_length: int = None,
) -> None:
"""
Takes in a batch size and training length (sequence length), and appends this AgentBuffer to target_buffer
properly padded for LSTM use. Optionally, use key_list to restrict which fields are inserted into the new
buffer.
:param target_buffer: The buffer which to append the samples to.
:param key_list: The fields that must be added. If None: all fields will be appended.
:param batch_size: The number of elements that must be appended. If None: All of them will be.
:param training_length: The length of the samples that must be appended. If None: only takes one element.
"""
if key_list is None:
key_list = list(self.keys())
if not self.check_length(key_list):
raise BufferException(
"The length of the fields {0} were not of same length".format(key_list)
)
for field_key in key_list:
target_buffer[field_key].extend(
self[field_key].get_batch(
batch_size=batch_size, training_length=training_length
)
)
@property
def num_experiences(self) -> int:
"""
        The number of agent experiences in the AgentBuffer, i.e. the length of the buffer.
import datetime
import unittest
from il2fb.ds.events.definitions.shootdowns import AIAircraftShotdownEvent
from il2fb.ds.events.definitions.shootdowns import AIAircraftShotdownSelfEvent
from il2fb.ds.events.definitions.shootdowns import HumanAircraftShotdownEvent
from il2fb.ds.events.definitions.shootdowns import HumanAircraftShotdownSelfEvent
from il2fb.ds.events.definitions.shootdowns import AIAircraftShotdownByAIAircraftEvent
from il2fb.ds.events.definitions.shootdowns import AIAircraftShotdownByBridgeEvent
from il2fb.ds.events.definitions.shootdowns import AIAircraftShotdownByBuildingEvent
from il2fb.ds.events.definitions.shootdowns import AIAircraftShotdownByHumanAircraftEvent
from il2fb.ds.events.definitions.shootdowns import AIAircraftShotdownByMovingUnitEvent
from il2fb.ds.events.definitions.shootdowns import AIAircraftShotdownByMovingUnitMemberEvent
from il2fb.ds.events.definitions.shootdowns import AIAircraftShotdownByObjectEvent
from il2fb.ds.events.definitions.shootdowns import AIAircraftShotdownByStationaryUnitEvent
from il2fb.ds.events.definitions.shootdowns import AIAircraftShotdownByTreeEvent
from il2fb.ds.events.definitions.shootdowns import AIAircraftShotdownByParatrooperEvent
from il2fb.ds.events.definitions.shootdowns import HumanAircraftShotdownByAIAircraftEvent
from il2fb.ds.events.definitions.shootdowns import HumanAircraftShotdownByBridgeEvent
from il2fb.ds.events.definitions.shootdowns import HumanAircraftShotdownByBuildingEvent
from il2fb.ds.events.definitions.shootdowns import HumanAircraftShotdownByHumanAircraftEvent
from il2fb.ds.events.definitions.shootdowns import HumanAircraftShotdownByMovingUnitEvent
from il2fb.ds.events.definitions.shootdowns import HumanAircraftShotdownByMovingUnitMemberEvent
from il2fb.ds.events.definitions.shootdowns import HumanAircraftShotdownByObjectEvent
from il2fb.ds.events.definitions.shootdowns import HumanAircraftShotdownByStationaryUnitEvent
from il2fb.ds.events.definitions.shootdowns import HumanAircraftShotdownByTreeEvent
from il2fb.ds.events.definitions.shootdowns import HumanAircraftShotdownByParatrooperEvent
from il2fb.ds.events.definitions.shootdowns import AIAircraftShotdownByAIAircraftAndAIAircraftEvent
from il2fb.ds.events.definitions.shootdowns import AIAircraftShotdownByAIAircraftAndHumanAircraftEvent
from il2fb.ds.events.definitions.shootdowns import AIAircraftShotdownByHumanAircraftAndAIAircraftEvent
from il2fb.ds.events.definitions.shootdowns import AIAircraftShotdownByHumanAircraftAndHumanAircraftEvent
from il2fb.ds.events.definitions.shootdowns import HumanAircraftShotdownByAIAircraftAndAIAircraftEvent
from il2fb.ds.events.definitions.shootdowns import HumanAircraftShotdownByAIAircraftAndHumanAircraftEvent
from il2fb.ds.events.definitions.shootdowns import HumanAircraftShotdownByHumanAircraftAndAIAircraftEvent
from il2fb.ds.events.definitions.shootdowns import HumanAircraftShotdownByHumanAircraftAndHumanAircraftEvent
from il2fb.ds.events.parsing.shootdowns import ActorShotdownLineParser
class ActorShotdownLineParserTestCase(unittest.TestCase):
def setUp(self):
self.parser = ActorShotdownLineParser()
def test_parse_line_no_match(self):
timestamp = None
line = "foo"
evt = self.parser.parse_line(timestamp, line)
self.assertIsNone(evt)
def test_ai_aircraft_shotdown(self):
timestamp = datetime.datetime(2020, 12, 31, 15, 46, 8)
line = "r01001 shot down by at 145663.6 62799.64 83.96088"
evt = self.parser.parse_line(timestamp, line)
self.assertIsInstance(evt, AIAircraftShotdownEvent)
self.assertEqual(evt.data.timestamp, timestamp)
self.assertEqual(evt.data.target.id, "r0100")
self.assertEqual(evt.data.target.flight_index, 1)
self.assertEqual(evt.data.pos.x, float("145663.6"))
self.assertEqual(evt.data.pos.y, float("62799.64"))
self.assertEqual(evt.data.pos.z, float("83.96088"))
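    # Id convention checked throughout these tests: for an AI actor token such as "r01001", the
    # trailing digit is the aircraft's index within the flight (1) and the remaining prefix
    # "r0100" is the flight id; human actors are "callsign:aircraft" pairs instead.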
def test_ai_aircraft_shotdown_self(self):
timestamp = datetime.datetime(2020, 12, 31, 15, 46, 8)
line = "r01001 shot down by landscape at 145663.6 62799.64 83.96088"
evt = self.parser.parse_line(timestamp, line)
self.assertIsInstance(evt, AIAircraftShotdownSelfEvent)
self.assertEqual(evt.data.timestamp, timestamp)
self.assertEqual(evt.data.target.id, "r0100")
self.assertEqual(evt.data.target.flight_index, 1)
line = "r01001 shot down by NONAME at 145663.6 62799.64 83.96088"
evt = self.parser.parse_line(timestamp, line)
self.assertIsInstance(evt, AIAircraftShotdownSelfEvent)
self.assertEqual(evt.data.timestamp, timestamp)
self.assertEqual(evt.data.target.id, "r0100")
self.assertEqual(evt.data.target.flight_index, 1)
def test_ai_aircraft_shotdown_by_ai_aircraft(self):
timestamp = datetime.datetime(2020, 12, 31, 15, 46, 8)
line = "r01001 shot down by g01002 at 145663.6 62799.64 83.96088"
evt = self.parser.parse_line(timestamp, line)
self.assertIsInstance(evt, AIAircraftShotdownByAIAircraftEvent)
self.assertEqual(evt.data.timestamp, timestamp)
self.assertEqual(evt.data.target.id, "r0100")
self.assertEqual(evt.data.target.flight_index, 1)
self.assertEqual(evt.data.attacker.id, "g0100")
self.assertEqual(evt.data.attacker.flight_index, 2)
def test_ai_aircraft_shotdown_by_bridge(self):
timestamp = datetime.datetime(2020, 12, 31, 15, 46, 8)
line = "r01001 shot down by Bridge159 at 145663.6 62799.64 83.96088"
evt = self.parser.parse_line(timestamp, line)
self.assertIsInstance(evt, AIAircraftShotdownByBridgeEvent)
self.assertEqual(evt.data.timestamp, timestamp)
self.assertEqual(evt.data.target.id, "r0100")
self.assertEqual(evt.data.target.flight_index, 1)
self.assertEqual(evt.data.attacker.id, "Bridge159")
def test_ai_aircraft_shotdown_by_building(self):
timestamp = datetime.datetime(2020, 12, 31, 15, 46, 8)
line = "r01001 shot down by 194_bld at 145663.6 62799.64 83.96088"
evt = self.parser.parse_line(timestamp, line)
self.assertIsInstance(evt, AIAircraftShotdownByBuildingEvent)
self.assertEqual(evt.data.timestamp, timestamp)
self.assertEqual(evt.data.target.id, "r0100")
self.assertEqual(evt.data.target.flight_index, 1)
self.assertEqual(evt.data.attacker.id, "194_bld")
def test_ai_aircraft_shotdown_by_human_aircraft(self):
timestamp = datetime.datetime(2020, 12, 31, 15, 46, 8)
line = "r01001 shot down by TheUser:TB-7_M40F at 145663.6 62799.64 83.96088"
evt = self.parser.parse_line(timestamp, line)
self.assertIsInstance(evt, AIAircraftShotdownByHumanAircraftEvent)
self.assertEqual(evt.data.timestamp, timestamp)
self.assertEqual(evt.data.target.id, "r0100")
self.assertEqual(evt.data.target.flight_index, 1)
self.assertEqual(evt.data.attacker.callsign, "TheUser")
self.assertEqual(evt.data.attacker.aircraft, "TB-7_M40F")
def test_ai_aircraft_shotdown_by_human_aircraft_stripped_callsign_spaces(self):
timestamp = datetime.datetime(2020, 12, 31, 15, 46, 8)
line = "r01001 shot down by The User :TB-7_M40F at 145663.6 62799.64 83.96088"
evt = self.parser.parse_line(timestamp, line)
self.assertIsInstance(evt, AIAircraftShotdownByHumanAircraftEvent)
self.assertEqual(evt.data.attacker.callsign, "TheUser")
def test_ai_aircraft_shotdown_by_human_aircraft_empty_callsign(self):
timestamp = datetime.datetime(2020, 12, 31, 15, 46, 8)
line = "r01001 shot down by :TB-7_M40F at 145663.6 62799.64 83.96088"
evt = self.parser.parse_line(timestamp, line)
self.assertIsInstance(evt, AIAircraftShotdownByHumanAircraftEvent)
self.assertEqual(evt.data.attacker.callsign, "")
line = "r01001 shot down by :TB-7_M40F at 145663.6 62799.64 83.96088"
evt = self.parser.parse_line(timestamp, line)
self.assertIsInstance(evt, AIAircraftShotdownByHumanAircraftEvent)
self.assertEqual(evt.data.attacker.callsign, "")
def test_ai_aircraft_shotdown_by_moving_unit(self):
timestamp = datetime.datetime(2020, 12, 31, 15, 46, 8)
line = "r01001 shot down by 0_Chief at 145663.6 62799.64 83.96088"
evt = self.parser.parse_line(timestamp, line)
self.assertIsInstance(evt, AIAircraftShotdownByMovingUnitEvent)
self.assertEqual(evt.data.timestamp, timestamp)
self.assertEqual(evt.data.target.id, "r0100")
self.assertEqual(evt.data.target.flight_index, 1)
self.assertEqual(evt.data.attacker.id, "0_Chief")
def test_ai_aircraft_shotdown_by_moving_unit_member(self):
timestamp = datetime.datetime(2020, 12, 31, 15, 46, 8)
line = "r01001 shot down by 0_Chief0 at 145663.6 62799.64 83.96088"
evt = self.parser.parse_line(timestamp, line)
self.assertIsInstance(evt, AIAircraftShotdownByMovingUnitMemberEvent)
self.assertEqual(evt.data.timestamp, timestamp)
self.assertEqual(evt.data.target.id, "r0100")
self.assertEqual(evt.data.target.flight_index, 1)
self.assertEqual(evt.data.attacker.id, "0_Chief")
self.assertEqual(evt.data.attacker.member_index, 0)
def test_ai_aircraft_shotdown_by_object(self):
timestamp = datetime.datetime(2020, 12, 31, 15, 46, 8)
line = "r01001 shot down by 3do/Buildings/Airdrome/BarrelBlock1/mono.sim at 145663.6 62799.64 83.96088"
evt = self.parser.parse_line(timestamp, line)
self.assertIsInstance(evt, AIAircraftShotdownByObjectEvent)
self.assertEqual(evt.data.timestamp, timestamp)
self.assertEqual(evt.data.target.id, "r0100")
self.assertEqual(evt.data.target.flight_index, 1)
self.assertEqual(evt.data.attacker.id, "3do/Buildings/Airdrome/BarrelBlock1/mono.sim")
def test_ai_aircraft_shotdown_by_stationary_unit(self):
timestamp = datetime.datetime(2020, 12, 31, 15, 46, 8)
line = "r01001 shot down by 0_Static at 145663.6 62799.64 83.96088"
evt = self.parser.parse_line(timestamp, line)
self.assertIsInstance(evt, AIAircraftShotdownByStationaryUnitEvent)
self.assertEqual(evt.data.timestamp, timestamp)
self.assertEqual(evt.data.target.id, "r0100")
self.assertEqual(evt.data.target.flight_index, 1)
self.assertEqual(evt.data.attacker.id, "0_Static")
def test_ai_aircraft_shotdown_by_tree(self):
timestamp = datetime.datetime(2020, 12, 31, 15, 46, 8)
line = "r01001 shot down by 3do/Tree/Line/live.sim at 145663.6 62799.64 83.96088"
evt = self.parser.parse_line(timestamp, line)
self.assertIsInstance(evt, AIAircraftShotdownByTreeEvent)
self.assertEqual(evt.data.timestamp, timestamp)
self.assertEqual(evt.data.target.id, "r0100")
self.assertEqual(evt.data.target.flight_index, 1)
def test_ai_aircraft_shotdown_by_paratrooper(self):
timestamp = datetime.datetime(2020, 12, 31, 15, 46, 8)
line = "r01001 shot down by _para_1 at 145663.6 62799.64 83.96088"
evt = self.parser.parse_line(timestamp, line)
self.assertIsInstance(evt, AIAircraftShotdownByParatrooperEvent)
self.assertEqual(evt.data.timestamp, timestamp)
self.assertEqual(evt.data.target.id, "r0100")
self.assertEqual(evt.data.target.flight_index, 1)
def test_human_aircraft_shotdown(self):
timestamp = datetime.datetime(2020, 12, 31, 15, 46, 8)
line = "TheUser:TB-7_M40F shot down by at 145663.6 62799.64 83.96088"
evt = self.parser.parse_line(timestamp, line)
self.assertIsInstance(evt, HumanAircraftShotdownEvent)
self.assertEqual(evt.data.timestamp, timestamp)
self.assertEqual(evt.data.target.callsign, "TheUser")
self.assertEqual(evt.data.target.aircraft, "TB-7_M40F")
def test_human_aircraft_shotdown_stripped_callsign_spaces(self):
timestamp = datetime.datetime(2020, 12, 31, 15, 46, 8)
line = " The User :TB-7_M40F shot down by at 145663.6 62799.64 83.96088"
evt = self.parser.parse_line(timestamp, line)
self.assertIsInstance(evt, HumanAircraftShotdownEvent)
self.assertEqual(evt.data.target.callsign, "TheUser")
def test_human_aircraft_shotdown_empty_callsign(self):
timestamp = datetime.datetime(2020, 12, 31, 15, 46, 8)
line = " :TB-7_M40F shot down by at 145663.6 62799.64 83.96088"
evt = self.parser.parse_line(timestamp, line)
self.assertIsInstance(evt, HumanAircraftShotdownEvent)
self.assertEqual(evt.data.target.callsign, "")
line = ":TB-7_M40F shot down by at 145663.6 62799.64 83.96088"
evt = self.parser.parse_line(timestamp, line)
self.assertIsInstance(evt, HumanAircraftShotdownEvent)
self.assertEqual(evt.data.target.callsign, "")
def test_human_aircraft_shotdown_self(self):
timestamp = datetime.datetime(2020, 12, 31, 15, 46, 8)
line = "TheUser:TB-7_M40F shot down by landscape at 145663.6 62799.64 83.96088"
evt = self.parser.parse_line(timestamp, line)
self.assertIsInstance(evt, HumanAircraftShotdownSelfEvent)
self.assertEqual(evt.data.timestamp, timestamp)
self.assertEqual(evt.data.target.callsign, "TheUser")
self.assertEqual(evt.data.target.aircraft, "TB-7_M40F")
line = "TheUser:TB-7_M40F shot down by NONAME at 145663.6 62799.64 83.96088"
evt = self.parser.parse_line(timestamp, line)
self.assertIsInstance(evt, HumanAircraftShotdownSelfEvent)
self.assertEqual(evt.data.timestamp, timestamp)
self.assertEqual(evt.data.target.callsign, "TheUser")
self.assertEqual(evt.data.target.aircraft, "TB-7_M40F")
def test_human_aircraft_shotdown_by_ai_aircraft(self):
timestamp = datetime.datetime(2020, 12, 31, 15, 46, 8)
line = "TheUser:TB-7_M40F shot down by g01002 at 145663.6 62799.64 83.96088"
evt = self.parser.parse_line(timestamp, line)
self.assertIsInstance(evt, HumanAircraftShotdownByAIAircraftEvent)
self.assertEqual(evt.data.timestamp, timestamp)
self.assertEqual(evt.data.target.callsign, "TheUser")
self.assertEqual(evt.data.target.aircraft, "TB-7_M40F")
self.assertEqual(evt.data.attacker.id, "g0100")
self.assertEqual(evt.data.attacker.flight_index, 2)
def test_human_aircraft_shotdown_by_bridge(self):
timestamp = datetime.datetime(2020, 12, 31, 15, 46, 8)
line = "TheUser:TB-7_M40F shot down by Bridge159 at 145663.6 62799.64 83.96088"
evt = self.parser.parse_line(timestamp, line)
self.assertIsInstance(evt, HumanAircraftShotdownByBridgeEvent)
self.assertEqual(evt.data.timestamp, timestamp)
self.assertEqual(evt.data.target.callsign, "TheUser")
self.assertEqual(evt.data.target.aircraft, "TB-7_M40F")
self.assertEqual(evt.data.attacker.id, "Bridge159")
def test_human_aircraft_shotdown_by_building(self):
timestamp = datetime.datetime(2020, 12, 31, 15, 46, 8)
line = "TheUser:TB-7_M40F shot down by 194_bld at 145663.6 62799.64 83.96088"
evt = self.parser.parse_line(timestamp, line)
self.assertIsInstance(evt, HumanAircraftShotdownByBuildingEvent)
self.assertEqual(evt.data.timestamp, timestamp)
self.assertEqual(evt.data.target.callsign, "TheUser")
self.assertEqual(evt.data.target.aircraft, "TB-7_M40F")
self.assertEqual(evt.data.attacker.id, "194_bld")
def test_human_aircraft_shotdown_by_human_aircraft(self):
timestamp = datetime.datetime(2020, 12, 31, 15, 46, 8)
line = "TheUser:TB-7_M40F shot down by TheUser2:TB-7_M40F at 145663.6 62799.64 83.96088"
evt = self.parser.parse_line(timestamp, line)
self.assertIsInstance(evt, HumanAircraftShotdownByHumanAircraftEvent)
self.assertEqual(evt.data.timestamp, timestamp)
self.assertEqual(evt.data.target.callsign, "TheUser")
self.assertEqual(evt.data.target.aircraft, "TB-7_M40F")
self.assertEqual(evt.data.attacker.callsign, "TheUser2")
self.assertEqual(evt.data.attacker.aircraft, "TB-7_M40F")
def test_human_aircraft_shotdown_by_human_aircraft_stripped_callsign_spaces(self):
timestamp = datetime.datetime(2020, 12, 31, 15, 46, 8)
line = "TheUser:TB-7_M40F shot down by The User2 :TB-7_M40F at 145663.6 62799.64 83.96088"
evt = self.parser.parse_line(timestamp, line)
self.assertIsInstance(evt, HumanAircraftShotdownByHumanAircraftEvent)
self.assertEqual(evt.data.attacker.callsign, "TheUser2")
def test_human_aircraft_shotdown_by_human_aircraft_empty_callsign(self):
timestamp = datetime.datetime(2020, 12, 31, 15, 46, 8)
line = "TheUser:TB-7_M40F shot down by :TB-7_M40F at 145663.6 62799.64 83.96088"
evt = self.parser.parse_line(timestamp, line)
self.assertIsInstance(evt, HumanAircraftShotdownByHumanAircraftEvent)
self.assertEqual(evt.data.attacker.callsign, "")
line = "TheUser:TB-7_M40F shot down by :TB-7_M40F at 145663.6 62799.64 83.96088"
evt = self.parser.parse_line(timestamp, line)
self.assertIsInstance(evt, HumanAircraftShotdownByHumanAircraftEvent)
self.assertEqual(evt.data.attacker.callsign, "")
def test_human_aircraft_shotdown_by_moving_unit(self):
timestamp = datetime.datetime(2020, 12, 31, 15, 46, 8)
line = "TheUser:TB-7_M40F shot down by 0_Chief at 145663.6 62799.64 83.96088"
evt = self.parser.parse_line(timestamp, line)
self.assertIsInstance(evt, HumanAircraftShotdownByMovingUnitEvent)
self.assertEqual(evt.data.timestamp, timestamp)
self.assertEqual(evt.data.target.callsign, "TheUser")
self.assertEqual(evt.data.target.aircraft, "TB-7_M40F")
self.assertEqual(evt.data.attacker.id, "0_Chief")
def test_human_aircraft_shotdown_by_moving_unit_member(self):
timestamp = datetime.datetime(2020, 12, 31, 15, 46, 8)
line = "TheUser:TB-7_M40F shot down by 0_Chief0 at 145663.6 62799.64 83.96088"
evt = self.parser.parse_line(timestamp, line)
self.assertIsInstance(evt, HumanAircraftShotdownByMovingUnitMemberEvent)
self.assertEqual(evt.data.timestamp, timestamp)
self.assertEqual(evt.data.target.callsign, "TheUser")
self.assertEqual(evt.data.target.aircraft, "TB-7_M40F")
self.assertEqual(evt.data.attacker.id, "0_Chief")
self.assertEqual(evt.data.attacker.member_index, 0)
def test_human_aircraft_shotdown_by_object(self):
timestamp = datetime.datetime(2020, 12, 31, 15, 46, 8)
line = "TheUser:TB-7_M40F shot down by 3do/Buildings/Airdrome/BarrelBlock1/mono.sim at 145663.6 62799.64 83.96088"
evt = self.parser.parse_line(timestamp, line)
self.assertIsInstance(evt, HumanAircraftShotdownByObjectEvent)
self.assertEqual(evt.data.timestamp, timestamp)
self.assertEqual(evt.data.target.callsign, "TheUser")
self.assertEqual(evt.data.target.aircraft, "TB-7_M40F")
self.assertEqual(evt.data.attacker.id, "3do/Buildings/Airdrome/BarrelBlock1/mono.sim")
def test_human_aircraft_shotdown_by_stationary_unit(self):
timestamp = datetime.datetime(2020, 12, 31, 15, 46, 8)
line = "TheUser:TB-7_M40F shot down by 0_Static at 145663.6 62799.64 83.96088"
evt = self.parser.parse_line(timestamp, line)
self.assertIsInstance(evt, HumanAircraftShotdownByStationaryUnitEvent)
self.assertEqual(evt.data.timestamp, timestamp)
self.assertEqual(evt.data.target.callsign, "TheUser")
self.assertEqual(evt.data.target.aircraft, "TB-7_M40F")
self.assertEqual(evt.data.attacker.id, "0_Static")
def test_human_aircraft_shotdown_by_tree(self):
timestamp = datetime.datetime(2020, 12, 31, 15, 46, 8)
line = "TheUser:TB-7_M40F shot down by 3do/Tree/Line/live.sim at 145663.6 62799.64 83.96088"
evt = self.parser.parse_line(timestamp, line)
self.assertIsInstance(evt, HumanAircraftShotdownByTreeEvent)
self.assertEqual(evt.data.timestamp, timestamp)
self.assertEqual(evt.data.target.callsign, "TheUser")
self.assertEqual(evt.data.target.aircraft, "TB-7_M40F")
def test_human_aircraft_shotdown_by_paratrooper(self):
timestamp = datetime.datetime(2020, 12, 31, 15, 46, 8)
line = "TheUser:TB-7_M40F shot down by _para_1 at 145663.6 62799.64 83.96088"
evt = self.parser.parse_line(timestamp, line)
self.assertIsInstance(evt, HumanAircraftShotdownByParatrooperEvent)
self.assertEqual(evt.data.timestamp, timestamp)
self.assertEqual(evt.data.target.callsign, "TheUser")
self.assertEqual(evt.data.target.aircraft, "TB-7_M40F")
def test_ai_aircraft_shotdown_by_ai_aircraft_and_ai_aircraft(self):
timestamp = datetime.datetime(2020, 12, 31, 15, 46, 8)
line = "r01001 shot down by g01002 and g01003 at 145663.6 62799.64 83.96088"
evt = self.parser.parse_line(timestamp, line)
self.assertIsInstance(evt, AIAircraftShotdownByAIAircraftAndAIAircraftEvent)
self.assertEqual(evt.data.timestamp, timestamp)
self.assertEqual(evt.data.target.id, "r0100")
self.assertEqual(evt.data.target.flight_index, 1)
self.assertEqual(evt.data.attacker.id, "g0100")
# File: samples/sample_architectures.py (gh_stars: 0)
# -*- coding: utf-8 -*-
"""
© <NAME>, <NAME>, 2017
Some example architectures to be used with corresponding config files in folder configs/examples.
Overview:
---------
ArchitectureDense: Simple dense layer network (use with main_lstm.py)
ArchitectureLSTM: Most simple usage example of LSTM (use with main_lstm.py)
ArchitectureLSTM... : More advanced (optimized/flexible) usage examples of LSTM (use with main_lstm.py)
ArchitectureConvLSTM: Example for ConvLSTM and plotting (use with main_convlstm.py)
AutoencoderDynamicLenght: Dynamic-length LSTM autoencoder. <EMAIL>
"""
import tensorflow as tf
import sys
import time
from collections import OrderedDict
import numpy as np
from TeLL.config import Config
from TeLL.initializations import constant, weight_xavier_conv2d
from TeLL.utility.misc_tensorflow import layers_from_specs, tensor_shape_with_flexible_dim
from TeLL.layers import ConcatLayer, ConvLSTMLayer, ConvLayer, DenseLayer, LSTMLayer, RNNInputLayer, MaxPoolingLayer, \
ScalingLayer, LSTMLayerGetNetInput
from TeLL.utility.misc import get_rec_attr
from TeLL.regularization import regularize
class ArchitectureDense(object):
def __init__(self, config: Config, dataset):
"""Simple network with dense layer and dense output layer;
Command-line usage:
>>> python3 samples/main_lstm.py --config=samples/config_dense.json
Example input shapes: [n_samples, n_features]
Example output shapes: [n_samples, n_features]
"""
#
# Some convenience objects
#
# We will use a list to store all layers for regularization etc. (this is optional)
layers = []
# Prepare xavier initialization for weights
w_init = tf.contrib.layers.xavier_initializer(uniform=False, seed=None, dtype=tf.float32)
#
# Create placeholders for input data (shape: [n_samples, n_features])
#
input_shapes = dataset.get_input_shapes()
X = tf.placeholder(tf.float32, shape=input_shapes['X'].shape)
y_ = tf.placeholder(tf.float32, shape=input_shapes['y'].shape)
n_output_units = dataset.datareader.get_num_classes() # nr of output features is number of classes
# ----------------------------------------------------------------------------------------------------------
# Define network architecture
# ----------------------------------------------------------------------------------------------------------
#
# Dense Layer
# Input for the dense layer shall be X (TeLL layers take tensors or TeLL Layer instances as input)
#
print("\tDense layer...")
dense_layer = DenseLayer(incoming=X, n_units=config.n_dense, name='DenseLayer', W=w_init, b=tf.zeros,
a=tf.nn.elu)
layers.append(dense_layer)
#
# Output Layer
#
print("\tOutput layer...")
output_layer = DenseLayer(incoming=dense_layer, n_units=n_output_units, name='DenseLayerOut', W=w_init,
b=tf.zeros, a=tf.sigmoid)
layers.append(output_layer)
#
# Calculate output
# This will calculate the output of output_layer, including all dependencies
#
output = output_layer.get_output()
print("\tDone!")
#
# Publish
#
self.X = X
self.y_ = y_
self.output = output
# Store layers in list for regularization in main file
self.__layers = layers
def get_layers(self):
return self.__layers
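# Hypothetical wiring sketch (added for illustration; it is not part of the original TeLL samples).
# It shows how the tensors published by ArchitectureDense (self.X, self.y_, self.output) would
# typically be turned into a TF1-style loss and training step by a main script. The MSE loss and
# Adam optimizer are arbitrary choices, and `config`, `dataset`, `x_batch`, `y_batch` are assumed
# to be provided by the caller.
def _example_dense_training_step(config, dataset, x_batch, y_batch):
    net = ArchitectureDense(config, dataset)
    # Simple mean-squared-error loss between network output and targets
    loss = tf.reduce_mean(tf.squared_difference(net.output, net.y_))
    train_op = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(loss)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # One optimization step on a single minibatch fed through the placeholders
        _, loss_value = sess.run([train_op, loss], feed_dict={net.X: x_batch, net.y_: y_batch})
    return loss_value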
class ArchitectureLSTM(object):
def __init__(self, config: Config, dataset):
"""Simple network with LSTM layer and dense output layer; All sequence positions are fed to the LSTM layer at
once, this is the most convenient but least flexible design; see ArchitectureLSTM_optimized for a faster
version;
Command-line usage:
>>> python3 samples/main_lstm.py --config=samples/config_lstm.json
Example input shapes: [n_samples, n_sequence_positions, n_features]
Example output shapes: [n_samples, n_sequence_positions, n_features] (with return_states=True),
[n_samples, 1, n_features] (with return_states=False)
"""
#
# Some convenience objects
#
# We will use a list to store all layers for regularization etc. (this is optional)
layers = []
# Prepare xavier initialization for weights
w_init = tf.contrib.layers.xavier_initializer(uniform=False, seed=None, dtype=tf.float32)
#
# Create placeholders for input data (shape: [n_samples, n_sequence_positions, n_features])
#
input_shapes = dataset.get_input_shapes()
X = tf.placeholder(tf.float32, shape=input_shapes['X'].shape)
y_ = tf.placeholder(tf.float32, shape=input_shapes['y'].shape)
n_output_units = dataset.datareader.get_num_classes() # nr of output features is number of classes
# ----------------------------------------------------------------------------------------------------------
# Define network architecture
# ----------------------------------------------------------------------------------------------------------
#
# LSTM Layer
        # We want to create an output sequence with the LSTM instead of only returning the output at the last sequence
# position -> return_states=True
#
print("\tLSTM...")
lstm_layer = LSTMLayer(incoming=X, n_units=config.n_lstm, name='LSTM',
W_ci=w_init, W_ig=w_init, W_og=w_init, W_fg=w_init,
b_ci=tf.zeros, b_ig=tf.zeros, b_og=tf.zeros, b_fg=tf.zeros,
a_ci=tf.tanh, a_ig=tf.sigmoid, a_og=tf.sigmoid, a_fg=tf.sigmoid, a_out=tf.nn.elu,
c_init=tf.zeros, h_init=tf.zeros, forgetgate=True, precomp_fwds=True, return_states=True)
layers.append(lstm_layer)
#
# Output Layer
#
print("\tOutput layer...")
output_layer = DenseLayer(incoming=lstm_layer, n_units=n_output_units, name='DenseLayerOut',
W=w_init, b=tf.zeros, a=tf.sigmoid)
layers.append(output_layer)
#
# Calculate output
#
output = output_layer.get_output(tickersteps=config.tickersteps)
print("\tDone!")
#
# Publish
#
self.X = X
self.y_ = y_
self.output = output
# Store layers in list for regularization in main file
self.__layers = layers
def get_layers(self):
return self.__layers
class ArchitectureLSTMFlexible(object):
def __init__(self, config: Config, dataset):
"""Architecture with LSTM layer followed by dense output layer; Inputs are fed to LSTM layer sequence position
        by sequence position in a for-loop; this is the most flexible design, as shown e.g. in ArchitectureLSTM3;
Command-line usage:
Change entry
"architecture": "sample_architectures.ArchitectureLSTM"
to
"architecture": "sample_architectures.ArchitectureLSTMFlexible" in samples/config_lstm.json. Then run
>>> python3 samples/main_lstm.py --config=samples/config_lstm.json
Example input shapes: [n_samples, n_sequence_positions, n_features]
Example output shapes: [n_samples, n_sequence_positions, n_features] (with return_states=True),
[n_samples, 1, n_features] (with return_states=False)
"""
#
# Some convenience objects
#
# We will use a list to store all layers for regularization etc. (this is optional)
layers = []
# Prepare xavier initialization for weights
w_init = tf.contrib.layers.xavier_initializer(uniform=False, seed=None, dtype=tf.float32)
#
# Create placeholders for input data (shape: [n_samples, n_sequence_positions, n_features])
#
X = tf.placeholder(tf.float32, shape=dataset.X_shape)
y_ = tf.placeholder(tf.float32, shape=dataset.y_shape)
n_output_units = dataset.y_shape[-1] # nr of output features is number of classes
n_seq_pos = dataset.X_shape[1] # dataset.X_shape is [sample, seq_pos, features]
# ----------------------------------------------------------------------------------------------------------
# Define network architecture
# ----------------------------------------------------------------------------------------------------------
#
# Input Layer
        # RNNInputLayer will hold the input to the network at each sequence position. We will initialize it with a zeros-
# tensor of shape [sample, 1, features]
#
input_shape = dataset.X_shape[:1] + (1,) + dataset.X_shape[2:]
rnn_input_layer = RNNInputLayer(tf.zeros(input_shape, dtype=tf.float32))
layers.append(rnn_input_layer)
#
# LSTM Layer
#
print("\tLSTM...")
lstm_layer = LSTMLayer(incoming=rnn_input_layer, n_units=config.n_lstm, name='LSTM',
W_ci=w_init, W_ig=w_init, W_og=w_init, W_fg=w_init,
b_ci=tf.zeros, b_ig=tf.zeros, b_og=tf.zeros, b_fg=tf.zeros,
a_ci=tf.tanh, a_ig=tf.sigmoid, a_og=tf.sigmoid, a_fg=tf.sigmoid, a_out=tf.nn.elu,
c_init=tf.zeros, h_init=tf.zeros, forgetgate=True, precomp_fwds=True, return_states=True)
layers.append(lstm_layer)
#
# Output Layer
#
print("\tOutput layer...")
output_layer = DenseLayer(incoming=lstm_layer, n_units=n_output_units, name='DenseLayerOut',
W=w_init, b=tf.zeros, a=tf.sigmoid)
layers.append(output_layer)
# ----------------------------------------------------------------------------------------------------------
# Loop through sequence positions and create graph
# ----------------------------------------------------------------------------------------------------------
#
# Loop through sequence positions
#
print("\tRNN Loop...")
for seq_pos in range(n_seq_pos):
with tf.name_scope("Sequence_pos_{}".format(seq_pos)):
print("\t seq. pos. {}...".format(seq_pos))
# Set rnn input layer to input at current sequence position
rnn_input_layer.update(X[:, seq_pos:seq_pos + 1, :])
# Calculate new network state at new frame (this updates the network's hidden activations, cell states,
# and dependencies automatically)
_ = lstm_layer.get_output()
#
# Loop through tickersteps
#
# Use zero input during ticker steps
tickerstep_input = tf.zeros(dataset.X_shape[:1] + (1,) + dataset.X_shape[2:], dtype=tf.float32,
name="tickerstep_input")
for tickerstep in range(config.tickersteps):
with tf.name_scope("Tickerstep_{}".format(tickerstep)):
print("\t tickerstep {}...".format(tickerstep))
# Set rnn input layer to tickerstep input
rnn_input_layer.update(tickerstep_input)
# Calculate new network state at new frame (this updates the network's hidden activations, cell states,
# and dependencies automatically)
_ = lstm_layer.get_output(tickerstep_nodes=True)
#
# Calculate output but consider that the lstm_layer is already computed (i.e. do not modify cell states any
# further)
#
output = output_layer.get_output(prev_layers=[lstm_layer])
print("\tDone!")
#
# Publish
#
self.X = X
self.y_ = y_
self.output = output
# Store layers in list for regularization in main file
self.__layers = layers
def get_layers(self):
return self.__layers
# TODO: Implement continuous prediction
class ArchitectureLSTMOptimized(object):
def __init__(self, config: Config, dataset):
"""Architecture with LSTM layer followed by dense output layer; Inputs are fed to LSTM layer sequence position
by sequence position in tensorflow tf.while_loop();
        This is not as flexible as using a for-loop and is more difficult to use, but it can be faster and optimized
differently; LSTM return_states is not possible here unless manually implemented into the tf.while_loop (that is
why we are only using the prediction at the last sequence position in this example);
This is an advanced example, see ArchitectureLSTM to get started;
Command-line usage:
Change entry
"architecture": "sample_architectures.ArchitectureLSTM"
to
"architecture": "sample_architectures.ArchitectureLSTMOptimized" in samples/config_lstm.json. Then run
>>> python3 samples/main_lstm.py --config=samples/config_lstm.json
Example input shapes: [n_samples, n_sequence_positions, n_features]
Example output shapes: [n_samples, 1, n_features]
"""
#
# Some convenience objects
#
# We will use a list to store all layers for regularization etc. (this is optional)
layers = []
# Prepare xavier initialization for weights
w_init = tf.contrib.layers.xavier_initializer(uniform=False, seed=None, dtype=tf.float32)
#
# Create placeholders for input data (shape: [n_samples, n_sequence_positions, n_features])
#
X = tf.placeholder(tf.float32, shape=dataset.X_shape)
y_ = tf.placeholder(tf.float32, shape=dataset.y_shape)
n_output_units = dataset.y_shape[-1] # nr of output features is number of classes
n_seq_pos = dataset.X_shape[1] # dataset.X_shape is [sample, seq_pos, features]
# ----------------------------------------------------------------------------------------------------------
# Define network architecture
# ----------------------------------------------------------------------------------------------------------
#
# Input Layer
        # RNNInputLayer will hold the input to the network at each sequence position. We will initialize it with a zeros-
# tensor of shape [sample, 1, features]
#
input_shape = dataset.X_shape[:1] + (1,) + dataset.X_shape[2:]
rnn_input_layer = RNNInputLayer(tf.zeros(input_shape, dtype=tf.float32))
layers.append(rnn_input_layer)
#
# LSTM Layer
#
print("\tLSTM...")
lstm_layer = LSTMLayer(incoming=rnn_input_layer, n_units=config.n_lstm, name='LSTM',
W_ci=w_init, W_ig=w_init, W_og=w_init, W_fg=w_init,
b_ci=tf.zeros, b_ig=tf.zeros, b_og=tf.zeros, b_fg=tf.zeros,
a_ci=tf.tanh, a_ig=tf.sigmoid, a_og=tf.sigmoid, a_fg=tf.sigmoid, a_out=tf.nn.elu,
"""
check_is_fitted(self)
if not all([hasattr(estimator, "predict_proba")
for estimator in self.estimators_]):
raise AttributeError("The base estimator should "
"implement predict_proba method")
return self._predict_proba
def _predict_proba(self, X):
results = [estimator.predict_proba(X) for estimator in
self.estimators_]
return results
def score(self, X, y):
"""Returns the mean accuracy on the given test data and labels.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Test samples
y : array-like, shape [n_samples, n_outputs]
True values for X
Returns
-------
scores : float
accuracy_score of self.predict(X) versus y
"""
check_is_fitted(self)
n_outputs_ = len(self.estimators_)
if y.ndim == 1:
raise ValueError("y must have at least two dimensions for "
"multi target classification but has only one")
if y.shape[1] != n_outputs_:
raise ValueError("The number of outputs of Y for fit {0} and"
" score {1} should be same".
format(n_outputs_, y.shape[1]))
y_pred = self.predict(X)
return np.mean(np.all(y == y_pred, axis=1))
def _more_tags(self):
# FIXME
return {'_skip_test': True}
class _BaseChain(BaseEstimator, metaclass=ABCMeta):
@_deprecate_positional_args
def __init__(self, base_estimator, *, order=None, cv=None,
random_state=None):
self.base_estimator = base_estimator
self.order = order
self.cv = cv
self.random_state = random_state
@abstractmethod
def fit(self, X, Y, **fit_params):
"""Fit the model to data matrix X and targets Y.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
Y : array-like, shape (n_samples, n_classes)
The target values.
**fit_params : dict of string -> object
Parameters passed to the `fit` method of each step.
Returns
-------
self : object
"""
X, Y = self._validate_data(X, Y, multi_output=True, accept_sparse=True)
random_state = check_random_state(self.random_state)
check_array(X, accept_sparse=True)
self.order_ = self.order
if self.order_ is None:
self.order_ = np.array(range(Y.shape[1]))
elif isinstance(self.order_, str):
if self.order_ == 'random':
self.order_ = random_state.permutation(Y.shape[1])
elif sorted(self.order_) != list(range(Y.shape[1])):
raise ValueError("invalid order")
self.estimators_ = [clone(self.base_estimator)
for _ in range(Y.shape[1])]
if self.cv is None:
Y_pred_chain = Y[:, self.order_]
if sp.issparse(X):
X_aug = sp.hstack((X, Y_pred_chain), format='lil')
X_aug = X_aug.tocsr()
else:
X_aug = np.hstack((X, Y_pred_chain))
elif sp.issparse(X):
Y_pred_chain = sp.lil_matrix((X.shape[0], Y.shape[1]))
X_aug = sp.hstack((X, Y_pred_chain), format='lil')
else:
Y_pred_chain = np.zeros((X.shape[0], Y.shape[1]))
X_aug = np.hstack((X, Y_pred_chain))
del Y_pred_chain
for chain_idx, estimator in enumerate(self.estimators_):
y = Y[:, self.order_[chain_idx]]
estimator.fit(X_aug[:, :(X.shape[1] + chain_idx)], y,
**fit_params)
if self.cv is not None and chain_idx < len(self.estimators_) - 1:
col_idx = X.shape[1] + chain_idx
cv_result = cross_val_predict(
self.base_estimator, X_aug[:, :col_idx],
y=y, cv=self.cv)
if sp.issparse(X_aug):
X_aug[:, col_idx] = np.expand_dims(cv_result, 1)
else:
X_aug[:, col_idx] = cv_result
return self
def predict(self, X):
"""Predict on the data matrix X using the ClassifierChain model.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
Returns
-------
Y_pred : array-like, shape (n_samples, n_classes)
The predicted values.
"""
check_is_fitted(self)
X = check_array(X, accept_sparse=True)
Y_pred_chain = np.zeros((X.shape[0], len(self.estimators_)))
for chain_idx, estimator in enumerate(self.estimators_):
previous_predictions = Y_pred_chain[:, :chain_idx]
if sp.issparse(X):
if chain_idx == 0:
X_aug = X
else:
X_aug = sp.hstack((X, previous_predictions))
else:
X_aug = np.hstack((X, previous_predictions))
Y_pred_chain[:, chain_idx] = estimator.predict(X_aug)
inv_order = np.empty_like(self.order_)
inv_order[self.order_] = np.arange(len(self.order_))
Y_pred = Y_pred_chain[:, inv_order]
return Y_pred
class ClassifierChain(MetaEstimatorMixin, ClassifierMixin, _BaseChain):
"""A multi-label model that arranges binary classifiers into a chain.
Each model makes a prediction in the order specified by the chain using
all of the available features provided to the model plus the predictions
of models that are earlier in the chain.
Read more in the :ref:`User Guide <classifierchain>`.
.. versionadded:: 0.19
Parameters
----------
base_estimator : estimator
The base estimator from which the classifier chain is built.
order : array-like of shape (n_outputs,) or 'random', optional
By default the order will be determined by the order of columns in
the label matrix Y.::
order = [0, 1, 2, ..., Y.shape[1] - 1]
The order of the chain can be explicitly set by providing a list of
integers. For example, for a chain of length 5.::
order = [1, 3, 2, 4, 0]
means that the first model in the chain will make predictions for
column 1 in the Y matrix, the second model will make predictions
for column 3, etc.
If order is 'random' a random ordering will be used.
cv : int, cross-validation generator or an iterable, optional \
(default=None)
Determines whether to use cross validated predictions or true
labels for the results of previous estimators in the chain.
If cv is None the true labels are used when fitting. Otherwise
possible inputs for cv are:
- integer, to specify the number of folds in a (Stratified)KFold,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
random_state : int, RandomState instance or None, optional (default=None)
If ``order='random'``, determines random number generation for the
chain order.
In addition, it controls the random seed given at each `base_estimator`
at each chaining iteration. Thus, it is only used when `base_estimator`
exposes a `random_state`.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
classes_ : list
A list of arrays of length ``len(estimators_)`` containing the
class labels for each estimator in the chain.
estimators_ : list
A list of clones of base_estimator.
order_ : list
The order of labels in the classifier chain.
See also
--------
RegressorChain: Equivalent for regression
    MultiOutputClassifier: Classifies each output independently rather than
chaining.
References
----------
<NAME>, <NAME>, <NAME>, <NAME>, "Classifier
Chains for Multi-label Classification", 2009.
"""
def fit(self, X, Y):
"""Fit the model to data matrix X and targets Y.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
Y : array-like, shape (n_samples, n_classes)
The target values.
Returns
-------
self : object
"""
super().fit(X, Y)
self.classes_ = [estimator.classes_
for chain_idx, estimator
in enumerate(self.estimators_)]
return self
@if_delegate_has_method('base_estimator')
def predict_proba(self, X):
"""Predict probability estimates.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
Y_prob : array-like, shape (n_samples, n_classes)
"""
X = check_array(X, accept_sparse=True)
Y_prob_chain = np.zeros((X.shape[0], len(self.estimators_)))
Y_pred_chain = np.zeros((X.shape[0], len(self.estimators_)))
for chain_idx, estimator in enumerate(self.estimators_):
previous_predictions = Y_pred_chain[:, :chain_idx]
if sp.issparse(X):
X_aug = sp.hstack((X, previous_predictions))
else:
X_aug = np.hstack((X, previous_predictions))
Y_prob_chain[:, chain_idx] = estimator.predict_proba(X_aug)[:, 1]
Y_pred_chain[:, chain_idx] = estimator.predict(X_aug)
inv_order = np.empty_like(self.order_)
inv_order[self.order_] = np.arange(len(self.order_))
Y_prob = Y_prob_chain[:, inv_order]
return Y_prob
@if_delegate_has_method('base_estimator')
def decision_function(self, X):
"""Evaluate the decision_function of the models in the chain.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
Y_decision : array-like, shape (n_samples, n_classes )
Returns the decision function of the sample for each model
in the chain.
"""
Y_decision_chain = np.zeros((X.shape[0], len(self.estimators_)))
Y_pred_chain = np.zeros((X.shape[0], len(self.estimators_)))
for chain_idx, estimator in enumerate(self.estimators_):
previous_predictions = Y_pred_chain[:, :chain_idx]
if sp.issparse(X):
X_aug = sp.hstack((X, previous_predictions))
else:
X_aug = np.hstack((X, previous_predictions))
Y_decision_chain[:, chain_idx] = estimator.decision_function(X_aug)
Y_pred_chain[:, chain_idx] = estimator.predict(X_aug)
inv_order = np.empty_like(self.order_)
inv_order[self.order_] = np.arange(len(self.order_))
Y_decision = Y_decision_chain[:, inv_order]
return Y_decision
def _more_tags(self):
return {'_skip_test': True,
'multioutput_only': True}
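# The sketch below (added for illustration; it is not part of the original scikit-learn module) shows
# how the chaining described in the ClassifierChain docstring is typically exercised: each link is a
# clone of the base estimator, trained on the input features augmented with the labels of the earlier
# links (true or cross-validated, depending on `cv`), while chained predictions are used at predict
# time. The synthetic dataset and the LogisticRegression base estimator are arbitrary choices.
def _example_classifier_chain_usage():
    from sklearn.datasets import make_multilabel_classification
    from sklearn.linear_model import LogisticRegression

    X, Y = make_multilabel_classification(n_samples=100, n_features=20,
                                          n_classes=3, random_state=0)
    chain = ClassifierChain(LogisticRegression(), order='random', random_state=0)
    chain.fit(X, Y)
    # Predictions come back in the original column order of Y, regardless of the chain order
    return chain.predict(X)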
class RegressorChain(MetaEstimatorMixin, RegressorMixin, _BaseChain):
"""A multi-label model that arranges regressions into a chain.
Each model makes a prediction in the order specified by the chain using
all of the available features provided to the model plus the predictions
of models that are earlier in the chain.
Read more in the :ref:`User Guide <regressorchain>`.
.. versionadded:: 0.20
Parameters
----------
base_estimator : estimator
        The base estimator from which the regressor chain is built.
order : array-like of shape (n_outputs,) or 'random', optional
By default the order will be determined by the order of columns in
the label matrix Y.::
order = [0, 1, 2, ..., Y.shape[1] - 1]
The order of the chain can be explicitly set by providing a list of
integers. For example, for a chain of length 5.::
order = [1, 3, 2, 4, 0]
means that the first model in the chain will make predictions for
column 1 in the Y matrix, the second model will make predictions
for column 3, etc.
If order is 'random' a random ordering will be used.
cv : int, cross-validation generator or an iterable, optional \
(default=None)
Determines whether to use cross validated predictions or true
labels for the results of previous estimators in the chain.
If cv is None the true labels are used when fitting. Otherwise
        possible
(7.64882606875107 + log(0.0001 + m.x1025))*m.x1025 + (3.17102601538367 + log(0.0001 +
m.x1026))*m.x1026 + (5.18893371623782 + log(0.0001 + m.x1027))*m.x1027 + (3.92481308582461 + log(
0.0001 + m.x1028))*m.x1028 + (3.5163958265858 + log(0.0001 + m.x1029))*m.x1029 + (
3.58276300677207 + log(0.0001 + m.x1030))*m.x1030 + (3.15407431711919 + log(0.0001 + m.x1031))*
m.x1031 + (2.97034339815666 + log(0.0001 + m.x1032))*m.x1032 + (2.69471698884051 + log(0.0001 +
m.x1033))*m.x1033 + (2.93971440613975 + log(0.0001 + m.x1034))*m.x1034 + (2.90557777549424 + log(
0.0001 + m.x1035))*m.x1035 + (3.55945580392719 + log(0.0001 + m.x1036))*m.x1036 + (
3.14920632848144 + log(0.0001 + m.x1037))*m.x1037 + (2.6616600145979 + log(0.0001 + m.x1038))*
m.x1038 + (2.45289240165669 + log(0.0001 + m.x1039))*m.x1039 + (2.65826164982588 + log(0.0001 +
m.x1040))*m.x1040 + (2.25124415484747 + log(0.0001 + m.x1041))*m.x1041 + (5.35389382653186 + log(
0.0001 + m.x1042))*m.x1042 + (7.92888159474731 + log(0.0001 + m.x1043))*m.x1043 + (
9.16491818785822 + log(0.0001 + m.x1044))*m.x1044 + (7.90822204479493 + log(0.0001 + m.x1045))*
m.x1045 + (7.19854086372297 + log(0.0001 + m.x1046))*m.x1046 + (8.31035922349514 + log(0.0001 +
m.x1047))*m.x1047 + (9.01040577554678 + log(0.0001 + m.x1048))*m.x1048 + (8.45487750887876 + log(
0.0001 + m.x1049))*m.x1049 + (7.25913335955074 + log(0.0001 + m.x1050))*m.x1050 + (
6.79892281277951 + log(0.0001 + m.x1051))*m.x1051 + (7.42636843726248 + log(0.0001 + m.x1052))*
m.x1052 + (8.4322993987483 + log(0.0001 + m.x1053))*m.x1053 + (4.92706011079366 + log(0.0001 +
m.x1054))*m.x1054 + (5.00777808911489 + log(0.0001 + m.x1055))*m.x1055 + (4.56621014093153 + log(
0.0001 + m.x1056))*m.x1056 + (4.05392059039682 + log(0.0001 + m.x1057))*m.x1057 + (
4.5197785774208 + log(0.0001 + m.x1058))*m.x1058 + (4.48588711297551 + log(0.0001 + m.x1059))*
m.x1059 + (5.13326919930208 + log(0.0001 + m.x1060))*m.x1060 + (4.72756967074558 + log(0.0001 +
m.x1061))*m.x1061 + (4.63424970329053 + log(0.0001 + m.x1062))*m.x1062 + (4.75582243143201 + log(
0.0001 + m.x1063))*m.x1063 + (4.62243548707035 + log(0.0001 + m.x1064))*m.x1064 + (
4.8704813069273 + log(0.0001 + m.x1065))*m.x1065 + (2.69033985677655 + log(0.0001 + m.x1066))*
m.x1066 + (7.65373178421888 + log(0.0001 + m.x1067))*m.x1067 + (6.75624995239639 + log(0.0001 +
m.x1068))*m.x1068 + (7.00232318307263 + log(0.0001 + m.x1069))*m.x1069 + (6.00233423544569 + log(
0.0001 + m.x1070))*m.x1070 + (5.63870676778868 + log(0.0001 + m.x1071))*m.x1071 + (
5.0702592298217 + log(0.0001 + m.x1072))*m.x1072 + (4.56807302522199 + log(0.0001 + m.x1073))*
m.x1073 + (4.6621731267809 + log(0.0001 + m.x1074))*m.x1074 + (4.62832891905196 + log(0.0001 +
m.x1075))*m.x1075 + (5.27446354182781 + log(0.0001 + m.x1076))*m.x1076 + (4.86963675900696 + log(
0.0001 + m.x1077))*m.x1077 + (5.11069118526845 + log(0.0001 + m.x1078))*m.x1078 + (
4.51038238820556 + log(0.0001 + m.x1079))*m.x1079 + (4.3206160326921 + log(0.0001 + m.x1080))*
m.x1080 + (4.09596447444053 + log(0.0001 + m.x1081))*m.x1081 + (1.66761857304175 + log(0.0001 +
m.x1082))*m.x1082 + (7.87173809440172 + log(0.0001 + m.x1083))*m.x1083 + (7.575345997029 + log(
0.0001 + m.x1084))*m.x1084 + (6.55528992028842 + log(0.0001 + m.x1085))*m.x1085 + (
6.5307549662613 + log(0.0001 + m.x1086))*m.x1086 + (5.52138963287981 + log(0.0001 + m.x1087))*
m.x1087 + (7.57202981581294 + log(0.0001 + m.x1088))*m.x1088 + (7.89388483538278 + log(0.0001 +
m.x1089))*m.x1089 + (7.89385895250962 + log(0.0001 + m.x1090))*m.x1090 + (7.83152289348323 + log(
0.0001 + m.x1091))*m.x1091 + (4.50365012310275 + log(0.0001 + m.x1092))*m.x1092 + (
8.83015334974703 + log(0.0001 + m.x1093))*m.x1093 + (7.56847186690093 + log(0.0001 + m.x1094))*
m.x1094 + (5.89711079869161 + log(0.0001 + m.x1095))*m.x1095 + (5.61710567407374 + log(0.0001 +
m.x1096))*m.x1096 + (5.94388361851006 + log(0.0001 + m.x1097))*m.x1097 + (5.43463574469069 + log(
0.0001 + m.x1098))*m.x1098 + (5.99602761756116 + log(0.0001 + m.x1099))*m.x1099 + (
7.76063073843599 + log(0.0001 + m.x1100))*m.x1100 + (7.05762696286422 + log(0.0001 + m.x1101))*
m.x1101 + (6.94659660513233 + log(0.0001 + m.x1102))*m.x1102 + (7.79508879793581 + log(0.0001 +
m.x1103))*m.x1103 + (6.12372206710507 + log(0.0001 + m.x1104))*m.x1104 + (5.27772489423381 + log(
0.0001 + m.x1105))*m.x1105 + (5.73708821420563 + log(0.0001 + m.x1106))*m.x1106 + (
5.2010556497806 + log(0.0001 + m.x1107))*m.x1107 + (7.21261770133521 + log(0.0001 + m.x1108))*
m.x1108 + (5.54143054029086 + log(0.0001 + m.x1109))*m.x1109 + (6.18012212470848 + log(0.0001 +
m.x1110))*m.x1110 + (3.97483992500746 + log(0.0001 + m.x1111))*m.x1111 + (5.98777705878093 + log(
0.0001 + m.x1112))*m.x1112 + (4.12829105983182 + log(0.0001 + m.x1113))*m.x1113 + (
4.2742657848744 + log(0.0001 + m.x1114))*m.x1114 + (9.08930424596903 + log(0.0001 + m.x1115))*
m.x1115 + (5.20340820830515 + log(0.0001 + m.x1116))*m.x1116 + (3.8273895688645 + log(0.0001 +
m.x1117))*m.x1117 + (3.6598952226062 + log(0.0001 + m.x1118))*m.x1118 + (6.42312453125125 + log(
0.0001 + m.x1119))*m.x1119 + (4.77079698178867 + log(0.0001 + m.x1120))*m.x1120 + (
7.72873655538131 + log(0.0001 + m.x1121))*m.x1121 + (7.65841777975541 + log(0.0001 + m.x1122))*
m.x1122 + (7.65753892201256 + log(0.0001 + m.x1123))*m.x1123 + (7.57620639285741 + log(0.0001 +
m.x1124))*m.x1124 + (7.56572619045858 + log(0.0001 + m.x1125))*m.x1125 + (7.53803911355367 + log(
0.0001 + m.x1126))*m.x1126 + (8.03322500684413 + log(0.0001 + m.x1127))*m.x1127 + (
7.73154298894836 + log(0.0001 + m.x1128))*m.x1128 + (7.5754227291517 + log(0.0001 + m.x1129))*
m.x1129 + (7.53216237513685 + log(0.0001 + m.x1130))*m.x1130 + (7.62720733828558 + log(0.0001 +
m.x1131))*m.x1131 + (7.50411912067037 + log(0.0001 + m.x1132))*m.x1132 + (1.068793165406 + log(
0.0001 + m.x1133))*m.x1133 + (5.5232770297552 + log(0.0001 + m.x1134))*m.x1134 + (4.5260554120196
+ log(0.0001 + m.x1135))*m.x1135 + (6.50619084760942 + log(0.0001 + m.x1136))*m.x1136 + (
7.2266866566933 + log(0.0001 + m.x1137))*m.x1137 + (7.20935007941764 + log(0.0001 + m.x1138))*
m.x1138 + (7.22683621804415 + log(0.0001 + m.x1139))*m.x1139 + (6.34064739848753 + log(0.0001 +
m.x1140))*m.x1140 + (8.80014305324971 + log(0.0001 + m.x1141))*m.x1141 + (6.30396490696542 + log(
0.0001 + m.x1142))*m.x1142 + (7.40884714976982 + log(0.0001 + m.x1143))*m.x1143 + (
4.20851687528548 + log(0.0001 + m.x1144))*m.x1144 + (3.79833226250462 + log(0.0001 + m.x1145))*
m.x1145 + (3.28514431765869 + log(0.0001 + m.x1146))*m.x1146 + (3.88565319467683 + log(0.0001 +
m.x1147))*m.x1147 + (4.37228392555667 + log(0.0001 + m.x1148))*m.x1148 + (6.34718176912869 + log(
0.0001 + m.x1149))*m.x1149 + (6.31998097039701 + log(0.0001 + m.x1150))*m.x1150 + (
6.15048625569217 + log(0.0001 + m.x1151))*m.x1151 + (6.34004112928999 + log(0.0001 + m.x1152))*
m.x1152 + (5.0461166064614 + log(0.0001 + m.x1153))*m.x1153 + (4.39305393341429 + log(0.0001 +
m.x1154))*m.x1154 + (2.32027046712058 + log(0.0001 + m.x1155))*m.x1155 + (1.77450719277707 + log(
0.0001 + m.x1156))*m.x1156 + (5.7149359625172 + log(0.0001 + m.x1157))*m.x1157 + (
3.82874415102474 + log(0.0001 + m.x1158))*m.x1158 + (3.24404191604419 + log(0.0001 + m.x1159))*
m.x1159 + (4.78128886643439 + log(0.0001 + m.x1160))*m.x1160 + (7.69369549327047 + log(0.0001 +
m.x1161))*m.x1161 + (3.81769409434685 + log(0.0001 + m.x1162))*m.x1162 + (5.73402733174318 + log(
0.0001 + m.x1163))*m.x1163 + (3.7100602433451 + log(0.0001 + m.x1164))*m.x1164 + (
6.32285868865852 + log(0.0001 + m.x1165))*m.x1165 + (3.92732010525007 + log(0.0001 + m.x1166))*
m.x1166 + (4.64615417418438 + log(0.0001 + m.x1167))*m.x1167 + (5.57465607395687 + log(0.0001 +
m.x1168))*m.x1168 + (3.86965950257139 + log(0.0001 + m.x1169))*m.x1169 + (5.59546456476559 + log(
0.0001 + m.x1170))*m.x1170 + (6.97239394342348 + log(0.0001 + m.x1171))*m.x1171 + (
5.45254663423327 + log(0.0001 + m.x1172))*m.x1172 + (2.9183890764784 + log(0.0001 + m.x1173))*
m.x1173 + (2.82845501940064 + log(0.0001 + m.x1174))*m.x1174 + (2.82734196068308 + log(0.0001 +
m.x1175))*m.x1175 + (2.72541867304994 + log(0.0001 + m.x1176))*m.x1176 + (2.7124340017604 + log(
0.0001 + m.x1177))*m.x1177 + (2.67828444425379 + log(0.0001 + m.x1178))*m.x1178 + (
3.33250611334696 + log(0.0001 + m.x1179))*m.x1179 + (2.92201569551532 + log(0.0001 + m.x1180))*
m.x1180 + (2.72444660871635 + log(0.0001 + m.x1181))*m.x1181 + (2.67106412862856 + log(0.0001 +
m.x1182))*m.x1182 + (2.78908478766655 + log(0.0001 + m.x1183))*m.x1183 + (2.6367410631334 + log(
0.0001 + m.x1184))*m.x1184 + (6.5414363534118 + log(0.0001 + m.x1185))*m.x1185 + (
6.18013779913182 + log(0.0001 + m.x1186))*m.x1186 + (7.11001666762411 + log(0.0001 + m.x1187))*
m.x1187 + (5.87180845727197 + log(0.0001 + m.x1188))*m.x1188 + (7.9308831508934 + log(0.0001 +
m.x1189))*m.x1189 + (8.40965772590728 + log(0.0001 + m.x1190))*m.x1190 + (7.49732980148956 + log(
0.0001 + m.x1191))*m.x1191 + (7.83938583641237 + log(0.0001 + m.x1192))*m.x1192 + (
7.02988284758386 + log(0.0001 + m.x1193))*m.x1193 + (9.05851700022301 + log(0.0001 + m.x1194))*
m.x1194 + (4.44824867621695 + log(0.0001 + m.x1195))*m.x1195 + (4.33354457990407 + log(0.0001 +
m.x1196))*m.x1196 + (5.12528613369426 + log(0.0001 + m.x1197))*m.x1197 + (8.61483887123932 + log(
0.0001 + m.x1198))*m.x1198 + (4.15231725741119 + log(0.0001 + m.x1199))*m.x1199 + (
4.98260231849147 + log(0.0001 + m.x1200))*m.x1200 + (3.67178983267861 + log(0.0001 + m.x1201))*
m.x1201 + (3.58203511412627 + log(0.0001 + m.x1202))*m.x1202 + (3.58092417567267 + log(0.0001 +
m.x1203))*m.x1203 + (3.47918538944954 + log(0.0001 + m.x1204))*m.x1204 + (3.46622290345689 + log(
0.0001 + m.x1205))*m.x1205 + (3.43213033869112 + log(0.0001 + m.x1206))*m.x1206 + (
4.08483779695136 + log(0.0001 + m.x1207))*m.x1207 + (3.67540887674253 + log(0.0001 + m.x1208))*
m.x1208 + (3.47821499593843 + log(0.0001 + m.x1209))*m.x1209 + (3.42492182632853 + log(0.0001 +
m.x1210))*m.x1210 + (3.5427384651279 + log(0.0001 + m.x1211))*m.x1211 + (3.39065372071622 + log(
0.0001 + m.x1212))*m.x1212 + (8.42196346523243 + log(0.0001 + m.x1213))*m.x1213 + (
6.35155857336196 + log(0.0001 + m.x1214))*m.x1214 + (5.85095859411503 + log(0.0001 + m.x1215))*
m.x1215 + (5.52901131368272 + log(0.0001 + m.x1216))*m.x1216 + (5.82423964498116 + log(0.0001 +
m.x1217))*m.x1217 + (6.37929373652371 + log(0.0001 + m.x1218))*m.x1218 + (4.2321413574768 + log(
0.0001 + m.x1219))*m.x1219 +
# File: gen/calc.py
""" This module contains the logic for specifying and validating the top-level
DC/OS configuration from user arguments
The data structure called 'entry' is what defines which validation checks
should be run, how arguments should be calculated, which arguments should have
set defaults, which arguments should be user specified, and how some arguments
should be calculated.
HOW THIS WORKS:
The ARGUMENT NAME in the validate and calculate functions correspond
to the FIELD FROM THE INPUT (config.yaml).
Notes:
validate_* function: the arguments it takes will define the arguments which the
function is evaluated against. All validations are performed at once
argument calculation functions: like validation function, the arguments specified
will be pulled from the Source or user arguments. These function can be used
for both 'default' and 'must'
See gen.internals for more on how the nuts and bolts of this process works
"""
import collections
import ipaddress
import json
import os
import re
import socket
import string
from math import floor
from subprocess import check_output
import schema
import yaml
import gen.internals
DCOS_VERSION = '1.14.0-dev'
CHECK_SEARCH_PATH = '/opt/mesosphere/bin:/usr/bin:/bin:/sbin'
def type_str(value):
return type(value).__name__
def check_duplicates(items: list):
counter = collections.Counter(items)
duplicates = dict(filter(lambda x: x[1] > 1, counter.items()))
assert not duplicates, 'List cannot contain duplicates: {}'.format(
', '.join('{} appears {} times'.format(*item) for item in duplicates.items()))
def validate_true_false(val) -> None:
gen.internals.validate_one_of(val, ['true', 'false'])
def validate_int_in_range(value, low, high):
try:
int_value = int(value)
except ValueError as ex:
raise AssertionError('Must be an integer but got a {}: {}'.format(type_str(value), value)) from ex
# Only a lower bound
if high is None:
assert low <= int_value, 'Must be above or equal to {}'.format(low)
else:
assert low <= int_value <= high, 'Must be between {} and {} inclusive'.format(low, high)
def validate_json_list(value):
try:
items = json.loads(value)
except ValueError as ex:
raise AssertionError("Must be a JSON formatted list, but couldn't be parsed the given "
"value `{}` as one because of: {}".format(value, ex)) from ex
assert isinstance(items, list), "Must be a JSON list. Got a {}".format(type_str(items))
non_str = list(filter(lambda x: not isinstance(x, str), items))
assert not non_str, "Items in list must be strings, got invalid values: {}".format(
", ".join("{} type {}".format(elem, type_str(elem)) for elem in non_str))
return items
def valid_ipv4_address(ip):
try:
socket.inet_pton(socket.AF_INET, ip)
return True
except OSError:
return False
except TypeError:
return False
def validate_ipv4_addresses(ips: list):
invalid_ips = []
for ip in ips:
if not valid_ipv4_address(ip):
invalid_ips.append(ip)
assert not invalid_ips, 'Invalid IPv4 addresses in list: {}'.format(', '.join(invalid_ips))
def validate_absolute_path(path):
if not path.startswith('/'):
raise AssertionError('Must be an absolute filesystem path starting with /')
def valid_ipv6_address(ip6):
try:
socket.inet_pton(socket.AF_INET6, ip6)
return True
except OSError:
return False
except TypeError:
return False
def validate_ipv6_addresses(ip6s: list):
invalid_ip6s = []
for ip6 in ip6s:
if not valid_ipv6_address(ip6):
invalid_ip6s.append(ip6)
assert not invalid_ip6s, 'Invalid IPv6 addresses in list: {}'.format(', '.join(invalid_ip6s))
def validate_ip_list(json_str: str):
nodes_list = validate_json_list(json_str)
check_duplicates(nodes_list)
validate_ipv4_addresses(nodes_list)
def validate_ip_port_list(json_str: str):
nodes_list = validate_json_list(json_str)
check_duplicates(nodes_list)
    # Create a list of only ip addresses by splitting the port from the node. Use the resulting
# ip_list to validate that it is an ipv4 address. If the port was specified, validate its
# value is between 1 and 65535.
ip_list = []
for node in nodes_list:
ip, separator, port = node.rpartition(':')
if not separator:
ip = node
else:
validate_int_in_range(port, 1, 65535)
ip_list.append(ip)
validate_ipv4_addresses(ip_list)
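# Illustrative use of the validator above (added for clarity; not part of the original file).
def _example_validate_ip_port_list():
    validate_ip_port_list('["10.0.0.1", "10.0.0.2:5050"]')   # passes: optional port within 1-65535
    try:
        validate_ip_port_list('["10.0.0.1:70000"]')          # fails the port-range check
    except AssertionError:
        pass
    try:
        validate_ip_port_list('["10.0.0.1", "10.0.0.1"]')    # fails the duplicate check
    except AssertionError:
        pass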
def calculate_environment_variable(name):
value = os.getenv(name)
assert value is not None, "{} must be a set environment variable".format(name)
return value
def calulate_dcos_image_commit():
dcos_image_commit = os.getenv('DCOS_IMAGE_COMMIT', None)
if dcos_image_commit is None:
dcos_image_commit = check_output(['git', 'rev-parse', 'HEAD']).decode('utf-8').strip()
assert dcos_image_commit is not None, "Unable to set dcos_image_commit from teamcity or git."
return dcos_image_commit
def calculate_resolvers_str(resolvers):
# Validation because accidentally slicing a string instead of indexing a
# list of resolvers then finding out at cluster launch is painful.
resolvers = json.loads(resolvers)
return ",".join(resolvers)
def calculate_mesos_dns_resolvers_str(resolvers):
resolver_list = json.loads(resolvers)
    # Mesos-DNS unfortunately requires completely different config parameters
# for saying "Don't resolve / reject non-Mesos-DNS requests" than "there are
    # no upstream resolvers". As such, if resolvers are given, output that.
# Otherwise output the option externalOn which means "don't try resolving
# external queries / just fail fast without an error."
# This logic _should_ live in the Jinja template but it unfortunately can't
# because the "unset argument detection" in Jinja doesn't work around using
# jinja functions (the function names show up as unset arguments...).
# As such, generate the full JSON line and replace it in the manner given
# above.
if len(resolver_list) > 0:
return '"resolvers": ' + resolvers
else:
return '"externalOn": false'
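# Illustrative behaviour of the helper above (added for clarity; not part of the original file).
def _example_mesos_dns_resolvers_str():
    assert calculate_mesos_dns_resolvers_str('["8.8.8.8"]') == '"resolvers": ["8.8.8.8"]'
    assert calculate_mesos_dns_resolvers_str('[]') == '"externalOn": false'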
def validate_mesos_log_retention_mb(mesos_log_retention_mb):
assert int(mesos_log_retention_mb) >= 1024, "Must retain at least 1024 MB of logs"
def validate_mesos_container_log_sink(mesos_container_log_sink):
assert mesos_container_log_sink in [
'fluentbit',
'journald',
'logrotate',
'fluentbit+logrotate',
'journald+logrotate',
], "Container logs must go to 'fluentbit', 'journald', 'logrotate', 'fluentbit+logrotate', or 'journald+logrotate'."
def validate_metronome_gpu_scheduling_behavior(metronome_gpu_scheduling_behavior):
    assert metronome_gpu_scheduling_behavior in ['restricted', 'unrestricted', ''], \
        "metronome_gpu_scheduling_behavior must be 'restricted', 'unrestricted', or ''"
def validate_marathon_gpu_scheduling_behavior(marathon_gpu_scheduling_behavior):
    assert marathon_gpu_scheduling_behavior in ['restricted', 'unrestricted', ''], \
        "marathon_gpu_scheduling_behavior must be 'restricted', 'unrestricted', or ''"
def calculate_mesos_log_retention_count(mesos_log_retention_mb):
# Determine how many 256 MB log chunks can be fit into the given size.
# We assume a 90% compression factor; logs are compressed after 2 rotations.
# We return the number of times the log can be rotated by logrotate;
# this is one less than the total number of log file retained.
return str(int(1 + (int(mesos_log_retention_mb) - 512) / 256 * 10))
def calculate_mesos_log_directory_max_files(mesos_log_retention_mb):
# We allow some maximum number of temporary/random files in the
# Mesos log directory. This maximum takes into account the number
# of rotated logs that stay in the archive subdirectory.
return str(25 + int(calculate_mesos_log_retention_count(mesos_log_retention_mb)))
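# Worked example for the two helpers above (added for illustration; not part of the original file);
# 2048 MB is an arbitrary sample value, not a DC/OS default.
def _example_log_retention_math():
    assert calculate_mesos_log_retention_count('2048') == '61'       # 1 + (2048 - 512) / 256 * 10
    assert calculate_mesos_log_directory_max_files('2048') == '86'   # 25 + 61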
def calculate_ip_detect_contents(ip_detect_filename):
assert os.path.exists(ip_detect_filename), "ip-detect script `{}` must exist".format(ip_detect_filename)
return yaml.dump(open(ip_detect_filename, encoding='utf-8').read())
def calculate_ip_detect_public_contents(ip_detect_contents, ip_detect_public_filename):
if ip_detect_public_filename != '':
return calculate_ip_detect_contents(ip_detect_public_filename)
return ip_detect_contents
def calculate_ip6_detect_contents(ip6_detect_filename):
if ip6_detect_filename != '':
return yaml.dump(open(ip6_detect_filename, encoding='utf-8').read())
return yaml.dump("")
def calculate_rexray_config_contents(rexray_config):
return yaml.dump(
# Assume block style YAML (not flow) for REX-Ray config.
yaml.dump(json.loads(rexray_config), default_flow_style=False)
)
def validate_json_dictionary(data):
# TODO(cmaloney): Pull validate_json() out.
try:
loaded = json.loads(data)
assert isinstance(loaded, dict), "Must be a JSON dictionary. Got a {}".format(type_str(loaded))
return loaded
except ValueError as ex:
raise AssertionError("Must be valid JSON. Got: {}".format(data)) from ex
def calculate_gen_resolvconf_search(dns_search):
if len(dns_search) > 0:
return "SEARCH=" + dns_search
else:
return ""
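# Illustrative behaviour of the helper above (added for clarity; not part of the original file).
def _example_gen_resolvconf_search():
    assert calculate_gen_resolvconf_search('mesos marathon.mesos') == 'SEARCH=mesos marathon.mesos'
    assert calculate_gen_resolvconf_search('') == ''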
def calculate_mesos_hooks(dcos_remove_dockercfg_enable):
if dcos_remove_dockercfg_enable == 'true':
return "com_mesosphere_dcos_RemoverHook"
else:
return ""
def calculate_use_mesos_hooks(mesos_hooks):
if mesos_hooks == "":
return "false"
else:
return "true"
def validate_network_default_name(overlay_network_default_name, dcos_overlay_network):
try:
overlay_network = json.loads(dcos_overlay_network)
except ValueError as ex:
raise AssertionError("Provided input was not valid JSON: {}".format(dcos_overlay_network)) from ex
overlay_names = map(lambda overlay: overlay['name'], overlay_network['overlays'])
assert overlay_network_default_name in overlay_names, (
"Default overlay network name does not reference a defined overlay network: {}".format(
overlay_network_default_name))
def validate_dcos_ucr_default_bridge_subnet(dcos_ucr_default_bridge_subnet):
try:
ipaddress.ip_network(dcos_ucr_default_bridge_subnet)
except ValueError as ex:
raise AssertionError(
"Incorrect value for dcos_ucr_default_bridge_subnet: {}."
" Only IPv4 subnets are allowed".format(dcos_ucr_default_bridge_subnet)) from ex
def validate_dcos_overlay_network(dcos_overlay_network):
try:
overlay_network = json.loads(dcos_overlay_network)
except ValueError as ex:
raise AssertionError("Provided input was not valid JSON: {}".format(dcos_overlay_network)) from ex
assert 'overlays' in overlay_network, (
'Missing "overlays" in overlay configuration {}'.format(overlay_network))
assert len(overlay_network['overlays']) > 0, (
'"Overlays" network configuration is empty: {}'.format(overlay_network))
for overlay in overlay_network['overlays']:
assert 'name' in overlay, (
'Missing "name" in overlay configuration: {}'.format(overlay))
assert (len(overlay['name']) <= 13), (
"Overlay name cannot exceed 13 characters:{}".format(overlay['name']))
assert ('subnet' in overlay or 'subnet6' in overlay), (
'Missing "subnet" or "subnet6" in overlay configuration:{}'.format(overlay))
assert 'vtep_mac_oui' in overlay_network.keys(), (
'Missing "vtep_mac_oui" in overlay configuration {}'.format(overlay_network))
vtep_mtu = overlay_network.get('vtep_mtu', 1500)
validate_int_in_range(vtep_mtu, 552, None)
if 'subnet' in overlay:
# Check the VTEP IP is present in the overlay configuration
assert 'vtep_subnet' in overlay_network, (
'Missing "vtep_subnet" in overlay configuration {}'.format(overlay_network))
try:
ipaddress.ip_network(overlay_network['vtep_subnet'])
except ValueError as ex:
raise AssertionError(
"Incorrect value for vtep_subnet: {}."
" Only IPv4 values are allowed".format(overlay_network['vtep_subnet'])) from ex
try:
ipaddress.ip_network(overlay['subnet'])
except ValueError as ex:
raise AssertionError(
"Incorrect value for overlay subnet {}."
" Only IPv4 values are allowed".format(overlay['subnet'])) from ex
if 'subnet6' in overlay:
# Check the VTEP IP6 is present in the overlay configuration
assert 'vtep_subnet6' in overlay_network, (
'Missing "vtep_subnet6" in overlay configuration {}'.format(overlay_network))
try:
ipaddress.ip_network(overlay_network['vtep_subnet6'])
except ValueError as ex:
raise AssertionError(
"Incorrect value for vtep_subnet6: {}."
" Only IPv6 values are allowed".format(overlay_network['vtep_subnet6'])) from ex
try:
ipaddress.ip_network(overlay['subnet6'])
except ValueError as ex:
raise AssertionError(
"Incorrect value for overlay subnet6 {}."
" Only IPv6 values are allowed".format(overlay_network['subnet6'])) from ex
# Repository: RI-imaging/ODTbrain
"""2D Fourier mapping"""
import numpy as np
import scipy.interpolate as intp
def fourier_map_2d(uSin, angles, res, nm, lD=0, semi_coverage=False,
coords=None, count=None, max_count=None, verbose=0):
r"""2D Fourier mapping with the Fourier diffraction theorem
Two-dimensional diffraction tomography reconstruction
algorithm for scattering of a plane wave
:math:`u_0(\mathbf{r}) = u_0(x,z)`
by a dielectric object with refractive index
:math:`n(x,z)`.
This function implements the solution by interpolation in
Fourier space.
Parameters
----------
uSin: (A,N) ndarray
Two-dimensional sinogram of line recordings
:math:`u_{\mathrm{B}, \phi_j}(x_\mathrm{D})`
divided by the incident plane wave :math:`u_0(l_\mathrm{D})`
measured at the detector.
angles: (A,) ndarray
Angular positions :math:`\phi_j` of `uSin` in radians.
res: float
Vacuum wavelength of the light :math:`\lambda` in pixels.
nm: float
Refractive index of the surrounding medium :math:`n_\mathrm{m}`.
lD: float
Distance from center of rotation to detector plane
:math:`l_\mathrm{D}` in pixels.
semi_coverage: bool
If set to `True`, it is assumed that the sinogram does not
necessarily cover the full angular range from 0 to 2π, but an
equidistant coverage over 2π can be achieved by inferring point
(anti)symmetry of the (imaginary) real parts of the Fourier
transform of f. Valid for any set of angles {X} that result in
a 2π coverage with the union set {X}U{X+π}.
coords: None [(2,M) ndarray]
Computes only the output image at these coordinates. This
keyword is reserved for future versions and is not
implemented yet.
count, max_count: multiprocessing.Value or `None`
Can be used to monitor the progress of the algorithm.
Initially, the value of `max_count.value` is incremented
by the total number of steps. At each step, the value
of `count.value` is incremented.
verbose: int
Increment to increase verbosity.
Returns
-------
f: ndarray of shape (N,N), complex if `onlyreal` is `False`
Reconstructed object function :math:`f(\mathbf{r})` as defined
by the Helmholtz equation.
:math:`f(x,z) =
k_m^2 \left(\left(\frac{n(x,z)}{n_m}\right)^2 -1\right)`
See Also
--------
backpropagate_2d: implementation by backpropagation
odt_to_ri: conversion of the object function :math:`f(\mathbf{r})`
to refractive index :math:`n(\mathbf{r})`
Notes
-----
Do not use the parameter `lD` in combination with the Rytov
approximation - the propagation is not correctly described.
Instead, numerically refocus the sinogram prior to converting
it to Rytov data (using e.g. :func:`odtbrain.sinogram_as_rytov`)
with a numerical focusing algorithm (available in the Python
package :py:mod:`nrefocus`).
The interpolation in Fourier space (which is done with
:func:`scipy.interpolate.griddata`) may be unstable and lead to
artifacts if the data to interpolate contains sharp spikes. This
issue is not handled at all by this method (in fact, a test has
been removed in version 0.2.6 because ``griddata`` gave different
results on Windows and Linux).
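
    Examples
    --------
    A hypothetical call (added for illustration; it is not part of the
    original docstring), assuming a background-corrected sinogram ``sino``
    of shape (A, N) and a matching ``angles`` array in radians; the numeric
    values of ``res`` and ``nm`` are placeholders::

        f = fourier_map_2d(uSin=sino, angles=angles, res=3.38, nm=1.333)

    The object function ``f`` can then be converted to refractive index
    with :func:`odtbrain.odt_to_ri`.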
"""
##
##
# TODO:
# - zero-padding as for backpropagate_2D - However this is not
# necessary as Fourier interpolation is not parallelizable with
# multiprocessing and thus unattractive. Could be interesting for
# specific environments without the Python GIL.
# - Deal with oversampled data. Maybe issue a warning.
##
##
A = angles.shape[0]
if max_count is not None:
max_count.value += 4
# Check input data
assert len(uSin.shape) == 2, "Input data `uSin` must have shape (A,N)!"
assert len(uSin) == A, "`len(angles)` must be equal to `len(uSin)`!"
if coords is not None:
        raise NotImplementedError("Output coordinates cannot yet be set "
                                  "for the 2D backpropagation algorithm.")
# Cut-Off frequency
# km [1/px]
km = (2 * np.pi * nm) / res
# Fourier transform of all uB's
# In the script we used the unitary angular frequency (uaf) Fourier
# Transform. The discrete Fourier transform is equivalent to the
# unitary ordinary frequency (uof) Fourier transform.
#
# uof: f₁(ξ) = int f(x) exp(-2πi xξ)
#
# uaf: f₃(ω) = (2π)^(-n/2) int f(x) exp(-i ωx)
#
# f₁(ω/(2π)) = (2π)^(n/2) f₃(ω)
# ω = 2πξ
#
# Our Backpropagation Formula is with uaf convention of the Form
#
# F(k) = 1/sqrt(2π) U(kD)
#
# If we convert now to uof convention, we get
#
# F(k) = U(kD)
#
# This means that if we divide the Fourier transform of the input
# data by sqrt(2π) to convert f₃(ω) to f₁(ω/(2π)), the resulting
# value for F is off by a factor of 2π.
#
# Instead, we can just multiply *UB* by sqrt(2π) and calculate
# everything in uof.
# UB = np.fft.fft(np.fft.ifftshift(uSin, axes=-1))/np.sqrt(2*np.pi)
#
#
# Furthermore, we define
# a wave propagating to the right as:
#
# u0(x) = exp(ikx)
#
# However, in physics usually we use the other sign convention:
#
# u0(x) = exp(-ikx)
#
# In order to be consistent with programs like Meep or our
# scattering script for a dielectric cylinder, we want to use the
# latter sign convention.
# This is not a big problem. We only need to multiply the imaginary
# part of the scattered wave by -1.
UB = np.fft.fft(np.fft.ifftshift(uSin, axes=-1)) * np.sqrt(2 * np.pi)
# Corresponding sample frequencies
fx = np.fft.fftfreq(len(uSin[0])) # 1D array
# kx is a 1D array.
kx = 2 * np.pi * fx
if count is not None:
count.value += 1
# Undersampling/oversampling?
# Determine if the resolution of the image is too low by looking
# at the maximum value for kx. This is no comparison between
# Nyquist and Rayleigh frequency.
    if np.max(kx**2) <= km**2:
        # Detector is not set up properly. Higher resolution
        # can be achieved.
        if verbose:
            print("......Measurement data is undersampled.")
    elif verbose:
        print("......Measurement data is oversampled.")
# raise NotImplementedError("Oversampled data not yet supported."+
# " Please rescale xD-axis of the input data.")
# DEAL WITH OVERSAMPLED DATA?
# lenk = len(kx)
# kx = np.fft.ifftshift(np.linspace(-np.sqrt(km),
# np.sqrt(km),
# len(fx), endpoint=False))
#
# F(kD-kₘs₀) = - i kₘ sqrt(2/π) / a₀ * M exp(-i kₘ M lD) * UB(kD)
# kₘM = sqrt( kₘ² - kx² )
# s₀ = ( -sin(ϕ₀), cos(ϕ₀) )
#
# We create the 2D interpolation object F
# - We compute the real coordinates (krx,kry) = kD-kₘs₀
# - We set as grid points the right side of the equation
#
# The interpolated griddata may go up to sqrt(2)*kₘ for kx and ky.
kx = kx.reshape(1, -1)
# a0 should have same shape as kx and UB
# a0 = np.atleast_1d(a0)
# a0 = a0.reshape(1,-1)
filter_klp = (kx**2 < km**2)
M = 1. / km * np.sqrt(km**2 - kx**2)
# Fsin = -1j * km * np.sqrt(2/np.pi) / a0 * M * np.exp(-1j*km*M*lD)
# new in version 0.1.4:
# We multiply by the factor (M-1) instead of just (M)
# to take into account that we have a scattered
# wave that is normalized by u0.
Fsin = -1j * km * np.sqrt(2 / np.pi) * M * np.exp(-1j * km * (M-1) * lD)
# UB has same shape (len(angles), len(kx))
Fsin = Fsin * UB * filter_klp
ang = angles.reshape(-1, 1)
if semi_coverage:
Fsin = np.vstack((Fsin, np.conj(Fsin)))
ang = np.vstack((ang, ang + np.pi))
if count is not None:
count.value += 1
# Compute kxl and kyl (in rotated system ϕ₀)
kxl = kx
kyl = np.sqrt((km**2 - kx**2) * filter_klp) - km
# rotate kxl and kyl to where they belong
krx = np.cos(ang) * kxl + np.sin(ang) * kyl
kry = - np.sin(ang) * kxl + np.cos(ang) * kyl
Xf = krx.flatten()
Yf = kry.flatten()
Zf = Fsin.flatten()
# DEBUG: plot kry vs krx
# from matplotlib import pylab as plt
# plt.figure()
# for i in range(len(krx)):
# plt.plot(krx[i],kry[i],"x")
# plt.axes().set_aspect('equal')
# plt.show()
# interpolation on grid with same resolution as input data
kintp = np.fft.fftshift(kx.reshape(-1))
Fcomp = intp.griddata((Xf, Yf), Zf, (kintp[None, :], kintp[:, None]))
if count is not None:
count.value += 1
# removed nans
Fcomp[np.where(np.isnan(Fcomp))] = 0
# Filter data
kinx, kiny = np.meshgrid(np.fft.fftshift(kx), np.fft.fftshift(kx))
Fcomp[np.where((kinx**2 + kiny**2) > np.sqrt(2) * km)] = 0
# Fcomp[np.where(kinx**2+kiny**2<km)] = 0
# Fcomp is centered at K = 0 due to the way we chose kintp/coords
f = np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(Fcomp)))
    if count is not None:
        count.value += 1
residue_1.resname in AMINO_ACIDS and residue_1.id[0] == " ":
resnum_1 = str(residue_1.id[1]) + residue_1.id[2].strip()
resaa_1 = convert_aa(residue_1.get_resname(), quiet=True)
interacting_residues = set()
for atom_1 in residue_1:
interacting_residues.update(ns.search(atom_1.get_coord(), r_cutoff, "R"))
interacting_resids = []
for residue_2 in interacting_residues:
resnum_2 = str(residue_2.id[1]) + residue_2.id[2].strip()
resaa_2 = convert_aa(residue_2.get_resname(), quiet=True)
if residue_2.resname in AMINO_ACIDS and residue_2.id[0] == " ":
interacting_resids.append(
(
resnum_2,
resaa_2,
)
)
if interacting_resids:
interacting_resids.sort(
key=lambda x: int("".join([c for c in x[0] if c.isdigit()]))
)
interactions_between_chains[(resnum_1, resaa_1)] = interacting_resids
return interactions_between_chains
def get_interactions_between_chains_slow(model, pdb_chain_1, pdb_chain_2, r_cutoff=5):
"""Calculate interactions between residues in pdb_chain_1 and pdb_chain_2.
    An interaction is defined as a pair of residues where at least one pair of atoms
is closer than r_cutoff. The default value for r_cutoff is 5 Angstroms.
.. deprecated:: 1.0
Use :func:`get_interacting_residues` instead.
It gives you both the residue index and the resnum.
"""
# Extract the chains of interest from the model
chain_1 = None
chain_2 = None
for child in model.get_list():
if child.id == pdb_chain_1:
chain_1 = child
if child.id == pdb_chain_2:
chain_2 = child
if chain_1 is None or chain_2 is None:
raise Exception("Chains %s and %s were not found in the model" % (pdb_chain_1, pdb_chain_2))
interactions_between_chains = OrderedDict()
for idx, residue_1 in enumerate(chain_1):
if residue_1.resname in AMINO_ACIDS and residue_1.id[0] == " ":
resnum_1 = str(residue_1.id[1]) + residue_1.id[2].strip()
resaa_1 = convert_aa(residue_1.get_resname())
interacting_resids = []
for residue_2 in chain_2:
resnum_2 = str(residue_2.id[1]) + residue_2.id[2].strip()
resaa_2 = convert_aa(residue_2.get_resname())
r_min = None
if residue_2.resname in AMINO_ACIDS and residue_2.id[0] == " ":
for atom_1 in residue_1:
for atom_2 in residue_2:
r = calculate_distance(atom_1, atom_2, r_cutoff)
if r is not None:
if r_min and r < r_min:
r_min = r
elif not r_min:
r_min = r
if r_min:
interacting_resids.append(
(
resnum_2,
resaa_2,
r_min,
)
)
if interacting_resids:
interactions_between_chains[(resnum_1, resaa_1)] = interacting_resids
return interactions_between_chains
def chain_is_hetatm(chain):
"""Return True if the chain is made up entirely of HETATMs."""
hetatms = [None] * len(chain)
for i in range(len(chain.child_list)):
res = chain.child_list[i]
hetatms[i] = res.resname not in AAA_DICT
if all(hetatms):
return True
elif not any(hetatms):
return False
else:
# Something went wrong.
sequence, numbering = get_chain_sequence_and_numbering(chain)
message = (
"Some but not all residues in chain {} are hetatms!\n".format(chain.id)
+ "sequence: {}\n".format(sequence)
+ "numbering: {}\n".format(numbering)
)
logger.debug(message)
return False
def get_aa_residues(chain):
aa_residues = [residue.id for residue in chain if residue.resname in AAA_DICT]
return aa_residues
def get_interacting_residues(model, r_cutoff=5, skip_hetatm_chains=True):
"""Return residue-residue interactions between all chains in `model`.
Parameters
----------
model : biopython.Model
Model to analyse.
Returns
-------
dict
A dictionary of interactions between chains i (0..n-1) and j (i+1..n-1).
Keys are (chain_idx, chain_id, residue_idx, residue_resnum, residue_amino_acid) tuples.
(e.g. (0, 'A', 0, '0', 'M'), (0, 'A', 1, '2', 'K'), ...)
Values are a list of tuples having the same format as the keys.
Examples
--------
You can reverse the order of keys and values like this::
complement = dict()
for key, values in get_interacting_residues(model).items():
for value in values:
complement.setdefault(value, set()).add(key)
You can get a list of all interacting chains using this command::
{(key[0], value[0])
for (key, values) in get_interacting_residues(model).items()
for value in values}
"""
from Bio.PDB import NeighborSearch
interactions_between_chains = dict()
# Chain 1
for chain_1_idx, chain_1 in enumerate(model):
if skip_hetatm_chains and chain_is_hetatm(chain_1):
message = "Skipping chain_1 with idx {} because it contains only hetatms.".format(
chain_1_idx
)
logger.debug(message)
continue
chain_1_residue_ids = get_aa_residues(chain_1)
# Chain 2
for j, chain_2 in enumerate(model.child_list[chain_1_idx + 1 :]):
chain_2_idx = chain_1_idx + 1 + j
if skip_hetatm_chains and chain_is_hetatm(chain_2):
message = "Skipping chain_2 with idx {} because it contains only hetatms.".format(
chain_2_idx
)
logger.debug(message)
continue
chain_2_residue_ids = get_aa_residues(chain_2)
ns = NeighborSearch(list(chain_2.get_atoms()))
# Residue 1
for residue_1 in chain_1:
try:
residue_1_idx = chain_1_residue_ids.index(residue_1.id)
except ValueError:
continue
residue_1_resnum = str(residue_1.id[1]) + residue_1.id[2].strip()
residue_1_aa = convert_aa(residue_1.resname, quiet=True)
residue_1_key = (
chain_1_idx,
chain_1.id,
residue_1_idx,
residue_1_resnum,
residue_1_aa,
)
interacting_residues = set()
for atom_1 in residue_1:
interacting_residues.update(ns.search(atom_1.get_coord(), r_cutoff, "R"))
# Residue 2
interacting_residue_ids = []
for residue_2 in interacting_residues:
try:
residue_2_idx = chain_2_residue_ids.index(residue_2.id)
except ValueError:
continue
residue_2_resnum = str(residue_2.id[1]) + residue_2.id[2].strip()
residue_2_aa = convert_aa(residue_2.get_resname(), quiet=True)
residue_2_key = (
chain_2_idx,
chain_2.id,
residue_2_idx,
residue_2_resnum,
residue_2_aa,
)
interacting_residue_ids.append(residue_2_key)
if interacting_residue_ids:
interactions_between_chains.setdefault(residue_1_key, set()).update(
interacting_residue_ids
)
return interactions_between_chains
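# Editor's note: a minimal usage sketch for `get_interacting_residues`, assuming a local
# PDB file (the path below is hypothetical) and Bio.PDB's parser. It also builds the
# reversed ("complement") mapping described in the docstring above.
def _example_get_interacting_residues(pdb_file="1abc.pdb"):
    from Bio.PDB import PDBParser
    model = PDBParser(QUIET=True).get_structure("example", pdb_file)[0]
    interactions = get_interacting_residues(model, r_cutoff=5)
    # reverse the key/value direction
    complement = dict()
    for key, values in interactions.items():
        for value in values:
            complement.setdefault(value, set()).add(key)
    # pairs of interacting chain indexes
    interacting_chain_pairs = {
        (key[0], value[0]) for key, values in interactions.items() for value in values
    }
    return complement, interacting_chain_pairs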
def decode_domain_def(domains, merge=True, return_string=False):
"""Return a tuple of tuples of strings, preserving letter numbering (e.g. 10B)."""
if not domains:
return None, None
if domains[-1] == ",":
domains = domains[:-1]
x = domains
if return_string:
domain_fragments = [[r.strip() for r in ro.split(":")] for ro in x.split(",")]
else:
domain_fragments = [[int(r.strip()) for r in ro.split(":")] for ro in x.split(",")]
domain_merged = [domain_fragments[0][0], domain_fragments[-1][-1]]
if merge:
return domain_merged
else:
return domain_fragments
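# Editor's note: a short sketch of `decode_domain_def` on a two-fragment domain string;
# with merge=True only the outermost boundaries are kept.
def _example_decode_domain_def():
    assert decode_domain_def("1:10,20:45", merge=True) == [1, 45]
    assert decode_domain_def("1:10,20:45", merge=False) == [[1, 10], [20, 45]]
    # return_string=True keeps insertion codes such as "10B" intact
    assert decode_domain_def("2:10B", merge=False, return_string=True) == [["2", "10B"]]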
# Additions for `pipeline_structure`
class SelectChains(Select):
"""Only accept the specified chains when saving."""
def __init__(self, chain_letters, ns_chain_letters=None, ns=None, r_cutoff=None):
self.chain_letters = chain_letters
self.ns_chain_letters = ns_chain_letters
self.ns = ns
self.r_cutoff = r_cutoff
def accept_residue(self, residue):
chain_id = residue.parent.id
if chain_id in self.chain_letters:
return True
elif (self.ns_chain_letters and self.ns) and (chain_id in self.ns_chain_letters):
for atom in residue:
if self.ns.search(atom.get_coord(), self.r_cutoff, "C"):
return True
return False
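# Editor's note: a hedged sketch of using `SelectChains` with Bio.PDB's PDBIO to write
# out only chains A and B of a parsed structure; the output filename is illustrative.
def _example_select_chains(structure, output_file="chains_AB.pdb"):
    from Bio.PDB import PDBIO
    io = PDBIO()
    io.set_structure(structure)
    io.save(output_file, select=SelectChains(chain_letters=["A", "B"]))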
class StructureParser:
""".
Attributes
----------
pdb_id : ___
domain_boundaries : list of lists of lists
Elements in the outer list correspond to domains in each chain of the
pdb. Elements of the inner list contain the start and end of each
fragment of each domain. For example, if there is only one chain
with pdb domain boundaries 1-10:20-45, this would correspond to
domain_boundaries [[[1,10],[20,45]]].
"""
def __init__(self, pdb_file, chain_ids=None, domain_defs=[]):
""".
Parameters
----------
pdb_file : str
Full path and filename of the structure.
domain_defs : list
    Domain definition strings (e.g. "1:10,20:45") for the chains that are kept.
chain_ids : list
Chains of the structure that should be kept.
"""
self.pdb_id = get_pdb_id(pdb_file)
self.pdb_file = pdb_file
self.input_structure = get_pdb_structure(self.pdb_file, self.pdb_id)
if chain_ids is None:
self.chain_ids = [chain.id for chain in self.input_structure[0].child_list]
elif isinstance(chain_ids, str):
self.chain_ids = chain_ids.split(",")
elif isinstance(chain_ids, list) or isinstance(chain_ids, tuple):
self.chain_ids = list(chain_ids)
else:
raise Exception
self.r_cutoff = 6 # remove hetatms more than x A away from the main chain(s)
self.domain_boundaries = []
for domain_def in domain_defs:
self.domain_boundaries.append(
decode_domain_def(domain_def, merge=False, return_string=True)
)
self.unique_id = "pdb_id: {}, chain_ids: {}".format(self.pdb_id, self.chain_ids)
def extract(self):
"""Extract the wanted chains out of the PDB file.
Removes water atoms and selects the domain regions (i.e. keeps only those
residues that fall within the specified domain boundaries).
"""
logger.debug("Extracting {}...".format(self.unique_id))
model = self.input_structure[0] # assuming that model 0 is always the desired one
new_structure = Bio.PDB.Structure.Structure(self.pdb_id)
new_model = Bio.PDB.Model.Model(0)
# Always assigning hetatms to chain 'Z' may lead to undesirable performance
# when the PDB structure actually has a chain 'Z'.
# As of 2015, there are ~1300 structures with chain 'Z' in the elaspic.domain table.
# TODO: Convert `pdb_chain` tables in the database to use binary collation.
# I think the Bio.PDB module may have to be upgraded too as it currently does not support
# lowercase chain ids.
hetatm_chain_id = "Z"
hetatm_chain = Bio.PDB.Chain.Chain(hetatm_chain_id)
# Loop over every chain and every residue and make sure that everything is ok
chain_idx = 0
while chain_idx < len(self.chain_ids):
chain_id = self.chain_ids[chain_idx]
chain = model[chain_id]
(
chain_numbering,
domain_start_idxs,
domain_end_idxs,
) = self._get_domain_def_idxs_for_chain(chain, chain_idx)
logger.debug(
"domain_def: {}, domain_start_idxs: {}, domain_end_idxs: {}".format(
self.domain_boundaries, domain_start_idxs, domain_end_idxs
)
)
res_idx = 0
while res_idx < len(chain):
res = chain.child_list[res_idx]
original_res_id = res.id
# Move water to the hetatm chain
if res.id[0] == "W":
self._move_hetatm_to_hetatm_chain(chain, hetatm_chain, res, echo=False)
continue
# # Move heteroatoms to the hetatm chain
# if res.id[0] != ' ':
# self._move_hetatm_to_hetatm_chain(chain, hetatm_chain, res, echo=True)
# continue
# Now treating all unusual amino acids as hetatms
# Convert methylated lysines to regular lysines
if res.resname in METHYLATED_LYSINES:
self._correct_methylated_lysines(res)
# Move hetatms to the hetatm chain
if res.resname not in AMINO_ACIDS:
self._move_hetatm_to_hetatm_chain(chain, hetatm_chain, res)
continue
# Cut each chain to domain boundaries
residue_is_outside_domain = self._residue_outside_domain(
chain, chain_numbering, domain_start_idxs, domain_end_idxs, res
)
if residue_is_outside_domain:
chain.detach_child(original_res_id)
continue
res_idx += 1
if len(chain):
new_model.add(chain)
chain_idx += 1
else:
logger.debug("Chain {} is empty! Removing...".format(chain.id))
self.chain_ids.remove(chain.id)
# Make sure that the new model is not empty
################################################################################
# Copyright (c) 2006-2017 Franz Inc.
# All rights reserved. This program and the accompanying materials are
# made available under the terms of the MIT License which accompanies
# this distribution, and is available at http://opensource.org/licenses/MIT
################################################################################
from __future__ import print_function
from itertools import islice
from future.builtins import bytes, next, object, range
from future.utils import iteritems, python_2_unicode_compatible, bchr
from past.builtins import unicode
from past.builtins import str as old_str
from future.utils import native_str
import os, re, sys
# Select the backend (curl or requests).
if os.environ.get('AG_FORCE_REQUESTS_BACKEND'):
import franz.miniclient.backends.requests as backend
else:
try:
import franz.miniclient.backends.curl as backend
except ImportError:
import franz.miniclient.backends.requests as backend
from franz.openrdf.util.strings import to_native_string
from franz.openrdf.util.http import merge_headers
from franz.miniclient.agjson import decode_json, JsonDecodeError
if sys.version_info[0] > 2:
from urllib.parse import quote
from io import StringIO
else:
from urllib import quote
from cStringIO import StringIO
# Note: this is mocked in some unit tests, be careful when changing the way
# the import works.
makeRequest = backend.makeRequest
def jsonRequest(obj, method, url, body=None, content_type="application/x-www-form-urlencoded",
callback=None, accept=None, headers=None):
"""
Create a request that expects a JSON response.
The response can optionally be saved to a file-like object if
the connection object has the _saveFile and _saveAccept attributes.
Instead of being returned the response might be passed to a callback function.
Raise an exception if the returned status is not in the 2XX range.
:param obj: Service object with connection information (e.g. credentials).
:type obj: franz.openrdf.miniclient.repository.Service
:param method: Request method (``"GET"``, ``"POST"``, ...).
:type method: string
:param url: Target address
:type url: string
:param body: Request body (for PUT/POST requests) or query string, optional.
:type body: basestring|file
:param accept: Value of the accept header (default: ``"application/json"``)
:type accept: string
:param content_type: MIME type of the request body, optional.
:type content_type: string
:param headers: Either a dictionary mapping headers to values or
a list of strings that will be included in the request's headers.
:type headers: Iterable[string] | dict[string, string] | None
:param callback: A callback function that will be called for each response chunk (optional).
The return value should be either None or the number of bytes
received, anything else will cause the request to be aborted.
:type callback: (bytestring) -> int
:return: A parsed JSON response, or ``None`` if the response was saved to a file or processed by a callback.
:rtype: dict|string|int|float|None
"""
if accept is None:
accept = "application/json"
# If there is a _saveFile and _saveAccept, they override the arguments
if hasattr(obj, '_saveFile') and hasattr(obj, '_saveAccept'):
accept = obj._saveAccept
callback = obj._saveFile.write
headers = merge_headers(obj.getHeaders(), headers)
if callback is None:
status, body = makeRequest(obj, method, url, body, accept, content_type,
headers=headers)
if status == 204:
body = decode_json("{}")
return body
elif status == 200:
if accept in ('application/json', 'text/integer', "application/x-quints+json"):
body = decode_json(body)
return body
else: raise RequestError(status, body)
else:
def raiseErr(status, message): raise RequestError(status, message)
makeRequest(obj, method, url, body, accept, content_type, callback=callback, errCallback=raiseErr, headers=headers)
def nullRequest(obj, method, url, body=None, content_type="application/x-www-form-urlencoded", content_encoding=None):
"""
Create a request that expects an empty response body.
Raise an exception if the returned status is not in the 2XX range.
:param obj: Service object with connection information (e.g. credentials).
:type obj: franz.openrdf.miniclient.repository.Service
:param method: Request method (``"GET"``, ``"POST"``, ...).
:type method: string
:param url: Target address
:type url: string
:param body: Request body (for PUT/POST requests) or query string, optional.
:type body: basestring|file
:param content_type: MIME type of the request body, optional.
:type content_type: string
"""
headers = None
if content_encoding is not None:
headers = ['Content-Encoding: ' + content_encoding]
status, body = makeRequest(obj, method, url, body, "application/json", content_type,
headers=merge_headers(obj.getHeaders(), headers))
if status < 200 or status > 204:
raise RequestError(status, body)
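# Editor's note: a hedged usage sketch for jsonRequest/nullRequest. `service` is assumed
# to be a franz.miniclient Service-like object (providing getHeaders() and connection
# details); the repository name and endpoint paths below are illustrative only.
def _example_mini_requests(service):
    # GET a small JSON/integer response
    size = jsonRequest(service, "GET", "/repositories/test/size", accept="text/integer")
    # PUT a payload where only a 2XX status with an empty body is expected
    nullRequest(service, "PUT", "/repositories/test/statements",
                body="<s> <p> <o> .", content_type="text/plain")
    return size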
if sys.version_info[0] == 2:
# Workaround for a bug in python-future
def ibytes(x):
"""
Construct a bytes object from a sequence or iterator over integers.
In Python 3, bytes() can do that, but python-future does not have
that capability.
"""
if not hasattr(x, '__len__'):
return bytes(list(x))
return bytes(x)
else:
ibytes = bytes
def mk_unicode(text):
if not isinstance(text, unicode):
return unicode(text, 'utf-8')
return text
@python_2_unicode_compatible
class RequestError(Exception):
code = None
def __init__(self, status, message):
# Why the [----] did anyone think it's ok for a client
# library to just happily write stuff to stdout?!?!
# I can't fix it now, because our Lisp query cancelling test
# depends on this nonsensical behavior.
print(status, message)
self.status = status
if status == 400:
match = re.match("([A-Z ]+): (.*)", message)
if match:
self.code = match.group(1)
message = match.group(2)
self.message = message
def __str__(self):
return "Server returned %s: %s" % (self.status, self.message)
def urlenc(**args):
buf = StringIO()
def enc(name, val):
if buf.tell():
buf.write('&')
buf.write(quote(to_native_string(name)))
buf.write("=")
buf.write(quote(to_native_string(val)))
def encval(name, val):
if val is None: pass
elif isinstance(val, bool): enc(name, (val and "true") or "false")
elif isinstance(val, int): encval(name, "%d" % val)
elif isinstance(val, float): encval(name, "%g" % val)
elif isinstance(val, list) or isinstance(val, tuple):
for elt in val: encval(name, elt)
elif isinstance(val, native_str):
enc(name, val)
elif isinstance(val, (old_str, unicode)):
enc(name, to_native_string(val))
else:
enc(name, to_native_string(str(val)))
for arg_name, value in iteritems(args):
encval(arg_name, value)
return buf.getvalue()
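# Editor's note: a small illustrative check of `urlenc`; the parameter names below are
# arbitrary examples, not part of any fixed API. Booleans become "true"/"false", lists
# are repeated, and None values are skipped.
def _example_urlenc():
    qs = urlenc(limit=10, infer=True, context=["<g1>", "<g2>"], offset=None)
    return qs  # e.g. "limit=10&infer=true&context=%3Cg1%3E&context=%3Cg2%3E"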
class SerialConstants(object):
SO_VECTOR = 1
SO_STRING = 5
SO_NULL = 7
SO_LIST = 8
SO_POS_INTEGER = 9
SO_END_OF_ITEMS = 10
SO_NEG_INTEGER = 11
SO_BYTEVECTOR = 15
def serialize(obj):
def serialize_int(i):
# make sure i is non negative
i = abs(i)
def int_bytes(i):
rest = True
while rest:
lower = i & 0x7f
rest = i >> 7
yield lower | (0x80 if rest else 0)
i = rest
return ibytes(int_bytes(i))
if obj is None:
return bchr(SerialConstants.SO_NULL)
if isinstance(obj, unicode):
return b''.join([bchr(SerialConstants.SO_STRING), serialize_int(len(obj)),
bytes(obj, 'utf-8')])
if isinstance(obj, int):
return b''.join([bchr(SerialConstants.SO_POS_INTEGER) if obj >= 0 else
bchr(SerialConstants.SO_NEG_INTEGER), serialize_int(obj)])
try:
# Byte vector
if obj.typecode == b'b':
return b''.join([bchr(SerialConstants.SO_BYTEVECTOR),
serialize_int(len(obj)), obj.tostring()])
except:
pass
try:
iobj = iter(obj)
return b''.join([bchr(SerialConstants.SO_VECTOR),
serialize_int(len(obj)),
b''.join([serialize(elem) for elem in iobj])])
except:
pass
raise TypeError("cannot serialize object of type %s" % type(obj))
def deserialize(string):
def posInteger(chars):
result = shift = 0
# Set value to get into the loop the first time
value = 0x80
while value & 0x80:
value = next(chars)
result += ((value & 0x7f) << shift)
shift += 7
return result
if isinstance(string, old_str):
string = bytes(string)
chars = iter(string)
value = next(chars)
if value == SerialConstants.SO_BYTEVECTOR:
length = posInteger(chars)
import array
return array.array(b'b', [ord(next(chars)) for i in range(length)])
if (value == SerialConstants.SO_VECTOR or
value == SerialConstants.SO_LIST):
length = posInteger(chars)
return [deserialize(chars) for i in range(length)]
if value == SerialConstants.SO_STRING:
length = posInteger(chars)
return unicode(ibytes(islice(chars, 0, length)), 'utf-8')
if value == SerialConstants.SO_POS_INTEGER:
return posInteger(chars)
if value == SerialConstants.SO_NEG_INTEGER:
return - posInteger(chars)
if value == SerialConstants.SO_NULL:
return None
if value == SerialConstants.SO_END_OF_ITEMS:
return None
raise ValueError("bad code found by deserializer: %d" % value)
def encode(string):
def convert(string):
codes = encode.codes
state = rem = 0
for byte in bytes(string):
if state == 0:
yield codes[byte & 0x3f]
rem = (byte >> 6) & 0x3
state = 1
elif state == 1:
yield codes[((byte & 0xf) << 2) | rem]
rem = (byte >> 4) & 0xf
state = 2
else:
yield codes[((byte & 0x3) << 4) | rem]
yield codes[((byte >> 2) & 0x3f)]
state = 0
if state:
yield codes[rem]
return ibytes(convert(string))
encode.codes = bytes(b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789*+")
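# Editor's note: a tiny sanity check for `encode`: raw bytes are packed into the 6-bit
# alphabet defined in `encode.codes` above (3 input bytes become 4 output characters).
def _example_encode():
    assert encode(b"\x00") == b"AA"
    assert len(encode(b"abc")) == 4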
def decode(string):
def convert(string):
codes = decode.codes
state = rem = 0
if isinstance(string, unicode):
string = bytes(string, 'utf-8')
else:
string = bytes(string)
for byte in string:
byte = codes[byte]
if state == 0:
rem = byte
state = 1
elif state == 1:
yield rem | ((byte & 0x3) << 6)
rem = byte >> 2
state = 2
elif state == 2:
yield rem | ((byte & 0xf) << 4)
rem = byte >> 4
state = 3
else:
yield rem | (byte << 2)
state = 0
return ibytes(convert(string))
decode.codes = [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
def kfold_column(self, n_folds=3, seed=-1):
"""
Build a fold assignments column for cross-validation. This call will produce a column
having the same data layout as the calling object.
:param n_folds: Number of folds.
:param seed: Seed for random numbers (affects sampling when balance_classes=T)
:return: A column of fold IDs.
"""
return H2OFrame(expr=ExprNode("kfold_column",self,n_folds,seed))._frame()
def modulo_kfold_column(self, n_folds=3):
"""
Build a fold assignments column for cross-validation. Rows are assigned a fold according
to the current row number modulo n_folds.
Parameters
----------
n_folds : int
The number of folds to build.
:return: An H2OFrame holding a single column of the fold assignments.
"""
return H2OFrame(expr=ExprNode("modulo_kfold_column",self,n_folds))._frame()
def stratified_kfold_column(self, n_folds=3, seed=-1):
"""
Build a fold assignment column with the constraint that each fold has approximately
the same class distribution as the entire column.
Parameters
----------
n_folds: int
The number of folds to build.
seed: int
A random seed.
:return: An H2OFrame holding a single column of the fold assignments.
"""
return H2OFrame(expr=ExprNode("stratified_kfold_column",self,n_folds,seed))._frame()
def structure(self):
"""
Similar to R's str method: Compactly Display the Structure of this H2OFrame instance.
:return: None
"""
df = self.head().as_data_frame(use_pandas=False)
nr = self.nrow
nc = len(df[0])
cn = df.pop(0)
width = max([len(c) for c in cn])
isfactor = [c.isfactor() for c in self]
numlevels = [self.nlevels(i) for i in range(nc)]
lvls = self.levels()
print "H2OFrame '{}': \t {} obs. of {} variables(s)".format(self._id,nr,nc)
for i in range(nc):
print "$ {} {}: ".format(cn[i], ' '*(width-max(0,len(cn[i])))),
if isfactor[i]:
nl = numlevels[i]
print "Factor w/ {} level(s) {},..: ".format(nl, '"' + '","'.join(zip(*lvls)[i]) + '"'),
print " ".join(it[0] for it in h2o.as_list(self[:10,i].match(list(zip(*lvls)[i])), False)[1:]),
print "..."
else:
print "num {} ...".format(" ".join(it[0] for it in h2o.as_list(self[:10,i], False)[1:]))
def as_data_frame(self, nrows=None, skiprows=None, ncols=None, use_pandas=True):
"""
Obtain the dataset as a python-local object (pandas frame if possible, list otherwise)
:param nrows: The number of rows for pandas to read. Default is None, which reads all rows.
:param skiprows: A list of rows to skip. Default is None, which skips no rows.
:param ncols: The number of columns for pandas to read. Default is None, which reads all columns.
:param use_pandas: A flag specifying whether or not to return a pandas DataFrame.
:return: A local python object (a list of lists of strings, each list is a row, if use_pandas=False, otherwise a
pandas DataFrame) containing this H2OFrame instance's data.
"""
self._eager()
url = 'http://' + h2o.H2OConnection.ip() + ':' + str(h2o.H2OConnection.port()) + "/3/DownloadDataset?frame_id=" + urllib.quote(self._id) + "&hex_string=false"
response = urllib2.urlopen(url)
if h2o.can_use_pandas() and use_pandas:
import pandas
df = pandas.read_csv(response,low_memory=False,nrows=nrows,skiprows=skiprows,usecols=None if ncols is None else range(ncols))
time_cols = []
category_cols = []
if self.types is not None:
for col_name in self.names[:ncols]:
type = self.types[col_name]
if type.lower() == 'time': time_cols.append(col_name)
elif type.lower() == 'enum': category_cols.append(col_name)
#change Time to pandas datetime
if time_cols:
#hacky way to get the utc offset
from datetime import datetime
sample_timestamp = 1380610868
utc_offset = 1000 * ((datetime.utcfromtimestamp(sample_timestamp) - datetime.fromtimestamp(sample_timestamp)).total_seconds())
try:
df[time_cols] = (df[time_cols] - utc_offset).astype('datetime64[ms]')
except pandas.tslib.OutOfBoundsDatetime:
pass
#change Enum to pandas category
for cat_col in category_cols: #for loop is required
df[cat_col] = df[cat_col].astype('category')
return df
else:
cr = csv.reader(response)
rows = []
for row in cr: rows.append([''] if row == [] else row[:ncols])
if not skiprows:
return rows[:None if nrows is None else nrows+1]
else:
return [rows[0]] + rows[skiprows[-1]+1:]
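# Editor's note (usage sketch): with pandas installed, `fr.as_data_frame()` returns a
# pandas.DataFrame; with use_pandas=False it returns a list of row lists, e.g.
#   df = fr.as_data_frame(nrows=100)
#   rows = fr.as_data_frame(use_pandas=False)   # [['col1', 'col2'], ['1', 'a'], ...]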
# Find a named H2OVec and return the zero-based index for it. Error if name is missing
def _find_idx(self,name):
self._eager()
for i,v in enumerate(self._col_names):
if name == v: return i
raise ValueError("Name " + name + " not in Frame")
def index(self,name):
self._eager()
return self._find_idx(name)
def flatten(self):
return H2OFrame(expr=ExprNode("flatten",self))._scalar()
def __getitem__(self, item):
"""
Frame slicing.
Supports R-like row and column slicing.
Examples:
fr[0:5,:] # first 5 rows, all columns
fr[fr[0] > 1, :] # all rows greater than 1 in the first column, all columns
fr[[1,5,6]] # columns 1, 5, and 6
fr[0:50, [1,2,3]] # first 50 rows, columns 1,2, and 3
:param item: A tuple, a list, a string, or an int.
If a tuple, then this indicates both row and column selection. The tuple
must be exactly length 2.
If a list, then this indicates column selection.
If an int, then this indicates a single column to be retrieved at the index.
If a string, then slice on the column with this name.
:return: An H2OFrame.
"""
if isinstance(item, (int,basestring,list)): return H2OFrame(expr=ExprNode("cols",self,item)) # just columns
elif isinstance(item, slice):
item = slice(item.start,min(self.ncol,item.stop))
return H2OFrame(expr=ExprNode("cols",self,item))
elif isinstance(item, H2OFrame): return H2OFrame(expr=ExprNode("rows",self,item)) # just rows
elif isinstance(item, tuple):
rows = item[0]
cols = item[1]
allrows = False
allcols = False
if isinstance(cols, slice): allcols = all([a is None for a in [cols.start,cols.step,cols.stop]])
if isinstance(rows, slice): allrows = all([a is None for a in [rows.start,rows.step,rows.stop]])
if allrows and allcols: return self # fr[:,:] -> all rows and columns.. return self
if allrows: return H2OFrame(expr=ExprNode("cols",self,item[1])) # fr[:,cols] -> really just a column slice
if allcols: return H2OFrame(expr=ExprNode("rows",self,item[0])) # fr[rows,:] -> really just a row slices
res = H2OFrame(expr=ExprNode("rows", ExprNode("cols",self,item[1]),item[0]))
return res.flatten() if isinstance(item[0], (basestring,int)) and isinstance(item[1],(basestring,int)) else res
def __setitem__(self, b, c):
"""
Replace a column in an H2OFrame.
:param b: A 0-based index or a column name.
:param c: The vector that 'b' is replaced with.
:return: Returns this H2OFrame.
"""
col_expr=None
row_expr=None
colname=None
if isinstance(b, basestring):
if b in self.col_names: col_expr = self.col_names.index(b)
else:
col_expr = self._ncols
colname = b
elif isinstance(b, int): col_expr = b
elif isinstance(b, tuple):
col_expr = b[1]
row_expr = b[0]
if isinstance(col_expr, basestring):
if col_expr not in self.col_names:
colname = col_expr
col_expr = self._ncols
elif isinstance(col_expr, slice):
if col_expr.start is None and col_expr.stop is None:
col_expr = slice(0,self.ncol)
elif col_expr==-1: col_expr = slice(0,self.ncol)
elif isinstance(b, H2OFrame): row_expr = b
else: row_expr = slice(0,self.nrow)
if row_expr is None: row_expr = slice(0,self.nrow)
if col_expr is None: col_expr = slice(0,self.ncol)
src = c._frame() if isinstance(c,H2OFrame) else float("nan") if c is None else c
expr = ExprNode("=",self,src,col_expr,row_expr) if colname is None else ExprNode("=",self,src,col_expr,row_expr,colname)
h2o.rapids(ExprNode._collapse_sb(expr._eager()), self._id)
self._update()
def __int__(self): return int(self._scalar())
def __float__(self): return self._scalar()
def __del__(self):
if not H2OFrame.COUNTING:
H2OFrame.dropped_instances.append(self._id)
return
if not self._keep and self._computed: h2o.remove(self)
@staticmethod
def del_dropped():
live_frames = list(H2OFrame.get_instance_ids())
dead_frames = H2OFrame.dropped_instances
for fr in dead_frames:
if fr not in live_frames:
h2o.remove(fr)
H2OFrame.dropped_instances = []
def keep(self):
self._keep = True
return self
def drop(self, i):
"""
Returns a Frame with the column at index i dropped.
:param i: Column to drop
:return: Returns an H2OFrame
"""
if isinstance(i, basestring): i = self._find_idx(i)
return H2OFrame(expr=ExprNode("cols", self,-(i+1)))._frame()
def pop(self,i):
"""
Pop a column out of an H2OFrame.
:param i: The index or name of the column to pop.
:return: The column dropped from the frame.
"""
if isinstance(i, basestring): i=self._find_idx(i)
col = H2OFrame(expr=ExprNode("pop",self,i))._frame()
if self._keep: col.keep()
self._update()
return col
def __len__(self):
"""
:return: Number of rows
"""
return self.nrow
def quantile(self, prob=None, combine_method="interpolate"):
"""
Compute quantiles over a given H2OFrame.
:param prob: A list of probabilities, default is [0.01,0.1,0.25,0.333,0.5,0.667,0.75,0.9,0.99]. You may provide any sequence of any length.
:param combine_method: For even samples, how to combine quantiles. Should be one of ["interpolate", "average", "low", "hi"]
:return: an H2OFrame containing the quantiles and probabilities.
"""
if len(self) == 0: return self
if not prob: prob=[0.01,0.1,0.25,0.333,0.5,0.667,0.75,0.9,0.99]
return H2OFrame(expr=ExprNode("quantile",self,prob,combine_method))._frame()
def cbind(self,data):
"""
:param data: H2OFrame or H2OVec to cbind to self
:return: An H2OFrame with the columns of `data` appended to this frame.
"""
return H2OFrame(expr=ExprNode("cbind", self, data))
def rbind(self, data):
"""
Combine H2O Datasets by Rows.
Takes a sequence of H2O data sets and combines them by rows.
:param data: an H2OFrame
:return: self, with data appended (row-wise)
"""
if not isinstance(data, H2OFrame): raise ValueError("`data` must be an H2OFrame, but got {0}".format(type(data)))
return H2OFrame(expr=ExprNode("rbind", self, data))
def split_frame(self, ratios=[0.75], destination_frames=""):
"""
Split a frame into distinct subsets of size determined by the given ratios.
The number of subsets is always 1 more than the number of ratios given.
:param ratios: The fraction of rows for each split.
:param destination_frames: names of the split frames
:return: A list of H2OFrames (one per split).
class ClassNotFound(NotFound):
    msg_fmt = _("Class %(class_name)s could not be found: %(exception)s")
class NotAllowed(RackException):
msg_fmt = _("Action not allowed.")
class ImageRotationNotAllowed(RackException):
msg_fmt = _("Rotation is not allowed for snapshots")
class RotationRequiredForBackup(RackException):
msg_fmt = _("Rotation param is required for backup image_type")
class KeyPairExists(RackException):
ec2_code = 'InvalidKeyPair.Duplicate'
msg_fmt = _("Key pair '%(key_name)s' already exists.")
class InstanceExists(RackException):
msg_fmt = _("Instance %(name)s already exists.")
class FlavorExists(RackException):
msg_fmt = _("Flavor with name %(name)s already exists.")
class FlavorIdExists(RackException):
msg_fmt = _("Flavor with ID %(flavor_id)s already exists.")
class FlavorAccessExists(RackException):
msg_fmt = _("Flavor access already exists for flavor %(flavor_id)s "
"and project %(project_id)s combination.")
class InvalidSharedStorage(RackException):
msg_fmt = _("%(path)s is not on shared storage: %(reason)s")
class InvalidLocalStorage(RackException):
msg_fmt = _("%(path)s is not on local storage: %(reason)s")
class MigrationError(RackException):
msg_fmt = _("Migration error: %(reason)s")
class MigrationPreCheckError(MigrationError):
msg_fmt = _("Migration pre-check error: %(reason)s")
class MalformedRequestBody(RackException):
msg_fmt = _("Malformed message body: %(reason)s")
class ConfigNotFound(RackException):
msg_fmt = _("Could not find config at %(path)s")
class PasteAppNotFound(RackException):
msg_fmt = _("Could not load paste app '%(name)s' from %(path)s")
class CannotResizeToSameFlavor(RackException):
msg_fmt = _("When resizing, instances must change flavor!")
class ResizeError(RackException):
msg_fmt = _("Resize error: %(reason)s")
class CannotResizeDisk(RackException):
msg_fmt = _("Server disk was unable to be resized because: %(reason)s")
class FlavorMemoryTooSmall(RackException):
msg_fmt = _("Flavor's memory is too small for requested image.")
class FlavorDiskTooSmall(RackException):
msg_fmt = _("Flavor's disk is too small for requested image.")
class InsufficientFreeMemory(RackException):
msg_fmt = _("Insufficient free memory on compute node to start %(uuid)s.")
class NoValidHost(RackException):
msg_fmt = _("No valid host was found. %(reason)s")
class QuotaError(RackException):
ec2_code = 'ResourceLimitExceeded'
msg_fmt = _("Quota exceeded: code=%(code)s")
code = 413
headers = {'Retry-After': 0}
safe = True
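# Editor's note: a hedged sketch of how these exception classes are typically raised;
# the RackException base class (defined above this excerpt) is assumed to interpolate
# `msg_fmt` with the keyword arguments it receives, in the usual OpenStack style.
def _example_raise_and_format():
    try:
        raise NoValidHost(reason="no host reported enough free resources")
    except RackException as exc:
        return str(exc)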
class TooManyInstances(QuotaError):
msg_fmt = _("Quota exceeded for %(overs)s: Requested %(req)s,"
" but already used %(used)d of %(allowed)d %(resource)s")
class FloatingIpLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of floating ips exceeded")
class FixedIpLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of fixed ips exceeded")
class MetadataLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of metadata items exceeds %(allowed)d")
class OnsetFileLimitExceeded(QuotaError):
msg_fmt = _("Personality file limit exceeded")
class OnsetFilePathLimitExceeded(QuotaError):
msg_fmt = _("Personality file path too long")
class OnsetFileContentLimitExceeded(QuotaError):
msg_fmt = _("Personality file content too long")
class KeypairLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of key pairs exceeded")
class SecurityGroupLimitExceeded(QuotaError):
ec2_code = 'SecurityGroupLimitExceeded'
msg_fmt = _("Maximum number of security groups or rules exceeded")
class PortLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of ports exceeded")
class AggregateError(RackException):
msg_fmt = _("Aggregate %(aggregate_id)s: action '%(action)s' "
"caused an error: %(reason)s.")
class AggregateNotFound(NotFound):
msg_fmt = _("Aggregate %(aggregate_id)s could not be found.")
class AggregateNameExists(RackException):
msg_fmt = _("Aggregate %(aggregate_name)s already exists.")
class AggregateHostNotFound(NotFound):
msg_fmt = _("Aggregate %(aggregate_id)s has no host %(host)s.")
class AggregateMetadataNotFound(NotFound):
msg_fmt = _("Aggregate %(aggregate_id)s has no metadata with "
"key %(metadata_key)s.")
class AggregateHostExists(RackException):
msg_fmt = _("Aggregate %(aggregate_id)s already has host %(host)s.")
class FlavorCreateFailed(RackException):
msg_fmt = _("Unable to create flavor")
class InstancePasswordSetFailed(RackException):
msg_fmt = _("Failed to set admin password on %(instance)s "
"because %(reason)s")
safe = True
class DuplicateVlan(RackException):
msg_fmt = _("Detected existing vlan with id %(vlan)d")
class CidrConflict(RackException):
msg_fmt = _("There was a conflict when trying to complete your request.")
code = 409
class InstanceNotFound(NotFound):
ec2_code = 'InvalidInstanceID.NotFound'
msg_fmt = _("Instance %(instance_id)s could not be found.")
class InstanceInfoCacheNotFound(NotFound):
msg_fmt = _("Info cache for instance %(instance_uuid)s could not be "
"found.")
class NodeNotFound(NotFound):
msg_fmt = _("Node %(node_id)s could not be found.")
class NodeNotFoundByUUID(NotFound):
msg_fmt = _("Node with UUID %(node_uuid)s could not be found.")
class MarkerNotFound(NotFound):
msg_fmt = _("Marker %(marker)s could not be found.")
class InvalidInstanceIDMalformed(Invalid):
ec2_code = 'InvalidInstanceID.Malformed'
msg_fmt = _("Invalid id: %(val)s (expecting \"i-...\").")
class CouldNotFetchImage(RackException):
msg_fmt = _("Could not fetch image %(image_id)s")
class CouldNotUploadImage(RackException):
msg_fmt = _("Could not upload image %(image_id)s")
class TaskAlreadyRunning(RackException):
msg_fmt = _("Task %(task_name)s is already running on host %(host)s")
class TaskNotRunning(RackException):
msg_fmt = _("Task %(task_name)s is not running on host %(host)s")
class InstanceIsLocked(InstanceInvalidState):
msg_fmt = _("Instance %(instance_uuid)s is locked")
class ConfigDriveInvalidValue(Invalid):
msg_fmt = _("Invalid value for Config Drive option: %(option)s")
class ConfigDriveMountFailed(RackException):
msg_fmt = _("Could not mount vfat config drive. %(operation)s failed. "
"Error: %(error)s")
class ConfigDriveUnknownFormat(RackException):
msg_fmt = _("Unknown config drive format %(format)s. Select one of "
"iso9660 or vfat.")
class InterfaceAttachFailed(Invalid):
msg_fmt = _("Failed to attach network adapter device to %(instance)s")
class InterfaceDetachFailed(Invalid):
msg_fmt = _("Failed to detach network adapter device from %(instance)s")
class InstanceUserDataTooLarge(RackException):
msg_fmt = _("User data too large. User data must be no larger than "
"%(maxsize)s bytes once base64 encoded. Your data is "
"%(length)d bytes")
class InstanceUserDataMalformed(RackException):
msg_fmt = _("User data needs to be valid base 64.")
class UnexpectedTaskStateError(RackException):
msg_fmt = _("Unexpected task state: expecting %(expected)s but "
"the actual state is %(actual)s")
class UnexpectedDeletingTaskStateError(UnexpectedTaskStateError):
pass
class InstanceActionNotFound(RackException):
msg_fmt = _("Action for request_id %(request_id)s on instance"
" %(instance_uuid)s not found")
class InstanceActionEventNotFound(RackException):
msg_fmt = _("Event %(event)s not found for action id %(action_id)s")
class UnexpectedVMStateError(RackException):
msg_fmt = _("Unexpected VM state: expecting %(expected)s but "
"the actual state is %(actual)s")
class CryptoCAFileNotFound(FileNotFound):
msg_fmt = _("The CA file for %(project)s could not be found")
class CryptoCRLFileNotFound(FileNotFound):
msg_fmt = _("The CRL file for %(project)s could not be found")
class InstanceRecreateNotSupported(Invalid):
msg_fmt = _('Instance recreate is not supported.')
class ServiceGroupUnavailable(RackException):
msg_fmt = _("The service from servicegroup driver %(driver)s is "
"temporarily unavailable.")
class DBNotAllowed(RackException):
msg_fmt = _('%(binary)s attempted direct database access which is '
'not allowed by policy')
class UnsupportedVirtType(Invalid):
msg_fmt = _("Virtualization type '%(virt)s' is not supported by "
"this compute driver")
class UnsupportedHardware(Invalid):
msg_fmt = _("Requested hardware '%(model)s' is not supported by "
"the '%(virt)s' virt driver")
class Base64Exception(RackException):
msg_fmt = _("Invalid Base 64 data for file %(path)s")
class BuildAbortException(RackException):
msg_fmt = _("Build of instance %(instance_uuid)s aborted: %(reason)s")
class RescheduledException(RackException):
msg_fmt = _("Build of instance %(instance_uuid)s was re-scheduled: "
"%(reason)s")
class ShadowTableExists(RackException):
msg_fmt = _("Shadow table with name %(name)s already exists.")
class InstanceFaultRollback(RackException):
def __init__(self, inner_exception=None):
message = _("Instance rollback performed due to: %s")
self.inner_exception = inner_exception
super(InstanceFaultRollback, self).__init__(message % inner_exception)
class UnsupportedObjectError(RackException):
msg_fmt = _('Unsupported object type %(objtype)s')
class OrphanedObjectError(RackException):
msg_fmt = _('Cannot call %(method)s on orphaned %(objtype)s object')
class IncompatibleObjectVersion(RackException):
msg_fmt = _('Version %(objver)s of %(objname)s is not supported')
class ObjectActionError(RackException):
msg_fmt = _('Object action %(action)s failed because: %(reason)s')
class CoreAPIMissing(RackException):
msg_fmt = _("Core API extensions are missing: %(missing_apis)s")
class AgentError(RackException):
msg_fmt = _('Error during following call to agent: %(method)s')
class AgentTimeout(AgentError):
msg_fmt = _('Unable to contact guest agent. '
'The following call timed out: %(method)s')
class AgentNotImplemented(AgentError):
msg_fmt = _('Agent does not support the call: %(method)s')
class InstanceGroupNotFound(NotFound):
msg_fmt = _("Instance group %(group_uuid)s could not be found.")
class InstanceGroupIdExists(RackException):
msg_fmt = _("Instance group %(group_uuid)s already exists.")
class InstanceGroupMetadataNotFound(NotFound):
msg_fmt = _("Instance group %(group_uuid)s has no metadata with "
"key %(metadata_key)s.")
class InstanceGroupMemberNotFound(NotFound):
msg_fmt = _("Instance group %(group_uuid)s has no member with "
"id %(instance_id)s.")
class InstanceGroupPolicyNotFound(NotFound):
msg_fmt = _("Instance group %(group_uuid)s has no policy %(policy)s.")
class PluginRetriesExceeded(RackException):
msg_fmt = _("Number of retries to plugin (%(num_retries)d) exceeded.")
class ImageDownloadModuleError(RackException):
msg_fmt = _("There was an error with the download module %(module)s. "
"%(reason)s")
class ImageDownloadModuleMetaDataError(ImageDownloadModuleError):
msg_fmt = _("The metadata for this location will not work with this "
"module %(module)s. %(reason)s.")
class ImageDownloadModuleNotImplementedError(ImageDownloadModuleError):
msg_fmt = _("The method %(method_name)s is not implemented.")
class ImageDownloadModuleConfigurationError(ImageDownloadModuleError):
msg_fmt = _("The module %(module)s is misconfigured: %(reason)s.")
class ResourceMonitorError(RackException):
msg_fmt = _("Error when creating resource monitor: %(monitor)s")
class PciDeviceWrongAddressFormat(RackException):
msg_fmt = _("The PCI address %(address)s has an incorrect format.")
class PciDeviceNotFoundById(NotFound):
msg_fmt = _("PCI device %(id)s not found")
class PciDeviceNotFound(RackException):
msg_fmt = _("PCI Device %(node_id)s:%(address)s not found.")
class PciDeviceInvalidStatus(RackException):
msg_fmt = _(
"PCI device %(compute_node_id)s:%(address)s is %(status)s "
"instead of %(hopestatus)s")
class PciDeviceInvalidOwner(RackException):
msg_fmt = _(
"PCI device %(compute_node_id)s:%(address)s is owned by %(owner)s "
"instead of %(hopeowner)s")
class PciDeviceRequestFailed(RackException):
msg_fmt = _(
"PCI device request (%requests)s failed")
class PciDevicePoolEmpty(RackException):
msg_fmt = _(
"Attempt to consume PCI device %(compute_node_id)s:%(address)s "
"from empty pool")
class PciInvalidAlias(RackException):
msg_fmt = _("Invalid PCI alias definition: %(reason)s")
class PciRequestAliasNotDefined(RackException):
msg_fmt = _("PCI alias %(alias)s is not defined")
class MissingParameter(RackException):
ec2_code = 'MissingParameter'
msg_fmt = _("Not enough parameters: %(reason)s")
code = 400
class PciConfigInvalidWhitelist(Invalid):
msg_fmt = _("Invalid PCI devices Whitelist config %(reason)s")
class PciTrackerInvalidNodeId(RackException):
msg_fmt = _("Cannot change %(node_id)s to %(new_node_id)s")
class InternalError(RackException):
ec2_code = 'InternalError'
msg_fmt = "%(err)s"
class PciDevicePrepareFailed(RackException):
msg_fmt = _("Failed to prepare PCI device %(id)s for instance "
"%(instance_uuid)s: %(reason)s")
class PciDeviceDetachFailed(RackException):
msg_fmt = _("Failed to detach PCI device %(dev)s: %(reason)s")
class PciDeviceUnsupportedHypervisor(RackException):
msg_fmt = _("%(type)s hypervisor does not support PCI devices")
class KeyManagerError(RackException):
msg_fmt = _("Key manager error: %(reason)s")
class InvalidVideoMode(Invalid):
msg_fmt = _("Provided video model (%(model)s) is not supported.")
class RngDeviceNotExist(Invalid):
msg_fmt = _("The provided RNG device path: (%(path)s) is not "
"present on the host.")
class RequestedVRamTooHigh(RackException):
msg_fmt = _("The requested amount of video memory %(req_vram)d is higher "
"than the maximum allowed by flavor %(max_vram)d.")
class InvalidWatchdogAction(Invalid):
msg_fmt = _("Provided watchdog action (%(action)s) is not supported.")
class NoBlockMigrationForConfigDriveInLibVirt(RackException):
import warnings
import torch
from torch.autograd import gradcheck, gradgradcheck
from xitorch.interpolate.interp1 import Interp1D
from xitorch._tests.utils import device_dtype_float_test
@device_dtype_float_test(only64=True, additional_kwargs={
"bc_type": ["clamped", "natural", "not-a-knot", "periodic", None],
"scramble": [False, True]
})
def test_interp1_cspline(dtype, device, bc_type, scramble):
dtype_device_kwargs = {"dtype": dtype, "device": device}
x = torch.tensor([0.0, 0.2, 0.3, 0.5, 0.8, 1.0], **dtype_device_kwargs).requires_grad_()
if bc_type != "periodic":
y1 = torch.tensor([1.0, 1.5, 2.1, 1.1, 2.3, 2.5], **dtype_device_kwargs).requires_grad_()
y2 = torch.tensor([[1.0, 1.5, 2.1, 1.1, 2.3, 2.5],
[0.8, 1.2, 2.2, 0.4, 3.2, 1.2]], **dtype_device_kwargs).requires_grad_()
else:
y1 = torch.tensor([1.0, 1.5, 2.1, 1.1, 2.3, 1.0], **dtype_device_kwargs).requires_grad_()
y2 = torch.tensor([[1.0, 1.5, 2.1, 1.1, 2.3, 1.0],
[0.8, 1.2, 2.2, 0.4, 3.2, 0.8]], **dtype_device_kwargs).requires_grad_()
# points are well inside to avoid extrapolation in numerical gradient calculations
xq1 = torch.linspace(0.05, 0.95, 10, **dtype_device_kwargs)
xq2 = torch.linspace(0.05, 0.95, 4, **dtype_device_kwargs)
scramble = scramble and bc_type != "periodic"
if scramble:
idx1 = torch.randperm(len(xq1))
idx2 = torch.randperm(len(xq2))
xq1 = xq1[..., idx1]
xq2 = xq2[..., idx2]
xq1 = xq1.requires_grad_()
xq2 = xq2.requires_grad_()
# true results (obtained from scipy.interpolate.CubicSpline)
# from scipy.interpolate import CubicSpline
# print("yq11:", CubicSpline(x.detach(), y1.detach(), bc_type=bc_type)(xq1.detach()))
# print("yq12:", CubicSpline(x.detach(), y1.detach(), bc_type=bc_type)(xq2.detach()))
# print("yq21:", CubicSpline(x.detach(), y2[1].detach(), bc_type=bc_type)(xq1.detach()))
# print("yq22:", CubicSpline(x.detach(), y2[1].detach(), bc_type=bc_type)(xq2.detach()))
# get the y_trues from scipy
if bc_type == "clamped":
yq11_true = torch.tensor([1.01599131, 1.23547394, 1.85950467, 2.02868906, 1.37102567, 1.04108172,
1.42061722, 2.04849297, 2.4435166, 2.5061722],
**dtype_device_kwargs)
yq12_true = torch.tensor([1.01599131, 2.02868906, 1.42061722, 2.5061722], **dtype_device_kwargs)
yq21_true = torch.tensor([[1.01599131, 1.23547394, 1.85950467, 2.02868906, 1.37102567, 1.04108172,
1.42061722, 2.04849297, 2.4435166, 2.5061722],
[0.76740145, 0.85220436, 1.79469225, 2.01628631, 0.78122407, 0.53357346,
1.80606846, 3.07316928, 2.80705394, 1.48568465]],
**dtype_device_kwargs)
yq22_true = torch.tensor([[1.01599131, 2.02868906, 1.42061722, 2.5061722],
[0.76740145, 2.01628631, 1.80606846, 1.48568465]],
**dtype_device_kwargs)
elif bc_type == "not-a-knot" or bc_type is None: # default choice
yq11_true = torch.tensor([0.66219741, 1.06231845, 1.8959342, 2.01058952, 1.36963168, 1.02084725,
1.33918614, 1.97824847, 2.56027129, 2.70749165],
**dtype_device_kwargs)
yq12_true = torch.tensor([0.66219741, 2.01058952, 1.33918614, 2.70749165], **dtype_device_kwargs)
yq21_true = torch.tensor([[0.66219741, 1.06231845, 1.8959342, 2.01058952, 1.36963168, 1.02084725,
1.33918614, 1.97824847, 2.56027129, 2.70749165],
[-0.01262521, 0.47242487, 1.87087507, 1.99610601, 0.81846828, 0.39785058,
1.33699082, 2.68769477, 3.43433639, 2.56128965]],
**dtype_device_kwargs)
yq22_true = torch.tensor([[0.66219741, 2.01058952, 1.33918614, 2.70749165],
[-0.01262521, 1.99610601, 1.33699082, 2.56128965]],
**dtype_device_kwargs)
elif bc_type == "natural":
yq11_true = torch.tensor([1.03045416, 1.24263582, 1.85784168, 2.03025785, 1.37277695, 1.03808008,
1.41177844, 2.04167374, 2.45428693, 2.52449066],
**dtype_device_kwargs)
yq12_true = torch.tensor([1.03045416, 2.03025785, 1.41177844, 2.52449066], **dtype_device_kwargs)
yq21_true = torch.tensor([[1.03045416, 1.24263582, 1.85784168, 2.03025785, 1.37277695, 1.03808008,
1.41177844, 2.04167374, 2.45428693, 2.52449066],
[0.70073217, 0.82102504, 1.79853565, 2.02728778, 0.8104202, 0.46318855,
1.57916384, 2.89143794, 3.09930603, 1.98521859]],
**dtype_device_kwargs)
yq22_true = torch.tensor([[1.03045416, 2.03025785, 1.41177844, 2.52449066],
[0.70073217, 2.02728778, 1.57916384, 1.98521859]],
**dtype_device_kwargs)
elif bc_type == "periodic":
yq11_true = torch.tensor([0.88184647, 1.16754002, 1.87806756, 1.99916778, 1.3241823, 1.13211374,
1.69017244, 2.25696675, 2.09041608, 1.31247223],
**dtype_device_kwargs)
yq12_true = torch.tensor([0.88184647, 1.99916778, 1.69017244, 1.31247223], **dtype_device_kwargs)
yq21_true = torch.tensor([[0.88184647, 1.16754002, 1.87806756, 1.99916778, 1.3241823, 1.13211374,
1.69017244, 2.25696675, 2.09041608, 1.31247223],
[0.46559344, 0.70408188, 1.82662341, 1.99677022, 0.77170332, 0.52939286,
1.76540093, 3.03216372, 2.8731096, 1.44347038]],
**dtype_device_kwargs)
yq22_true = torch.tensor([[0.88184647, 1.99916778, 1.69017244, 1.31247223],
[0.46559344, 1.99677022, 1.76540093, 1.44347038]],
**dtype_device_kwargs)
if scramble:
yq11_true = yq11_true[..., idx1]
yq12_true = yq12_true[..., idx2]
yq21_true = yq21_true[..., idx1]
yq22_true = yq22_true[..., idx2]
def interp(x, y, xq):
return Interp1D(x, y, method="cspline", bc_type=bc_type)(xq)
yq11 = interp(x, y1, xq1)
yq12 = interp(x, y1, xq2)
yq21 = interp(x, y2, xq1)
yq22 = interp(x, y2, xq2)
# import matplotlib.pyplot as plt
# from scipy.interpolate import CubicSpline
# xx = torch.linspace(0, 1, 1000, **dtype_device_kwargs)
# xx2 = torch.linspace(-1, 2, 1000, **dtype_device_kwargs)
# plt.plot(xx2, interp(x, y1, xx2).detach().numpy())
# plt.plot(xx, CubicSpline(x.detach(), y1.detach(), bc_type=bc_type)(xx.detach()))
# plt.plot(x.detach(), y1.detach(), 'x')
# plt.show()
if bc_type == "periodic":
rtol = 2e-2
else:
rtol = 1e-3
assert torch.allclose(yq11, yq11_true, rtol=rtol)
assert torch.allclose(yq12, yq12_true, rtol=rtol)
assert torch.allclose(yq21, yq21_true, rtol=rtol)
assert torch.allclose(yq22, yq22_true, rtol=rtol)
# skip the gradient check if bc_type is None
if bc_type is None:
return
gradcheck(interp, (x, y1, xq1))
gradcheck(interp, (x, y1, xq2))
gradcheck(interp, (x, y2, xq1))
gradcheck(interp, (x, y2, xq2))
gradgradcheck(interp, (x, y1, xq1))
gradgradcheck(interp, (x, y1, xq2))
gradgradcheck(interp, (x, y2, xq1))
gradgradcheck(interp, (x, y2, xq2))
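# Editor's note: a minimal standalone sketch of the Interp1D API exercised by the tests
# above (cubic-spline interpolation of a 1D table evaluated at new query points).
def _example_interp1d_usage():
    x = torch.linspace(0.0, 1.0, 6, dtype=torch.float64)
    y = x ** 2
    xq = torch.linspace(0.05, 0.95, 7, dtype=torch.float64)
    yq = Interp1D(x, y, method="cspline", bc_type="natural")(xq)
    return yq  # tensor of shape (7,)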
@device_dtype_float_test(only64=True, additional_kwargs={
"scramble": [False, True]
})
def test_interp1_linear(dtype, device, scramble):
dtype_device_kwargs = {"dtype": dtype, "device": device}
x = torch.tensor([0.0, 0.2, 0.3, 0.5, 0.8, 1.0], **dtype_device_kwargs).requires_grad_()
y1 = torch.tensor([1.0, 1.5, 2.1, 1.1, 2.3, 2.5], **dtype_device_kwargs).requires_grad_()
y2 = torch.tensor([[1.0, 1.5, 2.1, 1.1, 2.3, 2.5],
[0.8, 1.2, 2.2, 0.4, 3.2, 1.2]], **dtype_device_kwargs).requires_grad_()
# points are well inside to avoid extrapolation in numerical gradient calculations
xq1 = torch.linspace(0.05, 0.95, 10, **dtype_device_kwargs)
xq2 = torch.linspace(0.05, 0.95, 4, **dtype_device_kwargs)
if scramble:
idx1 = torch.randperm(len(xq1))
idx2 = torch.randperm(len(xq2))
xq1 = xq1[..., idx1]
xq2 = xq2[..., idx2]
xq1 = xq1.requires_grad_()
xq2 = xq2.requires_grad_()
# # true results (obtained from scipy.interpolate.interp1d)
# from scipy.interpolate import interp1d
# print("yq11:", interp1d(x.detach(), y1.detach())(xq1.detach()))
# print("yq12:", interp1d(x.detach(), y1.detach())(xq2.detach()))
# print("yq21:", interp1d(x.detach(), y2[1].detach())(xq1.detach()))
# print("yq22:", interp1d(x.detach(), y2[1].detach())(xq2.detach()))
yq11_true = torch.tensor([1.125, 1.375, 1.8, 1.85, 1.35, 1.3, 1.7, 2.1, 2.35, 2.45],
**dtype_device_kwargs)
yq12_true = torch.tensor([1.125, 1.85, 1.7, 2.45], **dtype_device_kwargs)
yq21_true = torch.tensor([[1.125, 1.375, 1.8, 1.85, 1.35, 1.3, 1.7, 2.1, 2.35, 2.45],
[0.9, 1.1, 1.7, 1.75, 0.85, 0.86666667, 1.8, 2.73333333, 2.7, 1.7]],
**dtype_device_kwargs)
yq22_true = torch.tensor([[1.125, 1.85, 1.7, 2.45],
[0.9, 1.75, 1.8, 1.7]],
**dtype_device_kwargs)
if scramble:
yq11_true = yq11_true[..., idx1]
yq12_true = yq12_true[..., idx2]
yq21_true = yq21_true[..., idx1]
yq22_true = yq22_true[..., idx2]
def interp(x, y, xq):
return Interp1D(x, y, method="linear")(xq)
yq11 = interp(x, y1, xq1)
yq12 = interp(x, y1, xq2)
yq21 = interp(x, y2, xq1)
yq22 = interp(x, y2, xq2)
# import matplotlib.pyplot as plt
# from scipy.interpolate import interp1d
# xx = torch.linspace(0, 1, 1000, **dtype_device_kwargs)
# xx2 = torch.linspace(-1, 2, 1000, **dtype_device_kwargs)
# plt.plot(xx2, interp(x, y1, xx2).detach().numpy())
# plt.plot(xx, interp1d(x.detach(), y1.detach())(xx.detach()))
# plt.plot(x.detach(), y1.detach(), 'x')
# plt.show()
assert torch.allclose(yq11, yq11_true)
assert torch.allclose(yq12, yq12_true)
assert torch.allclose(yq21, yq21_true)
assert torch.allclose(yq22, yq22_true)
gradcheck(interp, (x, y1, xq1))
gradcheck(interp, (x, y1, xq2))
gradcheck(interp, (x, y2, xq1))
gradcheck(interp, (x, y2, xq2))
gradgradcheck(interp, (x, y1, xq1))
gradgradcheck(interp, (x, y1, xq2))
gradgradcheck(interp, (x, y2, xq1))
gradgradcheck(interp, (x, y2, xq2))
@device_dtype_float_test(only64=True)
def test_interp1_unsorted(dtype, device):
dtype_device_kwargs = {"dtype": dtype, "device": device}
x = torch.tensor([0.0, 0.2, 0.3, 0.5, 0.8, 1.0], **dtype_device_kwargs).requires_grad_()
y1 = torch.tensor([1.0, 1.5, 2.1, 1.1, 2.3, 2.5], **dtype_device_kwargs).requires_grad_()
y2 = torch.tensor([[1.0, 1.5, 2.1, 1.1, 2.3, 2.5],
[0.8, 1.2, 2.2, 0.4, 3.2, 1.2]], **dtype_device_kwargs).requires_grad_()
# points are well inside to avoid extrapolation in numerical gradient calculations
xq1 = torch.linspace(0.05, 0.95, 10, **dtype_device_kwargs)
xq2 = torch.linspace(0.05, 0.95, 4, **dtype_device_kwargs)
def interp(x, y, xq):
return Interp1D(x, y, method="linear")(xq)
def interp2(x, y, xq):
return Interp1D(x, method="linear")(xq, y)
# calculate the interpolated value with sorted x
yq11 = interp(x, y1, xq1)
yq12 = interp(x, y1, xq2)
yq21 = interp(x, y2, xq1)
yq22 = interp(x, y2, xq2)
# scramble x and y1 and y2
idx1 = torch.randperm(len(x))
x = x[..., idx1]
y1 = y1[..., idx1]
y2 = y2[..., idx1]
# calculate the interpolated value with unsorted x
yq11_u = interp(x, y1, xq1)
yq12_u = interp(x, y1, xq2)
yq21_u = interp(x, y2, xq1)
yq22_u = interp(x, y2, xq2)
yq11_u2 = interp2(x, y1, xq1)
yq12_u2 = interp2(x, y1, xq2)
yq21_u2 = interp2(x, y2, xq1)
yq22_u2 = interp2(x, y2, xq2)
assert torch.allclose(yq11, yq11_u)
assert torch.allclose(yq12, yq12_u)
assert torch.allclose(yq21, yq21_u)
assert torch.allclose(yq22, yq22_u)
assert torch.allclose(yq11, yq11_u2)
assert torch.allclose(yq12, yq12_u2)
assert torch.allclose(yq21, yq21_u2)
assert torch.allclose(yq22, yq22_u2)
@device_dtype_float_test(only64=True, additional_kwargs={
"method": ["cspline", "linear"]
})
def test_interp1_editable_module(dtype, device, method):
dtype_device_kwargs = {"dtype": dtype, "device": device}
x = torch.tensor([0.0, 0.2, 0.3, 0.5, 0.8, 1.0], **dtype_device_kwargs).requires_grad_()
y = torch.tensor([[1.0, 1.5, 2.1, 1.1, 2.3, 2.5],
[0.8, 1.2, 2.2, 0.4, 3.2, 1.2]], **dtype_device_kwargs).requires_grad_()
xq = torch.linspace(0, 1, 10, **dtype_device_kwargs).requires_grad_()
cls1 = Interp1D(x, y, method=method)
cls2 = Interp1D(x, method=method)
with warnings.catch_warnings():
warnings.simplefilter("error")
cls1.assertparams(cls1.__call__, xq)
cls2.assertparams(cls2.__call__, xq, y)
@device_dtype_float_test(only64=True)
def test_extrap(dtype, device):
dtype_device_kwargs = {"dtype": dtype, "device": device}
x = torch.tensor([0.0, 0.2, 0.3, 0.5, 0.8, 1.0], **dtype_device_kwargs).requires_grad_()
y1 = torch.tensor([[1.0, 2.1, 1.5, 1.1, 2.3, 2.5],
[0.0, 1.2, 2.2, 0.4, 3.2, 1.2]], **dtype_device_kwargs).requires_grad_()
xq1 = torch.tensor([0.0, 1. / 3, 2. / 3, 3. / 3, -1. / 3, -1.0, -4. / 3, 4. / 3,
6. / 3, 7. / 3, 9. / 3], **dtype_device_kwargs).requires_grad_()
# true results (obtained from scipy.interpolate.CubicSpline)
nan = float("nan")
yq_nan_true = torch.tensor([
[1., 1.3127193, 1.7445744, 2.5, nan, nan, nan, nan, nan, nan, nan],
[0., 2.13368966, 1.82654566, 1.2, nan, nan, nan, nan, nan, nan, nan],
], **dtype_device_kwargs)
yq_mir_true = torch.tensor([
[1., 1.3127193, 1.7445744, 2.5, 1.3127193, 2.5, 1.7445744, 1.7445744, 1., 1.3127193, 2.5],
[0., 2.13368966, 1.82654566, 1.2, 2.13368966, 1.2, 1.82654566, 1.82654566, 0., 2.13368966, 1.2],
], **dtype_device_kwargs)
yq_bnd_true = torch.tensor([
# Repository: Grim-es/udon-pie-auto-completion
from typing import overload
from UdonPie import System
from UdonPie import UnityEngine
from UdonPie.Undefined import *
class AudioSource:
def __new__(cls, arg1=None):
'''
:returns: AudioSource
:rtype: UnityEngine.AudioSource
'''
pass
@staticmethod
def op_Implicit(arg1):
'''
:param arg1: Object
:type arg1: UnityEngine.Object
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def op_Equality(arg1, arg2):
'''
:param arg1: Object
:type arg1: UnityEngine.Object
:param arg2: Object
:type arg2: UnityEngine.Object
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def op_Inequality(arg1, arg2):
'''
:param arg1: Object
:type arg1: UnityEngine.Object
:param arg2: Object
:type arg2: UnityEngine.Object
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def set_priority(arg1):
'''
:param arg1: Int32
:type arg1: System.Int32 or int
'''
pass
@staticmethod
def get_mute():
'''
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def set_mute(arg1):
'''
:param arg1: Boolean
:type arg1: System.Boolean or bool
'''
pass
@staticmethod
def get_minDistance():
'''
:returns: Single
:rtype: System.Single
'''
pass
@staticmethod
def set_minDistance(arg1):
'''
:param arg1: Single
:type arg1: System.Single or float
'''
pass
@staticmethod
def get_maxDistance():
'''
:returns: Single
:rtype: System.Single
'''
pass
@staticmethod
def set_maxDistance(arg1):
'''
:param arg1: Single
:type arg1: System.Single or float
'''
pass
@staticmethod
def get_rolloffMode():
'''
:returns: AudioRolloffMode
:rtype: UnityEngine.AudioRolloffMode
'''
pass
@staticmethod
def set_rolloffMode(arg1):
'''
:param arg1: AudioRolloffMode
:type arg1: UnityEngine.AudioRolloffMode
'''
pass
@staticmethod
def GetOutputData(arg1, arg2):
'''
:param arg1: SingleArray
:type arg1: System.SingleArray
:param arg2: Int32
:type arg2: System.Int32 or int
'''
pass
@staticmethod
def GetSpectrumData(arg1, arg2, arg3):
'''
:param arg1: SingleArray
:type arg1: System.SingleArray
:param arg2: Int32
:type arg2: System.Int32 or int
:param arg3: FFTWindow
:type arg3: UnityEngine.FFTWindow
'''
pass
@staticmethod
def SetSpatializerFloat(arg1, arg2):
'''
:param arg1: Int32
:type arg1: System.Int32 or int
:param arg2: Single
:type arg2: System.Single or float
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def GetSpatializerFloat(arg1, arg2):
'''
:param arg1: Int32
:type arg1: System.Int32 or int
:param arg2: Undefined variable
:type arg2: SingleRef.SingleRef
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def SetAmbisonicDecoderFloat(arg1, arg2):
'''
:param arg1: Int32
:type arg1: System.Int32 or int
:param arg2: Single
:type arg2: System.Single or float
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def GetAmbisonicDecoderFloat(arg1, arg2):
'''
:param arg1: Int32
:type arg1: System.Int32 or int
:param arg2: Undefined variable
:type arg2: SingleRef.SingleRef
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def get_volume():
'''
:returns: Single
:rtype: System.Single
'''
pass
@staticmethod
def set_volume(arg1):
'''
:param arg1: Single
:type arg1: System.Single or float
'''
pass
@staticmethod
def get_pitch():
'''
:returns: Single
:rtype: System.Single
'''
pass
@staticmethod
def set_pitch(arg1):
'''
:param arg1: Single
:type arg1: System.Single or float
'''
pass
@staticmethod
def get_time():
'''
:returns: Single
:rtype: System.Single
'''
pass
@staticmethod
def set_time(arg1):
'''
:param arg1: Single
:type arg1: System.Single or float
'''
pass
@staticmethod
def get_timeSamples():
'''
:returns: Int32
:rtype: System.Int32
'''
pass
@staticmethod
def set_timeSamples(arg1):
'''
:param arg1: Int32
:type arg1: System.Int32 or int
'''
pass
@staticmethod
def get_clip():
'''
:returns: AudioClip
:rtype: UnityEngine.AudioClip
'''
pass
@staticmethod
def set_clip(arg1):
'''
:param arg1: AudioClip
:type arg1: UnityEngine.AudioClip
'''
pass
@staticmethod
@overload
def Play(arg1):
'''
:param arg1: UInt64
:type arg1: System.UInt64
'''
pass
@staticmethod
@overload
def Play():
pass
@staticmethod
def Play(arg1=None):
pass
@staticmethod
def PlayDelayed(arg1):
'''
:param arg1: Single
:type arg1: System.Single or float
'''
pass
@staticmethod
def PlayScheduled(arg1):
'''
:param arg1: Double
:type arg1: System.Double
'''
pass
@staticmethod
def SetScheduledStartTime(arg1):
'''
:param arg1: Double
:type arg1: System.Double
'''
pass
@staticmethod
def SetScheduledEndTime(arg1):
'''
:param arg1: Double
:type arg1: System.Double
'''
pass
@staticmethod
def Stop():
pass
@staticmethod
def Pause():
pass
@staticmethod
def UnPause():
pass
@staticmethod
def get_isPlaying():
'''
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def get_isVirtual():
'''
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
@overload
def PlayOneShot(arg1):
'''
:param arg1: AudioClip
:type arg1: UnityEngine.AudioClip
'''
pass
@staticmethod
@overload
def PlayOneShot(arg1, arg2):
'''
:param arg1: AudioClip
:type arg1: UnityEngine.AudioClip
:param arg2: Single
:type arg2: System.Single or float
'''
pass
@staticmethod
def PlayOneShot(arg1=None, arg2=None):
pass
@staticmethod
@overload
def PlayClipAtPoint(arg1, arg2):
'''
:param arg1: AudioClip
:type arg1: UnityEngine.AudioClip
:param arg2: Vector3
:type arg2: UnityEngine.Vector3
'''
pass
@staticmethod
@overload
def PlayClipAtPoint(arg1, arg2, arg3):
'''
:param arg1: AudioClip
:type arg1: UnityEngine.AudioClip
:param arg2: Vector3
:type arg2: UnityEngine.Vector3
:param arg3: Single
:type arg3: System.Single or float
'''
pass
@staticmethod
def PlayClipAtPoint(arg1=None, arg2=None, arg3=None):
pass
@staticmethod
def get_loop():
'''
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def set_loop(arg1):
'''
:param arg1: Boolean
:type arg1: System.Boolean or bool
'''
pass
@staticmethod
def get_playOnAwake():
'''
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def set_playOnAwake(arg1):
'''
:param arg1: Boolean
:type arg1: System.Boolean or bool
'''
pass
@staticmethod
def get_velocityUpdateMode():
'''
:returns: AudioVelocityUpdateMode
:rtype: UnityEngine.AudioVelocityUpdateMode
'''
pass
@staticmethod
def set_velocityUpdateMode(arg1):
'''
:param arg1: AudioVelocityUpdateMode
:type arg1: UnityEngine.AudioVelocityUpdateMode
'''
pass
@staticmethod
def get_panStereo():
'''
:returns: Single
:rtype: System.Single
'''
pass
@staticmethod
def set_panStereo(arg1):
'''
:param arg1: Single
:type arg1: System.Single or float
'''
pass
@staticmethod
def get_spatialBlend():
'''
:returns: Single
:rtype: System.Single
'''
pass
@staticmethod
def set_spatialBlend(arg1):
'''
:param arg1: Single
:type arg1: System.Single or float
'''
pass
@staticmethod
def get_spatialize():
'''
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def set_spatialize(arg1):
'''
:param arg1: Boolean
:type arg1: System.Boolean or bool
'''
pass
@staticmethod
def get_spatializePostEffects():
'''
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def set_spatializePostEffects(arg1):
'''
:param arg1: Boolean
:type arg1: System.Boolean or bool
'''
pass
@staticmethod
def SetCustomCurve(arg1, arg2):
'''
:param arg1: AudioSourceCurveType
:type arg1: UnityEngine.AudioSourceCurveType
:param arg2: AnimationCurve
:type arg2: UnityEngine.AnimationCurve
'''
pass
@staticmethod
def GetCustomCurve(arg1):
'''
:param arg1: AudioSourceCurveType
:type arg1: UnityEngine.AudioSourceCurveType
:returns: AnimationCurve
:rtype: UnityEngine.AnimationCurve
'''
pass
@staticmethod
def get_reverbZoneMix():
'''
:returns: Single
:rtype: System.Single
'''
pass
@staticmethod
def set_reverbZoneMix(arg1):
'''
:param arg1: Single
:type arg1: System.Single or float
'''
pass
@staticmethod
def get_bypassReverbZones():
'''
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def set_bypassReverbZones(arg1):
'''
:param arg1: Boolean
:type arg1: System.Boolean or bool
'''
pass
@staticmethod
def get_dopplerLevel():
'''
:returns: Single
:rtype: System.Single
'''
pass
@staticmethod
def set_dopplerLevel(arg1):
'''
:param arg1: Single
:type arg1: System.Single or float
'''
pass
@staticmethod
def get_spread():
'''
:returns: Single
:rtype: System.Single
'''
pass
@staticmethod
def set_spread(arg1):
'''
:param arg1: Single
:type arg1: System.Single or float
'''
pass
@staticmethod
def get_priority():
'''
:returns: Int32
:rtype: System.Int32
'''
pass
@staticmethod
def get_enabled():
'''
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def set_enabled(arg1):
'''
:param arg1: Boolean
:type arg1: System.Boolean or bool
'''
pass
@staticmethod
def get_transform():
'''
:returns: Transform
:rtype: UnityEngine.Transform
'''
pass
@staticmethod
def get_gameObject():
'''
:returns: GameObject
:rtype: UnityEngine.GameObject
'''
pass
@staticmethod
@overload
def GetComponent(arg1):
'''
:param arg1: Type
:type arg1: System.Type
:returns: Component
:rtype: UnityEngine.Component
'''
pass
@staticmethod
@overload
def GetComponent(arg1):
'''
:param arg1: String
:type arg1: System.String or str
:returns: Component
:rtype: UnityEngine.Component
'''
pass
@staticmethod
def GetComponent(arg1=None):
pass
@staticmethod
@overload
def GetComponentInChildren(arg1, arg2):
'''
:param arg1: Type
:type arg1: System.Type
:param arg2: Boolean
:type arg2: System.Boolean or bool
:returns: Component
:rtype: UnityEngine.Component
'''
pass
@staticmethod
@overload
def GetComponentInChildren(arg1):
'''
:param arg1: Type
:type arg1: System.Type
:returns: Component
:rtype: UnityEngine.Component
'''
pass
@staticmethod
def GetComponentInChildren(arg1=None, arg2=None):
pass
@staticmethod
@overload
def GetComponentsInChildren(arg1, arg2):
'''
:param arg1: Type
:type arg1: System.Type
:param arg2: Boolean
:type arg2: System.Boolean or bool
:returns: ComponentArray
:rtype: UnityEngine.ComponentArray
'''
pass
@staticmethod
@overload
def GetComponentsInChildren(arg1):
'''
:param arg1: Type
:type arg1: System.Type
:returns: ComponentArray
:rtype: UnityEngine.ComponentArray
'''
pass
@staticmethod
@overload
def GetComponentsInChildren(arg1, arg2):
'''
:param arg1: Boolean
:type arg1: System.Boolean or bool
:param arg2: Undefined variable
:type arg2: ListT.ListT
'''
pass
@staticmethod
@overload
def GetComponentsInChildren(arg1):
'''
:param arg1: Undefined variable
:type arg1: ListT.ListT
'''
pass
@staticmethod
def GetComponentsInChildren(arg1=None, arg2=None):
pass
@staticmethod
@overload
def GetComponentInParent(arg1):
'''
:param arg1: Type
:type arg1: System.Type
:returns: Component
:rtype: UnityEngine.Component
'''
pass
@staticmethod
def GetComponentInParent(arg1=None):
pass
@staticmethod
@overload
def GetComponentsInParent(arg1, arg2):
'''
:param arg1: Type
:type arg1: System.Type
:param arg2: Boolean
:type arg2: System.Boolean or bool
:returns: ComponentArray
:rtype: UnityEngine.ComponentArray
'''
pass
@staticmethod
@overload
def GetComponentsInParent(arg1):
'''
:param arg1: Type
:type arg1: System.Type
:returns: ComponentArray
:rtype: UnityEngine.ComponentArray
'''
pass
@staticmethod
@overload
def GetComponentsInParent(arg1, arg2):
'''
:param arg1: Boolean
:type arg1: System.Boolean or bool
:param arg2: Undefined variable
:type arg2: ListT.ListT
'''
pass
@staticmethod
def GetComponentsInParent(arg1=None, arg2=None):
pass
# Source repository: guillaume-thiry/OECD-Chatbot
# @ Copyright Inria, Ecole Polytechnique
# Shared under the MIT license https://opensource.org/licenses/mit-license.php
# This file contains all the functions that are used in the area detection
# The main part of the code is the function find_areas, that will be used elsewhere in the code
# The other functions are auxiliary that are being used in the main one
### IMPORT
# Python libraries import
import nltk
from nltk.parse import CoreNLPParser
# Utils import
import utils
from utils import lower_list, get_index
from parsing_analysis import get_subtrees, first_word
### PARSER
parser = CoreNLPParser(url='http://localhost:9000')
### DICTIONARIES
region_dict = utils.region_dict
demo_dict = utils.demo_dict
acro_dict = utils.acro_dict
country_list = region_dict["World"]
region_list = list(region_dict.keys())
demo_list = list(demo_dict.keys())
acro_list = list(acro_dict.keys())
### FUNCTIONS
# The function area_type takes the name of a recognized area
# and uses the dictionaries to determine if it is a region or a country
def area_type(area):
if area in country_list:
return 'country'
elif area in region_list:
return 'region'
else:
return None
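# Illustrative behaviour (assuming the usual dictionary contents, e.g. that "France" is in
# region_dict["World"] and that "Europe" is a key of region_dict):
#   area_type("France")   -> 'country'
#   area_type("Europe")   -> 'region'
#   area_type("Atlantis") -> None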
# The function replacement replaces the abbreviated name of some countries with the complete form
# " ... UK ... " --> " ... United Kingdom ... "
# This is important as most of the abbreviations are not detected correctly by the NER
def replacement(sent):
s = sent
for a in acro_list:
s = s.replace(a, acro_dict[a])
tok = nltk.word_tokenize(s)
return (s,tok)
def group_locations(locations, sent):
n = len(locations)
i = 0
group = []
current = ''
while i < n:
if current == '':
current += locations[i]
else:
new = current + ' ' + locations[i]
if new in sent:
current = new
else:
group.append(current)
current = locations[i]
i += 1
group.append(current)
return group
# The function get_locations finds the words in the sentence
# that are tagged as "LOCATION" by the NER
def get_locations(ner):
res = []
for l in ner:
if l[1] == "LOCATION":
res.append(l[0])
return res
# The function bilist creates two lists of names :
# The first list contains full names of countries/regions
# while the second one contains partial names
# Therefore, "South Africa" will be in the first list and "South" in the second
# When looking at the location words in the sentence, we can easily find out if it is the name of a place
# or the beginning of the name of a place (in which case, we will go further to complete it)
def bilist(liste):
partial = []
for l in liste:
words = nltk.word_tokenize(l)
n = len(words)
if n>1:
i = 1
while (i<n):
partial.append(" ".join(words[0:i]))
i += 1
return (lower_list(liste),lower_list(partial))
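# Illustrative example of the two lists:
#   bilist(["South Africa", "Denmark"])  ->  (['south africa', 'denmark'], ['south'])
# Full lowercase names go in the first list and every strict prefix ("South") in the second,
# which lets find_areas_in_list extend a match word by word until it reaches a complete name.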
# The function find_areas_in_list formally finds all the areas in the sentence :
# The locations identified thanks to the NER are searched in the dictionaries to make sure they exist
# Thanks to the complete and partial dictionaries (with bilist) the names in several words are detected as well
# Finally, the type of area (country or region) is also added for each location
def find_areas_in_list(tokens):
areas = [] #final list of areas
n = len(tokens)
i = 0
#First we treat the countries
country_comp, country_part = bilist(country_list) #lists of complete and partial names of countries
while (i<n):
if tokens[i].lower() in country_comp: #if the word is a complete name of country, we add it to the list
areas.append([tokens[i], 'country', 'n'])
i += 1
elif tokens[i].lower() in country_part: #this works because there is no set of words being at the same time
#a country and the beginning of another country (ie. in both lists)
#"South" is in the partial list but not complete and vice versa for "Denmark"
a = 1
while(" ".join(tokens[i:i+a]).lower() in country_part):
a += 1
if(" ".join(tokens[i:i+a]).lower() in country_comp):
areas.append([" ".join(tokens[i:i+a]), 'country', 'n'])
i = i+a
else:
i+=1
else:
i += 1
#Exactly the same, but for regions
region_comp, region_part = bilist(region_list)
i = 0
while (i<n):
if tokens[i].lower() in region_comp:
areas.append([tokens[i], 'region', 'n'])
i += 1
elif tokens[i].lower() in region_part:
a = 1
while(" ".join(tokens[i:i+a]).lower() in region_part):
a += 1
if(" ".join(tokens[i:i+a]).lower() in region_comp):
areas.append([" ".join(tokens[i:i+a]), 'region', 'n'])
i = i+a
else:
i+=1
else:
i += 1
#Search for the demonyms (adjectives) like "Australian" or "Russian"
#The idea is the same (a list of complete names and another of partial names)
#but with an extra step : when the complete adjective is found, find the name of the country/region associated
#because we do not want adjectives but nouns in areas at the end
#also we deal with countries and regions at the same time
demo_comp, demo_part = bilist(demo_list)
i = 0
while (i<n):
if tokens[i].lower() in demo_comp:
name = ""
type = ""
for d in demo_list:
if d.lower() == tokens[i].lower():
name = demo_dict[d]
if name in country_list:
type = "country"
elif name in region_list:
type = "region"
areas.append([tokens[i], type, 'a'])
i += 1
elif tokens[i].lower() in demo_part:
a = 1
while(" ".join(tokens[i:i+a]).lower() in demo_part):
a += 1
if(" ".join(tokens[i:i+a]).lower() in demo_comp):
name = ""
type = ""
for d in demo_list:
if d.lower() == " ".join(tokens[i:i+a]).lower():
name = demo_dict[d]
if name in country_list:
type = "country"
elif name in region_list:
type = "region"
areas.append([" ".join(tokens[i:i+a]), type, 'a'])
i = i+a
else:
i+=1
else:
i += 1
return areas
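# Illustrative output (assuming "Denmark" is in the country list and "Russian" is a demonym
# mapped to "Russia" in demo_dict):
#   find_areas_in_list(['population', 'of', 'Denmark', 'and', 'Russian', 'regions'])
#   -> [['Denmark', 'country', 'n'], ['Russian', 'country', 'a']]
# The third field records whether the match was a noun ('n') or a demonym adjective ('a').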
# The function find_areas gives the final result
# Once the areas have been identified (and we know they exist) in the sentence,
# we have to look at the context to determine the role of each area (IN/FROM, TO or THAN)
# An area will be put in the category "THAN" if there is a "than" in the sentence and the area is found after it
# Indeed, sometimes a word can be a part of the comparison while being really far away from the "than"
# So we cannot just look if "than" is in the context of the word
# Example : "Countries with more male population in 2012 than the female population in 2000 in Germany"
# --> "than" may not be in the same context as "Germany" here
# So everything in that case is classified as "THAN" and a more precise analysis will be done in "find_aggregators"
# to determine if the country is or is not a real part of the comparison
# For the others, we look at the context, and more precisely, we look at the Prepositional Phrase (PP) to which the area belongs
def find_areas(sent):
s, tok = replacement(sent)
parse = next(parser.raw_parse(sent))
areas = find_areas_in_list(tok)
pps = get_subtrees(parse, "PP")
areas_in = []
areas_to = []
areas_than = []
if 'than' in tok:
idx = get_index(tok, "than") #position of the "than"
else:
idx = len(tok) + 10 #if no "than", we put it after all the words (so the condition is never met)
for a in areas:
name = a[0] #name of the area
type = a[1] #country or region
form = a[2] #adjective 'a' or name 'n'
classification = None
if get_index(tok, name) > idx:
classification = "THAN"
else:
#looking at the PP (context) of the area
p = None
for pp in pps:
b = True
for mot in name.split(" "):
if mot not in pp.leaves():
b = False
if b:
p = pp.leaves()
if p != None:
if (('to' in p) or ('into' in p) or ('towards' in p)): #words that would indicate a category "TO"
classification = "TO"
elif (('between' in p) and ('and' in p)):
if first_word(name, 'and', tok) == 'and':
classification = "TO"
else:
classification = "IN" #most of the time, the default case is the category "IN"
else:
classification = "IN"
else:
classification = "IN"
#Finally, before adding the area to the list, we change its name to the proper format
#Indeed, until now, the name of the area was written as in the sentence, to find it easily
#Now, we take the official writing, the same as in the dictionaries
#Thus, if the word is "INDIA" or "india" or "INDia", now it becomes "India"
name_f = []
if form == 'a':
for adj in demo_list:
if adj.lower() == name.lower():
name_f = [demo_dict[adj],type]
elif form == 'n':
if type == 'country':
for c in country_list:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 17 10:31:31 2020
@author: grat05
"""
import numpy as np
from numpy import exp
import pandas as pd
from functools import wraps
class ObjDict():
def __repr__(self):
return str(self.__dict__)
def isList(thing):
return isinstance(thing, (list, tuple, np.ndarray))
class SodiumChannelModel():
def __init__(self, TEMP = 310.0, naO = 140.0, naI = 7,
recArrayNames = [],
state_vals = [],
retOptions = {'G': True, 'INa': True, 'INaL': True,\
'Open': True, 'RevPot': True}):
self.TEMP = TEMP
self.naO = naO
self.naI = naI
self.recArrayNames = pd.Index(recArrayNames)
self.num_states = len(self.recArrayNames)
self._state_vals = np.array(state_vals, dtype='float64')
self._recArray = []
self.retOptions = retOptions
self.lastVal = None
self.memoize = True
self.RGAS = 8314.0;
self.FDAY = 96487.0;
@property
def recArray(self):
return pd.DataFrame(np.array(self._recArray), columns=self.recArrayNames)
@property
def state_vals(self):
return pd.Series(self._state_vals, index=self.recArrayNames, dtype='float64')
def calc_constants(self, vOld):
pass
def jac(self, vOld):
pass
def ddtcalc(self, vals, vOld):
pass
def getRevPot(self):
return (self.RGAS * self.TEMP / self.FDAY) * np.log(self.naO / self.naI)
def calcCurrent(self, vals, vOld, setRecArray=True):
pass
def update(self, vOld, dt, record=True):
vals = self._state_vals
ddt = self.ddtcalc(vals, vOld)
vals += ddt*dt
return self.calcCurrent(vals, vOld, setRecArray=record)
def memoize_calc_constants(calc_constants):
@wraps(calc_constants)
def memoized(self, vOld):
if self.memoize:
if self.lastVal is not None and np.array_equal(self.lastVal[0], vOld):
return self.lastVal[1]
ret = calc_constants(self, vOld)
if self.memoize:
self.lastVal = (vOld, ret)
return ret
return memoized
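# (memoize_calc_constants caches only the most recent vOld -> (tau, ss) pair, so repeated
#  evaluations at an identical voltage — e.g. ddtcalc called twice in a row at the same
#  vOld — reuse one set of rate computations; any new voltage simply overwrites the cache.)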
class OHaraRudy_INa(SodiumChannelModel):
num_params = 33
param_bounds = [(-3,3)]*2 + \
[(-0.1,3)] + [(-3,3)] + [(-0.1,3)] +\
[(-3,3)] + [(-0.1,3)] +\
[(-1,3), (-3,3), (-0.1,3)] + \
[(-3,3)] + [(-0.1,3)] +\
[(-3,3)] + [(-1,3)] +\
[(-3,3)] + [(-1,3)] +\
[(-20,20)] + \
[(-3,3)] + [(-3,3)] + [(-1,3)] +\
[(-3,3)] + [(-1,3)] +\
[(-1,1)]*3 + \
[(-1,1)]*2 + \
[(-1,1)]*2 + \
[(-15,15)]*2 + \
[(-15,15), (-1,3)]
KmCaMK = 0.15
CaMKa = 1e-5
def __init__(self, GNaFactor=0, GNaLFactor=0, \
mss_tauFactor=0, tm_maxFactor=0, tm_tau1Factor=0,\
tm_shiftFactor=0, tm_tau2Factor=0,\
hss_tauFactor=0, thf_maxFactor=0, thf_tau1Factor=0,\
thf_shiftFactor=0, thf_tau2Factor=0,\
ths_maxFactor=0, ths_tau1Factor=0,\
ths_shiftFactor=0, ths_tau2Factor=0,\
Ahf_multFactor=0,\
tj_baselineFactor=0, tj_maxFactor=0, tj_tau1Factor=0,\
tj_shiftFactor=0, tj_tau2Factor=0,\
hssp_tauFactor=0, tssp_multFactor=0, tjp_multFactor=0,\
mLss_tauFactor=0, hLss_tauFactor=0,\
thL_baselineFactor=0, thLp_multFactor=0,\
mss_shiftFactor=0, hss_shiftFactor=0,\
jss_shiftFactor=0, jss_tauFactor=0,
TEMP = 310.0, naO = 140.0, naI = 7):
super().__init__(TEMP=TEMP, naO=naO, naI=naI,
recArrayNames = ["m","hf","hs","j","hsp","jp","mL","hL","hLp"],
state_vals = [0,1,1,1,1,1,0,1,1])
# scaling currents 0
self.GNa = 75*np.exp(GNaFactor);
self.GNaL = 0.0075*np.exp(GNaLFactor);
#m gate 2
self.mss_tau = 9.871*np.exp(mss_tauFactor)
self.tm_max = 0.473824721*np.exp(tm_maxFactor)
self.tm_tau1 = 34.77*np.exp(tm_tau1Factor)
self.tm_shift = -57.6379999+tm_shiftFactor
self.tm_tau2 = 5.955*np.exp(tm_tau2Factor)
tm_cshift = np.log(self.tm_tau1/self.tm_tau2)/(1/self.tm_tau1+1/self.tm_tau2)
tm_cmax = np.exp(tm_cshift/self.tm_tau1) + np.exp(-tm_cshift/self.tm_tau2)
self.tm_shift -= tm_cshift #shift correction
self.tm_max *= tm_cmax #height correction
#h gate 7
self.hss_tau = 6.086*np.exp(hss_tauFactor)
self.thf_max = 3.594172982376325*np.exp(thf_maxFactor)
self.thf_tau1 = 6.285*np.exp(thf_tau1Factor)
self.thf_shift = -57.639999999606744+thf_shiftFactor
self.thf_tau2 = 20.27*np.exp(thf_tau2Factor)
thf_cshift = np.log(self.thf_tau2/self.thf_tau1)/(1/self.thf_tau2+1/self.thf_tau1)
thf_cmax = np.exp(thf_cshift/self.thf_tau2) + np.exp(-thf_cshift/self.thf_tau1)
self.thf_shift -= thf_cshift
self.thf_max *= thf_cmax
#12
self.ths_max = 5.894233734241695*np.exp(ths_maxFactor)
self.ths_tau1 = 28.05*np.exp(ths_tau1Factor)
self.ths_shift = -66.94699999965118+ths_shiftFactor
self.ths_tau2 = 56.66*np.exp(ths_tau2Factor)
ths_cshift = np.log(self.ths_tau2/self.ths_tau1)/(1/self.ths_tau2+1/self.ths_tau1)
ths_cmax = np.exp(ths_cshift/self.ths_tau2) + np.exp(-ths_cshift/self.ths_tau1)
self.ths_shift -= ths_cshift
self.ths_max *= ths_cmax
mixingodds = np.exp(Ahf_multFactor+np.log(99))
self.Ahf_mult = mixingodds/(mixingodds+1)# np.exp(Ahf_multFactor)
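# (With Ahf_multFactor = 0 the odds are exp(ln 99) = 99, so Ahf_mult = 99/100 = 0.99;
#  the factor therefore shifts the fast/slow h-gate mixing on a log-odds scale.)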
#j gate 17
self.tj_baseline = 2.038*np.exp(tj_baselineFactor)
self.tj_max = 27.726847365476033*np.exp(tj_maxFactor)
self.tj_tau1 = 8.281*np.exp(tj_tau1Factor)
self.tj_shift = -90.60799999976416+tj_shiftFactor
self.tj_tau2 = 38.45*np.exp(tj_tau2Factor)
tj_cshift = np.log(self.tj_tau2/self.tj_tau1)/(1/self.tj_tau2+1/self.tj_tau1)
tj_cmax = np.exp(tj_cshift/self.tj_tau2) + np.exp(-tj_cshift/self.tj_tau1)
self.tj_shift -= tj_cshift
self.tj_max *= tj_cmax
# phosphorylated gates
self.hssp_tau = 6.086*np.exp(hssp_tauFactor)
self.tssp_mult = 3.0*np.exp(tssp_multFactor)
self.tjp_mult = 1.46*np.exp(tjp_multFactor)
#late gates & late gate phosphorylation
self.mLss_tau = 5.264*np.exp(mLss_tauFactor)
self.hLss_tau = 7.488*np.exp(hLss_tauFactor)
self.hLssp_tau = self.hLss_tau
self.thL_baseline = 200.0*np.exp(thL_baselineFactor)
self.thLp_mult = 3*np.exp(thLp_multFactor)
#added later 29
self.mss_shift = 39.57+mss_shiftFactor
self.hss_shift = 82.90+hss_shiftFactor
self.jss_shift = 82.90+jss_shiftFactor
self.jss_tau = 6.086*np.exp(jss_tauFactor)
@memoize_calc_constants
def calc_constants(self, vOld):
tau = ObjDict()
ss = ObjDict()
ss.mss = 1.0 / (1.0 + np.exp((-(vOld + self.mss_shift)) / self.mss_tau));
tau.tm = self.tm_max/(np.exp((vOld-self.tm_shift)/self.tm_tau1)
+ np.exp(-(vOld-self.tm_shift)/self.tm_tau2))
ss.hss = 1.0 / (1 + np.exp((vOld + self.hss_shift) / self.hss_tau));
tau.thf = self.thf_max/(np.exp((vOld-self.thf_shift)/self.thf_tau2)
+ np.exp(-(vOld-self.thf_shift)/self.thf_tau1))
tau.ths = self.ths_max/(np.exp((vOld-self.ths_shift)/self.ths_tau2)
+ np.exp(-(vOld-self.ths_shift)/self.ths_tau1))
ss.jss = ss.hss#1.0 / (1 + np.exp((vOld + self.jss_shift) / self.jss_tau));#hss;
tau.tj = self.tj_baseline + self.tj_max/(np.exp((vOld-self.tj_shift)/self.tj_tau2)
+ np.exp(-(vOld-self.tj_shift)/self.tj_tau1))
ss.hssp = 1.0 / (1 + np.exp((vOld + 89.1) / self.hssp_tau));
tau.thsp = self.tssp_mult * tau.ths;
tau.tjp = self.tjp_mult * tau.tj;
ss.mLss = 1.0 / (1.0 + np.exp((-(vOld + 42.85)) / self.mLss_tau));
tau.tmL = tau.tm;
ss.hLss = 1.0 / (1.0 + np.exp((vOld + 87.61) / self.hLss_tau));
tau.thL = self.thL_baseline;
ss.hLssp = 1.0 / (1.0 + np.exp((vOld + 93.81) / self.hLssp_tau));
tau.thLp = self.thLp_mult * tau.thL;
tau.__dict__ = {key: min(max(value, 1e-8), 1e20) for key,value in tau.__dict__.items()}
return tau, ss
def jac(self, vOld):
vOld = np.array(vOld,ndmin=1)
d_vals = np.squeeze(np.zeros((9,len(vOld))))
tau, _ = self.calc_constants(vOld)
d_vals[0] = -1 / tau.tm
d_vals[1] = -1 / tau.thf
d_vals[2] = -1 / tau.ths
d_vals[3] = -1 / tau.tj
d_vals[4] = -1 / tau.thsp
d_vals[5] = -1 / tau.tjp
d_vals[6] = -1 / tau.tmL
d_vals[7] = -1 / tau.thL
d_vals[8] = -1 / tau.thLp
# np.clip(d_vals, a_min=-1e15, a_max=None, out=d_vals)
return np.diag(d_vals)
def ddtcalc(self, vals, vOld):
d_vals = np.zeros_like(vals)
tau, ss = self.calc_constants(vOld)
d_vals[0] = (ss.mss-vals[0]) / tau.tm
d_vals[1] = (ss.hss-vals[1]) / tau.thf
d_vals[2] = (ss.hss-vals[2]) / tau.ths
d_vals[3] = (ss.jss-vals[3]) / tau.tj
d_vals[4] = (ss.hssp-vals[4]) / tau.thsp
d_vals[5] = (ss.jss-vals[5]) / tau.tjp
d_vals[6] = (ss.mLss-vals[6]) / tau.tmL
d_vals[7] = (ss.hLss-vals[7]) / tau.thL
d_vals[8] = (ss.hLssp-vals[8]) / tau.thLp
# np.clip(d_vals, a_min=-1e15, a_max=1e15, out=d_vals)
return d_vals
def calcCurrent(self, vals, vOld, setRecArray=True):
vals = np.array(vals)
if len(vals.shape) == 1:
vals.shape = (9,-1)
elif vals.shape[0] != 9 and vals.shape[-1] == 9:
vals = vals.T
m,hf,hs,j,hsp,jp,mL,hL,hLp = vals
if setRecArray:
self._recArray += list(np.copy(vals.T))
ena = self.getRevPot()
Ahf = 0.99*self.Ahf_mult;
Ahs = 1.0 - Ahf;
h = Ahf * hf + Ahs * hs;
hp = Ahf * hf + Ahs *hsp;
fINap = (1.0 / (1.0 + self.KmCaMK / self.CaMKa));
# oprob = self.m * self.m * self.m * ((1.0 - fINap) * h * self.j + fINap * hp * self.jp)
# oprob = m**3 * ((1.0 - fINap) * h + fINap * hp * jp)
oprob = m**3 * ((1.0 - fINap) * h * j + fINap * hp * jp)
INa = (self.GNa if self.retOptions['G'] else 1) *\
(oprob if self.retOptions['Open'] else 1) *\
((vOld - ena) if self.retOptions['RevPot'] else 1);
fINaLp = (1.0 / (1.0 + self.KmCaMK / self.CaMKa));
loprob = mL * ((1.0 - fINaLp) * hL + fINaLp * hLp)
INaL = (self.GNaL if self.retOptions['G'] else 1)*\
(loprob if self.retOptions['Open'] else 1)*\
((vOld - ena) if self.retOptions['RevPot'] else 1);
return (INa if self.retOptions['INa'] else 0)+\
(INaL if self.retOptions['INaL'] else 0)
def update(self, vOld, dt, record=True):
m,hf,hs,j,hsp,jp,mL,hL,hLp = self.state_vals
tau, ss = self.calc_constants(vOld)
m = ss.mss - (ss.mss - m) * np.exp(-dt / tau.tm);
hf = ss.hss - (ss.hss - hf) * np.exp(-dt / tau.thf);
hs = ss.hss - (ss.hss - hs) * np.exp(-dt / tau.ths);
j = ss.jss - (ss.jss - j) * np.exp(-dt / tau.tj);
hsp = ss.hssp - (ss.hssp - hsp) * np.exp(-dt / tau.thsp);
jp = ss.jss - (ss.jss - jp) * np.exp(-dt / tau.tjp);
mL = ss.mLss - (ss.mLss - mL) * np.exp(-dt / tau.tmL);
hL = ss.hLss - (ss.hLss - hL) * np.exp(-dt / tau.thL);
hLp = ss.hLssp - (ss.hLssp - hLp) * np.exp(-dt / tau.thLp);
self._state_vals[:] = [m, hf, hs, j, hsp, jp, mL, hL, hLp]  # write through the backing array (state_vals is a read-only property)
return self.calcCurrent(self.state_vals, vOld, setRecArray=record)
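# A minimal usage sketch (illustrative only): the clamp voltage, step size and step count
# below are arbitrary assumptions, not values taken from the original study.
if __name__ == "__main__":
    model = OHaraRudy_INa()
    v_clamp = -20.0   # mV, assumed test potential
    dt = 0.005        # ms, assumed step size
    vals = model._state_vals  # gate variables, updated in place below
    for _ in range(2000):
        vals += model.ddtcalc(vals, v_clamp) * dt   # forward-Euler gate update
    i_na = model.calcCurrent(vals, v_clamp)         # combined INa + INaL at the clamp voltage
    print(i_na)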
class OHaraRudy_wMark_INa(SodiumChannelModel):
num_params = 25
param_bounds = [(-3,3),
(-3,3),
(-1,3), (-15,15),
(-3,3), (-1,3),
(-15,15), (-1,3),
(-1,3), (-15,15),# (-15,15),
(-3,3), (-15,15),
(-1,3), (-3,3),
(-3,3), (-15,15),
(-1,3), (-1,3),
(-5,5),
(-1,3), (-15,15),
(-3,3), (-15,15),
(-1,3), (-1,3),
]
def __init__(self, GNaFactor=0,
baselineFactor=0,
mss_tauFactor=0, mss_shiftFactor=0,
tm_maxFactor=0, tm_tau1Factor=0,
tm_shiftFactor=0, tm_tau2Factor=0,
hss_tauFactor=0, hss_shiftFactor=0,
thf_maxFactor=0, thf_shiftFactor=0,
thf_tau1Factor=0, thf_tau2Factor=0,
ths_maxFactor=0, ths_shiftFactor=0,
ths_tau1Factor=0, ths_tau2Factor=0,
Ahf_multFactor=0,
jss_tauFactor=0, jss_shiftFactor=0,
tj_maxFactor=0, tj_shiftFactor=0,
tj_tau2Factor=0, tj_tau1Factor=0,
TEMP = 310.0, naO = 140.0, naI = 7):
# scaling currents 0
self.GNa = np.exp(GNaFactor);
#fastest tau 1
self.baseline = 2.038*np.exp(baselineFactor)
#m gate 2
self.mss_tau = 9.871*np.exp(mss_tauFactor)
self.mss_shift = 51.57+mss_shiftFactor
self.tm_max = 0.474*np.exp(tm_maxFactor)
self.tm_tau1 = 34.77*np.exp(tm_tau1Factor)
self.tm_shift = -57.6+tm_shiftFactor
self.tm_tau2 = 5.955*np.exp(tm_tau2Factor)
tm_cshift = np.log(self.tm_tau1/self.tm_tau2)/(1/self.tm_tau1+1/self.tm_tau2)
tm_cmax = np.exp(tm_cshift/self.tm_tau1) + np.exp(-tm_cshift/self.tm_tau2)
self.tm_shift -= tm_cshift #shift correction
self.tm_max *= tm_cmax #height correction
#h gate 8
self.hss_tau = 14.086*np.exp(hss_tauFactor)
self.hfss_shift = -76+hss_shiftFactor
#self.hsss_shift = -87.90+hss_shiftFactor#np.exp(hsss_shiftFactor)
self.thf_max = 5*np.exp(thf_maxFactor)
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import datetime
import hashlib
import json
import logging
import math
import os
import shutil
import signal
import stat
import subprocess
import sys
import time
import warnings
from shutil import which
import click
import click_log
import distro
import requests
from tqdm import tqdm
from .active_learning.alp import ActiveLearningPipelineFactory
from .augmentation import AugmentorFactory, register_all_augmentors
from .auto_annotator import register_all_annotators
from . import markup, path
from ._util import blueprint
from ._version import current as __version__
from .components import Conversation, QuestionAnswerer
from .components._config import (
get_active_learning_config,
get_augmentation_config,
get_auto_annotator_config,
get_language_config,
)
from .constants import BINARIES_URL, DUCKLING_VERSION, UNANNOTATE_ALL_RULE
from .converter import DialogflowConverter, RasaConverter
from .exceptions import ElasticsearchKnowledgeBaseConnectionError, KnowledgeBaseError, MindMeldError
from .models.helpers import create_annotator
from .path import (
MODEL_CACHE_PATH,
QUERY_CACHE_DB_PATH,
get_generated_data_folder,
get_dvc_local_remote_path,
)
from .resource_loader import ResourceLoader
logger = logging.getLogger(__name__)
click.disable_unicode_literals_warning = True
CONTEXT_SETTINGS = {"help_option_names": ["-h", "--help"], "auto_envvar_prefix": "MM"}
# deprecation warning for python 3.5
if sys.version_info < (3, 6):
deprecation_msg = (
"DEPRECATION: Python 3.5 reached end of life on 13 Sept 2020. MindMeld will deprecate"
" official support for Python 3.5 in the next release. Please consider migrating"
" your application to Python 3.6 and above."
)
warnings.warn(deprecation_msg)
DVC_INIT_ERROR_MESSAGE = "you are not inside of a DVC repository"
DVC_ADD_DOES_NOT_EXIST_MESSAGE = "does not exist"
DVC_INIT_HELP = "Run 'dvc init' to instantiate this project as a DVC repository"
DVC_ADD_DOES_NOT_EXIST_HELP = "The folder {dvc_add_path} does not exist"
DVC_COMMAND_HELP_MESSAGE = (
"Options:"
"\n\t--init\t\tInstantiate DVC within a repository"
"\n\t--save\t\tSave built models using dvc"
"\n\t--checkout HASH\tCheckout repo and models corresponding to git hash"
"\n\t--destroy\tRemove all files associated with DVC from a directory"
"\n\t--help\t\tShow this message and exit\n"
)
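# Example invocations (assuming the app exposes this CLI through an app.py entry point,
# as the messages elsewhere in this module suggest):
#   python app.py dvc --init              # set up DVC and a local remote for this app
#   python app.py dvc --save              # dvc add + push the generated model folder
#   python app.py dvc --checkout <hash>   # restore models matching a git commit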
def _version_msg():
"""Returns the MindMeld version, location and Python powering it."""
python_version = sys.version[:3]
location = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
message = "MindMeld %(version)s from {} (Python {})"
return message.format(location, python_version)
#
# App only Commands
#
@click.group()
@click.pass_context
def _app_cli(ctx):
"""Command line interface for MindMeld apps."""
# configure logger settings for dependent libraries
urllib3_logger = logging.getLogger("urllib3")
urllib3_logger.setLevel(logging.ERROR)
es_logger = logging.getLogger("elasticsearch")
es_logger.setLevel(logging.ERROR)
if ctx.obj is None:
ctx.obj = {}
def _dvc_add_helper(filepath):
"""
Returns True if successful, False otherwise along with helper message
Args:
filepath (str): path to file/folder to add to DVC
Returns:
(tuple) True if no errors, False + error string otherwise
"""
p = subprocess.Popen(
["dvc", "add", filepath], stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
# Get DVC error message from standard error
_, error = p.communicate()
error_string = error.decode("utf-8")
if DVC_INIT_ERROR_MESSAGE in error_string:
return False, DVC_INIT_HELP
elif DVC_ADD_DOES_NOT_EXIST_MESSAGE in error_string:
return False, DVC_ADD_DOES_NOT_EXIST_HELP.format(dvc_add_path=filepath)
elif p.returncode != 0:
return False, error_string
else:
return True, None
def _bash_helper(command_list):
"""
Helper for running bash using subprocess and error handling
Args:
command_list (list): Bash command formatted as a list, no spaces in each element
Returns:
(tuple) True if no errors, False + error string otherwise
"""
p = subprocess.Popen(command_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_, error = p.communicate()
error_string = error.decode("utf-8")
if p.returncode != 0:
return False, error_string
return True, None
@_app_cli.command("dvc", context_settings=CONTEXT_SETTINGS)
@click.pass_context
@click.option(
"--init", is_flag=True, required=False, help="Instantiate DVC within a repository"
)
@click.option(
"--save", is_flag=True, required=False, help="Save built models using dvc"
)
@click.option("--checkout", required=False, help="Instantiate DVC within a repository")
@click.option(
"--help",
"help_",
is_flag=True,
required=False,
help="Print message showing available options",
)
@click.option(
"--destroy",
is_flag=True,
required=False,
help="Remove all files associated with dvc from directory",
)
def dvc(ctx, init, save, checkout, help_, destroy):
app = ctx.obj.get("app")
app_path = app.app_path
# Ensure that DVC is installed
if not which("dvc"):
logger.error(
"DVC is not installed. You can install DVC by running 'pip install dvc'."
)
return
if init:
success, error_string = _bash_helper(["dvc", "init", "--subdir"])
if not success:
logger.error("Error during initialization: %s", error_string)
return
# Set up a local remote
local_remote_path = get_dvc_local_remote_path(app_path)
success, error_string = _bash_helper(
["dvc", "remote", "add", "-d", "myremote", local_remote_path]
)
if not success:
logger.error("Error during local remote set up: %s", error_string)
return
# Add DVC config file to staging
success, error_string = _bash_helper(["git", "add", ".dvc/config"])
if not success:
logger.error("Error while adding dvc config file: %s", error_string)
return
logger.info(
"Instantiated DVC repo and set up local remote in %s", local_remote_path
)
logger.info(
"The newly generated dvc config file (.dvc/config) has been added to git staging"
)
elif save:
generated_model_folder = get_generated_data_folder(app_path)
success, error_string = _dvc_add_helper(generated_model_folder)
if not success:
logger.error("Error during saving: %s", error_string)
return
success, error_string = _bash_helper(["dvc", "push"])
if not success:
logger.error("Error during dvc push: %s", error_string)
return
success, error_string = _bash_helper(
["git", "add", "{}/.generated.dvc".format(app_path)]
)
if not success:
logger.error("Error adding model dvc file: %s", error_string)
return
logger.info("Successfully added .generated model folder to dvc")
logger.info(
"The newly generated .dvc file (%s/.generated.dvc) has been added to git staging",
app_path,
)
elif checkout:
success, error_string = _bash_helper(["git", "checkout", checkout])
if not success:
logger.error("Error during git checkout: %s", error_string)
return
success, error_string = _bash_helper(["dvc", "pull"])
if not success:
logger.error("Error during dvc checkout: %s", error_string)
return
logger.info(
"Successfully checked out models corresponding to hash %s", checkout
)
elif destroy:
logger.info(
"This command must be run in the directory containing the .dvc/ folder. "
"It will remove all files associated with dvc from the directory."
)
input("Press any key to continue:")
# dvc destroy with -f flag always throws a benign error message so we don't handle
_bash_helper(["dvc", "destroy", "-f"])
elif help_:
logger.info(DVC_COMMAND_HELP_MESSAGE)
else:
logger.error("No option provided, see options below.")
logger.info(DVC_COMMAND_HELP_MESSAGE)
@_app_cli.command("run", context_settings=CONTEXT_SETTINGS)
@click.pass_context
@click.option("-P", "--port", type=int, default=7150)
@click.option(
"--no-debug", is_flag=True, help="starts the service with debug mode turned off"
)
@click.option(
"-r",
"--reloader",
is_flag=True,
help="starts the service with the reloader enabled",
)
def run_server(ctx, port, no_debug, reloader):
"""Starts the MindMeld service."""
app = ctx.obj.get("app")
if app is None:
raise ValueError(
"No app was given. Run 'python app.py run' from your app folder."
)
# make sure num parser is running
ctx.invoke(num_parser, start=True)
app.run(
port=port,
debug=not no_debug,
host="0.0.0.0",
threaded=True,
use_reloader=reloader,
)
@_app_cli.command("converse", context_settings=CONTEXT_SETTINGS)
@click.pass_context
@click.option("--context", help="JSON object to be used as the context")
@click.option(
"-v",
"--verbose",
is_flag=True,
help="Print the full metrics instead of just accuracy.",
)
def converse(ctx, context, verbose):
"""
Starts a conversation with the app.
When the verbose flag is set to true, the confidences are included
in the request objects passed to the intents
"""
try:
app = ctx.obj.get("app")
if isinstance(context, str):
context = json.loads(context)
if app is None:
raise ValueError(
"No app was given. Run 'python app.py converse' from your app"
" folder."
)
# make sure num parser is running
ctx.invoke(num_parser, start=True)
if app.async_mode:
loop = asyncio.get_event_loop()
loop.run_until_complete(_converse_async(app, context))
return
convo = Conversation(app=app, context=context, verbose=verbose)
while True:
message = click.prompt("You")
responses = convo.say(message)
for index, response in enumerate(responses):
prefix = "App: " if index == 0 else "... "
click.secho(prefix + response, fg="blue", bg="white")
except MindMeldError as ex:
logger.error(ex.message)
ctx.exit(1)
async def _converse_async(app, context):
convo = Conversation(app=app, context=context)
while True:
message = click.prompt("You")
responses = await convo.say(message)
for index, response in enumerate(responses):
prefix = "App: " if index == 0 else "... "
click.secho(prefix + response, fg="blue", bg="white")
@_app_cli.command("build", context_settings=CONTEXT_SETTINGS)
@click.pass_context
@click.option(
"-i",
"--incremental",
is_flag=True,
default=False,
help="only build models with changed training data or configuration",
)
def build(ctx, incremental):
"""Builds the app with default config."""
try:
app = ctx.obj.get("app")
if app is None:
raise ValueError(
"No app was given. Run 'python app.py build' from your app folder."
)
# make sure num parser is running
ctx.invoke(num_parser, start=True)
app.lazy_init()
nlp = app.app_manager.nlp
nlp.build(incremental=incremental)
nlp.dump()
except MindMeldError as ex:
logger.error(ex.message)
ctx.exit(1)
except RuntimeError as ex:
logger.error(ex)
ctx.exit(1)
@_app_cli.command("evaluate", context_settings=CONTEXT_SETTINGS)
@click.pass_context
@click.option(
"-v",
"--verbose",
is_flag=True,
help="Print the full metrics instead of just accuracy.",
)
def evaluate(ctx, verbose):
"""Evaluates the app with default config."""
try:
app = ctx.obj.get("app")
if app is None:
raise ValueError(
"No app was given. Run 'python app.py evaluate' from your app folder."
)
# make sure num parser is running
ctx.invoke(num_parser, start=True)
app.lazy_init()
nlp = app.app_manager.nlp
try:
nlp.load()
except MindMeldError:
logger.error(
"You must build the app before running evaluate. "
"Try 'python app.py build'."
)
ctx.exit(1)
nlp.evaluate(verbose)
except MindMeldError as ex:
logger.error(ex.message)
ctx.exit(1)
except RuntimeError as ex:
logger.error(ex)
ctx.exit(1)
@_app_cli.command("predict", context_settings=CONTEXT_SETTINGS)
@click.pass_context
@click.option(
"-o",
: list
ordered sql entries.
"""
reordered_entries = []
other_entries = []
for sql_entry in sql_entries:
if 'CREATE' in sql_entry:
reordered_entries.append(sql_entry)
else:
other_entries.append(sql_entry)
reordered_entries.extend(other_entries)
return reordered_entries
def _transform_sql(self, sql_entries, synonyms):
"""
Transform the sql entries to assure proper syntax for procedures.
ie table variables and <table_var>.INSERT to add data to parameter table.
Parameters
----------
sql_entries : list
the sql entries generated by the hana ml package
synonyms : list
synonyms used to replace direct schema/table/function references with synonym references.
Returns
-------
transformed_entries : list
transformed sql entries.
"""
transformed_entries = []
# Build synonym replacements
synonym_replacements = self._build_synonym_replacements(synonyms)
for sql_entry in sql_entries:
replacements = {}
if 'CREATE' in sql_entry:
replacements.update({
'CREATE LOCAL TEMPORARY COLUMN TABLE': 'DECLARE',
'" (': ' TABLE (',
'"#': ''
})
if 'INSERT' in sql_entry:
replacements.update({
'INSERT INTO ': '',
' VALUES ': '.INSERT(',
')': '))',
'"': '',
'#': ':'
})
if 'CALL' in sql_entry:
replacements.update({
' WITH OVERVIEW': '',
'"': '',
'#': ':'
})
if 'SELECT' in sql_entry:
replacements.update({
'#': ':'
})
# Ignore anonymous block statement
if sql_entry.startswith('DO') or sql_entry.startswith('BEGIN') or \
sql_entry.startswith('END'):
continue
# Merge dicts in the 'classical' way. Not using the option (merged_replacements
# = {**replacements, **synonym_replacements}) of >3.5 on purpose for backwards
# compatibility
merged_replacements = replacements.copy()
merged_replacements.update(synonym_replacements)
sql_entry = StringUtils.multi_replace(sql_entry, merged_replacements)
if not sql_entry == '':
# Add sqlscript end statement identifier
sql_entry = sql_entry + ';'
transformed_entries.append(sql_entry)
return transformed_entries
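# Illustrative transformation (hypothetical trace entry, not taken from an actual run):
#   CREATE LOCAL TEMPORARY COLUMN TABLE "#PAL_PARAMETER_TBL" ("PARAM_NAME" VARCHAR(256), "INT_VALUE" INTEGER)
# becomes, after the CREATE-branch replacements above and the appended terminator,
#   DECLARE PAL_PARAMETER_TBL TABLE ("PARAM_NAME" VARCHAR(256), "INT_VALUE" INTEGER);
# i.e. a SQLScript table-variable declaration usable inside a procedure body.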
def _build_synonym_replacements(self, synonyms):
"""
Based on the synonyms build the replacement structure to support multi replacement in
string.
Parameters
----------
synonyms : list
synonyms used to replace direct schema/table/function references with synonym references.
Returns
-------
replacements : list
replacments entries of the synonyms
"""
replacements = {}
for synonym in synonyms:
if synonym['type'] == self.TRACE_KEY_FUNCTION:
# There is a difference in how the HANA ML Python API deals with the call statements
# to functions. Traditional function is quoted,
# ie _SYS_AFL."PAL_RANDOM_DECISION_TREES", but in the auto (autonomous)
# case the function is unquoted, ie _SYS_AFL.PAL_RANDOM_DECISION_TREES. To assure
# we support both cases we provide both replacements.
# traditional single sql entries
replacements[synonym['schema'] + '."' + synonym['object'] + '"'] = '"' + \
synonym['synonym'] + '"'
# Autonomous grouped sql entries
replacements[synonym['schema'] + '.' + synonym['object']] = '"' + \
synonym['synonym'] + '"'
if synonym['type'] == self.TRACE_KEY_DATASET:
# There is a difference in how the HANA ML Python API deals with the table statements.
# Traditionally it is quoted, ie "SCHEMA"."TABLE", but in the auto (autonomous) case
# the schema and table is unquoted, ie SCHEMA.TABLE. To assure we support both
# cases we provide both replacements
# Traditional single sql entries
replacements['"' + synonym['schema'] + '"."' + synonym['object'] + '"'] = '"' + \
synonym['synonym'] + '"'
# Autonomous grouped sql entries
replacements[synonym['schema'] + '.' + synonym['object']] = '"' + \
synonym['synonym'] + '"'
return replacements
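# Illustrative sketch (hypothetical synonym entry; assumes its 'type' equals
# self.TRACE_KEY_FUNCTION):
#   {'schema': '_SYS_AFL', 'object': 'PAL_RANDOM_DECISION_TREES', 'synonym': 'MY_RDT_SYN'}
# registers both of these replacements, covering quoted single statements and the
# unquoted autonomous-block style:
#   '_SYS_AFL."PAL_RANDOM_DECISION_TREES"'  ->  '"MY_RDT_SYN"'
#   '_SYS_AFL.PAL_RANDOM_DECISION_TREES'    ->  '"MY_RDT_SYN"'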
class SqlProcessorConsumptionLayer(SqlProcessorBase):
"""
This class deals with generating the consumption layer objects. It is generic, as it needs
to cater for multiple solution-specific implementations.
Based on the generation type, it decides how to prepare the basic objects for the consumption layer.
Merging and grouping are also implemented here.
"""
def generate_consumption_layer(self, sql_processed):
"""
Start the generation of the consumption layer objects based on the base layer generated.
Parameters
----------
sql_processed : dict
The object the conversion from sql trace
Returns
-------
consumption_layer : dict
Generated consumption layer
"""
consumption_layer = []
# We need to understand the relation between base_layer objects. So build up this
# context first and save for reference:
sql_processed[self.TRACE_KEY_RELATION_CONTEXT] = \
self._build_relation_context(sql_processed)
# Based on grouping type grouping is set on the base_objects and passed tot he
# consumption layer as the grouping implementation is on consumption layer level
self._set_grouping(sql_processed)
sql_proc_base_layer = sql_processed[self.TRACE_KEY_BASE_LAYER]
for algo in sql_proc_base_layer:
for function in sql_proc_base_layer[algo]:
consumption_elements = self._build_consumption_layer_structure(
sql_processed, algo, function, self.TRACE_KEY_TABLES_ATTRIB_DBOBJECT_NAME)
if consumption_elements:
consumption_layer.extend(consumption_elements)
return consumption_layer
def _build_relation_context(self, sql_processed):
"""
Based on the base layer distill relationships between elements in the base layer.
Generally speaking the output and input is validated where there is a relation.
Note: Only the relationship between elements is defined. Not the relationship between
the actual input and output tables / variables. This is validated and linked during
consumption layer generation.
Parameters
----------
sql_processed : dict
The object the conversion from sql trace
Returns
-------
relations : list
A list with relations
"""
relations = []
sql_proc_base_layer = sql_processed[self.TRACE_KEY_BASE_LAYER]
for algo in sql_proc_base_layer:
for function in sql_proc_base_layer[algo]:
if self.TRACE_KEY_TABLES_OUTPUT_PROCESSED in sql_proc_base_layer[algo][function]:
# For each input table check whether the select of the output table is being
# used which indicates a relationship
for table in sql_proc_base_layer[algo][function][
self.TRACE_KEY_TABLES_OUTPUT_PROCESSED]:
select = table[self.TRACE_KEY_TABLES_ATTRIB_SELECT]
# check input structures
for check_algo in sql_proc_base_layer:
for check_function in sql_proc_base_layer[check_algo]:
if self.TRACE_KEY_TABLES_INPUT_PROCESSED in \
sql_proc_base_layer[check_algo][check_function]:
for check_table in sql_proc_base_layer[check_algo][
check_function][
self.TRACE_KEY_TABLES_INPUT_PROCESSED]:
if select in \
check_table[self.TRACE_KEY_TABLES_ATTRIB_SELECT]:
from_path = self.TRACE_KEY_BASE_LAYER + '/' \
+ algo + '/' + function
from_metadata = sql_processed[
self.TRACE_KEY_BASE_LAYER][algo][function][
self.TRACE_KEY_METADATA_PROCESSED]
to_path = self.TRACE_KEY_BASE_LAYER + '/' \
+ check_algo + '/' + check_function
to_metadata = sql_processed[self.TRACE_KEY_BASE_LAYER][
check_algo][check_function][self.
TRACE_KEY_METADATA_PROCESSED]
relation = {
'from_path': from_path,
'from_object': table,
'from_metadata': from_metadata,
'to_path': to_path,
'to_object': check_table,
'to_metadata': to_metadata
}
relations.append(relation)
return relations
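# Illustrative shape of one relation entry (algo/function names are hypothetical, and the
# path prefix is whatever TRACE_KEY_BASE_LAYER holds):
#   {'from_path': '<base_layer>/RandomForestClassifier/fit',
#    'from_object': <output table dict>, 'from_metadata': <fit metadata>,
#    'to_path': '<base_layer>/RandomForestClassifier/predict',
#    'to_object': <input table dict>, 'to_metadata': <predict metadata>}
# i.e. the fit output feeds the predict input; grouping and chaining build on these links.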
def _set_grouping(self, sql_processed):
"""
Add grouping identifiers to the processed elements. This is done by traversing the parents
and children to see which group it should belong to.
Parameters
----------
sql_processed : dict
The object the conversion from sql trace
"""
group_strategy = self.config.get_entry(ConfigConstants.CONFIG_KEY_GROUP_STRATEGY)
sql_proc_base_layer = sql_processed[self.TRACE_KEY_BASE_LAYER]
if group_strategy == ConfigConstants.GENERATION_GROUP_FUNCTIONAL:
for algo in sql_proc_base_layer:
for function in sql_proc_base_layer[algo]:
base_object = sql_processed[self.TRACE_KEY_BASE_LAYER][algo][function]
if not 'groups' in base_object or not base_object['groups']:
base_object['groups'] = []
group_type = ConfigConstants.GROUP_UKNOWN_TYPE
unique_id = self._generate_unique_id()
group_identifier = algo + '_' + function
if ConfigConstants.GROUP_FIT_TYPE in function.lower() and \
ConfigConstants.GROUP_PREDICT_TYPE in function.lower():
group_type = ConfigConstants.GROUP_FIT_PREDICT_TYPE
elif ConfigConstants.GROUP_FIT_TYPE in function.lower():
group_type = ConfigConstants.GROUP_FIT_TYPE
elif ConfigConstants.GROUP_PREDICT_TYPE in function.lower():
group_type = ConfigConstants.GROUP_PREDICT_TYPE
else:
continue
self._set_chain_group(base_object,
sql_processed,
algo,
function,
group_type,
group_identifier,
unique_id)
else:
# All elements have the same group id
for algo in sql_proc_base_layer:
for function in sql_proc_base_layer[algo]:
base_object = sql_processed[self.TRACE_KEY_BASE_LAYER][algo][function]
group_type = ConfigConstants.GROUP_UKNOWN_TYPE
unique_id = self._generate_unique_id()
group_identifier = ConfigConstants.GROUP_IDENTIFIER_MERGE_ALL
self._set_group(base_object, group_type, group_identifier, unique_id)
def _set_chain_group(self,
base_object,
sql_processed,
algo,
function,
group_type,
group_identifier,
uid):
"""
Add grouping identifiers to the processed elements. This is done by traversing the
parents and children to see which group it should belong to.
Parameters
----------
base_object : dict
The base layer element.
sql_processed : dict
The object the conversion from sql trace
algo : str
Algorithm of the sql entries. ie RandomForestClassifier
function : str
Function of the sql entries.
group_type : str
Which group type. Mainly Fit or Predict.
group_identifier : str
Human-readable identifier of the group
uid : str
Generated unique id
"""
self._set_group(base_object, group_type, group_identifier, uid)
parent_objects = self._get_parent_objects(sql_processed, algo, function)
child_objects = self._get_child_objects(sql_processed, algo, function)
if parent_objects:
for parent in parent_objects:
parent_base, parent_algo, parent_function = self._get_path_parts(parent['path'])
parent_base_object = sql_processed[parent_base][parent_algo][parent_function]
self._set_chain_group_ascending(parent_base_object, sql_processed, parent_algo,
parent_function, group_type, group_identifier, uid)
if child_objects:
for child in child_objects:
child_base, child_algo, child_function = self._get_path_parts(child['path'])
child_base_object = sql_processed[child_base][child_algo][child_function]
self._set_chain_group_decending(child_base_object, sql_processed, child_algo,
child_function, group_type, group_identifier, uid)
def _set_group(self, base_object, group_type, group_identifier, uid):
"""
Setting the group by creating a group dict struct.
Parameters
----------
base_object : dict
The base layer element.
group_type : str
Which group type. Mainly Fit or Predict.
group_identifier : str
Human-readable identifier of the group
uid : str
Generated unique id
"""
group = {
'type': group_type,
'identifier': group_identifier,
'uid': uid
}
if not 'groups' in base_object or not base_object['groups']:
base_object['groups'] = []
base_object['groups'].append(group)
def _set_chain_group_ascending(self, base_object, sql_processed, algo,
function, group_type, group_identifier, uid):
"""
Ascend the chain to find elements that belong to the same group.
Note: When we are in a predict group we ignore fit, and when we are in a fit group we ignore
predict, so as to assure proper merging of elements
Parameters
----------
base_object : dict
The base layer element.
sql_processed : dict
pipe_rf260['valve'][0]['identifier'] = 'LOB2?' # Seems to have been removed. May have been on LOB2, though.
# pipe_rf260['exit_position']['direction'] = 180. # degrees, giving dir of pipe leading towards injector, up is 90
i += 1
# CPMID
pipe_cpmid = ods['gas_injection']['pipe'][i]
pipe_cpmid['name'] = 'CPMID'
pipe_cpmid['exit_position']['r'] = 0.9 # m
pipe_cpmid['exit_position']['z'] = -0.2 # m
pipe_cpmid['exit_position']['phi'] = np.nan # Unknown, sorry
pipe_cpmid['valve'][0]['identifier'] = '???' # Seems to have been removed. Not on schematic.
# pipe_cpmid['exit_position']['direction'] = 0. # degrees, giving dir of pipe leading towards injector, up is 90
i += 1
# ================================
@machine_mapping_function(__regression_arguments__)
def pf_active_hardware(ods):
r"""
Loads DIII-D tokamak poloidal field coil hardware geometry
:param ods: ODS instance
"""
from omfit_classes.omfit_efund import OMFITmhdin
mhdin_dat_filename = os.sep.join([omas_dir, 'machine_mappings', 'support_files', 'd3d', 'mhdin.dat'])
mhdin = get_support_file(OMFITmhdin, mhdin_dat_filename)
mhdin.to_omas(ods, update='pf_active')
for k in range(len(ods['pf_active.coil'])):
fcid = 'F{}{}'.format((k % 9) + 1, 'AB'[int(mhdin['FC'][k, 1] < 0)])
ods['pf_active.coil'][k]['name'] = fcid
ods['pf_active.coil'][k]['identifier'] = fcid
ods['pf_active.coil'][k]['element.0.name'] = fcid
ods['pf_active.coil'][k]['element.0.identifier'] = fcid
@machine_mapping_function(__regression_arguments__, pulse=133221)
def pf_active_coil_current_data(ods, pulse):
# get pf_active hardware description --without-- placing the data in this ods
# use `unwrap` to avoid calling `@machine_mapping_function` of `pf_active_hardware`
ods1 = ODS()
unwrap(pf_active_hardware)(ods1)
# fetch the actual pf_active currents data
with omas_environment(ods, cocosio=1):
fetch_assign(
ods,
ods1,
pulse,
channels='pf_active.coil',
identifier='pf_active.coil.{channel}.element.0.identifier',
time='pf_active.coil.{channel}.current.time',
data='pf_active.coil.{channel}.current.data',
validity=None,
mds_server='d3d',
mds_tree='D3D',
tdi_expression='ptdata2("{signal}",{pulse})',
time_norm=0.001,
data_norm=1.0,
)
# ================================
@machine_mapping_function(__regression_arguments__, pulse=170325)
def ec_launcher_active_hardware(ods, pulse):
setup = '.ECH.'
# We need three queries in order to retrieve only the fields we need
# First, the number of systems in use
query = {'NUM_SYSTEMS': setup + 'NUM_SYSTEMS'}
num_systems = mdsvalue('d3d', treename='RF', pulse=pulse, TDI=query).raw()['NUM_SYSTEMS']
query = {}
# Second query the used systems to resolve the gyrotron names
for system_no in range(1, num_systems + 1):
cur_system = f'SYSTEM_{system_no}.'
query[f'GYROTRON_{system_no}'] = setup + cur_system + 'GYROTRON.NAME'
query[f'FREQUENCY_{system_no}'] = setup + cur_system + 'GYROTRON.FREQUENCY'
for field in ['LAUNCH_R', 'LAUNCH_Z', 'PORT']:
query[field + f'_{system_no}'] = setup + cur_system + f'antenna.{field}'
systems = mdsvalue('d3d', treename='RF', pulse=pulse, TDI=query).raw()
query = {}
gyrotron_names = []
for system_no in range(1, num_systems + 1):
if len(systems[f'GYROTRON_{system_no}']) == 0:
"""
If nothing is connected to this system the gyrotron name is blank.
"""
continue
gyrotron = systems[f'GYROTRON_{system_no}']
gyrotron_names.append(gyrotron)
gyr = gyrotron.upper()
gyr = gyr[:3]
for field in ['STAT', 'XMFRAC', 'FPWRC', 'AZIANG', 'POLANG']:
query[field + f'_{system_no}'] = setup + f'{gyrotron.upper()}.EC{gyr}{field}'
if field in ['XMFRAC', 'FPWRC', 'AZIANG', 'POLANG']:
query["TIME_" + field + f'_{system_no}'] = "dim_of(" + query[field + f'_{system_no}'] + "+01)"
# Final, third query now that we have resolved all the TDIs related to gyrotron names
gyortrons = mdsvalue('d3d', treename='RF', pulse=pulse, TDI=query).raw()
for system_no in range(1, num_systems + 1):
system_index = system_no - 1
if gyortrons[f'STAT_{system_no}'] == 0:
continue
launchers = ods['ec_launchers.launcher']
launchers[system_index]['identifier'] = launchers[system_index]['name'] = gyrotron_names[system_index]
launchers[system_index]['launching_position.r'] = np.atleast_1d(systems[f'LAUNCH_R_{system_no}'])
launchers[system_index]['launching_position.z'] = np.atleast_1d(systems[f'LAUNCH_Z_{system_no}'])
launchers[system_index]['launching_position.time'] = np.zeros(launchers[system_index]['launching_position.r'].shape)
phi = np.deg2rad(float(systems[f'PORT_{system_no}'].split(' ')[0]))
phi = -phi - np.pi / 2.0
launchers[system_index]['launching_position.phi'] = np.atleast_1d(phi)
launchers[system_index]['frequency.data'] = np.atleast_1d(systems[f'FREQUENCY_{system_no}'])
launchers[system_index]['frequency.time'] = np.atleast_1d(0)
launchers[system_index]['power_launched.time'] = np.atleast_1d(gyortrons[f'TIME_FPWRC_{system_no}']) / 1.0e3
launchers[system_index]['power_launched.data'] = np.atleast_1d(gyortrons[f'FPWRC_{system_no}'])
xfrac = gyortrons[f'XMFRAC_{system_no}']
        if iterable(xfrac):
            launchers[system_index]['mode.data'] = np.atleast_1d(np.array(np.round(1.0 - 2.0 * xfrac), dtype=int))
            launchers[system_index]['mode.time'] = np.atleast_1d(gyortrons[f'TIME_XMFRAC_{system_no}']) / 1.0e3
        else:
            launchers[system_index]['mode.data'] = np.atleast_1d(np.array([np.round(1.0 - 2.0 * xfrac)], dtype=int))
            launchers[system_index]['mode.time'] = np.atleast_1d(0)
launchers[system_index]['steering_angle_tor.data'] = np.atleast_1d(np.deg2rad((gyortrons[f'AZIANG_{system_no}'] - 180.0)))
if len(launchers[system_index]['steering_angle_tor.data']) == 1:
launchers[system_index]['steering_angle_tor.time'] = np.atleast_1d(0)
else:
launchers[system_index]['steering_angle_tor.time'] = np.atleast_1d(gyortrons[f'TIME_AZIANG_{system_no}']) / 1.0e3
launchers[system_index]['steering_angle_pol.data'] = np.atleast_1d(np.deg2rad((gyortrons[f'POLANG_{system_no}'] - 90.0)))
if len(launchers[system_index]['steering_angle_pol.data']) == 1:
launchers[system_index]['steering_angle_pol.time'] = np.atleast_1d(0)
else:
launchers[system_index]['steering_angle_pol.time'] = np.atleast_1d(gyortrons[f'TIME_POLANG_{system_no}']) / 1.0e3
        # The spot size and radius are computed using the evolution formula for Gaussian beams,
        # see: https://en.wikipedia.org/wiki/Gaussian_beam#Beam_waist
        # The beam is divergent because the beam waist is focused onto the final launching mirror.
        # The ECRH group quotes a beam waist of w_0 = 1.72 cm, and since the beam is focused
        # onto the mirror it is paraxial at the launching point. Hence, the inverse curvature
        # radius at the launcher is zero.
        # Note that the real ECRH beams are astigmatic, so this is only an approximation.
launchers[system_index]['beam.phase.angle.time'] = np.array([0.0])
launchers[system_index]['beam.phase.angle.data'] = np.deg2rad([0.0])
launchers[system_index]['beam.phase.curvature.time'] = np.array([0.0])
launchers[system_index]['beam.phase.curvature.data'] = np.array([np.atleast_1d(0.0), np.atleast_1d(0.0)])
launchers[system_index]['beam.spot.angle.time'] = np.array([0.0])
launchers[system_index]['beam.spot.angle.data'] = np.deg2rad([0.0])
launchers[system_index]['beam.spot.size.time'] = np.array([0.0])
launchers[system_index]['beam.spot.size.data'] = np.array([np.atleast_1d(0.0172), np.atleast_1d(0.0172)])
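# Hedged illustration (not part of the mapping above): the Gaussian-beam evolution the
# spot-size comments refer to. With the waist w_0 on the launching mirror and zero
# inverse curvature there, the 1/e^2 spot radius a distance z along the beam is
# w(z) = w_0 * sqrt(1 + (z / z_R)^2), with Rayleigh range z_R = pi * w_0^2 / wavelength.
# The helper name and the 110 GHz example frequency are assumptions for illustration only.
def _gaussian_spot_size_example(z_m, w0_m=0.0172, frequency_hz=110.0e9):
    wavelength_m = 2.998e8 / frequency_hz
    z_rayleigh_m = np.pi * w0_m**2 / wavelength_m
    return w0_m * np.sqrt(1.0 + (z_m / z_rayleigh_m) ** 2)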
# ================================
@machine_mapping_function(__regression_arguments__, pulse=133221)
def interferometer_hardware(ods, pulse):
"""
Loads DIII-D CO2 interferometer chord locations
The chord endpoints ARE NOT RIGHT. Only the R for vertical lines or Z for horizontal lines is right.
Data sources:
DIII-D webpage: https://diii-d.gat.com/diii-d/Mci accessed 2018 June 07 by <NAME>
:param ods: an OMAS ODS instance
:param pulse: int
"""
# As of 2018 June 07, DIII-D has four interferometers
# phi angles are compliant with odd COCOS
ods['interferometer.channel.0.identifier'] = ods['interferometer.channel.0.name'] = 'r0'
r0 = ods['interferometer.channel.0.line_of_sight']
r0['first_point.phi'] = r0['second_point.phi'] = 225 * (-np.pi / 180.0)
r0['first_point.r'], r0['second_point.r'] = 3.0, 0.8 # These are not the real endpoints
r0['first_point.z'] = r0['second_point.z'] = 0.0
for i, r in enumerate([1.48, 1.94, 2.10]):
ods['interferometer.channel'][i + 1]['identifier'] = ods['interferometer.channel'][i + 1]['name'] = 'v{}'.format(i + 1)
los = ods['interferometer.channel'][i + 1]['line_of_sight']
los['first_point.phi'] = los['second_point.phi'] = 240 * (-np.pi / 180.0)
los['first_point.r'] = los['second_point.r'] = r
los['first_point.z'], los['second_point.z'] = -1.8, 1.8 # These are not the real points
for i in range(len(ods['interferometer.channel'])):
ch = ods['interferometer.channel'][i]
ch['line_of_sight.third_point'] = ch['line_of_sight.first_point']
@machine_mapping_function(__regression_arguments__, pulse=133221)
def interferometer_data(ods, pulse):
"""
Loads DIII-D interferometer measurement data
:param pulse: int
"""
from scipy.interpolate import interp1d
ods1 = ODS()
unwrap(interferometer_hardware)(ods1, pulse=pulse)
# fetch
TDIs = {}
for k, channel in enumerate(ods1['interferometer.channel']):
identifier = ods1[f'interferometer.channel.{k}.identifier'].upper()
TDIs[identifier] = f"\\BCI::TOP.DEN{identifier}"
TDIs[f'{identifier}_validity'] = f"\\BCI::TOP.STAT{identifier}"
TDIs['time'] = f"dim_of({TDIs['R0']})"
TDIs['time_valid'] = f"dim_of({TDIs['R0_validity']})"
data = mdsvalue('d3d', 'BCI', pulse, TDIs).raw()
# assign
for k, channel in enumerate(ods1['interferometer.channel']):
identifier = ods1[f'interferometer.channel.{k}.identifier'].upper()
ods[f'interferometer.channel.{k}.n_e_line.time'] = data['time']/1.e3
ods[f'interferometer.channel.{k}.n_e_line.data'] = data[identifier] * 1e6
        validity = -data[f'{identifier}_validity']
        ods[f'interferometer.channel.{k}.n_e_line.validity_timed'] = interp1d(
            data['time_valid'] / 1.e3,
            validity,
            kind='nearest',
            bounds_error=False,
            fill_value=(validity[0], validity[-1]),
            assume_sorted=True,
        )(ods[f'interferometer.channel.{k}.n_e_line.time'])
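# Minimal hedged sketch of the validity resampling used above, on made-up numbers:
# a status flag sampled on its own coarse time base is mapped onto the density time
# base with nearest-neighbour interpolation and held constant beyond the endpoints.
def _validity_resample_example():
    from scipy.interpolate import interp1d
    t_valid = np.array([0.0, 1.0, 2.0])
    flag = np.array([0.0, -1.0, 0.0])
    t_signal = np.linspace(-0.5, 2.5, 7)
    resample = interp1d(t_valid, flag, kind='nearest', bounds_error=False,
                        fill_value=(flag[0], flag[-1]), assume_sorted=True)
    return resample(t_signal)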
# ================================
@machine_mapping_function(__regression_arguments__, pulse=133221)
def thomson_scattering_hardware(ods, pulse, revision='BLESSED'):
"""
Gathers DIII-D Thomson measurement locations
:param pulse: int
:param revision: string
Thomson scattering data revision, like 'BLESSED', 'REVISIONS.REVISION00', etc.
"""
unwrap(thomson_scattering_data)(ods, pulse, revision, _get_measurements=False)
@machine_mapping_function(__regression_arguments__, pulse=133221)
def thomson_scattering_data(ods, pulse, revision='BLESSED', _get_measurements=True):
"""
Loads DIII-D Thomson measurement data
:param pulse: int
:param revision: string
Thomson scattering data revision, like 'BLESSED', 'REVISIONS.REVISION00', etc.
"""
systems = ['TANGENTIAL', 'DIVERTOR', 'CORE']
# get the actual data
query = {'calib_nums': f'.ts.{revision}.header.calib_nums'}
for system in systems:
for quantity in ['R', 'Z', 'PHI']:
query[f'{system}_{quantity}'] = f'.TS.{revision}.{system}:{quantity}'
if _get_measurements:
for quantity in ['TEMP', 'TEMP_E', 'DENSITY', 'DENSITY_E', 'TIME']:
query[f'{system}_{quantity}'] = f'.TS.{revision}.{system}:{quantity}'
tsdat = mdsvalue('d3d', treename='ELECTRONS', pulse=pulse, TDI=query).raw()
# Read the Thomson scattering hardware map to figure out which lens each chord looks through
cal_set = tsdat['calib_nums'][0]
query = {}
for system in systems:
query[f'{system}_hwmapints'] = f'.{system}.hwmapints'
hw_ints = mdsvalue('d3d', treename='TSCAL', pulse=cal_set, TDI=query).raw()
# assign data in ODS
i = 0
for system in systems:
if isinstance(tsdat[f'{system}_R'], Exception):
continue
nc = len(tsdat[f'{system}_R'])
if not nc:
continue
# determine which lenses were used
ints = hw_ints[f'{system}_hwmapints']
if len(np.shape(ints)) < 2:
# Contingency needed for cases where all view-chords are taken off of divertor laser and reassigned to core
ints = ints.reshape(1, -1)
lenses = ints[:, 2]
# Assign data to ODS
for j in range(nc):
ch = ods['thomson_scattering']['channel'][i]
if not _get_measurements:
ch['name'] = 'TS_{system}_r{lens:+0d}_{ch:}'.format(
system=system.lower(), ch=j, lens=lenses[j] if lenses is not None else -9
)
ch['identifier'] = f'{system[0]}{j:02d}'
ch['position']['r'] = tsdat[f'{system}_R'][j]
ch['position']['z'] = tsdat[f'{system}_Z'][j]
ch['position']['phi'] = -tsdat[f'{system}_PHI'][j] * np.pi / 180.0
else:
ch['n_e.time'] = tsdat[f'{system}_TIME'] / 1e3
ch['n_e.data'] = unumpy.uarray(tsdat[f'{system}_DENSITY'][j], tsdat[f'{system}_DENSITY_E'][j])
ch['t_e.time'] = tsdat[f'{system}_TIME'] / 1e3
ch['t_e.data'] = unumpy.uarray(tsdat[f'{system}_TEMP'][j], tsdat[f'{system}_TEMP_E'][j])
i += 1
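# Hedged aside on the unumpy.uarray calls above: the uncertainties package stores the
# nominal value and the one-sigma error together, and both can be recovered downstream.
# The numbers below are made up for illustration.
def _uarray_example():
    values = unumpy.uarray([1.0e19, 2.0e19], [1.0e18, 2.0e18])
    return unumpy.nominal_values(values), unumpy.std_devs(values)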
# ================================
@machine_mapping_function(__regression_arguments__, pulse=133221)
def electron_cyclotron_emission_hardware(ods, pulse, fast_ece=False):
"""
Gathers DIII-D Electron cyclotron emission locations
:param pulse: int
:param fast_ece: bool
Use data sampled at high frequency
"""
unwrap(electron_cyclotron_emission_data)(ods, pulse, fast_ece=fast_ece, _measurements=False)
@machine_mapping_function(__regression_arguments__, pulse=133221)
def electron_cyclotron_emission_data(ods, pulse=133221, fast_ece=False, _measurements=True):
"""
Loads DIII-D Electron cyclotron emission data
:param pulse: int
:param fast_ece: bool
Use data sampled at high frequency
"""
fast_ece = 'F' if fast_ece else ''
setup = '\\ECE::TOP.SETUP.'
cal = '\\ECE::TOP.CAL%s.' % fast_ece
TECE = '\\ECE::TOP.TECE.TECE' + fast_ece
query = {}
for node, quantities in zip([setup, cal], [['ECEPHI', 'ECETHETA', 'ECEZH', 'FREQ'], ['NUMCH']]):
for quantity in quantities:
query[quantity] = node + quantity
query['TIME'] = f"dim_of({TECE + '01'})"
ece_map = mdsvalue('d3d', treename='ELECTRONS', pulse=pulse, TDI=query).raw()
N_time = len(ece_map['TIME'])
N_ch = ece_map['NUMCH'].item()
if _measurements:
query = {}
for ich | |
# Call the MoE
moe_out_2d, importance, load, _, _ = moe.Eval(
dp.devices, xs_2d, train, identifiers=None, summaries=True)
# Reshape the output to the original shape.
moe_out = dp(tf.reshape, moe_out_2d, dp(tf.shape, xs))
# These losses encourage equal load on the different experts.
loss = loss_coef * (eu.CVSquared(importance) + eu.CVSquared(load))
return moe_out, loss
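# Hedged sketch of the CV^2 penalty mentioned above: the squared coefficient of
# variation Var(x) / Mean(x)^2 is zero when every expert receives the same load and
# grows as the distribution becomes uneven. This NumPy helper only restates the
# quantity for illustration; eu.CVSquared is the implementation the model uses.
def _cv_squared_example(values):
  import numpy as np
  values = np.asarray(values, dtype=np.float64)
  mean = values.mean()
  return values.var() / (mean * mean + 1e-10)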
def simple_attention(target, source, bias=None, summaries=True):
"""A simple attention function.
Args:
target: a `Tensor` with shape `[batch, target_timesteps, depth]` or
`[batch, target_timesteps_1, target_timesteps_2, depth]`
source: a `Tensor` with shape `[batch, source_timesteps, depth]` or
`[batch, source_timesteps_1, source_timesteps_2, depth]`
bias: an optional `Tensor` with shape `[batch, timesteps, 1, 1]` used
to mask the attention to not attend to padding of input.
summaries: Boolean, whether to output summaries.
Returns:
a `Tensor` with same shape as `target`
"""
with tf.name_scope("simple_attention", [target, source]):
target_shape = tf.shape(target)
source_shape = tf.shape(source)
target = tf.reshape(target, [
target_shape[0], target_shape[1] * target_shape[2], target_shape[3]
])
source = tf.reshape(source, [
source_shape[0], source_shape[1] * source_shape[2], source_shape[3]
])
attention = tf.matmul(target, source, transpose_b=True)
attention *= tf.rsqrt(tf.to_float(tf.shape(target)[2]))
if bias is not None:
attention += tf.expand_dims(tf.squeeze(bias, axis=[2, 3]), axis=1)
attention = tf.nn.softmax(attention)
if summaries and not tf.get_variable_scope().reuse:
tf.summary.image("attention", tf.expand_dims(attention, 3), max_outputs=5)
attended = tf.matmul(attention, source)
return tf.reshape(attended, target_shape)
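# Hedged usage sketch for simple_attention (TF1 graph mode assumed): both inputs use
# a 4-D [batch, height, width, depth] layout and the output keeps the target's shape.
# The shapes below are arbitrary.
def _simple_attention_example():
  target = tf.zeros([2, 4, 4, 8])
  source = tf.zeros([2, 6, 6, 8])
  return simple_attention(target, source, summaries=False)  # shape [2, 4, 4, 8]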
def multiscale_conv_sum(inputs, output_size, dilation_rates_and_kernel_sizes,
pooling_type, **kwargs):
"""Sum of several dilated convolutions.
For all convolutions with dilation_rate > 1, we first pool the input with
width dilation_rate.
Args:
inputs: a Tensor
output_size: an Integer
dilation_rates_and_kernel_sizes: a list of pairs (dilation, kernel_size)
pooling_type: "AVG" or "MAX"
**kwargs: additional
Returns:
a Tensor.
"""
name = kwargs.pop("name") if "name" in kwargs else None
with tf.variable_scope(name, "multiscale_conv_sum", [inputs]):
padding = kwargs["padding"]
results, counter = [], -1
for dilation_rate, kernel_size in dilation_rates_and_kernel_sizes:
counter += 1
if dilation_rate > 1:
pooled = pool(inputs, kernel_size, pooling_type, padding)
else:
pooled = inputs
results.append(
conv(
pooled,
output_size,
kernel_size,
dilation_rate=dilation_rate,
name="conv_layer%d" % counter,
**kwargs))
return tf.add_n(results) * (len(results)**-0.5)
def multiscale_conv_and_attention(x,
padding,
hparams,
source=None,
summaries=True):
"""A common part of t2t layers.
First, do a linear multiscale convolution
Second, do attention (if source is not None)
Applies residuals and normalization on both steps.
Args:
x: a Tensor.
padding: a padding type
hparams: hyperparameters for model
source: optional source tensor for attention. (encoder output)
summaries: Boolean, whether to output summaries.
Returns:
a Tensor.
"""
# TODO(noam): The number of different scales should be a hyperparameter.
  conv_sum = multiscale_conv_sum(
      x,
      hparams.hidden_size,
      [((hparams.kernel_height**i, hparams.kernel_width**i),
        (hparams.kernel_height, hparams.kernel_width))
       for i in xrange(3)],
      "AVG",
      padding=padding)
  # For residuals, rescale with a 1x1 convolution if the channel counts differ.
if x.get_shape().as_list()[-1] != conv_sum.get_shape().as_list()[-1]:
x = conv(x, hparams.hidden_size, (1, 1))
x = noam_norm(x + conv_sum)
if source is not None:
x = noam_norm(x + simple_attention(x, source, summaries=summaries))
return x
def conv_with_pools(inputs, output_size, kernel_size, pool_sizes, pooling_type,
**kwargs):
"""Convolution plus 1x1 convolution applied to specified pools.
For example we might do a regular convolution with kernel size (3, 1),
and pools of sizes [(9, 1), (27, 1)].
Args:
inputs: a Tensor
output_size: an Integer
kernel_size: a tuple of integers
pool_sizes: a list of tuples of integers.
pooling_type: "AVG" or "MAX"
**kwargs: additional keyword args for conv
Returns:
a Tensor.
"""
name = kwargs.pop("name") if "name" in kwargs else None
with tf.variable_scope(name, "conv_with_pools", [inputs]):
padding = kwargs["padding"]
results = []
results.append(conv(inputs, output_size, kernel_size, **kwargs))
for i, pool_size in enumerate(pool_sizes):
pooled = pool(inputs, pool_size, pooling_type, padding)
results.append(
conv(pooled, output_size, (1, 1), name="pool_%d" % i, **kwargs))
return tf.add_n(results) * (len(results)**-0.5)
def conv_with_pools_and_attention(x,
padding,
hparams,
source=None,
summaries=True):
"""A common part of t2t layers.
First, do conv_with_pools
Second, do attention (if source is not None)
Applies residuals and normalization on both steps.
Args:
x: a Tensor.
padding: a padding type
hparams: hyperparameters for model
source: optional source tensor for attention. (encoder output)
summaries: Boolean, whether to output summaries.
Returns:
a Tensor.
"""
conv_sum = conv_with_pools(
x,
hparams.hidden_size, (hparams.kernel_height, hparams.kernel_width),
hparams.pool_sizes,
"AVG",
padding=padding)
if x.get_shape().as_list()[-1] == conv_sum.get_shape().as_list()[-1]:
conv_sum += x
x = noam_norm(conv_sum)
if source is not None:
x = noam_norm(x + simple_attention(x, source, summaries=summaries))
return x
def get_timing_signal(length,
min_timescale=1,
max_timescale=1e4,
num_timescales=16):
"""Create Tensor of sinusoids of different frequencies.
Args:
length: Length of the Tensor to create, i.e. Number of steps.
min_timescale: a float
max_timescale: a float
num_timescales: an int
Returns:
Tensor of shape (length, 2*num_timescales)
"""
positions = tf.to_float(tf.range(length))
log_timescale_increment = (math.log(max_timescale / min_timescale) /
(num_timescales - 1))
inv_timescales = min_timescale * tf.exp(
tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(inv_timescales, 0)
return tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
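# Hedged NumPy restatement of get_timing_signal, handy for inspecting the signal
# outside a TF graph: the timescales follow a geometric sequence from min_timescale
# to max_timescale, and row p of the result is
# [sin(p / t_0), ..., sin(p / t_{n-1}), cos(p / t_0), ..., cos(p / t_{n-1})].
def _timing_signal_numpy_example(length, min_timescale=1.0, max_timescale=1e4,
                                 num_timescales=16):
  import numpy as np
  positions = np.arange(length, dtype=np.float64)
  log_increment = math.log(max_timescale / min_timescale) / (num_timescales - 1)
  inv_timescales = min_timescale * np.exp(np.arange(num_timescales) * -log_increment)
  scaled_time = positions[:, None] * inv_timescales[None, :]
  return np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1)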
def add_timing_signal(x, min_timescale=1, max_timescale=1e4, num_timescales=16):
"""Adds a bunch of sinusoids of different frequencies to a Tensor.
This allows attention to learn to use absolute and relative positions.
The timing signal should be added to some precursor of both the source
and the target of the attention.
  The use of relative position is possible because sin(x+y) and cos(x+y) can be
  expressed in terms of y, sin(x) and cos(x).
In particular, we use a geometric sequence of timescales starting with
min_timescale and ending with max_timescale. For each timescale, we
generate the two sinusoidal signals sin(timestep/timescale) and
cos(timestep/timescale). All of these sinusoids are concatenated in
the depth dimension, padded with zeros to be the same depth as the input,
and added into input.
Args:
x: a Tensor with shape [?, length, ?, depth]
min_timescale: a float
max_timescale: a float
num_timescales: an int <= depth/2
Returns:
a Tensor the same shape as x.
"""
length = tf.shape(x)[1]
depth = tf.shape(x)[3]
signal = get_timing_signal(length, min_timescale, max_timescale,
num_timescales)
padded_signal = tf.pad(signal, [[0, 0], [0, depth - 2 * num_timescales]])
return x + tf.reshape(padded_signal, [1, length, 1, depth])
def mask_from_embedding(emb):
"""Input embeddings -> padding mask.
We have hacked symbol_modality to return all-zero embeddings for padding.
Returns a mask with 0.0 in the padding positions and 1.0 elsewhere.
Args:
emb: a Tensor with shape [batch, width, height, depth].
Returns:
a 0.0/1.0 Tensor with shape [batch, width, height, 1].
"""
return weights_nonzero(tf.reduce_sum(tf.abs(emb), axis=3, keep_dims=True))
def mask_leq(target_length, source_length):
"""A mask with 1.0 wherever source_pos <= target_pos and 0.0 elsewhere.
Args:
target_length: an integer
source_length: an integer
Returns:
a Tensor with shape [1, target_length, source_length]
"""
return tf.expand_dims(
tf.matrix_band_part(tf.ones([target_length, source_length]), -1, 0), 0)
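# Small worked example for mask_leq: with target_length = source_length = 3 the mask
# is the lower-triangular matrix [[1,0,0],[1,1,0],[1,1,1]] (with a leading batch
# dimension of 1), i.e. target position t may only see source positions <= t. The
# NumPy equivalent is written out below purely for illustration.
def _mask_leq_example():
  import numpy as np
  return np.tril(np.ones((3, 3)))[None, :, :]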
def attention_1d_v0(source,
target,
attention_size,
output_size,
num_heads,
mask=None,
transform_source=True,
transform_target=True,
transform_output=True,
summaries=True,
name=None):
"""multi-headed attention.
TODO(noam): this could probably be extended to 2d.
Args:
source: a Tensor of shape [batch, source_length, source_depth]
target: a Tensor of shape [batch, target_length, target_depth]
attention_size: an integer
output_size: an integer
num_heads: an integer divisor of attention_size
mask: a float32 Tensor of shape [batch, target_length, source_length]
1.0 means can-see; 0.0 means can't-see.
Any dimension can be 1 (supports broadcasting).
transform_source: a boolean
transform_target: a boolean
transform_output: a boolean
summaries: a boolean
name: an optional string
Returns:
a Tensor of shape [batch, length, output_size]
"""
with tf.variable_scope(name, default_name="attention", values=[target]):
source_length = tf.shape(source)[1]
target_length = tf.shape(target)[1]
batch = tf.shape(source)[0]
def _maybe_transform(t, size, should_transform, name):
if should_transform:
return conv1d(t, size, 1, name=name)
else:
assert t.get_shape()[-1] == size
return t
source_attention = _maybe_transform(source, attention_size,
transform_source, "source_attention")
target_attention = _maybe_transform(target, attention_size,
transform_target, "target_attention")
assert attention_size % num_heads == 0
size_per_head = attention_size // num_heads
source_attention = tf.reshape(
source_attention, [batch, source_length, num_heads, size_per_head])
target_attention = tf.reshape(
target_attention, [batch, target_length, num_heads, size_per_head])
# [batch, num_heads, length, size_per_head]
source_attention = tf.transpose(source_attention, [0, 2, 1, 3])
target_attention = tf.transpose(target_attention, [0, 2, 1, 3])
# [batch, num_heads, target_length, source_length]
attention = tf.matmul(target_attention, source_attention, transpose_b=True)
attention *= size_per_head**-0.5
if mask is not None:
mask = tf.expand_dims(mask, 1)
mask = (1.0 - mask) * -1e9
attention += mask
attention = tf.nn.softmax(attention)
if summaries and not tf.get_variable_scope().reuse:
# Compute a color image summary.
image = tf.reshape(attention,
[batch, num_heads, target_length, source_length])
image = tf.transpose(image, [0, 2, 3, 1])
image = tf.pow(image, 0.2) # for high-dynamic-range
# Each head will correspond to one of RGB.
      # pad the heads to be a multiple of 3
      extra_heads = -num_heads % 3
      image = tf.pad(image, [[0, 0], [0, 0], [0, 0], [0, extra_heads]])
image = tf.reshape(image, [
batch, target_length, source_length, 3, (num_heads + extra_heads) // 3
])
image = tf.reduce_max(image, 4)
| |
<reponame>JiazeWang/PAConv
"""
@Author: <NAME>
@Contact: <EMAIL>
@Time: 2020/03/06
@Document: Basic operation/blocks of 3D-GCN
"""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import copy
def get_neighbor_index(vertices: "(bs, vertice_num, 3)", neighbor_num: int):
"""
Return: (bs, vertice_num, neighbor_num)
"""
bs, v, _ = vertices.size()
device = vertices.device
inner = torch.bmm(vertices, vertices.transpose(1, 2)) #(bs, v, v)
quadratic = torch.sum(vertices**2, dim= 2) #(bs, v)
distance = inner * (-2) + quadratic.unsqueeze(1) + quadratic.unsqueeze(2)
neighbor_index = torch.topk(distance, k= neighbor_num + 1, dim= -1, largest= False)[1]
neighbor_index = neighbor_index[:, :, 1:]
return neighbor_index
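# Hedged sanity-check sketch for get_neighbor_index: the expansion
# -2<x_i, x_j> + |x_i|^2 + |x_j|^2 equals the squared Euclidean distance, so
# topk(..., largest=False) returns the nearest points, and the [:, :, 1:] slice
# above drops each point's zero-distance match with itself.
def _demo_get_neighbor_index():
    vertices = torch.rand(2, 128, 3)                 # (bs, vertice_num, 3)
    neighbor_index = get_neighbor_index(vertices, 16)
    return neighbor_index.shape                      # torch.Size([2, 128, 16])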
def get_nearest_index(target: "(bs, v1, 3)", source: "(bs, v2, 3)"):
"""
Return: (bs, v1, 1)
"""
inner = torch.bmm(target, source.transpose(1, 2)) #(bs, v1, v2)
s_norm_2 = torch.sum(source ** 2, dim= 2) #(bs, v2)
t_norm_2 = torch.sum(target ** 2, dim= 2) #(bs, v1)
d_norm_2 = s_norm_2.unsqueeze(1) + t_norm_2.unsqueeze(2) - 2 * inner
nearest_index = torch.topk(d_norm_2, k= 1, dim= -1, largest= False)[1]
return nearest_index
def indexing_neighbor(tensor: "(bs, vertice_num, dim)", index: "(bs, vertice_num, neighbor_num)" ):
"""
Return: (bs, vertice_num, neighbor_num, dim)
"""
bs, v, n = index.size()
id_0 = torch.arange(bs).view(-1, 1, 1)
tensor_indexed = tensor[id_0, index]
return tensor_indexed
def get_neighbor_direction_norm(vertices: "(bs, vertice_num, 3)", neighbor_index: "(bs, vertice_num, neighbor_num)"):
"""
    Return: (bs, vertice_num, neighbor_num, 3)
"""
neighbors = indexing_neighbor(vertices, neighbor_index) # (bs, v, n, 3)
neighbor_direction = neighbors - vertices.unsqueeze(2)
neighbor_direction_norm = F.normalize(neighbor_direction, dim= -1)
return neighbor_direction_norm
class Conv_surface(nn.Module):
    """Extract structure features from the surface, independent of vertex coordinates"""
def __init__(self, kernel_num, support_num):
super().__init__()
self.kernel_num = kernel_num
self.support_num = support_num
self.relu = nn.ReLU(inplace= True)
self.directions = nn.Parameter(torch.FloatTensor(3, support_num * kernel_num))
self.initialize()
def initialize(self):
stdv = 1. / math.sqrt(self.support_num * self.kernel_num)
self.directions.data.uniform_(-stdv, stdv)
def forward(self,
neighbor_index: "(bs, vertice_num, neighbor_num)",
vertices: "(bs, vertice_num, 3)"):
"""
Return vertices with local feature: (bs, vertice_num, kernel_num)
"""
bs, vertice_num, neighbor_num = neighbor_index.size()
neighbor_direction_norm = get_neighbor_direction_norm(vertices, neighbor_index)
support_direction_norm = F.normalize(self.directions, dim= 0) #(3, s * k)
theta = neighbor_direction_norm @ support_direction_norm # (bs, vertice_num, neighbor_num, s*k)
theta = self.relu(theta)
theta = theta.contiguous().view(bs, vertice_num, neighbor_num, self.support_num, self.kernel_num)
theta = torch.max(theta, dim= 2)[0] # (bs, vertice_num, support_num, kernel_num)
feature = torch.sum(theta, dim= 2) # (bs, vertice_num, kernel_num)
return feature
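# Hedged usage sketch for Conv_surface: the features depend only on the normalized
# directions to the neighbours, so they are invariant to translating the point cloud.
def _demo_conv_surface():
    vertices = torch.rand(2, 256, 3)
    neighbor_index = get_neighbor_index(vertices, 16)
    conv = Conv_surface(kernel_num=32, support_num=4)
    return conv(neighbor_index, vertices).shape      # torch.Size([2, 256, 32])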
def clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
# simple self attention and simple RPE in PointFormer
# position_embedding (b,n,n)
# query / key / value (b,h,n,d)
def attention(query, key, value, mask=None, dropout=None, position_embedding=None):
d_k = query.size(-1)
# scores (b,h,n,n)
scores = torch.matmul(query, key.transpose(-2, -1).contiguous()) / math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == 0, -1e9)
if position_embedding is not None:
position_embedding = position_embedding.unsqueeze(1)
scores = scores + position_embedding
p_attn = F.softmax(scores, dim=-1)
return torch.matmul(p_attn, value), p_attn
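# Hedged shape sketch for the attention helper above: query/key/value are
# (b, h, n, d); the optional position_embedding of shape (b, n, n) is broadcast
# over the head dimension before the softmax.
def _demo_attention():
    b, h, n, d = 2, 4, 64, 16
    q = k = v = torch.rand(b, h, n, d)
    rpe = torch.zeros(b, n, n)
    out, attn = attention(q, k, v, position_embedding=rpe)
    return out.shape, attn.shape                     # (b, h, n, d), (b, h, n, n)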
class MultiHeadedAttention(nn.Module):
def __init__(self, h, d_model, fn_attention=attention, dropout=0.1):
"Take in model size and number of heads."
super(MultiHeadedAttention, self).__init__()
assert d_model % h == 0
# We assume d_v always equals d_k
self.d_model = d_model
self.d_k = d_model // h
self.h = h
self.linears = clones(nn.Linear(d_model, d_model), 4)
self.fn_attention = fn_attention
self.attn = None
self.dropout = None
self.initialize()
def initialize(self):
stdv = 1. / math.sqrt(self.d_model * self.d_model)
for linear in self.linears:
linear.weight.data.uniform_(-stdv, stdv)
def forward(self, query, key, value):
nbatches = query.size(0)
# 1) Do all the linear projections in batch from d_model => h x d_k
query, key, value = \
[l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2).contiguous()
for l, x in zip(self.linears, (query, key, value))]
# 2) Apply attention on all the projected vectors in batch.
#x, self.attn = attention(query, key, value, mask=mask,
# dropout=self.dropout, position_embedding=position_embedding)
x, self.attn = self.fn_attention(query, key, value, dropout=self.dropout)
# 3) "Concat" using a view and apply a final linear.
x = x.transpose(1, 2).contiguous() \
.view(nbatches, -1, self.h * self.d_k)
return self.linears[-1](x)
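# Hedged usage sketch for MultiHeadedAttention: d_model must be divisible by the
# head count; query, key and value are all (bs, n, d_model) and so is the output.
def _demo_multihead_attention():
    mha = MultiHeadedAttention(h=4, d_model=32)
    x = torch.rand(2, 100, 32)
    return mha(x, x, x).shape                        # torch.Size([2, 100, 32])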
class Attention_Conv_surface(nn.Module):
def __init__(self, kernel_num, support_num):
super().__init__()
self.kernel_num = kernel_num
self.support_num = support_num
self.relu = nn.ReLU(inplace= True)
self.query_directions = nn.Parameter(torch.FloatTensor(3, support_num * kernel_num))
self.value_directions = nn.Parameter(torch.FloatTensor(3, support_num * kernel_num))
self.key_directions = nn.Parameter(torch.FloatTensor(3, support_num * kernel_num))
self.multihead_attention = MultiHeadedAttention(4, kernel_num, fn_attention=attention)
self.initialize()
def initialize(self):
stdv = 1. / math.sqrt(self.support_num * self.kernel_num)
self.query_directions.data.uniform_(-stdv, stdv)
self.value_directions.data.uniform_(-stdv, stdv)
self.key_directions.data.uniform_(-stdv, stdv)
def forward(self,
neighbor_index: "(bs, vertice_num, neighbor_num)",
vertices: "(bs, vertice_num, 3)"):
bs, vertice_num, neighbor_num = neighbor_index.size()
neighbor_direction_norm = get_neighbor_direction_norm(vertices, neighbor_index)
support_query_direction_norm = F.normalize(self.query_directions, dim= 0) #(3, s * k)
support_key_direction_norm = F.normalize(self.key_directions, dim= 0) #(3, s * k)
support_value_direction_norm = F.normalize(self.value_directions, dim= 0) #(3, s * k)
query_theta = neighbor_direction_norm @ support_query_direction_norm # (bs, vertice_num, neighbor_num, s*k)
query_theta = self.relu(query_theta)
query_theta = query_theta.contiguous().view(bs, vertice_num, neighbor_num, self.support_num, self.kernel_num)
query_theta = torch.max(query_theta, dim= 2)[0] # (bs, vertice_num, support_num, kernel_num)
query_theta = torch.sum(query_theta, dim= 2) # (bs, vertice_num, kernel_num)
key_theta = neighbor_direction_norm @ support_key_direction_norm # (bs, vertice_num, neighbor_num, s*k)
key_theta = self.relu(key_theta)
key_theta = key_theta.contiguous().view(bs, vertice_num, neighbor_num, self.support_num, self.kernel_num)
key_theta = torch.max(key_theta, dim= 2)[0] # (bs, vertice_num, support_num, kernel_num)
key_theta = torch.sum(key_theta, dim= 2) # (bs, vertice_num, kernel_num)
value_theta = neighbor_direction_norm @ support_value_direction_norm # (bs, vertice_num, neighbor_num, s*k)
value_theta = self.relu(value_theta)
value_theta = value_theta.contiguous().view(bs, vertice_num, neighbor_num, self.support_num, self.kernel_num)
value_theta = torch.max(value_theta, dim= 2)[0] # (bs, vertice_num, support_num, kernel_num)
value_theta = torch.sum(value_theta, dim= 2) # (bs, vertice_num, kernel_num)
feature = self.multihead_attention(query_theta, key_theta, value_theta)
return feature
class Attention_Conv_surface2(nn.Module):
def __init__(self, kernel_num, support_num):
super().__init__()
self.kernel_num = kernel_num
self.support_num = support_num
self.relu = nn.ReLU(inplace= True)
self.directions = nn.Parameter(torch.FloatTensor(3, support_num * kernel_num))
self.multihead_attention = MultiHeadedAttention(4, kernel_num, fn_attention=attention)
self.initialize()
def initialize(self):
stdv = 1. / math.sqrt(self.support_num * self.kernel_num)
self.directions.data.uniform_(-stdv, stdv)
def forward(self,
neighbor_index: "(bs, vertice_num, neighbor_num)",
vertices: "(bs, vertice_num, 3)"):
bs, vertice_num, neighbor_num = neighbor_index.size()
neighbor_direction_norm = get_neighbor_direction_norm(vertices, neighbor_index)
support_direction_norm = F.normalize(self.directions, dim= 0) #(3, s * k)
theta = neighbor_direction_norm @ support_direction_norm
theta = self.relu(theta).contiguous().view(bs, vertice_num, neighbor_num, self.support_num, self.kernel_num)
theta = torch.max(theta, dim= 2)[0] # (bs, vertice_num, support_num, kernel_num)
theta = torch.sum(theta, dim= 2) # (bs, vertice_num, kernel_num)
attention_theta = self.multihead_attention(theta, theta, theta)
return attention_theta
class Conv_layer(nn.Module):
def __init__(self, in_channel, out_channel, support_num):
super().__init__()
# arguments:
self.in_channel = in_channel
self.out_channel = out_channel
self.support_num = support_num
# parameters:
self.relu = nn.ReLU(inplace= True)
self.weights = nn.Parameter(torch.FloatTensor(in_channel, (support_num + 1) * out_channel))
self.bias = nn.Parameter(torch.FloatTensor((support_num + 1) * out_channel))
self.directions = nn.Parameter(torch.FloatTensor(3, support_num * out_channel))
self.initialize()
def initialize(self):
stdv = 1. / math.sqrt(self.out_channel * (self.support_num + 1))
self.weights.data.uniform_(-stdv, stdv)
self.bias.data.uniform_(-stdv, stdv)
self.directions.data.uniform_(-stdv, stdv)
def forward(self,
                neighbor_index: "(bs, vertice_num, neighbor_num)",
vertices: "(bs, vertice_num, 3)",
feature_map: "(bs, vertice_num, in_channel)"):
"""
Return: output feature map: (bs, vertice_num, out_channel)
"""
bs, vertice_num, neighbor_num = neighbor_index.size()
neighbor_direction_norm = get_neighbor_direction_norm(vertices, neighbor_index)
support_direction_norm = F.normalize(self.directions, dim= 0)
theta = neighbor_direction_norm @ support_direction_norm # (bs, vertice_num, neighbor_num, support_num * out_channel)
theta = self.relu(theta)
theta = theta.contiguous().view(bs, vertice_num, neighbor_num, -1)
# (bs, vertice_num, neighbor_num, support_num * out_channel)
feature_out = feature_map @ self.weights + self.bias # (bs, vertice_num, (support_num + 1) * out_channel)
feature_center = feature_out[:, :, :self.out_channel] # (bs, vertice_num, out_channel)
feature_support = feature_out[:, :, self.out_channel:] #(bs, vertice_num, support_num * out_channel)
# Fuse together - max among product
feature_support = indexing_neighbor(feature_support, neighbor_index) # (bs, vertice_num, neighbor_num, support_num * out_channel)
activation_support = theta * feature_support # (bs, vertice_num, neighbor_num, support_num * out_channel)
activation_support = activation_support.view(bs,vertice_num, neighbor_num, self.support_num, self.out_channel)
activation_support = torch.max(activation_support, dim= 2)[0] # (bs, vertice_num, support_num, out_channel)
activation_support = torch.sum(activation_support, dim= 2) # (bs, vertice_num, out_channel)
feature_fuse = feature_center + activation_support # (bs, vertice_num, out_channel)
return feature_fuse
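# Hedged usage sketch for Conv_layer: it consumes the same neighbour indices as the
# surface layer and maps (bs, n, in_channel) features to (bs, n, out_channel).
def _demo_conv_layer():
    vertices = torch.rand(2, 256, 3)
    neighbor_index = get_neighbor_index(vertices, 16)
    layer = Conv_layer(in_channel=32, out_channel=64, support_num=4)
    feature_in = torch.rand(2, 256, 32)
    return layer(neighbor_index, vertices, feature_in).shape   # torch.Size([2, 256, 64])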
class Attention_Conv_layer_V2(nn.Module):
def __init__(self, in_channel, out_channel, support_num):
super().__init__()
# arguments:
self.in_channel = in_channel
self.out_channel = out_channel
self.support_num = support_num
# parameters:
self.relu = nn.ReLU(inplace= True)
self.weights = nn.Parameter(torch.FloatTensor(in_channel, (support_num + 1) * out_channel))
self.bias = nn.Parameter(torch.FloatTensor((support_num + 1) * out_channel))
self.directions = nn.Parameter(torch.FloatTensor(3, support_num * out_channel))
self.multihead_attention = MultiHeadedAttention(4, out_channel, fn_attention=attention)
self.initialize()
def initialize(self):
stdv = 1. / math.sqrt(self.out_channel * (self.support_num + 1))
self.weights.data.uniform_(-stdv, stdv)
self.bias.data.uniform_(-stdv, stdv)
self.directions.data.uniform_(-stdv, stdv)
def forward(self,
                neighbor_index: "(bs, vertice_num, neighbor_num)",
vertices: "(bs, vertice_num, 3)",
feature_map: "(bs, vertice_num, in_channel)"):
"""
Return: output feature map: (bs, vertice_num, out_channel)
"""
bs, vertice_num, neighbor_num = neighbor_index.size()
neighbor_direction_norm = get_neighbor_direction_norm(vertices, neighbor_index)
support_direction_norm = F.normalize(self.directions, dim= 0)
theta = neighbor_direction_norm @ support_direction_norm # (bs, vertice_num, neighbor_num, support_num * out_channel)
theta = self.relu(theta)
theta = theta.contiguous().view(bs, vertice_num, neighbor_num, -1)
# (bs, vertice_num, neighbor_num, support_num * out_channel)
feature_out = feature_map @ self.weights + self.bias # (bs, vertice_num, (support_num + 1) * out_channel)
feature_center = feature_out[:, :, :self.out_channel] # (bs, | |
+ m.b45 - m.b156 <= 0)
m.c2389 = Constraint(expr= - m.b15 + m.b17 - m.b157 <= 0)
m.c2390 = Constraint(expr= - m.b15 + m.b19 - m.b158 <= 0)
m.c2391 = Constraint(expr= - m.b15 + m.b21 - m.b159 <= 0)
m.c2392 = Constraint(expr= - m.b15 + m.b23 - m.b160 <= 0)
m.c2393 = Constraint(expr= - m.b15 + m.b25 - m.b161 <= 0)
m.c2394 = Constraint(expr= - m.b15 + m.b27 - m.b162 <= 0)
m.c2395 = Constraint(expr= - m.b15 + m.b29 - m.b163 <= 0)
m.c2396 = Constraint(expr= - m.b15 + m.b31 - m.b164 <= 0)
m.c2397 = Constraint(expr= - m.b15 + m.b33 - m.b165 <= 0)
m.c2398 = Constraint(expr= - m.b15 + m.b35 - m.b166 <= 0)
m.c2399 = Constraint(expr= - m.b15 + m.b37 - m.b167 <= 0)
m.c2400 = Constraint(expr= - m.b15 + m.b39 - m.b168 <= 0)
m.c2401 = Constraint(expr= - m.b15 + m.b41 - m.b169 <= 0)
m.c2402 = Constraint(expr= - m.b15 + m.b43 - m.b170 <= 0)
m.c2403 = Constraint(expr= - m.b15 + m.b45 - m.b171 <= 0)
m.c2404 = Constraint(expr= - m.b17 + m.b19 - m.b172 <= 0)
m.c2405 = Constraint(expr= - m.b17 + m.b21 - m.b173 <= 0)
m.c2406 = Constraint(expr= - m.b17 + m.b23 - m.b174 <= 0)
m.c2407 = Constraint(expr= - m.b17 + m.b25 - m.b175 <= 0)
m.c2408 = Constraint(expr= - m.b17 + m.b27 - m.b176 <= 0)
m.c2409 = Constraint(expr= - m.b17 + m.b29 - m.b177 <= 0)
m.c2410 = Constraint(expr= - m.b17 + m.b31 - m.b178 <= 0)
m.c2411 = Constraint(expr= - m.b17 + m.b33 - m.b179 <= 0)
m.c2412 = Constraint(expr= - m.b17 + m.b35 - m.b180 <= 0)
m.c2413 = Constraint(expr= - m.b17 + m.b37 - m.b181 <= 0)
m.c2414 = Constraint(expr= - m.b17 + m.b39 - m.b182 <= 0)
m.c2415 = Constraint(expr= - m.b17 + m.b41 - m.b183 <= 0)
m.c2416 = Constraint(expr= - m.b17 + m.b43 - m.b184 <= 0)
m.c2417 = Constraint(expr= - m.b17 + m.b45 - m.b185 <= 0)
m.c2418 = Constraint(expr= - m.b19 + m.b21 - m.b186 <= 0)
m.c2419 = Constraint(expr= - m.b19 + m.b23 - m.b187 <= 0)
m.c2420 = Constraint(expr= - m.b19 + m.b25 - m.b188 <= 0)
m.c2421 = Constraint(expr= - m.b19 + m.b27 - m.b189 <= 0)
m.c2422 = Constraint(expr= - m.b19 + m.b29 - m.b190 <= 0)
m.c2423 = Constraint(expr= - m.b19 + m.b31 - m.b191 <= 0)
m.c2424 = Constraint(expr= - m.b19 + m.b33 - m.b192 <= 0)
m.c2425 = Constraint(expr= - m.b19 + m.b35 - m.b193 <= 0)
m.c2426 = Constraint(expr= - m.b19 + m.b37 - m.b194 <= 0)
m.c2427 = Constraint(expr= - m.b19 + m.b39 - m.b195 <= 0)
m.c2428 = Constraint(expr= - m.b19 + m.b41 - m.b196 <= 0)
m.c2429 = Constraint(expr= - m.b19 + m.b43 - m.b197 <= 0)
m.c2430 = Constraint(expr= - m.b19 + m.b45 - m.b198 <= 0)
m.c2431 = Constraint(expr= - m.b21 + m.b23 - m.b199 <= 0)
m.c2432 = Constraint(expr= - m.b21 + m.b25 - m.b200 <= 0)
m.c2433 = Constraint(expr= - m.b21 + m.b27 - m.b201 <= 0)
m.c2434 = Constraint(expr= - m.b21 + m.b29 - m.b202 <= 0)
m.c2435 = Constraint(expr= - m.b21 + m.b31 - m.b203 <= 0)
m.c2436 = Constraint(expr= - m.b21 + m.b33 - m.b204 <= 0)
m.c2437 = Constraint(expr= - m.b21 + m.b35 - m.b205 <= 0)
m.c2438 = Constraint(expr= - m.b21 + m.b37 - m.b206 <= 0)
m.c2439 = Constraint(expr= - m.b21 + m.b39 - m.b207 <= 0)
m.c2440 = Constraint(expr= - m.b21 + m.b41 - m.b208 <= 0)
m.c2441 = Constraint(expr= - m.b21 + m.b43 - m.b209 <= 0)
m.c2442 = Constraint(expr= - m.b21 + m.b45 - m.b210 <= 0)
m.c2443 = Constraint(expr= - m.b23 + m.b25 - m.b211 <= 0)
m.c2444 = Constraint(expr= - m.b23 + m.b27 - m.b212 <= 0)
m.c2445 = Constraint(expr= - m.b23 + m.b29 - m.b213 <= 0)
m.c2446 = Constraint(expr= - m.b23 + m.b31 - m.b214 <= 0)
m.c2447 = Constraint(expr= - m.b23 + m.b33 - m.b215 <= 0)
m.c2448 = Constraint(expr= - m.b23 + m.b35 - m.b216 <= 0)
m.c2449 = Constraint(expr= - m.b23 + m.b37 - m.b217 <= 0)
m.c2450 = Constraint(expr= - m.b23 + m.b39 - m.b218 <= 0)
m.c2451 = Constraint(expr= - m.b23 + m.b41 - m.b219 <= 0)
m.c2452 = Constraint(expr= - m.b23 + m.b43 - m.b220 <= 0)
m.c2453 = Constraint(expr= - m.b23 + m.b45 - m.b221 <= 0)
m.c2454 = Constraint(expr= - m.b25 + m.b27 - m.b222 <= 0)
m.c2455 = Constraint(expr= - m.b25 + m.b29 - m.b223 <= 0)
m.c2456 = Constraint(expr= - m.b25 + m.b31 - m.b224 <= 0)
m.c2457 = Constraint(expr= - m.b25 + m.b33 - m.b225 <= 0)
m.c2458 = Constraint(expr= - m.b25 + m.b35 - m.b226 <= 0)
m.c2459 = Constraint(expr= - m.b25 + m.b37 - m.b227 <= 0)
m.c2460 = Constraint(expr= - m.b25 + m.b39 - m.b228 <= 0)
m.c2461 = Constraint(expr= - m.b25 + m.b41 - m.b229 <= 0)
m.c2462 = Constraint(expr= - m.b25 + m.b43 - m.b230 <= 0)
m.c2463 = Constraint(expr= - m.b25 + m.b45 - m.b231 <= 0)
m.c2464 = Constraint(expr= - m.b27 + m.b29 - m.b232 <= 0)
m.c2465 = Constraint(expr= - m.b27 + m.b31 - m.b233 <= 0)
m.c2466 = Constraint(expr= - m.b27 + m.b33 - m.b234 <= 0)
m.c2467 = Constraint(expr= - m.b27 + m.b35 - m.b235 <= 0)
m.c2468 = Constraint(expr= - m.b27 + m.b37 - m.b236 <= 0)
m.c2469 = Constraint(expr= - m.b27 + m.b39 - m.b237 <= 0)
m.c2470 = Constraint(expr= - m.b27 + m.b41 - m.b238 <= 0)
m.c2471 = Constraint(expr= - m.b27 + m.b43 - m.b239 <= 0)
m.c2472 = Constraint(expr= - m.b27 + m.b45 - m.b240 <= 0)
m.c2473 = Constraint(expr= - m.b29 + m.b31 - m.b241 <= 0)
m.c2474 = Constraint(expr= - m.b29 + m.b33 - m.b242 <= 0)
m.c2475 = Constraint(expr= - m.b29 + m.b35 - m.b243 <= 0)
m.c2476 = Constraint(expr= - m.b29 + m.b37 - m.b244 <= 0)
m.c2477 = Constraint(expr= - m.b29 + m.b39 - m.b245 <= 0)
m.c2478 = Constraint(expr= - m.b29 + m.b41 - m.b246 <= 0)
m.c2479 = Constraint(expr= - m.b29 + m.b43 - m.b247 <= 0)
m.c2480 = Constraint(expr= - m.b29 + m.b45 - m.b248 <= 0)
m.c2481 = Constraint(expr= - m.b31 + m.b33 - m.b249 <= 0)
m.c2482 = Constraint(expr= - m.b31 + m.b35 - m.b250 <= 0)
m.c2483 = Constraint(expr= - m.b31 + m.b37 - m.b251 <= 0)
m.c2484 = Constraint(expr= - m.b31 + m.b39 - m.b252 <= 0)
m.c2485 = Constraint(expr= - m.b31 + m.b41 - m.b253 <= 0)
m.c2486 = Constraint(expr= - m.b31 + m.b43 - m.b254 <= 0)
m.c2487 = Constraint(expr= - m.b31 + m.b45 - m.b255 <= 0)
m.c2488 = Constraint(expr= - m.b33 + m.b35 - m.b256 <= 0)
m.c2489 = Constraint(expr= - m.b33 + m.b37 - m.b257 <= 0)
m.c2490 = Constraint(expr= - m.b33 + m.b39 - m.b258 <= 0)
m.c2491 = Constraint(expr= - m.b33 + m.b41 - m.b259 <= 0)
m.c2492 = Constraint(expr= - m.b33 + m.b43 - m.b260 <= 0)
m.c2493 = Constraint(expr= - m.b33 + m.b45 - m.b261 <= 0)
m.c2494 = Constraint(expr= - m.b35 + m.b37 - m.b262 <= 0)
m.c2495 = Constraint(expr= - m.b35 + m.b39 - m.b263 <= 0)
m.c2496 = Constraint(expr= - m.b35 + m.b41 - m.b264 <= 0)
m.c2497 = Constraint(expr= - m.b35 + m.b43 - m.b265 <= 0)
m.c2498 = Constraint(expr= - m.b35 + m.b45 - m.b266 <= 0)
m.c2499 = Constraint(expr= - m.b37 + m.b39 - m.b267 <= 0)
m.c2500 = Constraint(expr= - m.b37 + m.b41 - m.b268 <= 0)
m.c2501 = Constraint(expr= - m.b37 + m.b43 - m.b269 <= 0)
m.c2502 = Constraint(expr= - m.b37 + m.b45 - m.b270 <= 0)
m.c2503 = Constraint(expr= - m.b39 + m.b41 - m.b271 <= 0)
m.c2504 = Constraint(expr= - m.b39 + m.b43 - m.b272 <= 0)
m.c2505 = Constraint(expr= - m.b39 + m.b45 - m.b273 <= 0)
m.c2506 = Constraint(expr= - m.b41 + m.b43 - m.b274 <= 0)
m.c2507 = Constraint(expr= - m.b41 + m.b45 - m.b275 <= 0)
m.c2508 = Constraint(expr= - m.b43 + m.b45 - m.b276 <= 0)
m.c2509 = Constraint(expr= - m.b46 + m.b47 - m.b67 <= 0)
m.c2510 = Constraint(expr= - m.b46 + m.b48 - m.b68 <= 0)
m.c2511 = Constraint(expr= - m.b46 + m.b49 - m.b69 <= 0)
m.c2512 = Constraint(expr= - m.b46 + m.b50 - m.b70 <= 0)
m.c2513 = Constraint(expr= - m.b46 + m.b51 - m.b71 <= 0)
m.c2514 = Constraint(expr= - m.b46 + m.b52 - m.b72 <= 0)
m.c2515 = Constraint(expr= - m.b46 + m.b53 - m.b73 <= 0)
m.c2516 = Constraint(expr= - m.b46 | |
        if instanceObject.styleMapFamilyName is not None:
instanceElement.attrib['stylemapfamilyname'] = instanceObject.styleMapFamilyName
if instanceObject.styleMapStyleName is not None:
instanceElement.attrib['stylemapstylename'] = instanceObject.styleMapStyleName
if self.effectiveFormatTuple < (5, 0):
# Deprecated members as of version 5.0
if instanceObject.glyphs:
if instanceElement.findall('.glyphs') == []:
glyphsElement = ET.Element('glyphs')
instanceElement.append(glyphsElement)
glyphsElement = instanceElement.findall('.glyphs')[0]
for glyphName, data in sorted(instanceObject.glyphs.items()):
glyphElement = self._writeGlyphElement(instanceElement, instanceObject, glyphName, data)
glyphsElement.append(glyphElement)
if instanceObject.kerning:
kerningElement = ET.Element('kerning')
instanceElement.append(kerningElement)
if instanceObject.info:
infoElement = ET.Element('info')
instanceElement.append(infoElement)
self._addLib(instanceElement, instanceObject.lib, 4)
self.root.findall('.instances')[0].append(instanceElement)
def _addSource(self, sourceObject):
sourceElement = ET.Element("source")
if sourceObject.filename is not None:
sourceElement.attrib['filename'] = sourceObject.filename
if sourceObject.name is not None:
if sourceObject.name.find("temp_master") != 0:
# do not save temporary source names
sourceElement.attrib['name'] = sourceObject.name
if sourceObject.familyName is not None:
sourceElement.attrib['familyname'] = sourceObject.familyName
if sourceObject.styleName is not None:
sourceElement.attrib['stylename'] = sourceObject.styleName
if sourceObject.layerName is not None:
sourceElement.attrib['layer'] = sourceObject.layerName
if sourceObject.localisedFamilyName:
languageCodes = list(sourceObject.localisedFamilyName.keys())
languageCodes.sort()
for code in languageCodes:
if code == "en":
continue # already stored in the element attribute
localisedFamilyNameElement = ET.Element('familyname')
localisedFamilyNameElement.attrib[XML_LANG] = code
localisedFamilyNameElement.text = sourceObject.getFamilyName(code)
sourceElement.append(localisedFamilyNameElement)
if sourceObject.copyLib:
libElement = ET.Element('lib')
libElement.attrib['copy'] = "1"
sourceElement.append(libElement)
if sourceObject.copyGroups:
groupsElement = ET.Element('groups')
groupsElement.attrib['copy'] = "1"
sourceElement.append(groupsElement)
if sourceObject.copyFeatures:
featuresElement = ET.Element('features')
featuresElement.attrib['copy'] = "1"
sourceElement.append(featuresElement)
if sourceObject.copyInfo or sourceObject.muteInfo:
infoElement = ET.Element('info')
if sourceObject.copyInfo:
infoElement.attrib['copy'] = "1"
if sourceObject.muteInfo:
infoElement.attrib['mute'] = "1"
sourceElement.append(infoElement)
if sourceObject.muteKerning:
kerningElement = ET.Element("kerning")
kerningElement.attrib["mute"] = '1'
sourceElement.append(kerningElement)
if sourceObject.mutedGlyphNames:
for name in sourceObject.mutedGlyphNames:
glyphElement = ET.Element("glyph")
glyphElement.attrib["name"] = name
glyphElement.attrib["mute"] = '1'
sourceElement.append(glyphElement)
if self.effectiveFormatTuple >= (5, 0):
self._addLocationElement(sourceElement, designLocation=sourceObject.location)
else:
# Pre-version 5.0 code was validating and filling in the location
# dict while writing it out, as preserved below.
locationElement, sourceObject.location = self._makeLocationElement(sourceObject.location)
sourceElement.append(locationElement)
self.root.findall('.sources')[0].append(sourceElement)
def _addVariableFont(self, parentElement: ET.Element, vf: VariableFontDescriptor) -> None:
vfElement = ET.Element('variable-font')
vfElement.attrib['name'] = vf.name
if vf.filename is not None:
vfElement.attrib['filename'] = vf.filename
if vf.axisSubsets:
subsetsElement = ET.Element('axis-subsets')
for subset in vf.axisSubsets:
subsetElement = ET.Element('axis-subset')
subsetElement.attrib['name'] = subset.name
if isinstance(subset, RangeAxisSubsetDescriptor):
if subset.userMinimum != -math.inf:
subsetElement.attrib['userminimum'] = self.intOrFloat(subset.userMinimum)
if subset.userMaximum != math.inf:
subsetElement.attrib['usermaximum'] = self.intOrFloat(subset.userMaximum)
if subset.userDefault is not None:
subsetElement.attrib['userdefault'] = self.intOrFloat(subset.userDefault)
elif isinstance(subset, ValueAxisSubsetDescriptor):
subsetElement.attrib['uservalue'] = self.intOrFloat(subset.userValue)
subsetsElement.append(subsetElement)
vfElement.append(subsetsElement)
self._addLib(vfElement, vf.lib, 4)
parentElement.append(vfElement)
def _addLib(self, parentElement: ET.Element, data: Any, indent_level: int) -> None:
if not data:
return
libElement = ET.Element('lib')
libElement.append(plistlib.totree(data, indent_level=indent_level))
parentElement.append(libElement)
def _writeGlyphElement(self, instanceElement, instanceObject, glyphName, data):
glyphElement = ET.Element('glyph')
if data.get('mute'):
glyphElement.attrib['mute'] = "1"
if data.get('unicodes') is not None:
glyphElement.attrib['unicode'] = " ".join([hex(u) for u in data.get('unicodes')])
if data.get('instanceLocation') is not None:
locationElement, data['instanceLocation'] = self._makeLocationElement(data.get('instanceLocation'))
glyphElement.append(locationElement)
if glyphName is not None:
glyphElement.attrib['name'] = glyphName
if data.get('note') is not None:
noteElement = ET.Element('note')
noteElement.text = data.get('note')
glyphElement.append(noteElement)
if data.get('masters') is not None:
mastersElement = ET.Element("masters")
for m in data.get('masters'):
masterElement = ET.Element("master")
if m.get('glyphName') is not None:
masterElement.attrib['glyphname'] = m.get('glyphName')
if m.get('font') is not None:
masterElement.attrib['source'] = m.get('font')
if m.get('location') is not None:
locationElement, m['location'] = self._makeLocationElement(m.get('location'))
masterElement.append(locationElement)
mastersElement.append(masterElement)
glyphElement.append(mastersElement)
return glyphElement
class BaseDocReader(LogMixin):
axisDescriptorClass = AxisDescriptor
discreteAxisDescriptorClass = DiscreteAxisDescriptor
axisLabelDescriptorClass = AxisLabelDescriptor
locationLabelDescriptorClass = LocationLabelDescriptor
ruleDescriptorClass = RuleDescriptor
sourceDescriptorClass = SourceDescriptor
variableFontsDescriptorClass = VariableFontDescriptor
valueAxisSubsetDescriptorClass = ValueAxisSubsetDescriptor
rangeAxisSubsetDescriptorClass = RangeAxisSubsetDescriptor
instanceDescriptorClass = InstanceDescriptor
def __init__(self, documentPath, documentObject):
self.path = documentPath
self.documentObject = documentObject
tree = ET.parse(self.path)
self.root = tree.getroot()
self.documentObject.formatVersion = self.root.attrib.get("format", "3.0")
self._axes = []
self.rules = []
self.sources = []
self.instances = []
self.axisDefaults = {}
self._strictAxisNames = True
@classmethod
def fromstring(cls, string, documentObject):
f = BytesIO(tobytes(string, encoding="utf-8"))
self = cls(f, documentObject)
self.path = None
return self
def read(self):
self.readAxes()
self.readLabels()
self.readRules()
self.readVariableFonts()
self.readSources()
self.readInstances()
self.readLib()
def readRules(self):
# we also need to read any conditions that are outside of a condition set.
rules = []
rulesElement = self.root.find(".rules")
if rulesElement is not None:
processingValue = rulesElement.attrib.get("processing", "first")
if processingValue not in {"first", "last"}:
raise DesignSpaceDocumentError(
"<rules> processing attribute value is not valid: %r, "
"expected 'first' or 'last'" % processingValue)
self.documentObject.rulesProcessingLast = processingValue == "last"
for ruleElement in self.root.findall(".rules/rule"):
ruleObject = self.ruleDescriptorClass()
ruleName = ruleObject.name = ruleElement.attrib.get("name")
# read any stray conditions outside a condition set
externalConditions = self._readConditionElements(
ruleElement,
ruleName,
)
if externalConditions:
ruleObject.conditionSets.append(externalConditions)
self.log.info(
"Found stray rule conditions outside a conditionset. "
"Wrapped them in a new conditionset."
)
# read the conditionsets
for conditionSetElement in ruleElement.findall('.conditionset'):
conditionSet = self._readConditionElements(
conditionSetElement,
ruleName,
)
if conditionSet is not None:
ruleObject.conditionSets.append(conditionSet)
for subElement in ruleElement.findall('.sub'):
a = subElement.attrib['name']
b = subElement.attrib['with']
ruleObject.subs.append((a, b))
rules.append(ruleObject)
self.documentObject.rules = rules
def _readConditionElements(self, parentElement, ruleName=None):
cds = []
for conditionElement in parentElement.findall('.condition'):
cd = {}
cdMin = conditionElement.attrib.get("minimum")
if cdMin is not None:
cd['minimum'] = float(cdMin)
else:
# will allow these to be None, assume axis.minimum
cd['minimum'] = None
cdMax = conditionElement.attrib.get("maximum")
if cdMax is not None:
cd['maximum'] = float(cdMax)
else:
# will allow these to be None, assume axis.maximum
cd['maximum'] = None
cd['name'] = conditionElement.attrib.get("name")
            # sanity check: each condition needs at least one of minimum/maximum
if cd.get('minimum') is None and cd.get('maximum') is None:
raise DesignSpaceDocumentError(
"condition missing required minimum or maximum in rule" +
(" '%s'" % ruleName if ruleName is not None else ""))
cds.append(cd)
return cds
def readAxes(self):
# read the axes elements, including the warp map.
axesElement = self.root.find(".axes")
if axesElement is not None and 'elidedfallbackname' in axesElement.attrib:
self.documentObject.elidedFallbackName = axesElement.attrib['elidedfallbackname']
axisElements = self.root.findall(".axes/axis")
if not axisElements:
return
for axisElement in axisElements:
if self.documentObject.formatTuple >= (5, 0) and "values" in axisElement.attrib:
axisObject = self.discreteAxisDescriptorClass()
axisObject.values = [float(s) for s in axisElement.attrib["values"].split(" ")]
else:
axisObject = self.axisDescriptorClass()
axisObject.minimum = float(axisElement.attrib.get("minimum"))
axisObject.maximum = float(axisElement.attrib.get("maximum"))
axisObject.default = float(axisElement.attrib.get("default"))
axisObject.name = axisElement.attrib.get("name")
if axisElement.attrib.get('hidden', False):
axisObject.hidden = True
axisObject.tag = axisElement.attrib.get("tag")
for mapElement in axisElement.findall('map'):
a = float(mapElement.attrib['input'])
b = float(mapElement.attrib['output'])
axisObject.map.append((a, b))
for labelNameElement in axisElement.findall('labelname'):
# Note: elementtree reads the "xml:lang" attribute name as
# '{http://www.w3.org/XML/1998/namespace}lang'
for key, lang in labelNameElement.items():
if key == XML_LANG:
axisObject.labelNames[lang] = tostr(labelNameElement.text)
labelElement = axisElement.find(".labels")
if labelElement is not None:
if "ordering" in labelElement.attrib:
axisObject.axisOrdering = int(labelElement.attrib["ordering"])
for label in labelElement.findall(".label"):
axisObject.axisLabels.append(self.readAxisLabel(label))
self.documentObject.axes.append(axisObject)
self.axisDefaults[axisObject.name] = axisObject.default
def readAxisLabel(self, element: ET.Element):
xml_attrs = {'userminimum', 'uservalue', 'usermaximum', 'name', 'elidable', 'oldersibling', 'linkeduservalue'}
unknown_attrs = set(element.attrib) - xml_attrs
if unknown_attrs:
raise DesignSpaceDocumentError(f"label element contains unknown attributes: {', '.join(unknown_attrs)}")
name = element.get("name")
if name is None:
raise DesignSpaceDocumentError("label element must have a name attribute.")
valueStr = element.get("uservalue")
if valueStr is None:
raise DesignSpaceDocumentError("label element must have a uservalue attribute.")
value = float(valueStr)
minimumStr = element.get("userminimum")
minimum = float(minimumStr) if minimumStr is not None else None
maximumStr = element.get("usermaximum")
maximum = float(maximumStr) if maximumStr is not None else None
linkedValueStr = element.get("linkeduservalue")
linkedValue = float(linkedValueStr) if linkedValueStr is not None else None
elidable = True if element.get("elidable") == "true" else False
olderSibling = True if element.get("oldersibling") == "true" else False
labelNames = {
lang: label_name.text or ""
for label_name in element.findall("labelname")
for attr, lang in label_name.items()
if attr == XML_LANG
# Note: elementtree reads the "xml:lang" attribute name as
# '{http://www.w3.org/XML/1998/namespace}lang'
}
return self.axisLabelDescriptorClass(
name=name,
userValue=value,
userMinimum=minimum,
userMaximum=maximum,
elidable=elidable,
olderSibling=olderSibling,
linkedUserValue=linkedValue,
labelNames=labelNames,
)
def readLabels(self):
if self.documentObject.formatTuple < (5, 0):
return
xml_attrs = {'name', 'elidable', 'oldersibling'}
for labelElement in self.root.findall(".labels/label"):
unknown_attrs = set(labelElement.attrib) - xml_attrs
if unknown_attrs:
raise DesignSpaceDocumentError(f"Label element contains unknown attributes: {', '.join(unknown_attrs)}")
name = labelElement.get("name")
if name is None:
raise DesignSpaceDocumentError("label element must have a name attribute.")
designLocation, userLocation = self.locationFromElement(labelElement)
if designLocation:
raise DesignSpaceDocumentError(f'<label> element "{name}" must only have user locations (using uservalue="").')
elidable = True if labelElement.get("elidable") == "true" else False
olderSibling = True if labelElement.get("oldersibling") == "true" else False
labelNames = {
lang: label_name.text or ""
for label_name in labelElement.findall("labelname")
for attr, lang in label_name.items()
if attr == XML_LANG
# Note: elementtree reads the "xml:lang" attribute name as
# '{http://www.w3.org/XML/1998/namespace}lang'
}
locationLabel = self.locationLabelDescriptorClass(
name=name,
userLocation=userLocation,
elidable=elidable,
olderSibling=olderSibling,
labelNames=labelNames,
)
self.documentObject.locationLabels.append(locationLabel)
def readVariableFonts(self):
if self.documentObject.formatTuple < (5, 0):
return
xml_attrs = {'name', 'filename'}
for variableFontElement in self.root.findall(".variable-fonts/variable-font"):
unknown_attrs = set(variableFontElement.attrib) - xml_attrs
if unknown_attrs:
raise DesignSpaceDocumentError(f"variable-font element contains unknown attributes: {', '.join(unknown_attrs)}")
name = variableFontElement.get("name")
if name is None:
raise DesignSpaceDocumentError("variable-font element must have a name attribute.")
filename = variableFontElement.get("filename")
axisSubsetsElement = variableFontElement.find(".axis-subsets")
if axisSubsetsElement is | |
import json
import sys
import threading as th
import time
from base64 import b64encode
from io import open
from os import path
from serial.serialutil import SerialException
from modi2_firmware_updater.util.message_util import decode_message, parse_message, unpack_data
from modi2_firmware_updater.util.modi_winusb.modi_serialport import ModiSerialPort, list_modi_serialports
from modi2_firmware_updater.util.module_util import Module, get_module_type_from_uuid
from modi2_firmware_updater.util.platform_util import delay
def retry(exception_to_catch):
def decorator(func):
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except exception_to_catch:
return wrapper(*args, **kwargs)
return wrapper
return decorator
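# Hedged usage sketch for the retry decorator above: the wrapped call is re-run
# (recursively) whenever the named exception is raised, so a call that keeps failing
# keeps retrying. The helper below is hypothetical and only shows how the decorator
# is applied.
@retry(SerialException)
def _read_first_byte_example(port):
    return port.read(1)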
class ModuleFirmwareUpdater(ModiSerialPort):
"""Module Firmware Updater: Updates a firmware of given module"""
NO_ERROR = 0
UPDATE_READY = 1
WRITE_FAIL = 2
VERIFY_FAIL = 3
CRC_ERROR = 4
CRC_COMPLETE = 5
ERASE_ERROR = 6
ERASE_COMPLETE = 7
def __init__(self, device=None, module_firmware_path=None):
self.print = True
self.__target_ids = (0xFFF, )
self.response_flag = False
self.response_error_flag = False
self.response_error_count = 0
self.__running = True
self.update_in_progress = False
self.modules_to_update_all = []
self.modules_to_update = []
self.modules_to_update_second_bootloader = []
self.modules_to_update_bootloader = []
self.modules_updated = []
self.network_id = None
self.network_version = None
self.ui = None
self.module_type = None
self.progress = None
self.raise_error_message = False
self.update_error = 0
self.update_error_message = ""
self.has_update_error = False
self.this_update_error = False
self.module_firmware_path = module_firmware_path
self.network_uuid = None
        if device is not None:
super().__init__(device, baudrate = 921600, timeout = 0.1, write_timeout = 0)
else:
modi_ports = list_modi_serialports()
if not modi_ports:
raise SerialException("No MODI+ port is connected")
for modi_port in modi_ports:
try:
super().__init__(modi_port, baudrate = 921600, timeout = 0.1, write_timeout = 0)
except Exception:
self.__print('Next network module')
continue
else:
break
self.__print(f"Connecting to MODI+ network module at {modi_port}")
self.open_recv_thread()
th.Thread(
target=self.module_firmware_update_manager, daemon=True
).start()
def module_firmware_update_manager(self):
timeout_count = 0
timeout_delay = 0.1
while timeout_count < 10:
time.sleep(timeout_delay)
timeout_count += timeout_delay
if not self.update_in_progress:
# Wait until a device is connected
continue
if len(self.modules_to_update_second_bootloader) != 0:
self.__send_conn(parse_message(0x2C, 0x0, 0xFFF, (1,1))) #SWU LEGACY MODE
second_bootloader_update_module_id, second_bootloader_update_module_type = self.modules_to_update_second_bootloader[0]
self.modules_to_update_second_bootloader.pop(0)
self.__update_firmware_second_bootloader(second_bootloader_update_module_id, second_bootloader_update_module_type, 0)
timeout_count = 0
elif len(self.modules_to_update_bootloader) != 0:
self.__send_conn(parse_message(0x2C, 0x0, 0xFFF, (0,0))) #SWU MODE
bootloader_update_module_id, bootloader_update_module_type = self.modules_to_update_bootloader[0]
self.modules_to_update_bootloader.pop(0)
self.__update_firmware_bootloader(bootloader_update_module_id, bootloader_update_module_type, 0)
timeout_count = 0
elif len(self.modules_to_update) != 0:
self.__send_conn(parse_message(0x2C, 0x0, 0xFFF, (0,0))) #SWU MODE
update_module_id, update_module_type = self.modules_to_update[0]
self.modules_to_update.pop(0)
self.__update_firmware(update_module_id, update_module_type)
timeout_count = 0
if len(self.modules_updated) != 0 and len(self.modules_to_update_all) != 0:
if len(self.modules_updated) == len(self.modules_to_update_all):
# If all updates are finished, exit
break
reboot_message = self.__set_module_state(0xFFF, Module.REBOOT, Module.PNP_OFF)
self.__send_conn(reboot_message)
self.__print("Reboot message has been sent to all connected modules")
time.sleep(1)
self.__print("Module firmwares have been updated!")
self.close_recv_thread()
self.close()
self.update_in_progress = False
if self.has_update_error:
self.update_error = -1
else:
self.update_error = 1
time.sleep(0.5)
self.reset_state()
def set_ui(self, ui):
self.ui = ui
def set_print(self, print):
self.print = print
def set_raise_error(self, raise_error_message):
self.raise_error_message = raise_error_message
def request_network_id(self):
self.__send_conn(parse_message(0x28, 0x0, 0xFFF, (0xFF, 0x0F)))
def __assign_network_id(self, sid, data, length:int):
unpacked_data = unpack_data(data, (6, 2))
module_uuid = unpacked_data[0]
module_version_digits = unpacked_data[1]
module_type = get_module_type_from_uuid(module_uuid)
if module_type == "network":
self.network_uuid = module_uuid
self.network_id = sid
module_version = [
str((module_version_digits & 0xE000) >> 13), # major
str((module_version_digits & 0x1F00) >> 8), # minor
str(module_version_digits & 0x00FF) # patch
]
self.network_version = ".".join(module_version)
def update_module_firmware(self, firmware_version_info = {}):
self.update_in_progress = True
self.has_update_error = False
self.request_network_id()
self.reset_state()
self.firmware_version_info = firmware_version_info
for target in self.__target_ids:
self.request_to_update_firmware(target)
def close_recv_thread(self):
self.__running = False
time.sleep(2)
if self.recv_thread:
self.recv_thread.join()
def open_recv_thread(self):
self.__running = True
self.recv_thread = th.Thread(target=self.__read_conn, daemon=True)
self.recv_thread.start()
def reset_state(self, update_in_progress: bool = False) -> None:
self.response_flag = False
self.response_error_flag = False
self.response_error_count = 0
if not update_in_progress:
self.__print("Make sure you have connected module(s) to update")
self.__print("Resetting firmware updater's state")
self.modules_to_update = []
self.modules_updated = []
def request_to_update_firmware(self, module_id) -> None:
firmware_update_message = self.__set_module_state(module_id, Module.UPDATE_FIRMWARE, Module.PNP_OFF)
self.__send_conn(firmware_update_message)
time.sleep(0.01)
self.__send_conn(firmware_update_message)
time.sleep(0.01)
self.__send_conn(firmware_update_message)
time.sleep(0.01)
self.__print("Firmware update has been requested")
def check_to_update_firmware(self, module_id: int) -> None:
firmware_update_ready_message = self.__set_module_state(module_id, Module.UPDATE_FIRMWARE_READY, Module.PNP_OFF)
self.__send_conn(firmware_update_ready_message)
def add_to_module_list(self, module_id: int, module_type: str, module_section: int) -> None:
modules_update_all_flag = True
for curr_module_id, curr_module_type in self.modules_to_update_all:
if module_id == curr_module_id:
modules_update_all_flag = False
break
if modules_update_all_flag:
module_elem = module_id, module_type
self.modules_to_update_all.append(module_elem)
for curr_module_id, curr_module_type in self.modules_updated:
if module_id == curr_module_id:
return
if module_section == 0: # module in bootloader
for curr_module_id, curr_module_type in self.modules_to_update:
if module_id == curr_module_id:
return
module_elem = module_id, module_type
self.modules_to_update.append(module_elem)
print(f"\nAdding {module_type} ({module_id}) to update waiting list...{' ' * 60}\n")
elif module_section == 1: # module in second bootloader
for curr_module_id, curr_module_type in self.modules_to_update_bootloader:
if module_id == curr_module_id:
return
module_elem = module_id, module_type
self.modules_to_update_bootloader.append(module_elem)
print(f"\nAdding {module_type} ({module_id}) to bootloader waiting list...{' ' * 60}\n")
elif module_section == 2: # this module need to update bootloader
for curr_module_id, curr_module_type in self.modules_to_update_second_bootloader:
if module_id == curr_module_id:
return
module_elem = module_id, module_type
self.modules_to_update_second_bootloader.append(module_elem)
print(f"\nAdding {module_type} ({module_id}) to second bootloader waiting list...{' ' * 60}\n")
def update_response(self, response: bool, is_error_response: bool = False) -> None:
if not is_error_response:
self.response_flag = response
self.response_error_flag = False
else:
self.response_flag = False
self.response_error_flag = response
def __update_firmware(self, module_id: int, module_type: str) -> None:
is_already_updated = False
# Check if module is already updated
for curr_module_id, curr_module_type in self.modules_updated:
if module_id == curr_module_id:
is_already_updated = True
if not is_already_updated:
self.module_type = module_type
# Init base root_path, utilizing local binary files
root_path = path.join(self.module_firmware_path, module_type, self.firmware_version_info[module_type]["app"])
bin_path = path.join(root_path, f"{module_type.lower()}.bin")
with open(bin_path, "rb") as bin_file:
bin_buffer = bin_file.read()
self.this_update_error = False
# Init metadata of the bytes loaded
flash_memory_addr = 0x08000000
bin_size = sys.getsizeof(bin_buffer)
page_size = 0x400
bin_begin = 0x400
page_offset = 0x4C00
erase_page_num = 1
end_flash_address = 0x0800f800
if module_type == "speaker" or module_type == "display" or module_type == "env":
page_size = 0x800
bin_begin = 0x800
page_offset = 0x8800
end_flash_address = 0x0801f800
erase_page_num = 2
bin_end = bin_size - ((bin_size - bin_begin) % page_size)
page_begin = bin_begin
erase_error_limit = 2
erase_error_count = 0
crc_error_limit = 2
crc_error_count = 0
while page_begin < bin_end :
progress = 100 * page_begin // bin_end
self.progress = progress
self.__print(f"\rFirmware Update: {module_type} ({module_id}) {self.__progress_bar(page_begin, bin_end)} {progress}%", end="")
page_end = page_begin + page_size
curr_page = bin_buffer[page_begin:page_end]
# Skip current page if empty
if curr_page == bytes(len(curr_page)):
page_begin = page_begin + page_size
time.sleep(0.02)
continue
if page_begin + page_offset + flash_memory_addr == end_flash_address:
page_begin = page_begin + page_size
continue
# Erase page (send erase request and receive its response)
erase_page_success = self.send_firmware_command(
oper_type="erase",
module_id=module_id,
crc_val=erase_page_num, # when page erase, crc value is replaced by data size
dest_addr=flash_memory_addr,
page_addr=page_begin + page_offset,
)
if not erase_page_success:
erase_error_count = erase_error_count + 1
if erase_error_count > erase_error_limit:
erase_error_count = 0
self.this_update_error = True
self.update_error_message = f"{module_type} ({module_id}) erase flash failed."
break
continue
else:
erase_error_count = 0
# Copy current page data to the module's memory
checksum = 0
for curr_ptr in range(0, page_size, 8):
if page_begin + curr_ptr >= bin_size:
break
curr_data = curr_page[curr_ptr : curr_ptr + 8]
checksum = self.send_firmware_data(module_id, curr_ptr // 8, curr_data, checksum)
delay(0.001)
# CRC on current page (send CRC request / receive CRC response)
crc_page_success = self.send_firmware_command(
oper_type="crc",
module_id=module_id,
crc_val=checksum,
dest_addr=flash_memory_addr,
page_addr=page_begin + page_offset,
)
if crc_page_success:
crc_error_count = 0
else:
crc_error_count = crc_error_count + 1
if crc_error_count > crc_error_limit:
crc_error_count = 0
self.this_update_error = True
self.update_error_message = f"{module_type} ({module_id}) check crc failed."
break
continue
page_begin = page_begin + page_size
time.sleep(0.01)
self.progress = 99
self.__print(f"\rUpdating {module_type} ({module_id}) {self.__progress_bar(99, 100)} 99%")
verify_header = 0xAA
if self.this_update_error:
self.has_update_error = True
verify_header = 0xFF
# Get version info from version_path, using appropriate methods
os_version_info = self.firmware_version_info[module_type]["os"]
os_version_info = os_version_info.lstrip("v").split("-")[0]
os_version_digits = [int(digit) for digit in os_version_info.split(".")]
os_version = (
os_version_digits[0] << 13
| os_version_digits[1] << 8
| os_version_digits[2]
)
app_version_info = self.firmware_version_info[module_type]["app"]
app_version_info = app_version_info.lstrip("v").split("-")[0]
app_version_digits = [int(digit) for digit in app_version_info.split(".")]
app_version = (
app_version_digits[0] << 13
| app_version_digits[1] << 8
| app_version_digits[2]
)
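# Worked example (illustrative): an app version string "1.2.3" packs as
# (1 << 13) | (2 << 8) | 3 = 0x2203, the inverse of the decoding done in
# __assign_network_id above.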
# Set end-flash data to be sent at the end of the firmware update
end_flash_data = bytearray(16)
end_flash_data[0] = verify_header
# os version
end_flash_data[6] = os_version & 0xFF
end_flash_data[7] = (os_version >> 8) & 0xFF
# app version
end_flash_data[8] = app_version & 0xFF
end_flash_data[9] = (app_version >> 8) & 0xFF
for xxx in range(4):
end_flash_data[xxx + 12] = ((0x08005000 >> (xxx * 8)) & 0xFF)
if module_type == "speaker" or module_type == "display" or module_type == "env":
for xxx in range(4):
end_flash_data[xxx + 12] = ((0x08009000 >> (xxx * 8)) & 0xFF)
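# Resulting end_flash_data layout, as built above: byte 0 holds the verify
# header (0xAA on success, 0xFF on error), bytes 6-7 the packed OS version,
# bytes 8-9 the packed app version, and bytes 12-15 the application boot
# address (0x08005000, or 0x08009000 for speaker/display/env) in little-endian
# order, e.g. 0x08005000 is stored as 00 50 00 08.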
success_end_flash =
# ansible_collection/hpe/nimble/plugins/modules/hpe_nimble_network.py
#!/usr/bin/python
# # Copyright 2020 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# author <NAME> (<EMAIL>)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
author:
- HPE Nimble Storage Ansible Team (@ar-india) <<EMAIL>>
description: Manage the storage network configuration on the HPE Nimble Storage group.
module: hpe_nimble_network
options:
activate:
required: False
type: bool
description:
- Activate a network configuration.
array:
required: False
type: list
elements: dict
description:
- List of array network configs.
change_name:
required: False
type: str
description:
- Change name of the existing network config.
iscsi_automatic_connection_method:
required: False
type: bool
description:
- Whether automatic connection method is enabled. Enabling this means redirecting connections from the specified iSCSI
discovery IP address to the best data IP address based on connection counts.
iscsi_connection_rebalancing:
required: False
type: bool
description:
- Whether rebalancing is enabled. Enabling this means rebalancing iSCSI connections by periodically breaking existing
connections that are out-of-balance, allowing the host to reconnect to a more appropriate data IP address.
ignore_validation_mask:
required: False
type: int
description:
- Indicates whether to ignore the validation.
mgmt_ip:
required: False
type: str
description:
- Management IP address for the Group. Four numbers in the range [0,255] separated by periods.
name:
required: True
type: str
choices:
- active
- backup
- draft
description:
- Name of the network configuration. Use the name 'draft' when creating a draft configuration.
secondary_mgmt_ip:
required: False
type: str
description:
- Secondary management IP address for the Group. Four numbers in the range [0,255] separated by periods.
subnet:
required: False
type: list
elements: dict
description:
- List of subnet configs.
route:
required: False
type: list
elements: dict
description:
- List of static routes.
state:
required: True
choices:
- create
- present
- absent
type: str
description:
- The network config operation.
validate:
required: False
type: bool
description:
- Validate a network configuration.
extends_documentation_fragment: hpe.nimble.hpe_nimble
short_description: Manage the HPE Nimble Storage network configuration.
version_added: "2.9.0"
'''
EXAMPLES = r'''
# if state is create, then create network config, fails if it exist or cannot create
# if state is present, then create network config if not present, else succeed
- name: Create network config
hpe_nimble_network:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
name: "{{ name }}"
route: "{{ route }}"
subnet: "{{ subnet }}"
array: "{{ array }}"
iscsi_automatic_connection_method: true
iscsi_connection_rebalancing: False
mgmt_ip: "{{ mgmt_ip }}"
state: "{{ state | default('present') }}"
- name: Delete network config
hpe_nimble_network:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
name: "{{ name }}"
state: "absent"
- name: Validate network config
hpe_nimble_network:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
name: "{{ name }}"
state: "present"
ignore_validation_mask: 1
validate: true
- name: Activate Network config
hpe_nimble_network:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
name: "{{ name }}"
state: "present"
ignore_validation_mask: 1
activate: true
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
try:
from nimbleclient.v1 import client
except ImportError:
client = None
import ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble as utils
def create_update_network_config(
client_obj,
name,
state,
iscsi_automatic_connection_method,
iscsi_connection_rebalancing,
mgmt_ip,
change_name,
**kwargs):
if utils.is_null_or_empty(name):
return (False, False, "Create network config failed as name is not present.", {}, {})
try:
network_resp = client_obj.network_configs.get(id=None, name=name)
if utils.is_null_or_empty(network_resp):
params = utils.remove_null_args(**kwargs)
network_resp = client_obj.network_configs.create(name=name,
iscsi_automatic_connection_method=iscsi_automatic_connection_method,
iscsi_connection_rebalancing=iscsi_connection_rebalancing,
mgmt_ip=mgmt_ip,
**params)
return (True, True, f"Network config '{name}' created successfully.", {}, network_resp.attrs)
else:
if state == "create":
return (False, False, f"Network config '{name}' cannot be created as it is already present in given state.", {}, network_resp.attrs)
# update case
kwargs['name'] = change_name
changed_attrs_dict, params = utils.remove_unchanged_or_null_args(network_resp, **kwargs)
# Even though some attributes have not changed, they still have to be passed in case of an update.
params = utils.remove_null_args(**kwargs)
if changed_attrs_dict.__len__() > 0:
network_resp = client_obj.network_configs.update(id=network_resp.attrs.get("id"),
name=name,
iscsi_automatic_connection_method=iscsi_automatic_connection_method,
iscsi_connection_rebalancing=iscsi_connection_rebalancing,
mgmt_ip=mgmt_ip,
**params)
return (True, True, f"Network config '{name}' already present. Modified the following attributes '{changed_attrs_dict}'",
changed_attrs_dict, network_resp.attrs)
else:
return (True, False, f"Network config '{network_resp.attrs.get('name')}' already present in given state.", {}, network_resp.attrs)
except Exception as ex:
return (False, False, f"Network config creation failed |'{ex}'", {}, {})
def delete_network_config(
client_obj,
name):
if utils.is_null_or_empty(name):
return (False, False, "Delete network config failed as name is not present.", {})
try:
network_resp = client_obj.network_configs.get(id=None, name=name)
if utils.is_null_or_empty(network_resp):
return (False, False, f"Network config '{name}' cannot be deleted as it is not present.", {})
client_obj.network_configs.delete(id=network_resp.attrs.get("id"))
return (True, True, f"Deleted network config '{name}' successfully.", {})
except Exception as ex:
return (False, False, f"Delete network config failed |'{ex}'", {})
def validate_network_config(
client_obj,
name,
ignore_validation_mask):
if utils.is_null_or_empty(name):
return (False, False, "Validate network config failed as name is not present.", {})
try:
network_resp = client_obj.network_configs.get(id=None, name=name)
if utils.is_null_or_empty(network_resp):
return (False, False, f"Network config '{name}' cannot be validated as it is not present.", {})
client_obj.network_configs.validate_netconfig(
id=network_resp.attrs.get("id"),
ignore_validation_mask=ignore_validation_mask)
return (True, False, f"Validated network config '{name}' successfully.", {})
except Exception as ex:
return (False, False, f"Validate Network config failed |'{ex}'", {})
def activate_network_config(
client_obj,
name,
ignore_validation_mask):
if utils.is_null_or_empty(name):
return (False, False, "Activate network config failed as name is not present.", {})
try:
network_resp = client_obj.network_configs.get(id=None, name=name)
if utils.is_null_or_empty(network_resp):
return (False, False, f"Network config '{name}' cannot be activated as it is not present.", {})
client_obj.network_configs.activate_netconfig(id=network_resp.attrs.get("id"),
ignore_validation_mask=ignore_validation_mask)
return (True, True, f"Activated network config '{name}' successfully.", {})
except Exception as ex:
return (False, False, f"Activate Network config failed |'{ex}'", {})
def main():
fields = {
"activate": {
"required": False,
"type": "bool",
"no_log": False
},
"array": {
"required": False,
"type": "list",
"elements": 'dict',
"no_log": False
},
"change_name": {
"required": False,
"type": "str",
"no_log": False
},
"iscsi_automatic_connection_method": {
"required": False,
"type": "bool",
"no_log": False
},
"iscsi_connection_rebalancing": {
"required": False,
"type": "bool",
"no_log": False
},
"ignore_validation_mask": {
"required": False,
"type": "int",
"no_log": False
},
"mgmt_ip": {
"required": False,
"type": "str",
"no_log": False
},
"name": {
"required": True,
"choices": ['active',
'backup',
'draft'
],
"type": "str",
"no_log": False
},
"secondary_mgmt_ip": {
"required": False,
"type": "str",
"no_log": False
},
"subnet": {
"required": False,
"type": "list",
"elements": 'dict',
"no_log": False
},
"route": {
"required": False,
"type": "list",
"elements": 'dict',
"no_log": False
},
"state": {
"required": True,
"choices": ['create',
'present',
'absent'
],
"type": "str"
},
"validate": {
"required": False,
"type": "bool",
"no_log": False
}
}
default_fields = utils.basic_auth_arg_fields()
fields.update(default_fields)
required_if = [('state', 'create', ['array', 'iscsi_automatic_connection_method', 'iscsi_connection_rebalancing', 'mgmt_ip', 'subnet', 'route'])]
module = AnsibleModule(argument_spec=fields, required_if=required_if)
if client is None:
module.fail_json(msg='Python nimble-sdk could not be found.')
hostname = module.params["host"]
username = module.params["username"]
password = module.params["password"]
activate = module.params["activate"]
array = module.params["array"]
iscsi_automatic_connection_method = module.params["iscsi_automatic_connection_method"]
iscsi_connection_rebalancing = module.params["iscsi_connection_rebalancing"]
ignore_validation_mask = module.params["ignore_validation_mask"]
mgmt_ip = module.params["mgmt_ip"]
name = module.params["name"]
change_name = module.params["change_name"]
secondary_mgmt_ip = module.params["secondary_mgmt_ip"]
subnet = module.params["subnet"]
route = module.params["route"]
state = module.params["state"]
validate = module.params["validate"]
if (username is None or password is None or hostname is None):
module.fail_json(
msg="Missing variables: hostname, username and password is mandatory.")
# defaults
return_status = changed = False
msg = "No task to run."
resp = None
try:
client_obj = client.NimOSClient(
hostname,
username,
password
)
# States
if ((validate is None or validate is False)
and (activate is None or activate is False)
and (state == "create" or state == "present")):
# if not client_obj.network_configs.get(id=None, name=name) or state == "create":
return_status, changed, msg, changed_attrs_dict, resp = create_update_network_config(
client_obj,
name,
state,
iscsi_automatic_connection_method,
iscsi_connection_rebalancing,
mgmt_ip,
change_name,
array_list=array,
ignore_validation_mask=ignore_validation_mask,
secondary_mgmt_ip=secondary_mgmt_ip,
subnet_list=subnet,
route_list=route)
elif state == "absent":
return_status, changed, msg, changed_attrs_dict = delete_network_config(client_obj, name)
elif state == "present" and validate is True:
return_status, changed, msg, changed_attrs_dict = validate_network_config(client_obj, name, ignore_validation_mask)
elif state == "present" and activate is True:
return_status, changed, msg, changed_attrs_dict = activate_network_config(client_obj, name, ignore_validation_mask)
except Exception as ex:
# failed for some reason
expected to take responsibility for this
entry."""),
'lino_xl.lib.cal.Event.event_type' : _("""The type of this entry. Every calendar entry should have this
field pointing to a given EventType, which holds
extended configurable information about this entry."""),
'lino_xl.lib.cal.Event.state' : _("""The state of this entry. The state can change according to
rules defined by the workflow, that's why we sometimes refer
to it as the life cycle."""),
'lino_xl.lib.cal.Event.transparent' : _("""Indicates that this entry shouldn't prevent other entries at
the same time."""),
'lino_xl.lib.cal.Event.when_html' : _("""Shows the date and time of the entry with a link that opens
all entries on that day (EntriesByDay)."""),
'lino_xl.lib.cal.Event.show_conflicting' : _("""A ShowSlaveTable
button which opens the ConflictingEvents table for this event."""),
'lino_xl.lib.cal.EventTypes' : _("""The list of entry types defined on this site."""),
'lino_xl.lib.cal.EventType' : _("""The possible value of the Event.type field."""),
'lino_xl.lib.cal.EventType.event_label' : _("""Default text for summary of new entries."""),
'lino_xl.lib.cal.EventType.is_appointment' : _("""Whether entries of this type are considered as "appointments"
(i.e. whose time and place have been agreed upon with other
users or external parties)."""),
'lino_xl.lib.cal.EventType.max_days' : _("""The maximal number of days allowed as duration."""),
'lino_xl.lib.cal.EventType.locks_user' : _("""Whether calendar entries of this type make the user
unavailable for other locking events at the same time."""),
'lino_xl.lib.cal.EventType.max_conflicting' : _("""How many conflicting events should be tolerated."""),
'lino_xl.lib.cal.EventType.transparent' : _("""Allow entries of this type to conflict with other events."""),
'lino_xl.lib.cal.DailyPlanner' : _("""The daily planner actor."""),
'lino_xl.lib.cal.PlannerColumns' : _("""A choicelist that defines the columns to appear in the daily
planner. This list can be modified locally."""),
'lino_xl.lib.cal.DailyPlannerRow' : _("""A database object that represents one row of the daily planner.
The default configuration has "AM", "PM" and "All day"."""),
'lino_xl.lib.cal.Calendar.color' : _("""The color to use for entries of this calendar (in
lino_xl.lib.extensible)."""),
'lino_xl.lib.cal.EventGenerator' : _("""Base class for things that generate a series of events."""),
'lino_xl.lib.cal.EventGenerator.do_update_events' : _("""See UpdateEntries."""),
'lino_xl.lib.cal.UpdateEntries' : _("""Generate or update the automatic events controlled by this object."""),
'lino_xl.lib.cal.UpdateEntriesByEvent' : _("""Update all events of this series."""),
'lino_xl.lib.cal.Recurrencies' : _("""List of possible choices for a 'recurrency' field."""),
'lino_xl.lib.cal.Recurrencies.easter' : _("""Repeat events yearly, moving them together with the Easter
date of that year."""),
'lino_xl.lib.cal.Guest' : _("""Represents the fact that a given person is expected to attend to a
given event."""),
'lino_xl.lib.cal.Guest.event' : _("""The calendar event to which this presence applies."""),
'lino_xl.lib.cal.Guest.partner' : _("""The partner to which this presence applies."""),
'lino_xl.lib.cal.Guest.role' : _("""The role of this partner in this presence."""),
'lino_xl.lib.cal.Guest.state' : _("""The state of this presence."""),
'lino_xl.lib.cal.Guest.waiting_since' : _("""Time when the visitor arrived (checked in)."""),
'lino_xl.lib.cal.Guest.busy_since' : _("""Time when the visitor was received by agent."""),
'lino_xl.lib.cal.Guest.gone_since' : _("""Time when the visitor left (checked out)."""),
'lino_xl.lib.cal.GuestRole' : _("""The role of a guest expresses what the partner is going to do there."""),
'lino_xl.lib.cal.GuestRoles' : _("""Global table of guest roles."""),
'lino_xl.lib.cal.RemoteCalendar' : _("""Remote calendars will be synchronized by
lino_xl.lib.cal.management.commands.watch_calendars,
and local modifications will be sent back to the remote calendar."""),
'lino_xl.lib.cal.Room' : _("""A location where calendar entries can happen. For a given Room you
can see the EntriesByRoom that happened (or will happen)
there. A Room has a multilingual name."""),
'lino_xl.lib.cal.Room.name' : _("""The designation of the room. This should (but is not required
to) be unique."""),
'lino_xl.lib.cal.Rooms' : _("""List of rooms where calendar events can happen."""),
'lino_xl.lib.cal.Priority' : _("""The priority of a task or entry."""),
'lino_xl.lib.cal.Priorities' : _("""List of possible priorities of calendar events."""),
'lino_xl.lib.cal.Subscription' : _("""A Subscription is when a User subscribes to a Calendar.
It corresponds to what the extensible CalendarPanel calls "Calendars" """),
'lino_xl.lib.cal.Task' : _("""A Task is when a user plans to do something
(and optionally wants to get reminded about it)."""),
'lino_xl.lib.cal.Task.state' : _("""The state of this Task. One of TaskStates."""),
'lino_xl.lib.cal.Tasks' : _("""Global table of all tasks for all users."""),
'lino_xl.lib.cal.TasksByUser' : _("""Shows the list of tasks for this user."""),
'lino_xl.lib.cal.MyTasks' : _("""Shows my tasks whose start date is today or in the future."""),
'lino_xl.lib.cal.EventPolicy' : _("""A recurrency policy is a rule used for generating automatic
calendar entries."""),
'lino_xl.lib.cal.EventPolicy.event_type' : _("""Generated calendar entries will have this type."""),
'lino_xl.lib.cal.EventPolicies' : _("""Global table of all possible recurrencly policies."""),
'lino_xl.lib.cal.RecurrentEvent' : _("""A recurring event describes a series of recurrent calendar
entries."""),
'lino_xl.lib.cal.RecurrentEvent.name' : _("""See lino.utils.mldbc.mixins.BabelNamed.name."""),
'lino_xl.lib.cal.RecurrentEvent.every_unit' : _("""Inherited from RecurrentSet.every_unit."""),
'lino_xl.lib.cal.RecurrentEvents' : _("""The list of all recurrent events (RecurrentEvent)."""),
'lino_xl.lib.cal.UpdateGuests' : _("""Populate or update the list of participants for this calendar
entry according to the suggestions."""),
'lino_xl.lib.cal.UpdateAllGuests' : _("""Update the presence lists of all calendar events generated by
this."""),
'lino_xl.lib.cal.Events' : _("""Table which shows all calendar events."""),
'lino_xl.lib.cal.Events.show_appointments' : _("""Whether only appointments should be shown. "Yes" means only
appointments, "No" means no appointments and leaving it to
blank shows both types of events."""),
'lino_xl.lib.cal.ConflictingEvents' : _("""Shows events conflicting with this one (the master)."""),
'lino_xl.lib.cal.EntriesByDay' : _("""This table is usually labelled "Appointments today". It has no
"date" column because it shows events of a given date."""),
'lino_xl.lib.cal.EntriesByRoom' : _("""Displays the calendar entries at a given Room."""),
'lino_xl.lib.cal.EntriesByController' : _("""Shows the calendar entries controlled by this database object."""),
'lino_xl.lib.cal.OneEvent' : _("""Show a single calendar event."""),
'lino_xl.lib.cal.MyEntries' : _("""Table of appointments for which I am responsible."""),
'lino_xl.lib.cal.MyEntriesToday' : _("""Like MyEntries, but only today."""),
'lino_xl.lib.cal.MyAssignedEvents' : _("""The table of calendar entries which are assigned to me. That
is, whose Event.assigned_to field refers to the requesting
user."""),
'lino_xl.lib.cal.OverdueAppointments' : _("""Shows overdue appointments, i.e. appointments whose date is
over but who are still in a nonstable state."""),
'lino_xl.lib.cal.MyOverdueAppointments' : _("""Like OverdueAppointments, but only for myself."""),
'lino_xl.lib.cal.MyUnconfirmedAppointments' : _("""Shows my appointments in the near future which are in suggested or
draft state."""),
'lino_xl.lib.cal.Guests' : _("""The default table of presences."""),
'lino_xl.lib.cal.MyPresences' : _("""Shows all my presences in calendar events, independently of their
state."""),
'lino_xl.lib.cal.MyPendingPresences' : _("""Received invitations waiting for my feedback (accept or reject)."""),
'lino_xl.lib.cal.RecurrenceSet' : _("""Mixin for models that express a set of repeating calendar events.
See specs.cal.automatic_events."""),
'lino_xl.lib.cal.RecurrenceSet.max_events' : _("""Maximum number of calendar entries to generate."""),
'lino_xl.lib.cal.RecurrenceSet.weekdays_text' : _("""A virtual field returning the textual formulation of the
weekdays where the recurrence occurs."""),
'lino_xl.lib.cal.Reservation' : _("""Base class for lino_xl.lib.rooms.models.Booking and
lino.modlib.courses.models.Course."""),
'lino_xl.lib.cal.Reservation.max_date' : _("""Don't generate calendar entries beyond this date."""),
'lino_xl.lib.cal.Weekdays' : _("""A choicelist with the seven days of a week."""),
'lino_xl.lib.cal.DurationUnit' : _("""Base class for the choices in the DurationUnits
choicelist."""),
'lino_xl.lib.cal.DurationUnits' : _("""A list of possible values for the
lino_xl.lib.cal.Event.duration_unit field of a calendar
entry."""),
'lino_xl.lib.cal.AccessClasses' : _("""The sitewide list of access classes."""),
'lino_xl.lib.cal.ShowEntriesByDay' : _("""Show all calendar events of the same day."""),
'lino_xl.lib.cal.Component' : _("""Abstract base class for Event and Task."""),
'lino_xl.lib.cal.Component.auto_type' : _("""Contains the sequence number if this is an automatically
generated component. Otherwise this field is empty."""),
'lino_xl.lib.cal.Plugin.partner_model' : _("""The model to use as the guest of a presence."""),
'lino_xl.lib.cal.Plugin.ignore_dates_before' : _("""Ignore dates before the given date."""),
'lino_xl.lib.cal.Plugin.ignore_dates_after' : _("""Ignore dates after the given date. This should never be None.
Default value is 5 years after today."""),
'lino_xl.lib.cal.CalendarReader' : _("""Can read public calendar entries. This is a kind of minimal
calendar functionality which can be given to anonymous users,
as done e.g. by vilma."""),
'lino_xl.lib.cal.GuestOperator' : _("""Can see presences and guests of a calendar entry."""),
'lino_xl.lib.cal.ConflictingEventsChecker' : _("""Check whether this entry conflicts with other events."""),
'lino_xl.lib.cal.ObsoleteEventTypeChecker' : _("""Check whether the type of this calendar entry should be updated."""),
'lino_xl.lib.cal.LongEntryChecker' : _("""Check for entries which last longer than the maximum number of
days allowed by their type."""),
'lino_xl.lib.cal.EventGuestChecker' : _("""Check for calendar entries without participants."""),
'lino_xl.lib.clients.ClientContactBase' : _("""Also used by aids.RefundPartner."""),
'lino_xl.lib.clients.ClientBase' : _("""Base class for a client. The model specified as
client_model must implement this."""),
'lino_xl.lib.clients.ClientBase.client_state' : _("""Pointer to ClientStates"""),
'lino_xl.lib.clients.ClientStates' : _("""The list of client states."""),
'lino_xl.lib.clients.ClientEvents' : _("""The list of observable client events."""),
'lino_xl.lib.clients.ClientEvents.created' : _("""Select clients whose record has been created during the observed
period."""),
'lino_xl.lib.clients.ClientEvents.modified' : _("""The choice for ClientEvents which selects clients whose
main record has been modified during the observed period."""),
'lino_xl.lib.clients.ClientContact' : _("""A client contact is when a given partner has a given role for
a given client."""),
'lino_xl.lib.clients.ClientContact.client' : _("""The Client."""),
'lino_xl.lib.clients.ClientContact.type' : _("""The type of contact. Pointer to ClientContactType."""),
'lino_xl.lib.clients.ClientContact.company' : _("""The organization."""),
'lino_xl.lib.clients.ClientContact.contact_person' : _("""The contact person in the organization."""),
'lino_xl.lib.clients.ClientContact.contact_role' : _("""The role of the contact person in the organization."""),
'lino_xl.lib.clients.ClientContactType' : _("""A client contact type is the type or "role" which must be
specified for a given client contact."""),
# CiscoSystems/fabric_enabler
import os
import urllib
import json
import requests
import socket
import netaddr
import pexpect
import logging
import time
import sys
REQUEST_TIMEOUT = 30
PING_MONITOR_TYPE = 'PING'
HTTP_MONITOR_TYPE = 'HTTP'
HTTPS_MONITOR_TYPE = 'HTTPS'
TCP_MONITOR_TYPE = 'TCP'
SHARED_CONFIG_DEFAULT_TRAFFIC_GROUP = 'traffic-group-local-only'
SHARED_CONFIG_DEFAULT_FLOATING_TRAFFIC_GROUP = 'traffic-group-1'
class SystemException(Exception):
pass
class RouteAddException(Exception):
pass
class RouteDomainUpdateException(Exception):
pass
class RouteDeleteException(Exception):
pass
class RouteQueryException(Exception):
pass
class VLANCreateException(Exception):
pass
class SelfIPCreateException(Exception):
pass
class SelfIPDeleteException(Exception):
pass
class PoolCreateException(Exception):
pass
class PoolDeleteException(Exception):
pass
class PoolUpdateException(Exception):
pass
class PoolQueryException(Exception):
pass
class MonitorCreateException(Exception):
pass
class MonitorDeleteException(Exception):
pass
class VirtualServerCreateException(Exception):
pass
class VirtualServerUpdateException(Exception):
pass
class VirtualServerDeleteException(Exception):
pass
class MonitorUnknownException(Exception):
pass
class F5Device(object):
def __init__(self, f5IpAddr, username, password):
self.deviceIp = f5IpAddr
self.username = username
self.password = password
self.session = self._getBigSession()
self.url = 'https://%s/mgmt/tm' % f5IpAddr
self.network = Network(self)
self.ltm = LTM(self)
self.strict_route_isolation = False
def logIt(self, level, prefix, msg):
log_string = prefix + ': ' + msg
log = logging.getLogger(__name__)
out_hdlr = logging.StreamHandler(sys.stdout)
out_hdlr.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
log.addHandler(out_hdlr)
if level == 'debug':
log.debug(log_string)
elif level == 'error':
log.error(log_string)
elif level == 'crit':
log.critical(log_string)
else:
log.info(log_string)
log.removeHandler(out_hdlr)
def error_log(self, prefix, msg):
self.logIt('error', prefix, msg)
def _getBigSession(self, timeout=REQUEST_TIMEOUT):
session = requests.session()
session.auth = (self.username, self.password)
session.verify = False
session.headers.update({'Content-Type': 'application/json'})
socket.setdefaulttimeout(timeout)
return session
def folderExists(self, folder = None):
if folder is None:
return False
folder = str(folder).replace('/', '')
if folder == 'Common':
return True
request_url = self.url + '/sys/folder/'
request_url += '~' + folder
request_url += '?$select=name'
print(request_url)
response = self.session.get(request_url,
timeout=REQUEST_TIMEOUT)
if response.status_code < 400:
return True
elif response.status_code == 404:
return False
else:
self.error_log('folder', response.text)
raise SystemException(response.text)
return False
def stripDomainId(self, ip_address):
mask_index = ip_address.find('/')
if mask_index > 0:
return ip_address[:mask_index].split('%')[0] + \
ip_address[mask_index:]
else:
return ip_address.split('%')[0]
def stripToOnlyName(self, path):
if isinstance(path, list):
for i in range(len(path)):
if path[i].find('~') > -1:
path[i] = path[i].replace('~', '/')
if path[i].startswith('/Common'):
path[i] = path[i].replace('uuid_', '')
else:
path[i] = \
os.path.basename(str(path[i])).replace('uuid_', '')
return path
else:
if path.find('~') > -1:
path = path.replace('~', '/')
if path.startswith('/Common'):
return str(path).replace('uuid_', '')
else:
return os.path.basename(str(path)).replace('uuid_', '')
def createFolder(self, folder=None):
if folder is None:
return False
folder = str(folder).replace('/', '')
request_url = self.url + '/sys/folder/'
payload = dict()
payload['name'] = folder
payload['subPath'] = '/'
payload['fullPath'] = '/' + folder
payload['hidden'] = False
payload['inheritedDevicegroup'] = True
payload['inheritedTrafficGroup'] = True
response = self.session.post(request_url,
data=json.dumps(payload),
timeout=REQUEST_TIMEOUT)
if response.status_code < 400:
return True
else:
self.error_log('folder', response.text)
raise SystemException(response.text)
def deleteFolder(self, folder):
if folder:
# Before deleting the folder, change the iControl SOAP
# active folder to '/' so that we do not delete the
# active folder, which breaks the iControl session.
# We also need to do a fake query and fake command
# because changing your active folder, by itself, does
# not do anything.
folder = str(folder).replace('/', '')
request_url = self.url + '/sys/folder/~' + folder
response = self.session.delete(request_url,
timeout=REQUEST_TIMEOUT)
if response.status_code < 400:
return True
elif response.status_code == 404:
return True
else:
self.error_log('folder', response.text)
raise SystemException(response.text)
return False
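# Minimal usage sketch, not part of the original module: the management IP,
# credentials and partition name below are placeholders.
def _example_f5_usage():
    f5 = F5Device('192.0.2.10', 'admin', 'secret')
    if not f5.folderExists('my_partition'):
        f5.createFolder('my_partition')
    # Route domains are created per partition; returns the numeric id.
    return f5.network.routeDomainAdd(folder='my_partition')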
class Network(object):
def __init__(self, bigip):
self.bigip = bigip
def routeAddPool(self, name=None, dest_ip_address=None, dest_mask=None,
pool_id=None, folder='Common'):
if dest_ip_address and dest_mask and pool_id:
folder = str(folder).replace('/', '')
# self.bigip.system.set_rest_folder(folder)
payload = dict()
payload['name'] = name
payload['partition'] = folder
payload['pool'] = pool_id
payload['network'] = dest_ip_address + "/" + dest_mask
request_url = self.bigip.url + '/net/route/'
response = self.bigip.session.post(request_url,
data=json.dumps(payload),
timeout=REQUEST_TIMEOUT)
if response.status_code < 400:
return True
elif response.status_code == 409:
return True
else:
self.bigip.error_log('route', response.text)
raise RouteAddException(response.text)
return False
def routeAdd(self, name=None, dest_ip_address=None, dest_mask=None,
gw_ip_address=None, folder='Common'):
if dest_ip_address and dest_mask and gw_ip_address:
folder = str(folder).replace('/', '')
# self.bigip.system.set_rest_folder(folder)
payload = dict()
payload['name'] = name
payload['partition'] = folder
payload['gw'] = gw_ip_address
payload['network'] = dest_ip_address + "/" + dest_mask
request_url = self.bigip.url + '/net/route/'
response = self.bigip.session.post(request_url,
data=json.dumps(payload),
timeout=REQUEST_TIMEOUT)
if response.status_code < 400:
return True
elif response.status_code == 409:
return True
else:
self.bigip.error_log('route', response.text)
raise RouteAddException(response.text)
return False
def routeDel(self, name=None, folder='Common'):
folder = str(folder).replace('/', '')
request_url = self.bigip.url + '/net/route/'
request_url += '~' + folder + '~' + name
response = self.bigip.session.delete(request_url,
timeout=REQUEST_TIMEOUT)
if response.status_code < 400:
return True
elif response.status_code == 404:
return True
else:
self.bigip.error_log('route', response.text)
raise RouteDeleteException(response.text)
return False
def getRouteDomain(self, folder='Common'):
folder = str(folder).replace('/', '')
if folder == 'Common':
return 0
request_url = self.bigip.url + '/net/route-domain/'
request_url += '~' + folder + '~' + folder
request_url += '?$select=id'
response = self.bigip.session.get(request_url,
timeout=REQUEST_TIMEOUT)
if response.status_code < 400:
response_obj = json.loads(response.text)
if 'id' in response_obj:
return int(response_obj['id'])
elif response.status_code != 404:
self.bigip.error_log('route-domain', response.text)
raise RouteQueryException(response.text)
return 0
def routeDomainPresent(self, folder='Common'):
folder = str(folder).replace('/', '')
if folder == 'Common':
return True
request_url = self.bigip.url + '/net/route-domain/'
request_url += '~' + folder + '~' + folder
request_url += '?$select=name'
response = self.bigip.session.get(request_url,
timeout=REQUEST_TIMEOUT)
if response.status_code < 400:
return True
elif response.status_code != 404:
self.bigip.error_log('route', response.text)
raise RouteQueryException(response.text)
return False
def startOspf(self, rdId, network, netmask):
time.sleep(2)
username = "root"
f5IpAddr = self.bigip.deviceIp
print("Connecting to the F5 Device for Configuring OSPF")
spawnid = pexpect.spawn("ssh "+username+"@"+f5IpAddr, timeout=20)
matchOpt = spawnid.expect(["Password:.*", "yes/no"])
if (matchOpt == 1):
spawnid.sendline("yes")
matchOpt = spawnid.expect("Password:.*")
spawnid.sendline(self.bigip.password)
spawnid.expect("config #.*")
ospfRetryCount = 1
while(ospfRetryCount < 60):
spawnid.sendline("imish -r " + str(rdId))
matchOpt = spawnid.expect([str(rdId) + "\]>.*", "Dynamic routing is not"])
print(spawnid.before, spawnid.after)
if (matchOpt == 1):
time.sleep(2)
ospfRetryCount += 1
continue
else:
break
if (ospfRetryCount > 60):
print("Error Starting IMISH, Dynamic routing may not be enabled on the F5 RD")
spawnid.close()
return False
print("Started IMISH command line")
spawnid.sendline("ena")
spawnid.expect("\["+str(rdId)+"\]#")
print(spawnid.before)
ospfRetryCount = 1
while(ospfRetryCount < 60):
spawnid.sendline("show process")
matchOpt = spawnid.expect(["ospfd.*\["+str(rdId)+"\]#", "\["+str(rdId)+"\]#"])
if (matchOpt == 0):
break
else:
ospfRetryCount += 1
if (ospfRetryCount >= 60):
print("OSPF Process is not started in the Routedomain " + str(rdId))
spawnid.close()
return False
print("OSPF Process is running in the Routedomain " + str(rdId))
spawnid.sendline("conf t")
spawnid.expect("\["+str(rdId)+"\]\(config\)#.*")
spawnid.sendline("router ospf")
spawnid.expect("\["+str(rdId)+"\]\(config-router\)#.*")
spawnid.sendline("redistribute kernel")
spawnid.expect("\["+str(rdId)+"\]\(config-router\)#.*")
spawnid.sendline("network " + network + " " + netmask + " area 0 ")
spawnid.expect("\["+str(rdId)+"\]\(config-router\)#.*")
print("OSPF is now configured in the Routedomain " + str(rdId))
spawnid.sendline("end")
spawnid.expect("\["+str(rdId)+"\]#")
spawnid.close()
def _get_protocol_from_name(self, protocol):
if (protocol == 'ospfv2'):
return ['OSPFv2']
if (protocol == 'BGP'):
return ['BGP']
return None
def routeDomainAdd(self, folder='Common', protocol=None):
folder = str(folder).replace('/', '')
if (folder == 'Common'):
return 0
rid = self.getRouteDomain(folder)
if (rid != 0):
return rid
payload = dict()
payload['name'] = folder
payload['partition'] = '/' + folder
payload['id'] = self.getFreeRouteDomainId()
if self.bigip.strict_route_isolation:
payload['strict'] = 'enabled'
else:
payload['strict'] = 'disabled'
payload['parent'] = '/Common/0'
if protocol is not None:
payload['routingProtocol'] = self._get_protocol_from_name(protocol)
request_url = self.bigip.url + '/net/route-domain/'
response = self.bigip.session.post(request_url,
data=json.dumps(payload),
timeout=REQUEST_TIMEOUT)
if response.status_code < 400:
pass
elif response.status_code == 409:
pass
else:
self.bigip.error_log('route-domain', response.text)
raise RouteAddException(response.text)
rid = -1
for retryCnt in range (0, 10):
rid = self.getRouteDomain(folder)
if (rid == 0):
time.sleep(1)
else:
return rid
rid = -1
return rid
def getFreeRouteDomainId(self):
request_url = self.bigip.url + '/net/route-domain?$select=id'
response = self.bigip.session.get(request_url,
timeout=REQUEST_TIMEOUT)
all_identifiers = []
if response.status_code < 400:
response_obj = json.loads(response.text)
if 'items' in response_obj:
for route_domain in response_obj['items']:
all_identifiers.append(int(route_domain['id']))
all_identifiers = sorted(all_identifiers)
all_identifiers.remove(0)
else:
raise RouteQueryException(response.text)
lowest_available_index = 1
for i in range(len(all_identifiers)):
if all_identifiers[i] < lowest_available_index:
if len(all_identifiers) > (i + 1):
if all_identifiers[i + 1] > lowest_available_index:
return lowest_available_index
else:
lowest_available_index = lowest_available_index + 1
elif all_identifiers[i] == lowest_available_index:
lowest_available_index = lowest_available_index + 1
else:
return lowest_available_index
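# Worked example (illustrative): with existing route-domain ids [0, 1, 2, 4],
# id 0 is removed, the loop walks [1, 2, 4], advances the candidate to 3 and
# returns 3 as the lowest free id.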
def deleteRouteDomain(self, folder='Common'):
folder = str(folder).replace('/', '')
if (folder == 'Common'):
return True
request_url = self.bigip.url + '/net/route-domain/'
request_url += '~' + folder + '~' + folder
response = self.bigip.session.delete(request_url,
timeout=REQUEST_TIMEOUT)
if response.status_code < 400:
return True
elif response.status_code != 404:
self.bigip.error_log('route-domain', response.text)
raise RouteDeleteException(response.text)
return False
def getDomainVlanList(self, folder='Common'):
folder = str(folder).replace('/', '')
request_url = self.bigip.url + \
'/net/route-domain?$select=name,partition,vlans'
if folder:
request_filter = 'partition eq ' + folder
request_url += '&$filter=' + request_filter
response = self.bigip.session.get(request_url,
timeout=REQUEST_TIMEOUT)
if response.status_code < 400:
response_obj = json.loads(response.text)
if 'items' in response_obj:
vlans = []
folder = str(folder).replace('/', '')
for route_domain in response_obj['items']:
if route_domain['name'] == folder:
if 'vlans' in route_domain:
for vlan in route_domain['vlans']:
vlans.append(vlan)
return vlans
return []
else:
if response.status_code != 404:
self.bigip.error_log('route-domain', response.text)
raise RouteQueryException(response.text)
return []
def addVlanToDomain(self, name=None, folder='Common'):
folder = str(folder).replace('/', '')
existing_vlans = self.getDomainVlanList(folder)
if not name in existing_vlans:
existing_vlans.append(name)
vlans = dict()
vlans['vlans'] = existing_vlans
request_url = self.bigip.url + '/net/route-domain/'
request_url += '~' + folder + '~' + folder
response | |
single hour of a day with its 12 5-minute intervals which specify the temperature adjustment. The
thermostat will apply this temperature adjustment on top of the user's program.
:param selection: The selection criteria for update
:param demand_managements: A list of demand management objects
:param timeout: Number of seconds requests will wait to establish a connection and to receive a response
:return: A StatusResponse object
:rtype: EcobeeStatusResponse
:raises EcobeeApiException: If the request results in an ecobee API error response
:raises EcobeeRequestsException: If an exception is raised by the underlying requests module
:raises TypeError: If selection is not an instance of Selection, demand_managements is not a list,
or any member of privileges is not an instance of DemandManagement
"""
if not isinstance(selection, Selection):
raise TypeError('selection must be an instance of {0}'.format(Selection))
if not isinstance(demand_managements, list):
raise TypeError('demand_managements must be an instance of {0}'.format(list))
for demand_management in demand_managements:
if not isinstance(demand_management, DemandManagement):
raise TypeError('All members of demand_managements must be a an instance of {0}'.format(
DemandManagement))
dictionary = {'selection': utilities.object_to_dictionary(selection, type(selection)),
'dmList': [utilities.object_to_dictionary(demand_management, type(demand_management)) for
demand_management in demand_managements]}
response = EcobeeService.__make_http_request(requests.post,
EcobeeService.DEMAND_MANAGEMENT_URL,
headers={'Authorization': 'Bearer {0}'.format(self._access_token),
'Content-Type': 'application/json;charset=UTF-8'},
params={'format': 'json'},
json_=dictionary,
timeout=timeout)
return EcobeeService.__process_http_response(response, EcobeeStatusResponse)
def create_runtime_report_job(self, selection, start_date, end_date, columns, include_sensors=False, timeout=5):
"""
:param selection: The selection criteria for the request. Must have selection_type = 'thermostats' or
'managementSet'
:param start_date: The report start date
:param end_date: The report end date
:param columns: A CSV string of column names
:param include_sensors: Whether to include sensor runtime report data for those thermostats which have it.
Default: False
:param timeout: Number of seconds requests will wait to establish a connection and to receive a response
:return: A CreateRuntimeReportResponse object
:rtype: EcobeeCreateRuntimeReportJobResponse
:raises EcobeeApiException: If the request results in an ecobee API error response
:raises EcobeeRequestsException: If an exception is raised by the underlying requests module
:raises TypeError: If selection is not an instance of Selection, start_date is not a date, end_date is not a
date, columns is not a string, or include_sensors is not a boolean
:raises ValueError: If start/end date are earlier than 2008-01-02, start/end date_times are later than
2035-01-01, or start_date is later than end_date
"""
if not isinstance(selection, Selection):
raise TypeError('selection must be an instance of {0}'.format(Selection))
if selection.selection_type != SelectionType.MANAGEMENT_SET.value and selection.selection_type != \
SelectionType.THERMOSTATS.value:
raise ValueError('selection.selection_type must be set to {0} or {1}'.format(
SelectionType.MANAGEMENT_SET.value, SelectionType.THERMOSTATS.value))
if not isinstance(start_date, date):
raise TypeError('start_date must be an instance of {0}'.format(date))
if pytz.utc.localize(datetime(start_date.year, start_date.month, start_date.day, 0, 0,
0)) < EcobeeService.BEFORE_TIME_BEGAN_DATE_TIME:
raise ValueError('start_date must be later than {0}'.format(
EcobeeService.BEFORE_TIME_BEGAN_DATE_TIME.strftime('%Y-%m-%d %H:%M:%S %Z')))
if pytz.utc.localize(datetime(start_date.year, start_date.month, start_date.day, 0, 0,
0)) > EcobeeService.END_OF_TIME_DATE_TIME:
raise ValueError('start_date must be earlier than {0}'.format(
EcobeeService.END_OF_TIME_DATE_TIME.strftime('%Y-%m-%d %H:%M:%S %Z')))
if not isinstance(end_date, date):
raise TypeError('end_date must be an instance of {0}'.format(date))
if pytz.utc.localize(datetime(end_date.year, end_date.month, end_date.day, 0, 0,
0)) < EcobeeService.BEFORE_TIME_BEGAN_DATE_TIME:
raise ValueError('end_date must be later than {0}'.format(
EcobeeService.BEFORE_TIME_BEGAN_DATE_TIME.strftime('%Y-%m-%d %H:%M:%S %Z')))
if pytz.utc.localize(datetime(end_date.year, end_date.month, end_date.day, 0, 0,
0)) > EcobeeService.END_OF_TIME_DATE_TIME:
raise ValueError('end_date must be earlier than {0}'.format(
EcobeeService.END_OF_TIME_DATE_TIME.strftime('%Y-%m-%d %H:%M:%S %Z')))
if start_date >= end_date:
raise ValueError('end_date must be later than start_date')
if not isinstance(columns, six.text_type):
raise TypeError('columns must be an instance of {0}'.format(six.text_type))
if not isinstance(include_sensors, bool):
raise TypeError('include_sensors must be an instance of {0}'.format(bool))
dictionary = {'selection': utilities.object_to_dictionary(selection, type(selection)),
'startDate': '{0}-{1:02}-{2:02}'.format(start_date.year, start_date.month, start_date.day),
'endDate': '{0}-{1:02}-{2:02}'.format(end_date.year, end_date.month, end_date.day),
'columns': columns,
'includeSensors': include_sensors}
response = EcobeeService.__make_http_request(requests.post,
'{0}/create'.format(EcobeeService.RUNTIME_REPORT_JOB_URL),
headers={'Authorization': 'Bearer {0}'.format(self._access_token),
'Content-Type': 'application/json;charset=UTF-8'},
params={'format': 'json'},
json_=dictionary,
timeout=timeout)
return EcobeeService.__process_http_response(response, EcobeeCreateRuntimeReportJobResponse)
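# Illustrative call sketch, kept as comments so the class body is unchanged
# (assumes an already authorized EcobeeService instance; the thermostat id and
# column names are placeholders, not values from this module):
#
#   report_job = ecobee_service.create_runtime_report_job(
#       selection=Selection(selection_type=SelectionType.THERMOSTATS.value,
#                           selection_match='123456789012'),
#       start_date=date(2023, 1, 1),
#       end_date=date(2023, 1, 31),
#       columns='auxHeat1,compCool1',
#       include_sensors=True)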
def list_runtime_report_job_status(self, job_id=None, timeout=5):
"""
The list_runtime_report_job_status method gets the status of the job for the given id or all current job
statuses for the account carrying out the request.
:param job_id: The id of the report job to get the status
:param timeout: Number of seconds requests will wait to establish a connection and to receive a response
:return: A ListRuntimeReportJobStatusResponse object
:rtype: EcobeeListRuntimeReportJobStatusResponse
:raises EcobeeApiException: If the request results in an ecobee API error response
:raises EcobeeRequestsException: If an exception is raised by the underlying requests module
:raises TypeError: If job_id is not a string
"""
if job_id is not None:
if not isinstance(job_id, six.text_type):
raise TypeError('job_id must be an instance of {0}'.format(six.text_type))
dictionary = {}
if job_id:
dictionary['jobId'] = job_id
response = EcobeeService.__make_http_request(requests.post,
'{0}/status'.format(EcobeeService.RUNTIME_REPORT_JOB_URL),
headers={'Authorization': 'Bearer {0}'.format(self._access_token),
'Content-Type': 'application/json;charset=UTF-8'},
params={'format': 'json',
'body': json.dumps(dictionary, sort_keys=True, indent=2)},
timeout=timeout)
return EcobeeService.__process_http_response(response, EcobeeListRuntimeReportJobStatusResponse)
def cancel_runtime_report_job(self, job_id, timeout=5):
"""
The cancel_runtime_report_job method cancels a queued report job so that it is not processed, freeing the
for queuing additional report jobs. A job that is already being processed will be completed,
even if a request has been made to cancel it.
:param job_id: The id of the report job to cancel
:param timeout: Number of seconds requests will wait to establish a connection and to receive a response
:return: A StatusResponse object
:rtype: EcobeeStatusResponse
:raises EcobeeApiException: If the request results in an ecobee API error response
:raises EcobeeRequestsException: If an exception is raised by the underlying requests module
:raises TypeError: If job_id is not a string
"""
if not isinstance(job_id, six.text_type):
raise TypeError('job_id must be an instance of {0}'.format(six.text_type))
dictionary = {'jobId': job_id}
response = EcobeeService.__make_http_request(requests.post,
'{0}/cancel'.format(EcobeeService.RUNTIME_REPORT_JOB_URL),
headers={'Authorization': 'Bearer {0}'.format(self._access_token),
'Content-Type': 'application/json;charset=UTF-8'},
params={'format': 'json'},
json_=dictionary,
timeout=timeout)
return EcobeeService.__process_http_response(response, EcobeeStatusResponse)
def acknowledge(self, thermostat_identifier, ack_ref, ack_type, remind_me_later=False, selection=Selection(
selection_type=SelectionType.REGISTERED.value, selection_match=''), timeout=5):
"""
The acknowledge method allows an alert to be acknowledged.
:param thermostat_identifier: The thermostat identifier to acknowledge the alert for
:param ack_ref: The acknowledge ref of alert
:param ack_type: The type of acknowledgement. Valid values: accept, decline, defer, unacknowledged
:param remind_me_later: Whether to remind at a later date, if this is a defer acknowledgement
:param selection: The selection criteria for the update
:param timeout: Number of seconds requests will wait to establish a connection and to receive a response
:return: An UpdateThermostatResponse object indicating the status of this request
:rtype: EcobeeStatusResponse
:raises EcobeeApiException: If the request results in an ecobee API error response
:raises EcobeeRequestsException: If an exception is raised by the underlying requests module
:raises TypeError: If thermostat_identifier is not a string, ack_ref is not a string, ack_type is not a
member of AckType, remind_me_later is not a boolean, or selection is not an instance of Selection
"""
if not isinstance(thermostat_identifier, six.string_types):
raise TypeError('thermostat_identifier must be an instance of {0}'.format(six.string_types))
if not isinstance(ack_ref, six.string_types):
raise TypeError('ack_ref must be an instance of {0}'.format(six.string_types))
if not isinstance(ack_type, AckType):
raise TypeError('ack_type must be an instance of {0}'.format(AckType))
if not isinstance(remind_me_later, bool):
raise TypeError('remind_me_later must be an instance of {0}'.format(bool))
if not isinstance(selection, Selection):
raise TypeError('selection must be an instance of {0}'.format(Selection))
return self.update_thermostats(selection,
thermostat=None,
functions=[Function(type='acknowledge',
params={'thermostatIdentifier': thermostat_identifier,
'ackRef': ack_ref,
'ackType': ack_type.value,
'remindMeLater': remind_me_later})],
timeout=timeout)
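    # Usage sketch (added; illustrative only). With an authorized EcobeeService instance
    # `service`, an alert pulled from a thermostat could be accepted like this, assuming
    # the AckType enum exposes an ACCEPT member matching the valid values listed above
    # (the identifier and ackRef values below are hypothetical):
    #
    #     response = service.acknowledge(thermostat_identifier='318324702718',
    #                                    ack_ref='alert-ack-ref',
    #                                    ack_type=AckType.ACCEPT)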
def control_plug(self, plug_name, plug_state, start_date_time=None, end_date_time=None,
hold_type=HoldType.INDEFINITE, hold_hours=None, selection=Selection(
selection_type=SelectionType.REGISTERED.value, selection_match=''), timeout=5):
"""
        The control_plug method controls the on/off state of a plug by setting a hold on the plug, creating a hold for
the on or off state of the plug for the specified duration.
Note that an event is created regardless of whether the program is in the same state as the requested state.
:param plug_name: The name of the plug. Ensure each plug has a unique name
:param plug_state: The state to put the plug into. Valid values: PlugState.ON, PlugState.OFF, PlugState.RESUME
:param start_date_time: The start date and time in thermostat time. Must be a timezone aware datetime
:param end_date_time: The end date and time in thermostat time. Must be a timezone aware datetime
:param hold_type: The hold duration type. Valid values: HoldType.DATE_TIME, HoldType.NEXT_TRANSITION,
HoldType.INDEFINITE, and HoldType.HOLD_HOURS
:param hold_hours: The number of hours to hold for, used and required if holdType='holdHours'
:param selection: The selection criteria for the update
:param timeout: Number of seconds requests will wait to establish a connection and to receive a response
:return: An UpdateThermostatResponse object indicating the status of this request
:rtype: EcobeeStatusResponse
:raises EcobeeApiException: If the request results in an ecobee API error response
:raises EcobeeRequestsException: If an exception is raised by the | |
from functools import reduce
# from keras import layers
# from keras import initializers
# from keras import models
# from keras_ import EfficientNetB0, EfficientNetB1, EfficientNetB2
# from keras_ import EfficientNetB3, EfficientNetB4, EfficientNetB5, EfficientNetB6
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras import initializers
from tensorflow.keras import models
# from .tfkeras import EfficientNetB0, EfficientNetB1, EfficientNetB2
# from .tfkeras import EfficientNetB3, EfficientNetB4, EfficientNetB5, EfficientNetB6
from .layers import ClipBoxes, RegressBoxes, FilterDetections, wBiFPNAdd, BatchNormalization
from .initializers import PriorProbability
# from .utils.anchors import anchors_for_shape
import numpy as np
# w_bifpns = [64, 88, 112, 160, 224, 288, 384]
# d_bifpns = [3, 4, 5, 6, 7, 7, 8]
# d_heads = [3, 3, 3, 4, 4, 4, 5]
# image_sizes = [512, 640, 768, 896, 1024, 1280, 1408]
# backbones = [EfficientNetB0, EfficientNetB1, EfficientNetB2,
# EfficientNetB3, EfficientNetB4, EfficientNetB5, EfficientNetB6]
MOMENTUM = 0.997
EPSILON = 1e-4
def SeparableConvBlock(num_channels, kernel_size, strides, name, freeze_bn=False):
f1 = layers.SeparableConv2D(num_channels, kernel_size=kernel_size, strides=strides, padding='same',
use_bias=True, name=f'{name}/conv')
f2 = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON, name=f'{name}/bn')
# f2 = BatchNormalization(freeze=freeze_bn, name=f'{name}/bn')
return reduce(lambda f, g: lambda *args, **kwargs: g(f(*args, **kwargs)), (f1, f2))
def ConvBlock(num_channels, kernel_size, strides, name, freeze_bn=False):
f1 = layers.Conv2D(num_channels, kernel_size=kernel_size, strides=strides, padding='same',
use_bias=True, name='{}_conv'.format(name))
f2 = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON, name='{}_bn'.format(name))
# f2 = BatchNormalization(freeze=freeze_bn, name='{}_bn'.format(name))
f3 = layers.ReLU(name='{}_relu'.format(name))
return reduce(lambda f, g: lambda *args, **kwargs: g(f(*args, **kwargs)), (f1, f2, f3))
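# Note (added for clarity): SeparableConvBlock and ConvBlock do not apply layers to a
# tensor themselves; they return a single callable composed with functools.reduce, so the
# result is used like a Keras layer. A minimal sketch, assuming only the imports above:
#
#     inputs = layers.Input(shape=(64, 64, 3))
#     x = ConvBlock(16, kernel_size=3, strides=1, name='demo')(inputs)       # Conv2D -> BN -> ReLU
#     y = SeparableConvBlock(16, kernel_size=3, strides=1, name='demo2')(x)  # SeparableConv2D -> BN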
def build_wBiFPN(features, num_channels, id, freeze_bn=False):
if id == 0:
_, _, C3, C4, C5 = features
P3_in = C3
P4_in = C4
P5_in = C5
P6_in = layers.Conv2D(num_channels, kernel_size=1, padding='same', name='resample_p6/conv2d')(C5)
P6_in = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON, name='resample_p6/bn')(P6_in)
# P6_in = BatchNormalization(freeze=freeze_bn, name='resample_p6/bn')(P6_in)
P6_in = layers.MaxPooling2D(pool_size=3, strides=2, padding='same', name='resample_p6/maxpool')(P6_in)
P7_in = layers.MaxPooling2D(pool_size=3, strides=2, padding='same', name='resample_p7/maxpool')(P6_in)
P7_U = layers.UpSampling2D()(P7_in)
P6_td = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode0/add')([P6_in, P7_U])
P6_td = layers.Activation(lambda x: tf.nn.swish(x))(P6_td)
P6_td = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode0/op_after_combine5')(P6_td)
P5_in_1 = layers.Conv2D(num_channels, kernel_size=1, padding='same',
name=f'fpn_cells/cell_{id}/fnode1/resample_0_2_6/conv2d')(P5_in)
P5_in_1 = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON,
name=f'fpn_cells/cell_{id}/fnode1/resample_0_2_6/bn')(P5_in_1)
# P5_in_1 = BatchNormalization(freeze=freeze_bn, name=f'fpn_cells/cell_{id}/fnode1/resample_0_2_6/bn')(P5_in_1)
P6_U = layers.UpSampling2D()(P6_td)
P5_td = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode1/add')([P5_in_1, P6_U])
P5_td = layers.Activation(lambda x: tf.nn.swish(x))(P5_td)
P5_td = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode1/op_after_combine6')(P5_td)
P4_in_1 = layers.Conv2D(num_channels, kernel_size=1, padding='same',
name=f'fpn_cells/cell_{id}/fnode2/resample_0_1_7/conv2d')(P4_in)
P4_in_1 = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON,
name=f'fpn_cells/cell_{id}/fnode2/resample_0_1_7/bn')(P4_in_1)
# P4_in_1 = BatchNormalization(freeze=freeze_bn, name=f'fpn_cells/cell_{id}/fnode2/resample_0_1_7/bn')(P4_in_1)
P5_U = layers.UpSampling2D()(P5_td)
P4_td = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode2/add')([P4_in_1, P5_U])
P4_td = layers.Activation(lambda x: tf.nn.swish(x))(P4_td)
P4_td = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode2/op_after_combine7')(P4_td)
P3_in = layers.Conv2D(num_channels, kernel_size=1, padding='same',
name=f'fpn_cells/cell_{id}/fnode3/resample_0_0_8/conv2d')(P3_in)
P3_in = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON,
name=f'fpn_cells/cell_{id}/fnode3/resample_0_0_8/bn')(P3_in)
# P3_in = BatchNormalization(freeze=freeze_bn, name=f'fpn_cells/cell_{id}/fnode3/resample_0_0_8/bn')(P3_in)
P4_U = layers.UpSampling2D()(P4_td)
P3_out = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode3/add')([P3_in, P4_U])
P3_out = layers.Activation(lambda x: tf.nn.swish(x))(P3_out)
P3_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode3/op_after_combine8')(P3_out)
P4_in_2 = layers.Conv2D(num_channels, kernel_size=1, padding='same',
name=f'fpn_cells/cell_{id}/fnode4/resample_0_1_9/conv2d')(P4_in)
P4_in_2 = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON,
name=f'fpn_cells/cell_{id}/fnode4/resample_0_1_9/bn')(P4_in_2)
# P4_in_2 = BatchNormalization(freeze=freeze_bn, name=f'fpn_cells/cell_{id}/fnode4/resample_0_1_9/bn')(P4_in_2)
P3_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P3_out)
P4_out = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode4/add')([P4_in_2, P4_td, P3_D])
P4_out = layers.Activation(lambda x: tf.nn.swish(x))(P4_out)
P4_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode4/op_after_combine9')(P4_out)
P5_in_2 = layers.Conv2D(num_channels, kernel_size=1, padding='same',
name=f'fpn_cells/cell_{id}/fnode5/resample_0_2_10/conv2d')(P5_in)
P5_in_2 = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON,
name=f'fpn_cells/cell_{id}/fnode5/resample_0_2_10/bn')(P5_in_2)
# P5_in_2 = BatchNormalization(freeze=freeze_bn, name=f'fpn_cells/cell_{id}/fnode5/resample_0_2_10/bn')(P5_in_2)
P4_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P4_out)
P5_out = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode5/add')([P5_in_2, P5_td, P4_D])
P5_out = layers.Activation(lambda x: tf.nn.swish(x))(P5_out)
P5_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode5/op_after_combine10')(P5_out)
P5_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P5_out)
P6_out = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode6/add')([P6_in, P6_td, P5_D])
P6_out = layers.Activation(lambda x: tf.nn.swish(x))(P6_out)
P6_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode6/op_after_combine11')(P6_out)
P6_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P6_out)
P7_out = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode7/add')([P7_in, P6_D])
P7_out = layers.Activation(lambda x: tf.nn.swish(x))(P7_out)
P7_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode7/op_after_combine12')(P7_out)
else:
P3_in, P4_in, P5_in, P6_in, P7_in = features
P7_U = layers.UpSampling2D()(P7_in)
P6_td = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode0/add')([P6_in, P7_U])
P6_td = layers.Activation(lambda x: tf.nn.swish(x))(P6_td)
P6_td = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode0/op_after_combine5')(P6_td)
P6_U = layers.UpSampling2D()(P6_td)
P5_td = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode1/add')([P5_in, P6_U])
P5_td = layers.Activation(lambda x: tf.nn.swish(x))(P5_td)
P5_td = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode1/op_after_combine6')(P5_td)
P5_U = layers.UpSampling2D()(P5_td)
P4_td = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode2/add')([P4_in, P5_U])
P4_td = layers.Activation(lambda x: tf.nn.swish(x))(P4_td)
P4_td = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode2/op_after_combine7')(P4_td)
P4_U = layers.UpSampling2D()(P4_td)
P3_out = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode3/add')([P3_in, P4_U])
P3_out = layers.Activation(lambda x: tf.nn.swish(x))(P3_out)
P3_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode3/op_after_combine8')(P3_out)
P3_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P3_out)
P4_out = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode4/add')([P4_in, P4_td, P3_D])
P4_out = layers.Activation(lambda x: tf.nn.swish(x))(P4_out)
P4_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode4/op_after_combine9')(P4_out)
P4_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P4_out)
P5_out = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode5/add')([P5_in, P5_td, P4_D])
P5_out = layers.Activation(lambda x: tf.nn.swish(x))(P5_out)
P5_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode5/op_after_combine10')(P5_out)
P5_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P5_out)
P6_out = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode6/add')([P6_in, P6_td, P5_D])
P6_out = layers.Activation(lambda x: tf.nn.swish(x))(P6_out)
P6_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode6/op_after_combine11')(P6_out)
P6_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P6_out)
P7_out = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode7/add')([P7_in, P6_D])
P7_out = layers.Activation(lambda x: tf.nn.swish(x))(P7_out)
P7_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode7/op_after_combine12')(P7_out)
return P3_out, P4_td, P5_td, P6_td, P7_out
def build_BiFPN(features, num_channels, id, freeze_bn=False):
if id == 0:
_, _, C3, C4, C5 = features
P3_in = C3
P4_in = C4
P5_in = C5
P6_in = layers.Conv2D(num_channels, kernel_size=1, padding='same', name='resample_p6/conv2d')(C5)
P6_in = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON, name='resample_p6/bn')(P6_in)
# P6_in = BatchNormalization(freeze=freeze_bn, name='resample_p6/bn')(P6_in)
P6_in = layers.MaxPooling2D(pool_size=3, strides=2, padding='same', name='resample_p6/maxpool')(P6_in)
P7_in = layers.MaxPooling2D(pool_size=3, strides=2, padding='same', name='resample_p7/maxpool')(P6_in)
P7_U = layers.UpSampling2D()(P7_in)
P6_td = layers.Add(name=f'fpn_cells/cell_{id}/fnode0/add')([P6_in, P7_U])
P6_td = layers.Activation(lambda x: tf.nn.swish(x))(P6_td)
P6_td = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode0/op_after_combine5')(P6_td)
P5_in_1 = layers.Conv2D(num_channels, kernel_size=1, padding='same',
name=f'fpn_cells/cell_{id}/fnode1/resample_0_2_6/conv2d')(P5_in)
P5_in_1 = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON,
name=f'fpn_cells/cell_{id}/fnode1/resample_0_2_6/bn')(P5_in_1)
# P5_in_1 = BatchNormalization(freeze=freeze_bn, name=f'fpn_cells/cell_{id}/fnode1/resample_0_2_6/bn')(P5_in_1)
P6_U = layers.UpSampling2D()(P6_td)
P5_td = layers.Add(name=f'fpn_cells/cell_{id}/fnode1/add')([P5_in_1, P6_U])
P5_td = layers.Activation(lambda x: tf.nn.swish(x))(P5_td)
P5_td = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode1/op_after_combine6')(P5_td)
P4_in_1 = layers.Conv2D(num_channels, kernel_size=1, padding='same',
name=f'fpn_cells/cell_{id}/fnode2/resample_0_1_7/conv2d')(P4_in)
P4_in_1 = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON,
name=f'fpn_cells/cell_{id}/fnode2/resample_0_1_7/bn')(P4_in_1)
# P4_in_1 = BatchNormalization(freeze=freeze_bn, name=f'fpn_cells/cell_{id}/fnode2/resample_0_1_7/bn')(P4_in_1)
P5_U = layers.UpSampling2D()(P5_td)
P4_td = layers.Add(name=f'fpn_cells/cell_{id}/fnode2/add')([P4_in_1, P5_U])
P4_td = layers.Activation(lambda x: tf.nn.swish(x))(P4_td)
P4_td = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode2/op_after_combine7')(P4_td)
P3_in = layers.Conv2D(num_channels, kernel_size=1, padding='same',
name=f'fpn_cells/cell_{id}/fnode3/resample_0_0_8/conv2d')(P3_in)
P3_in = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON,
name=f'fpn_cells/cell_{id}/fnode3/resample_0_0_8/bn')(P3_in)
# P3_in = BatchNormalization(freeze=freeze_bn, name=f'fpn_cells/cell_{id}/fnode3/resample_0_0_8/bn')(P3_in)
P4_U = layers.UpSampling2D()(P4_td)
P3_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode3/add')([P3_in, P4_U])
P3_out = layers.Activation(lambda x: tf.nn.swish(x))(P3_out)
P3_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode3/op_after_combine8')(P3_out)
P4_in_2 = layers.Conv2D(num_channels, kernel_size=1, padding='same',
name=f'fpn_cells/cell_{id}/fnode4/resample_0_1_9/conv2d')(P4_in)
P4_in_2 = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON,
name=f'fpn_cells/cell_{id}/fnode4/resample_0_1_9/bn')(P4_in_2)
# P4_in_2 = BatchNormalization(freeze=freeze_bn, name=f'fpn_cells/cell_{id}/fnode4/resample_0_1_9/bn')(P4_in_2)
P3_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P3_out)
P4_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode4/add')([P4_in_2, P4_td, P3_D])
P4_out = layers.Activation(lambda x: tf.nn.swish(x))(P4_out)
P4_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode4/op_after_combine9')(P4_out)
P5_in_2 = layers.Conv2D(num_channels, kernel_size=1, padding='same',
name=f'fpn_cells/cell_{id}/fnode5/resample_0_2_10/conv2d')(P5_in)
P5_in_2 = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON,
name=f'fpn_cells/cell_{id}/fnode5/resample_0_2_10/bn')(P5_in_2)
# P5_in_2 = BatchNormalization(freeze=freeze_bn, name=f'fpn_cells/cell_{id}/fnode5/resample_0_2_10/bn')(P5_in_2)
P4_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P4_out)
P5_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode5/add')([P5_in_2, P5_td, P4_D])
P5_out = layers.Activation(lambda x: tf.nn.swish(x))(P5_out)
P5_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode5/op_after_combine10')(P5_out)
P5_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P5_out)
P6_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode6/add')([P6_in, P6_td, P5_D])
P6_out = layers.Activation(lambda x: tf.nn.swish(x))(P6_out)
P6_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode6/op_after_combine11')(P6_out)
P6_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P6_out)
P7_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode7/add')([P7_in, P6_D])
P7_out = layers.Activation(lambda x: tf.nn.swish(x))(P7_out)
P7_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode7/op_after_combine12')(P7_out)
else:
P3_in, P4_in, P5_in, P6_in, P7_in = features
P7_U = layers.UpSampling2D()(P7_in)
P6_td = layers.Add(name=f'fpn_cells/cell_{id}/fnode0/add')([P6_in, P7_U])
P6_td = layers.Activation(lambda x: tf.nn.swish(x))(P6_td)
P6_td = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode0/op_after_combine5')(P6_td)
P6_U = layers.UpSampling2D()(P6_td)
P5_td = layers.Add(name=f'fpn_cells/cell_{id}/fnode1/add')([P5_in, P6_U])
P5_td = layers.Activation(lambda x: tf.nn.swish(x))(P5_td)
P5_td = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode1/op_after_combine6')(P5_td)
P5_U = layers.UpSampling2D()(P5_td)
P4_td = layers.Add(name=f'fpn_cells/cell_{id}/fnode2/add')([P4_in, P5_U])
P4_td = layers.Activation(lambda x: tf.nn.swish(x))(P4_td)
P4_td = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode2/op_after_combine7')(P4_td)
P4_U = layers.UpSampling2D()(P4_td)
P3_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode3/add')([P3_in, P4_U])
P3_out = layers.Activation(lambda x: tf.nn.swish(x))(P3_out)
P3_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode3/op_after_combine8')(P3_out)
P3_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P3_out)
P4_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode4/add')([P4_in, P4_td, P3_D])
P4_out = layers.Activation(lambda x: tf.nn.swish(x))(P4_out)
P4_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode4/op_after_combine9')(P4_out)
P4_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P4_out)
P5_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode5/add')([P5_in, P5_td, P4_D])
P5_out = layers.Activation(lambda x: tf.nn.swish(x))(P5_out)
P5_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode5/op_after_combine10')(P5_out)
P5_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P5_out)
P6_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode6/add')([P6_in, P6_td, P5_D])
P6_out = layers.Activation(lambda x: tf.nn.swish(x))(P6_out)
P6_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode6/op_after_combine11')(P6_out)
P6_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P6_out)
P7_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode7/add')([P7_in, P6_D])
P7_out = layers.Activation(lambda x: tf.nn.swish(x))(P7_out)
P7_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode7/op_after_combine12')(P7_out)
return P3_out, P4_td, P5_td, P6_td, P7_out
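# Note (added): in a full detector these cells are stacked, feeding each cell's five
# outputs back in as the next cell's `features`; the commented-out `efficientdet` builder
# below shows the original wiring. A minimal sketch, assuming `backbone_features` holds
# the usual (C1, C2, C3, C4, C5) backbone feature maps and a stack depth of 3:
#
#     fpn_features = backbone_features
#     for i in range(3):
#         fpn_features = build_BiFPN(fpn_features, num_channels=64, id=i)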
# def efficientdet(phi, num_classes=20, num_anchors=9, weighted_bifpn=False, freeze_bn=False,
# score_threshold=0.01, detect_quadrangle=False, anchor_parameters=None, separable_conv=True):
# assert phi in range(7)
# input_size = image_sizes[phi]
# input_shape = (input_size, input_size, 3)
# image_input = layers.Input(input_shape)
# w_bifpn = w_bifpns[phi]
# d_bifpn = d_bifpns[phi]
# w_head = w_bifpn
# d_head = d_heads[phi]
# backbone_cls = backbones[phi]
# features = backbone_cls(input_tensor=image_input, freeze_bn=freeze_bn)
# if weighted_bifpn:
# fpn_features = features
# for i in range(d_bifpn):
# fpn_features = build_wBiFPN(fpn_features, w_bifpn, i, freeze_bn=freeze_bn)
# else:
# fpn_features = features
# for i in range(d_bifpn):
# fpn_features = build_BiFPN(fpn_features, w_bifpn, i, freeze_bn=freeze_bn)
# box_net = BoxNet(w_head, d_head, num_anchors=num_anchors, separable_conv=separable_conv, freeze_bn=freeze_bn,
# detect_quadrangle=detect_quadrangle, name='box_net')
# class_net = ClassNet(w_head, d_head, num_classes=num_classes, num_anchors=num_anchors,
# separable_conv=separable_conv, freeze_bn=freeze_bn, name='class_net')
# classification = [class_net([feature, i]) for i, feature in enumerate(fpn_features)]
# classification = layers.Concatenate(axis=1, name='classification')(classification)
# regression = [box_net([feature, i]) for i, feature in enumerate(fpn_features)]
# regression = layers.Concatenate(axis=1, name='regression')(regression)
#
# model = models.Model(inputs=[image_input], outputs=[classification, regression], name='efficientdet')
#
# # apply predicted regression to anchors
# anchors = anchors_for_shape((input_size, input_size), anchor_params=anchor_parameters)
# anchors_input = np.expand_dims(anchors, axis=0)
# boxes = RegressBoxes(name='boxes')([anchors_input, regression[..., :4]])
# boxes = ClipBoxes(name='clipped_boxes')([image_input, boxes])
#
# # filter detections (apply NMS / score threshold / select top-k)
# if detect_quadrangle:
# detections = FilterDetections(
# name='filtered_detections',
# score_threshold=score_threshold,
# detect_quadrangle=True
# )([boxes, classification, regression[..., 4:8], regression[..., 8]])
# else:
# | |
# Repository: MArtinherz/sportsipy
import pandas as pd
import re
from functools import wraps
from lxml.etree import ParserError, XMLSyntaxError
from pyquery import PyQuery as pq
from urllib.error import HTTPError
from .. import utils
from .constants import PLAYER_SCHEME, PLAYER_URL, ROSTER_URL, DETAILED_STATS
from .player import AbstractPlayer
def _cleanup(prop):
try:
prop = prop.replace('%', '')
prop = prop.replace('$', '')
prop = prop.replace(',', '')
return prop.replace('+', '')
# Occurs when a value is of Nonetype. When that happens, return a blank
# string as whatever came in had an incomplete value.
except AttributeError:
return ''
def _int_property_decorator(func):
@property
@wraps(func)
def wrapper(*args):
if func.__name__ in DETAILED_STATS:
index = args[0]._detailed_stats_index
else:
index = args[0]._index
prop = func(*args)
try:
value = _cleanup(prop[index])
return int(value)
except (ValueError, TypeError, IndexError):
# If there is no value, default to None
return None
return wrapper
def _float_property_decorator(func):
@property
@wraps(func)
def wrapper(*args):
if func.__name__ in DETAILED_STATS:
index = args[0]._detailed_stats_index
else:
index = args[0]._index
prop = func(*args)
try:
value = _cleanup(prop[index])
return float(value)
except (ValueError, TypeError, IndexError):
# If there is no value, default to None
return None
return wrapper
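# Note (added for clarity): these decorators are intended to wrap the plain attribute
# accessors defined on Player below, converting the parsed string for the currently
# selected season (self._index) into a typed value. An illustrative, hypothetical property:
#
#     @_int_property_decorator
#     def games(self):
#         # number of games played, parsed from the stats table
#         return self._games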
class Player(AbstractPlayer):
"""
Get player information and stats for all seasons.
Given a player ID, such as 'BreeDr00' for <NAME>, capture all relevant
stats and information like name, team, height/weight, career starts, single
    season passing yards, sacks, and much more.
By default, the class instance will return the player's career stats, but
single-season stats can be found by calling the instance with the requested
season as denoted on pro-football-reference.com.
Parameters
----------
player_id : string
A player's ID according to pro-football-reference.com, such as
'BreeDr00' for <NAME>. The player ID can be found by navigating to
the player's stats page and getting the string between the final slash
and the '.htm' in the URL. In general, the ID is in the format
'LlllFfNN' where 'Llll' are the first 4 letters in the player's last
name with the first letter capitalized, 'Ff' are the first 2 letters in
the player's first name where the first letter is capitalized, and 'NN'
is a number starting at '00' for the first time that player ID has been
used and increments by 1 for every successive player.
"""
def __init__(self, player_id):
self._most_recent_season = ''
self._detailed_stats_seasons = None
self._index = None
self._detailed_stats_index = None
self._player_id = player_id
self._season = None
self._name = None
self._team_abbreviation = None
self._position = None
self._height = None
self._weight = None
self._birth_date = None
self._games = None
self._games_started = None
self._approximate_value = None
# Passing-specific stats
self._qb_record = None
self._completed_passes = None
self._attempted_passes = None
self._passing_completion = None
self._passing_yards = None
self._passing_touchdowns = None
self._passing_touchdown_percentage = None
self._interceptions_thrown = None
self._interception_percentage = None
self._longest_pass = None
self._passing_yards_per_attempt = None
self._adjusted_yards_per_attempt = None
self._yards_per_completed_pass = None
self._yards_per_game_played = None
self._quarterback_rating = None
self._espn_qbr = None
self._times_sacked = None
self._yards_lost_to_sacks = None
self._net_yards_per_pass_attempt = None
self._adjusted_net_yards_per_pass_attempt = None
self._sack_percentage = None
self._fourth_quarter_comebacks = None
self._game_winning_drives = None
self._yards_per_attempt_index = None
self._net_yards_per_attempt_index = None
self._adjusted_yards_per_attempt_index = None
self._adjusted_net_yards_per_attempt_index = None
self._completion_percentage_index = None
self._touchdown_percentage_index = None
self._interception_percentage_index = None
self._sack_percentage_index = None
self._passer_rating_index = None
# Rushing-specific stats
self._rush_attempts = None
self._rush_yards = None
self._rush_touchdowns = None
self._longest_rush = None
self._rush_yards_per_attempt = None
self._rush_yards_per_game = None
self._rush_attempts_per_game = None
# Advanced rushing stats
self._first_downs_rushing = None
self._rush_yards_before_contact = None
self._rush_yards_before_contact_per_attempt = None
self._rush_yards_after_contact = None
self._rush_yards_after_contact_per_attempt = None
self._rush_broken_tackles = None
self._rush_attempts_per_broken_tackle = None
# Receiving-specific stats
self._times_pass_target = None
self._receptions = None
self._receiving_yards = None
self._receiving_yards_per_reception = None
self._receiving_touchdowns = None
self._longest_reception = None
self._receptions_per_game = None
self._receiving_yards_per_game = None
self._catch_percentage = None
# Advanced receiving stats
self._first_downs_receiving = None
self._receiving_yards_before_catch = None
self._receiving_yards_before_catch_per_reception = None
self._receiving_yards_after_catch = None
self._receiving_yards_after_catch_per_reception = None
self._receiving_broken_tackles = None
self._receptions_per_broken_tackle = None
self._dropped_passes = None
self._drop_percentage = None
# Combined receiving and rushing stats
self._touches = None
self._yards_per_touch = None
self._yards_from_scrimmage = None
self._rushing_and_receiving_touchdowns = None
self._fumbles = None
# Punt/Kick return stats
self._punt_returns = None
self._punt_return_yards = None
self._punt_return_touchdown = None
self._longest_punt_return = None
self._yards_per_punt_return = None
self._kickoff_returns = None
self._kickoff_return_yards = None
self._kickoff_return_touchdown = None
self._longest_kickoff_return = None
self._yards_per_kickoff_return = None
self._all_purpose_yards = None
# Kicking-specific stats
self._less_than_nineteen_yards_field_goal_attempts = None
self._less_than_nineteen_yards_field_goals_made = None
self._twenty_to_twenty_nine_yard_field_goal_attempts = None
self._twenty_to_twenty_nine_yard_field_goals_made = None
self._thirty_to_thirty_nine_yard_field_goal_attempts = None
self._thirty_to_thirty_nine_yard_field_goals_made = None
self._fourty_to_fourty_nine_yard_field_goal_attempts = None
self._fourty_to_fourty_nine_yard_field_goals_made = None
self._fifty_plus_yard_field_goal_attempts = None
self._fifty_plus_yard_field_goals_made = None
self._field_goals_attempted = None
self._field_goals_made = None
self._longest_field_goal_made = None
self._field_goal_percentage = None
self._extra_points_attempted = None
self._extra_points_made = None
self._extra_point_percentage = None
# Punting-specific stats
self._punts = None
self._total_punt_yards = None
self._longest_punt = None
self._blocked_punts = None
# Defensive-specific stats
self._interceptions = None
self._yards_returned_from_interception = None
self._interceptions_returned_for_touchdown = None
self._longest_interception_return = None
self._passes_defended = None
self._fumbles_forced = None
self._fumbles_recovered = None
self._yards_recovered_from_fumble = None
self._fumbles_recovered_for_touchdown = None
self._sacks = None
self._tackles = None
self._assists_on_tackles = None
self._safeties = None
player_data = self._pull_player_data()
if not player_data:
return
self._find_initial_index()
AbstractPlayer.__init__(self, player_id, self._name, player_data)
def __str__(self):
"""
Return the string representation of the class.
"""
return f'{self.name} ({self.player_id})'
def __repr__(self):
"""
Return the string representation of the class.
"""
return self.__str__()
def _build_url(self):
"""
Create the player's URL to pull stats from.
The player's URL requires the first letter of the player's last name
followed by the player ID.
Returns
-------
string
The string URL for the player's stats page.
"""
# The first letter of the player's last name is used to sort the player
# list and is a part of the URL.
first_character = self._player_id[0]
return PLAYER_URL % (first_character, self._player_id)
def _retrieve_html_page(self):
"""
Download the requested player's stats page.
Download the requested page and strip all of the comment tags before
returning a PyQuery object which will be used to parse the data.
Oftentimes, important data is contained in tables which are hidden in
HTML comments and not accessible via PyQuery.
Returns
-------
PyQuery object
The requested page is returned as a queriable PyQuery object with
the comment tags removed.
"""
url = self._build_url()
try:
url_data = pq(url)
except (HTTPError, ParserError):
return None
# For NFL, a 404 page doesn't actually raise a 404 error, so it needs
# to be manually checked.
if 'Page Not Found (404 error)' in str(url_data):
return None
return pq(utils._remove_html_comment_tags(url_data))
def _parse_season(self, row):
"""
Parse the season string from the table.
The season is generally located in the first column of the stats tables
        and should be parsed to denote which season the metrics are being pulled
from.
Parameters
----------
row : PyQuery object
A PyQuery object of a single row in a stats table.
Returns
-------
string
A string representation of the season in the format 'YYYY', such as
'2017'.
"""
season = utils._parse_field(PLAYER_SCHEME, row, 'season')
return season.replace('*', '').replace('+', '')
def _combine_season_stats(self, table_rows, career_stats, all_stats_dict,
detailed):
"""
Combine all stats for each season.
Since all of the stats are spread across multiple tables, they should
be combined into a single field which can be used to easily query stats
at once.
Parameters
----------
table_rows : generator
A generator where each element is a row in a stats table.
career_stats : generator
A generator where each element is a row in the footer of a stats
table. Career stats are kept in the footer, hence the usage.
all_stats_dict : dictionary
A dictionary of all stats separated by season where each key is the
season ``string``, such as '2017', and the value is a
``dictionary`` with a ``string`` of 'data' and ``string``
containing all of the data.
detailed : boolean
A boolean which evaluates to True if the passed table is one of the
advanced stats tables which is labeled as 'detailed' on the site.
Returns
-------
dictionary
Returns an updated version of the passed all_stats_dict which
includes more metrics from the provided table.
"""
most_recent_season = self._most_recent_season
detailed_stats_seasons = []
if not table_rows:
table_rows = []
for row in table_rows:
season = self._parse_season(row)
try:
all_stats_dict[season]['data'] += str(row)
except KeyError:
all_stats_dict[season] = {'data': str(row)}
# Create a list | |
        1) for _ in range(3) // using _ runs the loop 3 times without binding a loop variable.
        a=[[0]*3 for _ in range(3)] # [0]*3 is repeated 3 times.
        print(a)
        =>[[0, 0, 0], [0, 0, 0], [0, 0, 0]]
        col
        row 0 1 2
        0 [0, 0, 0]
        1 [0, 0, 0]
        2 [0, 0, 0]
        col
        row 0 1 2
        0 [0, 1, 0]
        1 [0, 2, 0]
        2 [0, 0, 0]
        a=[[0]*3 for _ in range(3)] # [0]*3 is repeated 3 times.
        a[0][1]=1
        a[1][1]=2
        print(a)
        =>[[0, 1, 0], [0, 2, 0], [0, 0, 0]]
        2) Printing a 2D list as a table
for x in a:
print(x)
=>
[0, 1, 0]
[0, 2, 0]
[0, 0, 0]
        3) Printing a 2D list without brackets
        a=[[0]*3 for _ in range(3)] # [0]*3 is repeated 3 times.
for x in a:
for y in x:
print(y, end=' ')
print()
=>
0 0 0
0 0 0
0 0 0
        [Creating functions]
        =================
        # A function returns a value and then terminates.
def add(a,b):
c=a+b
return c
print(add(3,2))
=>5
        # In Python, two or more values can be returned as a tuple.
def add(a,b):
c=a+b
d=a-b
return c, d
print(add(3,2))
=>(5, 1)
        # Writing a function that prints only prime numbers (numbers from 1 to themselves that are not evenly divisible by anything except 1 and themselves)
def isPrime(x):
for i in range(2,x):
if x%i==0:
        return False # the function terminates here.
return True
a=[12, 13, 7, 9, 19]
for y in a:
if isPrime(y):
print(y, end=' ')
        [Lambda functions]
        ====================
        - Lambda function: an anonymous function or expression
def plus_one(x):
return x+1
print(plus_one(1))
=>2
        # Expressing it as a lambda (anonymous) function (it must be assigned to a variable to be called)
plus_two=lambda x: x+2
print(plus_two(1))
=>3
        * map(function, data to apply the function to)
a=[1,2,3]
print(list(map(plus_one, a)))
=>[2, 3, 4]
        * An anonymous function can be used directly, with no function name needed.
print(list(map(lambda x: x+1, a)))
=>[2, 3, 4]
        [Linear search: finding the position of x]
====================
L = [3,8,2,7,6,10,9]
x = 6
        def linear_search(S, x):
            i=0
            while i<len(S) and S[i] != x:
                i+=1
            if i < len(S):
                return i
            else:
                return -1
def linear_search(S, x):
return S.index(x)
        [Recursive algorithms]
        ===============
        1) Bad example - no termination condition
def sum1(n):
print(n)
        return n + sum1(n -1) # recursively calls itself.
# a = int (input("Number:"))
# print(sum(a))
Number:30
30
29
28
27
26
25
24
23
22
21
-995..
        ..... an error occurs (RecursionError) ....
def sum2(n):
print(n)
if n<= 1:
return n
else:
return n+ sum2(n-1)
        Console output when n=10
10
9
8
7
6
5
4
3
2
1
sum2(10) = 55
a = int(input("Number:"))
print(sum2(a))
'''
return JsonResponse({"MESSAGE": "Hello"}, status=200)
# 2. Linear array algorithm solutions
class ProgrammersExample2View(View):
def get(self, request):
        '''(02) Inserting an element into a sorted list
        Given a list L and an integer x as arguments, complete the function solution that inserts x at the correct position in the list and returns the resulting list.
        The list L given as an argument consists of integer elements and is assumed to be sorted in ascending order.
        For example, if L = [20, 37, 58, 72, 91] and x = 65, the correct return value is [20, 37, 58, 65, 72, 91].
        Hint: one approach is to use a loop to determine the correct position and the insert() method to do the insertion.
        Note: the cases where the given integer is smaller than every element in the list, or larger than every element, must also be handled correctly.
'''
x = 65
L = [20, 37, 58, 72, 91]
        def solution1(L, x): # Failed attempt -> timed out; did not consider stopping the loop
answer = []
for y in L:
if x > y:
if L.index(y) != len(L)-1:
L.insert(L.index(y)+1,x)
else: L.append(x)
elif x < y:
if L.index(y) == 0:
L.insert(0,x)
else:
L.insert(L.index(y)-1,x)
answer = L
return answer
        def solution2(L, x): # Failed attempt -> used enumerate, but again timed out and did not consider stopping the loop.
answer = []
for index, value in enumerate(L) :
if x > value:
if index != len(L)-1:
L.insert(index+1,x)
else: L.append(x)
elif x < value:
if index == 0:
L.insert(0,x)
else:
L.insert(index,x)
answer = L
return answer
def solution3(L, x): # PASS!!!
            # Realized the problem was that the loop kept running, which caused the timeout. Fixing that made the solution pass!!!!!
for index, value in enumerate(L) :
if x > value:
if index == len(L)-1:
L.append(x)
break
else:
continue
else:
L.insert(index,x)
break
return L
        def solution4(L, x): # A solution similar to mine
for idx, num in enumerate(L):
if num > x:
L.insert(idx,x)
break
            if L[-1] < x: # L[-1] refers to the very last number in the list!! This covers the case where x is larger than the last element.
L.append(x)
else:
pass
return L
        def solution5(L, x): # A solution that exploits the problem's guarantee that the list is in ascending order
L.append(x)
L.sort()
return L
return JsonResponse({"RESULT": solution3(L,x)}, status=200)
def post(self, request):
'''
        Problem description
        Within the list L given as an argument, find every index at which the element x (also given as an argument) occurs,
        and complete the function solution that returns a list made up of those indices.
        The list L consists of integers in arbitrary order, and the same element may appear more than once.
        If the integer x is present in the list, find all of its occurrences and return their indices as a list;
        if it is not present, return the single-element list [-1].
        For example, if L = [64, 72, 83, 72, 54] and x = 72, the correct return value is [1, 3].
        As another example, if L = [64, 72, 83, 72, 54] and x = 83, the correct return value is [2].
        As a final example, if L = [64, 72, 83, 72, 54] and x = 49, the correct return value is [-1].
        Hint 1: one approach is to use the list's index() method together with list slicing. List slicing works as follows.
        For L = [6, 2, 8, 7, 3]:
        L[1:3] = [2, 8]
        L[2:] = [8, 7, 3]
        L[:3] = [6, 2, 8]
        Hint 2: the list's index() method raises a ValueError when the element given as an argument is not present in the list.
        You can handle this with try ... except, or check whether the element exists in the list with a condition such as "if x in L".
'''
x= 72
L= [64, 72, 83, 72, 54]
        def solution1(L,x): # My solution) PASS!!
answer =[]
validate=True
for indx, num in enumerate(L):
if x == num:
answer.append(indx)
validate=False
if validate:
answer=[-1]
return answer
        def solution2(L, x): # Another solution: done cleanly with a list comprehension!
if x in L:
                return [i for i, y in enumerate(L) if y == x] # when the condition is satisfied, collect i into the list.
else:
return [-1]
return JsonResponse({"RESULT": solution1(L,x)}, status=200)
# 3. Sorting and searching algorithm solutions
class ProgrammersExample3View(View):
def get (self, request):
'''
        (03) Binary search
        Problem description
        Given a list L and an element x to search for within it,
        complete the function solution() that returns the index of the element whose value equals x.
        If no element in the list L has the same value as x, return -1.
        The list L consists of natural-number elements and is assumed to be sorted by size.
        Also, the same element never appears more than once.
        For example, given the arguments
        L = [2, 3, 5, 6, 9, 11, 15]
        x = 6
        the function must return 3, since L[3] == 6.
        As another example, given
        L = [2, 5, 7, 9, 11]
        x = 4
        the function must return -1, since no element of L equals 4.
        This exercise also evaluates the efficiency of the algorithm. If you implement a sequential (linear) search,
        it may fail the efficiency test cases because it does not meet the time-limit requirements.
'''
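        # Reference sketch (added; not the author's solution): a standard iterative binary
        # search over the sorted list, returning -1 when x is absent.
        #
        #     def binary_search(L, x):
        #         lower, upper = 0, len(L) - 1
        #         while lower <= upper:
        #             middle = (lower + upper) // 2
        #             if L[middle] == x:
        #                 return middle
        #             elif L[middle] < x:
        #                 lower = middle + 1
        #             else:
        #                 upper = middle - 1
        #         return -1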
L = [2, 3, 5, 6, 9, 11, 15]
#L = []
x = 15
#x = 20
def solution1(L, x):
indx = -1
if len(L) == 0:
return indx
lower=0
upper=len(L)-1
tmp =[]
| |
#-----------------------------------------------------#
#
# Copyright (c) 2020-2021 <NAME> <<EMAIL>>
#
# See the LICENSE file for your full rights.
#
#-----------------------------------------------------#
# Imports
#-----------------------------------------------------#
import bpy
import os
from pathlib import Path
import math
from bpy_extras.io_utils import (ImportHelper,
ExportHelper,
path_reference_mode,
)
from bpy.props import (StringProperty,
BoolProperty,
IntProperty,
EnumProperty,
)
from bpy.types import (Panel,
Menu,
Operator,
)
#-----------------------------------------------------#
# handles export panel
#-----------------------------------------------------#
class DarrowExportPanel(bpy.types.Panel):
bl_label = "DarrowFBX"
bl_category = "Darrow Toolkit"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
bl_idname = "DARROW_PT_exportPanel"
@classmethod
def poll(cls, context):
settings = context.preferences.addons['darrow_toolkit'].preferences
obj = context.active_object
preferences = context.preferences
for obj in bpy.context.selected_objects:
if obj.type =='CURVE' : return False
if obj.type =='CAMERA' : return False
if obj.type =='LIGHT' : return False
if obj.type =='FONT' : return False
if obj.type =='LATTICE' : return False
if obj.type =='LIGHT_PROBE' : return False
if obj.type =='IMAGE' : return False
if obj.type =='SPEAKER' : return False
return settings.export_moduleBool == True
#print("poll")
def draw_header(self, context):
layout = self.layout
obj = context.scene
self.layout.prop(obj, 'advancedBool', icon="SETTINGS",text="")
def draw(self, context):
Var_prefix_bool = bpy.context.scene.useprefixBool
Var_suffix_bool = bpy.context.scene.usecounterBool
Var_custom_prefix = bpy.context.scene.PrefixOption
Var_advanced_bool = bpy.context.scene.advancedBool
Var_allowFBX = bpy.context.scene.fbxBool
obj = context.object
objs = context.selected_objects
if context.mode == 'OBJECT':
if obj is not None:
layout = self.layout
obj = context.scene
layout.prop(obj, 'exportPresets')
if Var_advanced_bool ==True:
box = layout.box()
box.label(text = "Animation Options")
split=box.split()
split.prop(obj, 'isleafBool')
split.prop(obj, 'allactionsBool')
box = layout.box()
split=box.split()
split.prop(obj, 'collectionBool')
box = layout.box()
box.label(text = "FBX Exporter")
                if len(objs) != 0:
Var_allowFBX = True
box.operator('export_selected.darrow', icon="EXPORT")
if Var_allowFBX == False:
box.enabled = False
split=box.split()
split.prop(obj, 'useprefixBool')
split.prop(obj, 'usecounterBool')
#If use prefix is selected then these options show up
if Var_prefix_bool == True:
box = layout.box()
box.label(text = "Prefix Options")
box.prop(obj, 'PrefixOption')
#If the custom enum is selected these show up
if Var_custom_prefix == 'OP2':
box.prop(context.scene, "custom_name_string", text="Prefix")
if Var_suffix_bool == True:
box = layout.box()
box.label(text = "Suffix Options")
box.label(text = "Increase the suffix by (+1)")
box.operator('reset.counter')
if context.mode == 'EDIT_MESH':
layout = self.layout
#-----------------------------------------------------#
# Find selected parent collection
#-----------------------------------------------------#
def get_parent_collection_names(collection, parent_names):
for parent_collection in bpy.data.collections:
if collection.name in parent_collection.children.keys():
parent_names.append(parent_collection.name)
get_parent_collection_names(parent_collection, parent_names)
return
def turn_collection_hierarchy_into_path(obj):
parent_collection = obj.users_collection[0]
parent_names = []
parent_names.append(parent_collection.name)
get_parent_collection_names(parent_collection, parent_names)
parent_names.reverse()
return '\\'.join(parent_names)
#-----------------------------------------------------#
# Handles logic for exporting as FBX
#-----------------------------------------------------#
class DarrowExportFBX(bpy.types.Operator, ExportHelper):
bl_idname = "export_selected.darrow"
bl_label = 'Export Selected'
bl_description = "Export selected as FBX using mesh name"
bl_options = {'PRESET'}
    filename_ext = ".fbx"
"""
This class needs massive amounts of refactoring. Very slow.
"""
def execute(self, context):
objs = context.selected_objects
        if len(objs) != 0:
C = bpy.context
fbxname = bpy.context.view_layer.objects.active
#get fbx name
name = bpy.path.clean_name(fbxname.name)
Var_collectionBool = bpy.context.scene.collectionBool
# Find amount of objects in selection
amt = len(C.selected_objects)
one = 1
obj = bpy.context.view_layer.objects.active
parent_coll = turn_collection_hierarchy_into_path(obj)
if (Var_collectionBool == True) and (amt > one):
fbxname = parent_coll
name = bpy.path.clean_name(fbxname)
print(name)
print("MORE THAN 1 MESH, SELECTED. USING COLLECTION NAME TO EXPORT")
bpy.ops.object.make_single_user(object=True, obdata=True, material=False, animation=True)
#option to show in exporter
path_mode = path_reference_mode
#get the name of the active object
#get string of custom prefix user input
customprefix = bpy.context.scene.custom_name_string
#get blend name
blendName = bpy.path.basename(bpy.context.blend_data.filepath).replace(".blend", "")
#Variables for UI, like bools and enums
Var_actionsBool = bpy.context.scene.allactionsBool
Var_leafBool = bpy.context.scene.isleafBool
Var_PrefixBool = bpy.context.scene.useprefixBool
Var_custom_prefix = bpy.context.scene.PrefixOption
Var_presets = bpy.context.scene.exportPresets
Var_counterBool = bpy.context.scene.usecounterBool
if Var_presets == 'OP1':
if (amt > one):
Var_axisUp = 'Y'
Var_axisForward = 'X'
Var_scale = 1
print("more than one, not rotating")
else:
bpy.ops.object.transform_apply(location=False, rotation=True, scale=False)
bpy.context.active_object.rotation_euler[0] = math.radians(-90)
print("rotated -90")
bpy.ops.object.transform_apply(location=False, rotation=True, scale=False)
print("rotations applied")
bpy.context.active_object.rotation_euler[0] = math.radians(90)
print("rotated 90")
Var_axisUp = 'Y'
Var_axisForward = 'X'
Var_scale = 1
print("rotating")
print("Unity Exporter")
if Var_presets == 'OP2':
Var_axisUp = 'Z'
Var_axisForward = '-Y'
Var_scale = 1
print("Unreal Exporter")
Var_nlaBool = False
Var_forcestartkey = False
if Var_presets == 'OP1':
Var_leafBool = False
Var_actionsBool = False
Var_nlaBool = False
Var_forcestartkey = False
print("Unity Vars")
if Var_presets == 'OP2':
Var_nlaBool = False
Var_leafBool = False
Var_actionsBool = False
Var_forcestartkey = True
print("Unreal Vars")
#get the counter and add "1" to it, only when bool is checked
if Var_counterBool == True:
context.scene.counter += 1
count = context.scene.counter
count = str(count)
Var_exportnumber = "_" + count
#If "Use Prefix" box selected, the 2 prefix options will show up in the enum
# Hopefull these vars (deteremined in advanced panel) will overwrite all presets
Var_actionsBool = bpy.context.scene.allactionsBool
Var_leafBool = bpy.context.scene.isleafBool
if Var_PrefixBool == True:
print("USED PREFIX")
#if ".blend enum" is selected, the object will export with custom prefix + mesh name
if Var_custom_prefix == 'OP1':
if not bpy.data.is_saved:
raise Exception("Blend file is not saved")
print("SAVE YOUR FILE")
#If the "export counter" bool is true then we add the counter varable to the end of the save location
if Var_counterBool == True:
saveLoc = self.filepath + "_" + name + Var_exportnumber
self.report({'INFO'}, "Added Counter to the end of mesh")
else:
saveLoc = self.filepath + "_" + name
print(saveLoc)
#handles actual export
bpy.ops.export_scene.fbx(
filepath = saveLoc.replace('.fbx', '')+ ".fbx",
use_mesh_modifiers=True,
bake_anim_use_all_actions = Var_actionsBool,
add_leaf_bones = Var_leafBool,
bake_anim_use_nla_strips = Var_nlaBool,
bake_anim_force_startend_keying = Var_forcestartkey,
check_existing=True,
axis_forward= Var_axisForward,
axis_up= Var_axisUp,
use_selection=True,
global_scale= Var_scale,
path_mode='AUTO')
#print(Var_actionsBool)
#print(Var_leafBool)
self.report({'INFO'}, "Exported with .blend prefix and mesh name")
return {'FINISHED'}
else:
print("No Prefix Defined", context.mode)
#If use "custom" enum is selected, the object will export with custom prefix + mesh name
if Var_custom_prefix == 'OP2':
#If the "export counter" bool is true then we add the counter varable to the end of the save location
if Var_counterBool == True:
customname = customprefix + "_" + name + Var_exportnumber
else:
customname = customprefix + "_" + name
if not bpy.data.is_saved:
saveLoc = self.filepath.replace("untitled","") + customname
else:
saveLoc = self.filepath.replace(blendName,'') + customname
#print(saveLoc)
#export logic
bpy.ops.export_scene.fbx(
filepath = saveLoc.replace(".fbx", '')+ ".fbx",
use_mesh_modifiers=True,
bake_anim_use_all_actions = Var_actionsBool,
add_leaf_bones = Var_leafBool,
bake_anim_use_nla_strips = Var_nlaBool,
bake_anim_force_startend_keying = Var_forcestartkey,
check_existing=True,
axis_forward= Var_axisForward,
axis_up= Var_axisUp,
use_selection=True,
global_scale= Var_scale,
path_mode='AUTO')
#print(Var_actionsBool)
#print(Var_leafBool)
self.report({'INFO'}, "Exported with custom prefix and mesh name")
else:
print("No Prefix Defined", context.mode)
#If the user does not check "use prefix" the object will be exported as the mesh name only
#this is the default "export selected" button
else:
#print("DID NOT USE PREFIX")
#If the "export counter" bool is true then we add the counter varable to the end of the save location
if Var_counterBool == True:
if not bpy.data.is_saved:
#raise Exception("Blend file is not saved")
saveLoc = self.filepath.replace("untitled","") + name + Var_exportnumber
else:
saveLoc = self.filepath.replace(blendName,"") + name + Var_exportnumber
else:
saveLoc = self.filepath.replace(blendName,"") + name
if not bpy.data.is_saved:
#raise Exception("Blend file is not saved")
saveLoc = self.filepath.replace("untitled","") + name
print("SAVE YOUR FILE")
bpy.ops.export_scene.fbx(
filepath = saveLoc.replace('.fbx', '')+ ".fbx",
use_selection=True,
use_mesh_modifiers=True,
bake_anim_use_all_actions = Var_actionsBool,
add_leaf_bones = Var_leafBool,
bake_anim_use_nla_strips = Var_nlaBool,
bake_anim_force_startend_keying = Var_forcestartkey,
check_existing=True,
axis_forward=Var_axisForward,
axis_up= Var_axisUp,
global_scale= Var_scale,
path_mode='AUTO')
#print(saveLoc)
#print(Var_actionsBool)
#print(Var_leafBool)
self.report({'INFO'}, "Exported with mesh name")
if Var_presets == 'OP1':
if (amt > one):
print("more than one, not rotating")
else:
bpy.ops.object.transform_apply(location=False, rotation=True, scale=False)
print("applied rotations")
print("OBJ should be nack to normal")
else:
self.report({'INFO'}, "None Selected")
return {'FINISHED'}
#-----------------------------------------------------#
# handles reseting the suffix counter
#-----------------------------------------------------#
class DarrowCounterReset(bpy.types.Operator):
bl_idname = "reset.counter"
bl_description = "Resets FBX suffix counter"
bl_label = "Reset Suffix Counter"
def execute(self, context):
context.scene.counter = 0
self.report({'INFO'}, "Set suffix count to 0")
return {'FINISHED'}
classes = (DarrowExportPanel, DarrowExportFBX, DarrowCounterReset)
def register():
for cls in classes:
bpy.utils.register_class(cls)
bpy.types.Scene.advancedBool = bpy.props.BoolProperty(
name = "Advanced",
description = "Show advanced options",
default = False
)
bpy.types.Scene.fbxBool = bpy.props.BoolProperty()
bpy.types.Scene.collectionBool = bpy.props.BoolProperty(
name = "Multi-object smart naming",
description = "Use the parent collection name when exporting more than 1 object",
default = True
)
bpy.types.Scene.allactionsBool = bpy.props.BoolProperty(
name = "All actions",
description = "Export each action separated separately",
default = False
)
bpy.types.Scene.isleafBool = bpy.props.BoolProperty(
name = "Leaf bones",
description = "Exporting using leaf bones",
default | |
# Repository: royukira/MyAI
import numpy as np
import random
# ========= Node ===========
class TimeNode(object):
"""
For store the transitions generated at ith time step (i.e. self.time = i )
"""
def __init__(self, time):
self.previous = None
self.time = time
self.transitions = []
self.nextTime = None
def get_time(self):
return self.time
def next_time(self):
return self.nextTime
def extract_transition(self, idx):
"""
Extract transitions from the list
:param idxs: the indies of transitions
:return:
"""
return self.transitions[idx]
def random_extract(self, k):
"""
Randomly extract k transitions that is stored in the stack of transitions
:param k:
:return: a list of transitions
"""
transitions = []
for i in range(k):
            t = self.transitions.pop(random.randrange(len(self.transitions))) # the sampled transition must be popped out
transitions.append(t)
#t = random.sample(self.transitions, k)
return transitions
def delete_transition(self, idx):
"""
Pop the transitions from the list
:param idxs: the indies of transitions
:return:
"""
pop_out = self.transitions.pop(idx)
return pop_out
def insert_transition(self, transition):
"""
Insert the transition
:param transition:
:return:
"""
Tid = [self.time, len(self.transitions)] # the time tag
transition = np.append(transition, Tid)
self.transitions.append(transition)
return transition
def transition_length(self):
"""
get the length of transition
:return:
"""
return len(self.transitions)
def is_empty(self):
"""
Check whether the transition set is empty
:return:
"""
if len(self.transitions) == 0:
return True
else:
return False
# ========= hash ===========
class subMap(object):
def __init__(self):
self.items = []
def add(self, k, v):
self.items.append((k, v))
def get_node(self, k):
for key, val in self.items:
if key == k:
return val
        return False # return False if the key is not found
def remove(self, k):
for key, val in self.items:
if key == k:
self.items.remove((key, val))
def is_transition_empty(self):
if len(self.items) == 0:
            return True # the time node stored in items has already been deleted
time_node = self.items[0][1]
transitionSet = time_node.transitions
for i in range(len(transitionSet)):
if transitionSet[i] is not None:
                return False # not all transitions are None
else:
continue
return True
class hashMap(object):
def __init__(self, n):
self.hmaps = []
for i in range(n):
self.hmaps.append(subMap())
def map_idx(self, time):
"""
        Use hashing to find the corresponding sub-map
:param time:
:return:
"""
idx = hash(time) % len(self.hmaps)
return self.hmaps[idx]
def add(self, time, node):
"""
        Add a node to the corresponding sub-map
:param time:
:param node:
:return:
"""
sub_map = self.map_idx(time)
sub_map.add(time, node)
def remove(self, time):
"""
        Remove the node for the given time from its corresponding sub-map
:param time:
:return:
"""
sub_map = self.map_idx(time)
sub_map.remove(time)
"""
        // cannot delete the sub-map, otherwise it will cause chaos; the time could no longer be matched to the corresponding sub-map
if sub_map.is_empty():
idx = hash(time) % len(self.hmaps)
self.remove_submap(idx)
"""
def remove_submap(self, hmap_idx):
"""
Remove the sub-map
:param hmap_idx:
:return:
"""
self.hmaps.pop(hmap_idx)
def get(self, time):
"""
Get the node of the specific time
:param time:
:return:
"""
idx = self.map_idx(time)
return idx.get_node(time)
def check_empty(self,time):
"""
Check if the specific sub-map is empty
:param time:
:return:
"""
sub_map = self.map_idx(time)
if sub_map.is_transition_empty():
return True
else:
return False
class timeTable(object):
def __init__(self):
self.maps = hashMap(2)
self.num = 0
def get_node(self, time):
"""
get the time node
:param time:
:return:
"""
return self.maps.get(time)
def add(self, time, node):
if self.num == len(self.maps.hmaps):
self.resize()
self.maps.add(time, node)
self.num += 1
def remove(self, time):
self.maps.remove(time)
self.num -= 1
def resize(self):
new_maps = hashMap(self.num * 2)
for m in self.maps.hmaps:
for k, v in m.items:
new_maps.add(k, v)
self.maps = new_maps
def find_oldest(self):
"""
        Find the first sub-map whose time node still holds transitions and return that sub-map
:return: the sub-map item -- [(time, time_node)]
"""
for i in range(len(self.maps.hmaps)):
if self.maps.hmaps[i].is_transition_empty():
continue
else:
return self.maps.hmaps[i]
def k_oldest(self, k):
"""
Find k oldest transitions
:param k:
:return: a list of oldest transitions
"""
oldest_time_node = None
oldest_item = self.find_oldest()
for kk, v in oldest_item.items:
_ = kk # the time id; useless for now
oldest_time_node = v
transition_length = oldest_time_node.transition_length()
if transition_length >= k:
# we can sample k oldest transitions in one time node
oldest_transitions = oldest_time_node.random_extract(k)
else:
rest = k - transition_length
oldest_transitions = oldest_time_node.random_extract(transition_length)
            # Find the rest of the transitions in the next time node
            # keep searching until k transitions have been collected
rest_transitions = []
while True:
next_time_node = oldest_time_node.nextTime # next time node
next_t_length = next_time_node.transition_length() # the length of transition set in the next time node
if next_t_length >= rest:
rest_transitions = next_time_node.random_extract(rest)
for r in rest_transitions:
oldest_transitions.append(r)
break
else:
rest_transitions.append(next_time_node.random_extract(next_t_length))
rest -= next_t_length
return oldest_transitions
# ====== Time tag memory =====
class timeTag(object):
def __init__(self):
self.head = TimeNode(None)
self.length = 0
self.current = self.head
self.timeTable = timeTable()
def add_node(self, time, transitions):
"""
Add the time node to the time chain and the time hash table (the correspondent sub-map)
:param time: ith time step
:param transitions: a list of transitions generated at ith time step
:return:
"""
# Preprocess -- assign the Time ID
for t in range(len(transitions)):
tid = [time, t]
transitions[t] = np.append(transitions[t], tid)
new_node = TimeNode(time)
new_node.transitions = transitions
if self.is_empty():
self.head.nextTime = new_node
new_node.previous = self.head
self.current = new_node
self.timeTable.add(time, new_node) # add to the hash table
else:
self.current.nextTime = new_node
new_node.previous = self.current
self.current = new_node
self.timeTable.add(time, new_node) # add to the hash table
self.length += 1
def get_node(self, time):
return self.timeTable.get_node(time)
def insert_transition(self, transition):
"""
Insert the transition into the latest time node (i.e. self.current)
Note: self.current points at the latest time node
:param transition:
:return:
"""
transition = self.current.insert_transition(transition)
return transition
def extract_transition(self, T, idx):
"""
Extract the specific transition at T time step
If T is current time, directly return the transition using index 'idx'
Else search the time hash table to find T's node, and get the transition in that node using index 'idx'
:param T: the specific time step
:param idx: the index of the transition in the T time node
:return: A transition
"""
if T == self.current.time:
transition = self.current.extract_transition(idx)
else:
T_node = self.timeTable.get_node(T)
if T_node is False:
print('Cannot find the time node of T')
return
transition = T_node.extract_transition(idx)
return transition
def remove_transition(self, T, idx):
"""
Remove the transition from the specific T time node
:param T: the specific time step
:param idx: the index of transition stored at the T time node
:return: None
"""
if T == self.current.time:
self.current.delete_transition(idx)
else:
T_node = self.timeTable.get_node(T)
            if T_node is False:
                print('Cannot find the time node of T')
                return
            #T_node.delete_transition(idx)
            T_node.transitions[idx] = None
def remove_node(self, T):
"""
Remove the T time node from the hash table
Note: only remove the node stored in the sub-map; however, the sub-map is reserved
:param T: the specific time node
:return: None
"""
T_node = self.timeTable.get_node(T)
previous_node = T_node.previous
next_node = T_node.nextTime
if next_node == None:
# That means the previous_node become the last node in the chain
previous_node.nextTime = None
else:
previous_node.nextTime = next_node
next_node.previous = previous_node
self.timeTable.remove(T)
self.length -= 1
def select_k_oldest(self, k):
"""
Select K oldest transitions
:param k:
:return: a list of oldest transitions
"""
oldest_transitions = self.timeTable.k_oldest(k)
return oldest_transitions
def is_empty(self):
"""
Check whether the time chain is empty
:return:
"""
if self.length == 0:
return True
else:
return False
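# Hedged usage sketch (not part of the original module): one way a replay-buffer caller
# could combine select_k_oldest with remove_transition, relying on the [time, index] ID
# that add_node appends to every stored transition. The helper name `_evict_k_oldest`
# and the assumption that transitions are flat numpy arrays ending with (time, index)
# are illustrative only.
def _evict_k_oldest(tag, k):
    """Sample the k oldest transitions and then remove each one from its time node."""
    sampled = tag.select_k_oldest(k)
    for tr in sampled:
        time_id, idx = int(tr[-2]), int(tr[-1])  # the ID appended in timeTag.add_node
        tag.remove_transition(time_id, idx)
    return sampled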
if __name__ == '__main__':
"""
tc = timeChainV1()
tc.add(1)
tc.add(2)
print()
tb = timeTable()
for i in range(100):
tb.add(i, i)
tb.remove(3)
print()"""
def gen_t(k):
transitions = []
if k == 0:
transition = np.hstack((0, 0, 0, 0))
transitions.append(transition)
for i in range(k):
s = 1 + i
a = 2 + i
r = 3 + i
s_ = 4 + i
transition = np.hstack((s, a, r, s_))
transitions.append(transition)
return transitions
tag = timeTag()
t = 30
for tt in range(t):
ts = gen_t(tt)
tag.add_node(tt, ts) # store the transition
new_ts = gen_t(1)
tag.insert_transition(new_ts) # pass // for storing transition
tag.insert_transition(new_ts) # pass
ts_ = tag.extract_transition(28, 23) # pass // for sample the transition
tag.remove_transition(28,23) # pass // for remove the transition
tag.remove_transition(0,0) # pass
tag.remove_transition(1,0) # pass
check = tag.get_node(1) # pass // get a time node in T time step
tag.remove_node(2)
oldest = tag.timeTable.find_oldest() # pass // find the oldest sub-map item -- [(time, time_node)]
oldest_ts = tag.select_k_oldest(4) # pass // find the k oldest transitions
    tag.remove_node(29) # pass // the sub-map is now empty, but the sub-map itself is reserved
from typing import Optional, Sequence
from procset import ProcSet
import pytest
import batsim_py
from batsim_py.events import SimulatorEvent
from batsim_py.events import JobEvent
from batsim_py.events import HostEvent
from batsim_py.jobs import DelayJobProfile
from batsim_py.jobs import Job
from batsim_py.jobs import JobState
from batsim_py.monitors import ConsumedEnergyMonitor, HostMonitor
from batsim_py.monitors import HostPowerStateSwitchMonitor
from batsim_py.monitors import HostStateSwitchMonitor
from batsim_py.monitors import JobMonitor
from batsim_py.monitors import SchedulerMonitor
from batsim_py.monitors import SimulationMonitor
from batsim_py.protocol import Converters
from batsim_py.resources import Host
from batsim_py.resources import Platform
from batsim_py.resources import PowerStateType
from .utils import BatsimPlatformAPI
@pytest.fixture()
def mock_simulator(mocker):
m = mocker.patch("batsim_py.simulator.SimulatorHandler", autospec=True)
m.is_running = False
return m
def start_job_success(name: str,
alloc: Sequence[int],
sub_t: int,
start_t: int,
stop_t: int,
walltime: Optional[int] = None) -> Job:
job = Job(name, "w", len(alloc), DelayJobProfile("a", 10), sub_t, walltime)
job._submit(sub_t)
job._allocate(alloc)
job._start(start_t)
job._terminate(stop_t, JobState.COMPLETED_SUCCESSFULLY)
return job
def start_job_failed(name: str,
alloc: Sequence[int],
sub_t: int,
start_t: int,
stop_t: int,
walltime: Optional[int] = None) -> Job:
job = Job(name, "w", len(alloc), DelayJobProfile("a", 10), sub_t, walltime)
job._submit(sub_t)
job._allocate(alloc)
job._start(start_t)
job._terminate(stop_t, JobState.COMPLETED_FAILED)
return job
def start_job_killed(name: str,
alloc: Sequence[int],
sub_t: int,
start_t: int,
stop_t: int,
walltime: Optional[int] = None) -> Job:
job = Job(name, "w", len(alloc), DelayJobProfile("a", 10), sub_t, walltime)
job._submit(sub_t)
job._allocate(alloc)
job._start(start_t)
job._terminate(stop_t, JobState.COMPLETED_KILLED)
return job
def start_job_rejected(name: str,
alloc: Sequence[int],
sub_t: int,
walltime: Optional[int] = None) -> Job:
job = Job(name, "w", len(alloc), DelayJobProfile("a", 10), sub_t, walltime)
job._submit(sub_t)
job._reject()
return job
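# Hedged refactoring sketch (not part of the original tests): the four start_job_* helpers
# above differ only in how the job terminates, so a single parameterised factory could
# cover them. The name `make_terminated_job` and its default final state are assumptions
# made for this illustration.
def make_terminated_job(name: str,
                        alloc: Sequence[int],
                        sub_t: int,
                        start_t: int,
                        stop_t: int,
                        final_state: JobState = JobState.COMPLETED_SUCCESSFULLY,
                        walltime: Optional[int] = None) -> Job:
    job = Job(name, "w", len(alloc), DelayJobProfile("a", 10), sub_t, walltime)
    job._submit(sub_t)
    job._allocate(alloc)
    job._start(start_t)
    job._terminate(stop_t, final_state)
    return job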
class TestJobMonitor:
@pytest.fixture()
def monitor(self, mock_simulator) -> JobMonitor:
monitor = JobMonitor(mock_simulator)
monitor.on_simulation_begins(mock_simulator)
return monitor
def assert_values(self, info: dict, job: Job, success: int) -> None:
alloc = ProcSet(*job.allocation) if job.allocation else None
assert all(len(v) == 1 for v in info.values())
assert info['job_id'][0] == job.name
assert info['workload_name'][0] == job.workload
assert info['profile'][0] == job.profile.name
assert info['submission_time'][0] == job.subtime
assert info['requested_number_of_resources'][0] == job.res
assert info['requested_time'][0] == job.walltime
assert info['success'][0] == success
assert info['final_state'][0] == str(job.state)
assert info['starting_time'][0] == job.start_time
assert info['execution_time'][0] == job.runtime
assert info['finish_time'][0] == job.stop_time
assert info['waiting_time'][0] == job.waiting_time
assert info['turnaround_time'][0] == job.turnaround_time
assert info['stretch'][0] == job.stretch
assert info['allocated_resources'][0] == alloc
def test_simulation_already_running_must_raise(self, mock_simulator):
mock_simulator.is_running = True
with pytest.raises(RuntimeError) as excinfo:
JobMonitor(mock_simulator)
assert "running" in str(excinfo.value)
def test_subscribe(self, monitor, mock_simulator, mocker):
calls = [
mocker.call(SimulatorEvent.SIMULATION_BEGINS,
monitor.on_simulation_begins),
mocker.call(JobEvent.COMPLETED, monitor.update_info),
mocker.call(JobEvent.REJECTED, monitor.update_info),
]
mock_simulator.subscribe.assert_has_calls(calls, True)
def test_keys(self, monitor):
keys = monitor.info.keys()
assert 'job_id' in keys
assert 'success' in keys
assert 'allocated_resources' in keys
assert 'starting_time' in keys
assert 'execution_time' in keys
assert 'finish_time' in keys
assert 'waiting_time' in keys
assert 'stretch' in keys
assert 'turnaround_time' in keys
assert 'final_state' in keys
assert 'workload_name' in keys
assert 'profile' in keys
assert 'requested_number_of_resources' in keys
assert 'requested_time' in keys
assert 'submission_time' in keys
def test_reset_when_sim_begins(self, monitor: JobMonitor):
assert all(not v for v in monitor.info.values())
def test_job_completed(self, monitor: JobMonitor):
job = start_job_success("n", [0, 1, 10, 20], 0, 20, 100, 100)
monitor.update_info(job)
assert all(len(v) == 1 for v in monitor.info.values())
self.assert_values(monitor.info, job, 1)
def test_job_success_with_failed(self, monitor: JobMonitor):
job = start_job_failed("n", [0, 1, 10, 20], 0, 20, 100, 100)
monitor.update_info(job)
assert all(len(v) == 1 for v in monitor.info.values())
self.assert_values(monitor.info, job, 0)
def test_job_killed(self, monitor: JobMonitor):
job = start_job_killed("n", [0, 1, 10, 20], 0, 20, 100, 100)
monitor.update_info(job)
assert all(len(v) == 1 for v in monitor.info.values())
self.assert_values(monitor.info, job, 0)
def test_job_rejected(self, monitor: JobMonitor):
job = start_job_rejected("n", [0, 1, 10, 20], 0)
monitor.update_info(job)
assert all(len(v) == 1 for v in monitor.info.values())
self.assert_values(monitor.info, job, 0)
class TestSchedulerMonitor:
@pytest.fixture()
def monitor(self, mock_simulator) -> SchedulerMonitor:
monitor = SchedulerMonitor(mock_simulator)
monitor.on_simulation_begins(mock_simulator)
return monitor
def test_simulation_already_running_must_raise(self, mock_simulator):
mock_simulator.is_running = True
with pytest.raises(RuntimeError) as excinfo:
SchedulerMonitor(mock_simulator)
assert "running" in str(excinfo.value)
def test_subscribe(self, monitor, mock_simulator, mocker):
calls = [
mocker.call(SimulatorEvent.SIMULATION_BEGINS,
monitor.on_simulation_begins),
mocker.call(SimulatorEvent.SIMULATION_ENDS,
monitor.on_simulation_ends),
mocker.call(JobEvent.COMPLETED, monitor.on_job_completed),
mocker.call(JobEvent.SUBMITTED, monitor.on_job_submitted),
mocker.call(JobEvent.REJECTED, monitor.on_job_rejected),
]
mock_simulator.subscribe.assert_has_calls(calls, True)
def test_keys(self, monitor):
keys = monitor.info.keys()
assert 'makespan' in keys
assert 'max_slowdown' in keys
assert 'max_stretch' in keys
assert 'max_waiting_time' in keys
assert 'max_turnaround_time' in keys
assert 'mean_slowdown' in keys
assert 'mean_pp_slowdown' in keys
assert 'mean_stretch' in keys
assert 'mean_waiting_time' in keys
assert 'mean_turnaround_time' in keys
assert 'nb_jobs' in keys
assert 'nb_jobs_finished' in keys
assert 'nb_jobs_killed' in keys
assert 'nb_jobs_rejected' in keys
assert 'nb_jobs_success' in keys
def test_values(self, monitor: SchedulerMonitor, mock_simulator):
job_success = start_job_success("1", [0, 1], 5, 20, 100, 50)
monitor.on_job_submitted(job_success)
monitor.on_job_completed(job_success)
job_failed = start_job_failed("2", [0, 1, 2], 10, 20, 150, 50)
monitor.on_job_submitted(job_failed)
monitor.on_job_completed(job_failed)
job_killed = start_job_killed("3", [0, 1, 10, 20], 10, 25, 50, 250)
monitor.on_job_submitted(job_killed)
monitor.on_job_completed(job_killed)
job_rejected = start_job_rejected("4", [0], 10, 100)
monitor.on_job_submitted(job_rejected)
monitor.on_job_rejected(job_rejected)
mock_simulator.current_time = 150
monitor.on_simulation_ends(mock_simulator)
jobs = [job_success, job_failed, job_killed, job_rejected]
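        # The rejected job never ran, so the means below are taken over the 3 finished jobs
        # only, which is why the sums are divided by 3 rather than by len(jobs).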
mean_slowdown = sum(j.slowdown or 0 for j in jobs) / 3
mean_pp_slowdown = sum(j.per_processor_slowdown or 0 for j in jobs) / 3
mean_stretch = sum(j.stretch or 0 for j in jobs) / 3
mean_waiting_time = sum(j.waiting_time or 0 for j in jobs) / 3
mean_turnaround_time = sum(j.turnaround_time or 0 for j in jobs) / 3
assert monitor.info['makespan'] == 150
assert monitor.info['max_slowdown'] == job_killed.slowdown
assert monitor.info['max_stretch'] == job_success.stretch
assert monitor.info['max_waiting_time'] == job_killed.waiting_time
assert monitor.info['max_turnaround_time'] == job_failed.turnaround_time
assert monitor.info['mean_slowdown'] == mean_slowdown
assert monitor.info['mean_pp_slowdown'] == mean_pp_slowdown
assert monitor.info['mean_stretch'] == mean_stretch
assert monitor.info['mean_waiting_time'] == mean_waiting_time
assert monitor.info['mean_turnaround_time'] == mean_turnaround_time
assert monitor.info['nb_jobs'] == 4
assert monitor.info['nb_jobs_finished'] == 3
assert monitor.info['nb_jobs_killed'] == 2
assert monitor.info['nb_jobs_rejected'] == 1
assert monitor.info['nb_jobs_success'] == 1
class TestHostMonitor:
@pytest.fixture()
def monitor(self, mock_simulator):
monitor = HostMonitor(mock_simulator)
mock_simulator.current_time = 0
watt_on = [(100, 200), (200, 300)]
pstates = BatsimPlatformAPI.get_resource_properties(watt_on=watt_on)
pstates = Converters.json_to_power_states(pstates)
hosts = [
Host(0, "0", pstates=pstates),
Host(1, "1", pstates=pstates),
]
mock_simulator.platform = Platform(hosts)
monitor.on_simulation_begins(mock_simulator)
return monitor
def test_simulation_already_running_must_raise(self, mock_simulator):
mock_simulator.is_running = True
with pytest.raises(RuntimeError) as excinfo:
HostMonitor(mock_simulator)
assert "running" in str(excinfo.value)
def test_subscribe(self, monitor, mock_simulator, mocker):
calls = [
mocker.call(SimulatorEvent.SIMULATION_BEGINS,
monitor.on_simulation_begins),
mocker.call(SimulatorEvent.SIMULATION_ENDS,
monitor.on_simulation_ends),
mocker.call(HostEvent.STATE_CHANGED,
monitor.on_host_state_changed),
mocker.call(HostEvent.COMPUTATION_POWER_STATE_CHANGED,
monitor.on_host_state_changed),
]
mock_simulator.subscribe.assert_has_calls(calls, True)
def test_keys(self, monitor):
keys = monitor.info.keys()
assert 'time_idle' in keys
assert 'time_computing' in keys
assert 'time_switching_off' in keys
assert 'time_switching_on' in keys
assert 'time_sleeping' in keys
assert 'consumed_joules' in keys
assert 'energy_waste' in keys
assert 'nb_switches' in keys
assert 'nb_computing_machines' in keys
def test_values(self, monitor, mock_simulator):
host_1 = mock_simulator.platform.get_host(0)
host_2 = mock_simulator.platform.get_host(1)
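        # The expected energy below is accumulated piecewise: at each state change the power
        # of the state just left is multiplied by the elapsed interval. Intervals spent idle
        # or switching count toward energy_waste, while sleeping and computing do not.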
# Switch Off
mock_simulator.current_time = 100
h1_e = host_1.get_default_pstate().watt_idle * 100
h1_e_waste = h1_e
host_1._switch_off()
monitor.on_host_state_changed(host_1)
assert monitor.info['consumed_joules'] == h1_e
assert monitor.info['energy_waste'] == h1_e_waste
# Set Off
mock_simulator.current_time = 175
h1_e += host_1.pstate.watt_idle * 75
h1_e_waste = h1_e
host_1._set_off()
monitor.on_host_state_changed(host_1)
assert monitor.info['consumed_joules'] == h1_e
assert monitor.info['energy_waste'] == h1_e_waste
# Switch On
mock_simulator.current_time = 225
h1_e += host_1.pstate.watt_idle * 50
host_1._switch_on()
monitor.on_host_state_changed(host_1)
assert monitor.info['consumed_joules'] == h1_e
assert monitor.info['energy_waste'] == h1_e_waste
# Set on
mock_simulator.current_time = 325
h1_e += host_1.pstate.watt_idle * 100
h1_e_waste += host_1.pstate.watt_idle * 100
host_1._set_on()
monitor.on_host_state_changed(host_1)
assert monitor.info['consumed_joules'] == h1_e
assert monitor.info['energy_waste'] == h1_e_waste
# Computing
mock_simulator.current_time = 425
h1_e += host_1.pstate.watt_idle * 100
h1_e_waste += host_1.pstate.watt_idle * 100
host_1._allocate("j")
host_1._start_computing()
monitor.on_host_state_changed(host_1)
assert monitor.info['consumed_joules'] == h1_e
assert monitor.info['energy_waste'] == h1_e_waste
# Idle
mock_simulator.current_time = 525
h1_e += host_1.pstate.watt_full * 100
monitor.on_simulation_ends(mock_simulator)
h2_e = host_2.pstate.watt_idle * 525
assert monitor.info['consumed_joules'] == h1_e + h2_e
assert monitor.info['energy_waste'] == h1_e_waste + h2_e
assert monitor.info['time_idle'] == 725
assert monitor.info['time_computing'] == 100
assert monitor.info['time_switching_off'] == 75
assert monitor.info['time_switching_on'] == 100
assert monitor.info['time_sleeping'] == 50
assert monitor.info['nb_switches'] == 4
assert monitor.info['nb_computing_machines'] == 2
class TestSimulationMonitor:
@pytest.fixture()
def monitor(self, mock_simulator, mocker):
monitor = SimulationMonitor(mock_simulator)
mock_simulator.current_time = 0
watt_on = [(100, 200), (200, 300)]
pstates = BatsimPlatformAPI.get_resource_properties(watt_on=watt_on)
pstates = Converters.json_to_power_states(pstates)
hosts = [
Host(0, "0", pstates=pstates),
Host(1, "1", pstates=pstates),
]
mock_simulator.platform = Platform(hosts)
mocker.patch("time.time", return_value=0)
monitor.on_simulation_begins(mock_simulator)
return monitor
def test_simulation_already_running_must_raise(self, mock_simulator):
mock_simulator.is_running = True
with pytest.raises(RuntimeError) as excinfo:
SimulationMonitor(mock_simulator)
assert "running" in str(excinfo.value)
def test_subscribe(self, mock_simulator, mocker):
mocker.patch("batsim_py.monitors.SchedulerMonitor")
mocker.patch("batsim_py.monitors.HostMonitor")
monitor = SimulationMonitor(mock_simulator)
calls = [
mocker.call(SimulatorEvent.SIMULATION_BEGINS,
monitor.on_simulation_begins),
mocker.call(SimulatorEvent.SIMULATION_ENDS,
monitor.on_simulation_ends)
]
batsim_py.monitors.HostMonitor.assert_called_once() # type: ignore
batsim_py.monitors.SchedulerMonitor.assert_called_once() # type: ignore
mock_simulator.subscribe.assert_has_calls(calls, True)
def test_keys(self, monitor):
keys = monitor.info.keys()
assert 'simulation_time' in keys
assert 'consumed_joules' in keys
assert 'makespan' in keys
    def test_values(self, monitor, mocker, mock_simulator):
        mocker.patch("time.time", return_value=325)
        monitor.on_simulation_ends(mock_simulator)
assert monitor.info['simulation_time'] == 325
class TestHostStateSwitchMonitor:
@pytest.fixture()
def monitor(self, mock_simulator):
monitor = HostStateSwitchMonitor(mock_simulator)
mock_simulator.current_time = 0
watt_on = [(100, 200), (200, 300)]
pstates = BatsimPlatformAPI.get_resource_properties(watt_on=watt_on)
pstates = Converters.json_to_power_states(pstates)
hosts = [
Host(0, "0", pstates=pstates),
Host(1, "1", pstates=pstates),
]
mock_simulator.platform = Platform(hosts)
monitor.on_simulation_begins(mock_simulator)
return monitor
def test_simulation_already_running_must_raise(self, mock_simulator):
mock_simulator.is_running = True
with pytest.raises(RuntimeError) as excinfo:
HostStateSwitchMonitor(mock_simulator)
assert "running" in str(excinfo.value)
def test_subscribe(self, monitor, mock_simulator, mocker):
calls = [
mocker.call(SimulatorEvent.SIMULATION_BEGINS,
monitor.on_simulation_begins),
mocker.call(HostEvent.STATE_CHANGED,
monitor.on_host_state_changed),
]
mock_simulator.subscribe.assert_has_calls(calls, True)
    def test_keys(self,
block_adapters,
task,
)
else:
layer_outputs = encoder_layer(hidden_states, attention_mask, past_key_value, block_adapters, task=task, output_attentions=output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
class PrefixBartDecoder(BartDecoder):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a :class:`BartDecoderLayer`
Args:
config: BartConfig
embed_tokens (nn.Embedding): output embedding
"""
def __init__(self, config: BartConfig, embed_tokens=None):
super().__init__(config, embed_tokens)
self.config = config
if config.decoder_prompt_config:
self.prompt_modules = PromptController(config.decoder_prompt_config)
else:
self.prompt_modules = None
self.init_weights()
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using :class:`~transformers.BartTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, encoder_sequence_length)`, `optional`):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]` of length :obj:`config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last
:obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of
shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids`` of shape :obj:`(batch_size,
sequence_length)`.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert :obj:`input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
# create causal mask
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
combined_attention_mask = None
if input_shape[-1] > 1:
combined_attention_mask = _make_causal_mask(
input_shape, self.config.decoder_preseqlen, inputs_embeds.dtype, past_key_values_length=past_key_values_length
).to(self.device)
if attention_mask is not None and combined_attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
combined_attention_mask = combined_attention_mask + _expand_mask(
attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
)
# expand encoder attention mask
if encoder_hidden_states is not None and encoder_attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] + self.config.decoder_preseqlen)
# embed positions
positions = self.embed_positions(input_shape, past_key_values_length)
hidden_states = inputs_embeds + positions
if self.prompt_modules is not None:
prefix_embeds = self.get_prompt(hidden_states.shape[0], hidden_states.device)
hidden_states = torch.cat([prefix_embeds, hidden_states], dim=1)
hidden_states = self.layernorm_embedding(hidden_states)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if output_attentions else None
next_decoder_cache = () if use_cache else None
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
if output_hidden_states:
all_hidden_states += (hidden_states,)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop):
continue
past_key_value = past_key_values[idx] if past_key_values is not None else None
if getattr(self.config, "gradient_checkpointing", False):
if use_cache:
raise ValueError(
"When using `gradient_checkpointing, make sure that `use_cache=False` and `config.use_cache=False`."
)
def create_custom_forward(module):
def custom_forward(*inputs):
# None for past_key_value
return module(*inputs, output_attentions, use_cache)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(decoder_layer),
hidden_states,
combined_attention_mask,
encoder_hidden_states,
encoder_attention_mask,
None,
)
else:
layer_outputs = decoder_layer(
hidden_states,
attention_mask=combined_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
if output_attentions:
all_self_attns += (layer_outputs[1],)
all_cross_attentions += (layer_outputs[2],)
# remove the output for prefix
hidden_states = hidden_states[:, self.config.decoder_preseqlen:]
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
next_cache = next_decoder_cache if use_cache else None
if not return_dict:
return tuple(
v
for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_cache,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attentions,
)
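# Hedged illustration (not part of the original model code): a minimal sketch of the prefix
# mechanism used above -- learned prefix embeddings are prepended along the sequence
# dimension and the attention mask is padded with ones so the prefix is always attended to.
# The function name `_demo_prepend_prefix` is an assumption made for this sketch only.
def _demo_prepend_prefix(prefix_embeds, inputs_embeds, attention_mask):
    """Prepend prefix embeddings to the token embeddings and extend the mask to match."""
    bsz, prefix_len, _ = prefix_embeds.shape
    hidden_states = torch.cat([prefix_embeds, inputs_embeds], dim=1)  # (B, P + L, d_model)
    prefix_mask = attention_mask.new_ones(bsz, prefix_len)            # prefix is never masked
    extended_mask = torch.cat([prefix_mask, attention_mask], dim=1)   # (B, P + L)
    return hidden_states, extended_mask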
class VLBartModel(BartModel):
def __init__(self, config: BartConfig):
super(BartModel, self).__init__(config)
padding_idx, vocab_size = config.pad_token_id, config.vocab_size
self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
if config.use_hyperformer:
self.shared_task_embed = TaskEmbeddingController(config.adapter_config)
else:
self.shared_task_embed = None
#----- Modified-----#
# self.encoder = BartEncoder(config, self.shared)
self.encoder = JointEncoder(config, self.shared, self.shared_task_embed)
#-------------------#
self.decoder = BartDecoder(config, self.shared, self.shared_task_embed)
self.config = config
if config.decoder_prompt_config:
self.prompt_modules = PromptController(config.decoder_prompt_config)
else:
self.prompt_modules = None
self.init_weights()
def get_prompt(self, bsz, device):
input_tokens = self.prefix_tokens.unsqueeze(0).expand(bsz, -1).to(device) # (B, L)
prefix_prompt = self.prefix_embedding(input_tokens) # (B, L, d_model)
temp_results = self.decoder(inputs_embeds=prefix_prompt, use_cache=True, return_dict=True)
past_key_values = temp_results.past_key_values
return past_key_values
def set_input_embeddings(self, value):
self.shared = value
self.encoder.embed_tokens = self.shared
self.decoder.embed_tokens = self.shared
if not self.config.expand_vis_embedding:
self.encoder.visual_embedding.obj_order_embedding = self.shared
def forward(
self,
input_ids=None,
attention_mask=None,
vis_inputs=None,
vis_attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
encoder_outputs=None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
task=None,
**kwargs,
):
# different to other models, Bart automatically creates decoder_input_ids from
# input_ids if no decoder_input_ids are provided
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = shift_tokens_right(
input_ids, self.config.pad_token_id, self.config.decoder_start_token_id
)
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
vis_inputs=vis_inputs,
vis_attention_mask=vis_attention_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
task=task,
)
# If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=False
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
if attention_mask is None:
attention_mask = input_ids.ne(self.config.pad_token_id).to(dtype=torch.float, device=input_ids.device)
if self.config.encoder_prompt_config is not None and self.config.encoder_prompt_config.prompt_len > 0:
prefix_attention_mask = torch.ones(
attention_mask.shape[0],
self.config.encoder_prompt_config.prompt_len,
dtype=attention_mask.dtype,
device=attention_mask.device,
)
attention_mask = torch.cat([prefix_attention_mask, attention_mask], dim=1)
if vis_attention_mask is None:
B, L = attention_mask.size()
V_L = encoder_outputs[0].size(1) - L
vis_attention_mask = attention_mask.new_ones(B, V_L)
encoder_attention_mask = torch.cat([attention_mask, vis_attention_mask], dim=1)
if self.prompt_modules is not None and past_key_values is None:
prefix_embeds = self.prompt_modules(B, attention_mask.device, task)
past_key_values = self.decoder(inputs_embeds=prefix_embeds, use_cache=True, return_dict=True).past_key_values
# decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
encoder_hidden_states=encoder_outputs[0],
# encoder_attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
task=task,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return Seq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
class VLBart(BartForConditionalGeneration):
base_model_prefix = "model"
_keys_to_ignore_on_load_missing = [
r"final_logits_bias",
r"encoder\.version",
r"decoder\.version",
r"lm_head\.weight",
]
| |
#!/usr/bin/env python
import roslib
import rospy
import smach
import smach_ros
import sys
import time
from geometry_msgs.msg import Pose
from hlpr_speech_recognition.speech_listener import SpeechListener
from hlpr_speech_msgs.srv import SpeechService
#from hlpr_speech_synthesis import speech_synthesizer
from hlpr_manipulation_utils import manipulator
from hlpr_manipulation_utils.manipulator import *
from hlpr_manipulation_utils.arm_moveit import *
from modules.utilities.baris_utils import *
_manipulator = None
_arm_planner = None
#_speech_synth = None
_isTheManipulationStateGlobalsInitialized = False
# see if it makes sense to pass these with userdata
def initGlobals():
"""
Initialize manipulator, arm planner from hlpr_manipulation_utils
Initialize speech synthesizer from hlr_speech_synthesis
"""
print 'Initializing manipulation state globals'
global _manipulator
global _arm_planner
global _speech_synth
global _isTheManipulationStateGlobalsInitialized
_arm_planner = ArmMoveIt()
_manipulator = Manipulator()
#_speech_synth = speech_synthesizer.SpeechSynthesizer()
_isTheManipulationStateGlobalsInitialized = True
def sendPlan(arm, plannedTra):
"""
Send the planned trajectory to the arm to follow
"""
traj_goal = FollowJointTrajectoryGoal()
traj_goal.trajectory = plannedTra.joint_trajectory
arm.smooth_joint_trajectory_client.send_goal(traj_goal)#sendWaypointTrajectory(traj_goal)
arm.smooth_joint_trajectory_client.wait_for_result()
return arm.smooth_joint_trajectory_client.get_result()
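# Hedged sketch (not part of the original file): how the states defined below might be wired
# into a smach state machine. The state labels and the exact transition targets are
# assumptions for illustration; only the outcome names declared by each state are used, and
# the object-detection state (which lives in another module) is not included here.
def _demo_build_manipulation_sm():
  sm = smach.StateMachine(outcomes=['succeeded', 'failed', 'aborted'])
  with sm:
    smach.StateMachine.add('MAIN', ManipulateObjectMainState(),
                           transitions={'planPath': 'PLAN_TRAJECTORY',
                                        'moveArm': 'EXECUTE_TRAJECTORY',
                                        'gripper': 'USE_GRIPPER',
                                        'pickRetract': 'PICK_AND_RETRACT',
                                        'findObject': 'failed',  # detection state omitted in this sketch
                                        'succeeded': 'succeeded',
                                        'failed': 'failed',
                                        'aborted': 'aborted'})
    smach.StateMachine.add('PLAN_TRAJECTORY', PlanTrajectoryState(),
                           transitions={'succeeded': 'MAIN', 'failed': 'MAIN', 'aborted': 'aborted'})
    smach.StateMachine.add('EXECUTE_TRAJECTORY', ExecuteTrajectoryState(),
                           transitions={'succeeded': 'MAIN', 'failed': 'MAIN', 'aborted': 'aborted'})
    smach.StateMachine.add('USE_GRIPPER', UseGripperState(),
                           transitions={'succeeded': 'MAIN', 'failed': 'MAIN', 'aborted': 'aborted'})
    smach.StateMachine.add('PICK_AND_RETRACT', CompositePickAndRetractState(),
                           transitions={'succeeded': 'MAIN', 'failed': 'MAIN', 'aborted': 'aborted'})
  return sm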
class ManipulateObjectMainState(smach.State):
"""
Central state for the Manipulation Sub FSM
"""
def __init__(self):
smach.State.__init__(self,
outcomes=['moveArm', 'gripper', 'findObject', 'succeeded', 'failed', 'aborted','planPath', 'pickRetract'],
input_keys=['armGoalIn', 'graspResultIn', 'objectLocationIn', 'pathFoundIn', 'pickResultIn','targetPoseIn'],
output_keys=['traOut','gripperCommandOut','statusOut'])
# This keeps track of the current state
self.status = 'begin'
# Load manipulator, arm planner and speech synthesizer as global variables
if not _isTheManipulationStateGlobalsInitialized:
initGlobals()
#self.ss = _speech_synth
def execute(self, userdata):
"""
Define state transitions within manipulation sub FSM
"""
userdata.statusOut = None
rospy.loginfo('Manipulate object substate machine')
    if self.status == 'begin':
self.status = 'findObject'
return 'findObject'
    elif self.status == 'findObject':
if userdata.objectLocationIn is None:
print 'Failed because no object found'
userdata.statusOut = 'failed'
return 'failed'
self.status = 'planning'
return 'planPath'
    elif self.status == 'planning':
if not userdata.pathFoundIn:
print 'Failed because no plan found'
userdata.statusOut = 'failed'
return 'failed'
self.status = 'preGrasp'
return 'moveArm'
    elif self.status == 'preGrasp':
if userdata.targetPoseIn is None:
print 'Failed because could not execute'
userdata.statusOut = 'failed'
return 'failed'
else:
self.status = 'pickRetract'
return 'pickRetract'
#userdata.gripperCommandOut = 'close'
#userdata.traOut = None
#self.status = 'grasp'
    elif self.status == 'pickRetract':
if userdata.pickResultIn:
self.status = 'success'
return 'succeeded'
else:
return 'failed'
else:
print 'How the hell did I manage to come here! Status: ' + str(self.status)
#self.ss.say('Human I am not in a correct state, how did I get here?')
time.sleep(1)
return 'failed'
#elif self.status is 'grasp':
# if userdata.graspResultIn:
# userdata.gripperCommandOut = None
# userdata.traOut = 'retract'
# self.status = 'retract'
# else:
# userdata.gripperCommandOut = None
# userdata.traOut = 'retract'
# #self.status =
class ManipulateObjectState(smach.State):
"""
This state is not being used in the FSM
"""
def __init__(self):
smach.State.__init__(self,
outcomes=['succeeded','aborted'],
input_keys=['armGoalIn'])
def execute(self, userdata):
rospy.loginfo('Moving the arm')
    if userdata.armGoalIn == 'object':
act = 'grasping'
else:
act = 'handing over'
for i in range(0,11):
sys.stdout.write('-')
sys.stdout.flush()
time.sleep(0.5)
      if i == 5:
sys.stdout.write('- ' + act + ' -')
sys.stdout.flush()
time.sleep(2)
print '> Good!'
time.sleep(0.5)
return 'succeeded'
class ExecuteTrajectoryState(smach.State):
"""
Executes the trajectory provide by
Plan Trajectory state
"""
def __init__(self):
smach.State.__init__(self,
outcomes=['succeeded','failed','aborted'],
input_keys=['trajectoryIn'],
output_keys=['execResultOut'])
# Initializing manipulation and speech modules
if not _isTheManipulationStateGlobalsInitialized:
initGlobals()
self.arm = _manipulator.arm # Arm()
#self.ss = _speech_synth #speech_synthesizer.SpeechSynthesizer()
# Trajectory for person hardcoded as waypoints
handOffTra = [[-1.90, 1.50, 0.50, -2.00, 3.00, 0.72],
[-1.80, 1.80, 1.00, -2.10, 2.50, 0.72],
[-1.70, 2.00, 1.00, -2.20, 2.00, 0.90],
[-1.60, 2.20, 0.80, -2.20, 1.50, 1.20],
[-1.60, 2.40, 1.00, -2.50, 1.50, 1.20],
[-1.60, 2.60, 1.20, -3.14, 1.50, 1.20]]
self.traDict = {'person':handOffTra}
self.functionDict = {'retract':self.arm.upper_tuck}
def execute(self, userdata):
"""
Checks if the trajectory is for person, retract or towards object and executes
Object Trajectory is passed fully planned
Human Trajectory is hardcoded as waypoints and specified above
Retract Trajectory is specified as a function call
"""
rospy.loginfo('Moving the arm')
userdata.execResultOut = None
if userdata.trajectoryIn is None:
print 'Received a None trajectory, this should not have happened'
return 'failed'
print 'Executing trajectory'
#self.ss.say("Now executing the arm trajectory")
if not isinstance(userdata.trajectoryIn, str): # Check if Trajectory is to reach object
if isinstance(userdata.trajectoryIn, list):
self.arm.sendWaypointTrajectory(userdata.trajectoryIn)
else:
if len(userdata.trajectoryIn.joint_trajectory.points) < 1:
return 'failed'
sendPlan(self.arm, userdata.trajectoryIn)
else:
try: # Check if Trajectory is for retract
self.functionDict[userdata.trajectoryIn]()
except KeyError: # Check if Trajectory is for person
try:
self.arm.sendWaypointTrajectory(self.traDict[userdata.trajectoryIn])
except KeyError:
print 'Do not know how to execute ' + userdata.trajectoryIn
return 'failed'
userdata.execResultOut = 'done'
return 'succeeded'
class UseGripperState(smach.State):
"""
This gives the open/close command to gripper as per default setting from FSM or speech input
"""
def __init__(self):
smach.State.__init__(self,
outcomes=['succeeded','failed','aborted'],
input_keys=['gripperCommandIn','waitForSpeech'],
output_keys = ['resultOut'])
self.gripper_command = None
# Initializing manipulation and speech modules
self.service_topic = rospy.get_param(SpeechListener.SERVICE_TOPIC_PARAM, None)
if self.service_topic is None:
rospy.logerr("Exiting: No speech topic given, is speech listener running?")
exit()
rospy.logwarn("Waiting for speech service")
rospy.wait_for_service(self.service_topic)
self.speech_service = rospy.ServiceProxy(self.service_topic, SpeechService)
rospy.logwarn("Speech service loaded")
if not _isTheManipulationStateGlobalsInitialized:
initGlobals()
self.gripper = _manipulator.gripper
def execute(self, userdata):
"""
Passes the close/open command to gripper and outputs
"""
rospy.loginfo('Using the gripper, how exciting!')
# wait for speech input if flag set true
# instead use the default gripper command
if userdata.waitForSpeech:
while not rospy.is_shutdown():
try: # Fetch last speech command
response = self.speech_service(True)
self.last_command = response.speech_cmd
except rospy.ServiceException:
self.last_command = None
if self.last_command == 'OPEN_HAND':
self.gripper_command = 'open'
break
if self.last_command == 'CLOSE_HAND':
self.gripper_command = 'close'
break
if self.last_command == 'END':
return 'aborted'
time.sleep(0.2)
else:
self.gripper_command = userdata.gripperCommandIn # Fetch user data if no gripper speech command
print 'Executing command ' + self.gripper_command
if self.gripper_command == 'open':
self.gripper.open() # Execute open command
elif self.gripper_command == 'close':
self.gripper.close() # Execute close command
for i in range(0,2):
sys.stdout.write('-')
sys.stdout.flush()
time.sleep(0.5)
print '> Done!'
userdata.resultOut = True
time.sleep(0.5)
return 'succeeded'
class PlanTrajectoryState(smach.State):
"""
This state takes in the tranform from root to object
and returns Planned trajectory, target pose to grab object and path found flag
"""
def __init__(self):
smach.State.__init__(self,
outcomes=['succeeded','failed','aborted'],
input_keys=['objectLocationIn'],
output_keys=['traOut','pathFoundOut','targetPoseOut'])
# Initialize manipulation and speech modules
if not _isTheManipulationStateGlobalsInitialized:
initGlobals()
self.arm_planner = _arm_planner
#self.ss = _speech_synth #speech_synthesizer.SpeechSynthesizer()
self.targetPose = None
def execute(self, userdata):
"""
Convert the object transform to a pose
Use moveit arm planner to plan trajectory just above the object
"""
rospy.loginfo('Calculating path')
userdata.pathFoundOut = False
userdata.traOut = None
userdata.targetPoseOut = None
print 'Creating the trajectory to pose ' + str(userdata.objectLocationIn)
#self.ss.say('Creating the trajectory')# to pose ' + str(userdata.objectLocationIn))
# Calculating target Pose for object
# The target pose translation has offset in +ve z direction wrt to object pose
# The target pose orientation is manually set to keep gripper facing down
self.targetPose = transform2pose(userdata.objectLocationIn)
if self.targetPose is None:
print 'Received None pose, this should not have happened'
#self.ss.say('Invalid pose received')
return 'failed'
#self.targetPose.position.z += 0.25
self.targetPose.position.z += 0.12 # TODO: verify on the real robot (I think this takes into account the old EEF offset) which was 0.13 m
self.targetPose.orientation = quatFromAngleAxis([-0.5094, 0.5094, 0.5094])
# Pass target pose to moveit arm planner
self.arm_planner.group[0].set_pose_reference_frame('base_link')
plannedTra = self.arm_planner.plan_poseTargetInput(self.targetPose)
#joints = self.arm_planner.get_IK(self.targetPose)
#plannedTra = self.arm_planner.plan_jointTargetInput(joints)
if plannedTra is None:
print 'Could not find a plan'
#self.ss.say('Could not find a plan')
time.sleep(0.5)
return 'failed'
#self.ss.say("done!")
time.sleep(0.5)
userdata.traOut = plannedTra # Planned Trajectory output
userdata.pathFoundOut = True # Flag to check if path found
userdata.targetPoseOut = self.targetPose # Target Pose for planned trajectory
return 'succeeded'
class CompositePickAndRetractState(smach.State):
"""
This state grabs the object and retracts the arm
"""
def __init__(self, ik_root = 'base_link'):
smach.State.__init__(self,
outcomes=['succeeded','failed','aborted'],
input_keys=['initArmPoseIn'],
output_keys=['pickResultOut'])
# Check for initialization of manipulation and speech utils
if not _isTheManipulationStateGlobalsInitialized:
initGlobals()
# Pass global manipulation and speech utils as local variables
#self.ss = _speech_synth
self.manip = _manipulator
self.arm_planner = _arm_planner
self.ik_root = ik_root
def execute(self, userdata):
"""
Move the arm down, close the gripper and execute retract trajectory
"""
rospy.loginfo('Picking and retracting')
userdata.pickResultOut = False
# Check for a recieved target pose
if userdata.initArmPoseIn is None:
print 'Received a None pose, this should not have happened'
return 'failed'
# Set target pose a fixed -ve offset in z-direction from current pose
# Find straight-line EE trajectory from current to target pose
curPose = rosPoseCopy(userdata.initArmPoseIn)
targetPose = rosPoseCopy(userdata.initArmPoseIn)
targetPose.position.z = max(0.91, targetPose.position.z-0.05)
poses = straightLinePoses(curPose, targetPose)
# Check if the EE trajectory satisfies the IK.
# Return failure if not possible.
# Execute trajectory if possible
jntWps = self.arm_planner.wayPointIK(poses, 3, self.ik_root)
if jntWps is None:
print 'Could not calculate | |
from __main__ import vtk, qt, ctk, slicer
import os
#
# LabelStatistics
#
class BenderLabelStatistics:
def __init__(self, parent):
import string
parent.title = "Bender Label Statistics"
parent.categories = ["Quantification"]
parent.contributors = ["<NAME> (Isomics), <NAME> (Kitware)"]
parent.dependencies = ["CropVolume"]
parent.helpText = string.Template("""
Use this module to calculate counts and volumes for different labels of a label map plus statistics on the grayscale background volume. Note: volumes must have same dimensions. See <a href=\"$a/Documentation/$b.$c/Modules/LabelStatistics\">$a/Documentation/$b.$c/Modules/LabelStatistics</a> for more information.
""").substitute({ 'a':'http://public.kitware.com/Wiki/Bender', 'b':2, 'c':0 })
parent.acknowledgementText = """
Supported by Air Force Research Laboratory (AFRL) and the Slicer Community. See http://www.slicer.org for details.
"""
self.parent = parent
#
# qSlicerPythonModuleExampleWidget
#
class BenderLabelStatisticsWidget:
def __init__(self, parent=None):
if not parent:
self.setup()
self.parent.show()
else:
self.parent = parent
self.logic = BenderLabelStatisticsLogic()
self.grayscaleNode = None
self.labelmapNode = None
self.roiNode = None
self.fileName = None
self.fileDialog = None
self.label = None
def setup(self):
loader = qt.QUiLoader()
moduleName = 'BenderLabelStatistics'
scriptedModulesPath = eval('slicer.modules.%s.path' % moduleName.lower())
scriptedModulesPath = os.path.dirname(scriptedModulesPath)
path = os.path.join(scriptedModulesPath, 'Resources', 'UI', 'BenderLabelStatistics.ui')
qfile = qt.QFile(path)
qfile.open(qt.QFile.ReadOnly)
widget = loader.load( qfile, self.parent )
self.layout = self.parent.layout()
self.widget = widget;
self.layout.addWidget(widget)
# input filters
self.get('labelmapSelector').addAttribute( "vtkMRMLScalarVolumeNode", "LabelMap", "1" )
# Fill options
self.populateChartOptions()
# icons
saveIcon = self.get('BenderLabelStatistics').style().standardIcon(qt.QStyle.SP_DialogSaveButton)
self.get('saveButton').icon = saveIcon
# connections
self.get('applyButton').connect('clicked(bool)', self.computeStatistics)
self.get('chartButton').connect('clicked()', self.onChart)
self.get('saveButton').connect('clicked()', self.onSave)
self.get('labelmapSelector').connect('currentNodeChanged(vtkMRMLNode*)', self.onLabelMapSelected)
self.get('parametersPercentages').connect('stateChanged(int)', self.onComputePercentagesEnabled)
self.get('parametersLabel').connect('valueChanged(int)', self.onLabelValueChanged)
self.onComputePercentagesEnabled()
self.widget.setMRMLScene(slicer.mrmlScene)
def populateChartOptions(self):
grayscaleEnabled = (self.grayscaleNode != None)
options = self.get('chartOption')
options.clear()
colorNode = None
if self.labelmapNode:
try:
displayNode = self.labelmapNode.GetDisplayNode()
colorNode = displayNode.GetColorNode()
except AttributeError:
return
for key in self.logic.statistics:
if self.logic.isKeyValid(key, grayscaleEnabled, self.label):
options.addItem(key, key)
options.setCurrentIndex(0)
def onLabelValueChanged(self):
if self.get('parametersLabel').enabled:
self.label = self.get('parametersLabel').value
else:
self.label = None
def onLabelMapSelected(self, node):
self.labelmapNode = node
self.get('applyButton').enabled = (self.labelmapNode != None)
def onComputePercentagesEnabled(self):
self.get('parametersLabel').enabled = self.get('parametersPercentages').isChecked()
self.onLabelValueChanged()
def computeStatistics(self, run):
"""Calculate the label statistics
"""
if not run:
return
self.labelmapNode = self.get('labelmapSelector').currentNode()
self.grayscaleNode = self.get('grayscaleSelector').currentNode()
self.roiNode = self.get('ROISelector').currentNode()
self.get('applyButton').setChecked(True)
self.logic.computeStatistics(self.grayscaleNode, self.labelmapNode, self.roiNode, self.label)
self.populateStats()
self.populateChartOptions()
self.get('chartFrame').enabled = (self.labelmapNode != None)
self.get('saveButton').enabled = (self.labelmapNode != None)
self.get('applyButton').setChecked(False)
def onChart(self):
"""chart the label statistics
"""
valueToPlot = self.get('chartOption').itemData(self.get('chartOption').currentIndex)
ignoreZero = self.get('chartIgnoreZero').checked
self.logic.createStatsChart(self.labelmapNode, valueToPlot, ignoreZero)
def onSave(self):
"""save the label statistics
"""
if not self.fileDialog:
self.fileDialog = qt.QFileDialog(self.parent)
self.fileDialog.options = self.fileDialog.DontUseNativeDialog
self.fileDialog.acceptMode = self.fileDialog.AcceptSave
self.fileDialog.defaultSuffix = "csv"
self.fileDialog.setNameFilter("Comma Separated Values (*.csv)")
self.fileDialog.connect("fileSelected(QString)", self.onFileSelected)
self.fileDialog.show()
def onFileSelected(self,fileName):
self.logic.saveStats(fileName, self.grayscaleNode != None, self.label)
def populateStats(self):
if not self.logic:
return
try:
displayNode = self.labelmapNode.GetDisplayNode()
colorNode = displayNode.GetColorNode()
lut = colorNode.GetLookupTable()
except AttributeError:
return
grayscaleEnabled = (self.grayscaleNode != None)
self.items = []
self.model = qt.QStandardItemModel()
self.get('view').setModel(self.model)
self.get('view').verticalHeader().visible = False
row = 0
for i in self.logic.labelStats["Labels"]:
color = qt.QColor()
rgb = lut.GetTableValue(i)
color.setRgb(rgb[0]*255,rgb[1]*255,rgb[2]*255)
item = qt.QStandardItem()
item.setEditable(False)
item.setData(color,1)
item.setText(colorNode.GetColorName(i))
item.setToolTip(colorNode.GetColorName(i))
self.model.setItem(row,0,item)
self.items.append(item)
col = 1
for key in self.logic.statistics:
if self.logic.isKeyValid(key, grayscaleEnabled, self.label):
item = qt.QStandardItem()
item.setText(str(self.logic.labelStats[i,key]))
item.setToolTip(colorNode.GetColorName(i))
self.model.setItem(row,col,item)
self.items.append(item)
col += 1
row += 1
self.get('view').setColumnWidth(0,30)
self.model.setHeaderData(0,1,"Color")
col = 1
for key in self.logic.statistics:
if self.logic.isKeyValid(key, grayscaleEnabled, self.label):
self.get('view').setColumnWidth(col,15*len(key))
self.model.setHeaderData(col,1,key)
col += 1
self.get('view').resizeColumnsToContents()
### Widget methods ###
def get(self, objectName):
return self.findWidget(self.widget, objectName)
def findWidget(self, widget, objectName):
if widget.objectName == objectName:
return widget
else:
children = []
for w in widget.children():
resulting_widget = self.findWidget(w, objectName)
if resulting_widget:
return resulting_widget
return None
class BenderLabelStatisticsLogic:
"""Implement the logic to calculate label statistics.
Nodes are passed in as arguments.
Results are stored as 'statistics' instance variable.
"""
def __init__(self):
'''
The statistics dictionnary gathers all the different statistics ran by the
logic. For each key, a booelan value is associated to know whether the
statistics depends on the presence of a graysacle image. If it does not
depend on the grayscale image then it is True, False otherwise.
'''
self.statistics = {"Index" : True,
"Count" : True,
"Volume (cubic millimeter)" : True,
"Minimum" : False,
"Maximum" : False,
"Mean" : False,
"Standard deviation" : False,
"Percentage of total volume" : True,
"Percentage of label volume" : True,
"Percentage of total volume except label volume" : True,
}
self.labelStats = {}
def isKeyPercentage(self, key):
    return key.split(' ')[0] == 'Percentage'
def isKeyPercentageAndValid(self, key, label):
return (label != None) if self.isKeyPercentage(key) else True
def isKeyGrayscaleAndValid(self, key, grayscaleEnabled):
return self.statistics[key] or grayscaleEnabled
def isKeyValid(self, key, grayscaleEnabled, label):
return self.isKeyPercentageAndValid(key, label) and self.isKeyGrayscaleAndValid(key, grayscaleEnabled)
def computeStatistics(self, grayscaleNode, labelmapNode, roiNode = None, label=None):
if not labelmapNode:
return
cubicMMPerVoxel = reduce(lambda x,y: x*y, labelmapNode.GetSpacing())
ccPerCubicMM = 0.001
# TODO: progress and status updates
# this->InvokeEvent(vtkLabelStatisticsLogic::StartLabelStats, (void*)"start label stats")
self.labelStats = {}
self.labelStats['Labels'] = []
labelmapImageData = labelmapNode.GetImageData()
croppedImageNode = None
if roiNode:
# Rely on crop volume logic
cropLogic = slicer.modules.cropvolume.logic()
croppedImageNode = slicer.vtkMRMLScalarVolumeNode()
cropLogic.SnapROIToVoxelGrid(roiNode, labelmapNode)
cropLogic.CropVoxelBased(roiNode, labelmapNode, croppedImageNode)
labelmapImageData = croppedImageNode.GetImageData()
stataccum = vtk.vtkImageAccumulate()
stataccum.SetInput(labelmapImageData)
stataccum.Update()
lo = int(stataccum.GetMin()[0])
hi = int(stataccum.GetMax()[0])
totalNumberOfLabel = stataccum.GetVoxelCount()
totalVolume = totalNumberOfLabel * cubicMMPerVoxel
for i in xrange(lo,hi+1):
# this->SetProgress((float)i/hi);
# std::string event_message = "Label "; std::stringstream s; s << i; event_message.append(s.str());
# this->InvokeEvent(vtkLabelStatisticsLogic::LabelStatsOuterLoop, (void*)event_message.c_str());
# logic copied from slicer3 LabelStatistics
# to create the binary volume of the label
# //logic copied from slicer2 LabelStatistics MaskStat
# // create the binary volume of the label
thresholder = vtk.vtkImageThreshold()
thresholder.SetInput(labelmapImageData)
thresholder.SetInValue(1)
thresholder.SetOutValue(0)
thresholder.ReplaceOutOn()
thresholder.ThresholdBetween(i,i)
if grayscaleNode:
thresholder.SetOutputScalarType(grayscaleNode.GetImageData().GetScalarType())
else:
thresholder.SetOutputScalarType(labelmapImageData.GetScalarType())
thresholder.Update()
# this.InvokeEvent(vtkLabelStatisticsLogic::LabelStatsInnerLoop, (void*)"0.25");
# use vtk's statistics class with the binary labelmap as a stencil
stencil = vtk.vtkImageToImageStencil()
stencil.SetInput(thresholder.GetOutput())
stencil.ThresholdBetween(1, 1)
# this.InvokeEvent(vtkLabelStatisticsLogic::LabelStatsInnerLoop, (void*)"0.5")
stat1 = vtk.vtkImageAccumulate()
if grayscaleNode != None:
stat1.SetInput(grayscaleNode.GetImageData())
else:
stat1.SetInput(labelmapImageData)
stat1.SetStencil(stencil.GetOutput())
stat1.Update()
# this.InvokeEvent(vtkLabelStatisticsLogic::LabelStatsInnerLoop, (void*)"0.75")
if stat1.GetVoxelCount() > 0:
# add an entry to the LabelStats list
self.labelStats["Labels"].append(i)
self.labelStats[i,"Index"] = i
self.labelStats[i,"Count"] = stat1.GetVoxelCount()
self.labelStats[i,"Volume (cubic millimeter)"] = self.labelStats[i,"Count"] * cubicMMPerVoxel
if grayscaleNode != None:
self.labelStats[i,"Minimum"] = stat1.GetMin()[0]
self.labelStats[i,"Maximum"] = stat1.GetMax()[0]
self.labelStats[i,"Mean"] = stat1.GetMean()[0]
self.labelStats[i,"Standard deviation"] = stat1.GetStandardDeviation()[0]
# this.InvokeEvent(vtkLabelStatisticsLogic::LabelStatsInnerLoop, (void*)"1")
if label != None:
dimensions = [0.0, 0.0, 0.0]
labelmapNode.GetImageData().GetDimensions(dimensions)
totalVolume = reduce(lambda x, y: x*y, dimensions) * cubicMMPerVoxel
volumeOfLabel = 1
totalVolumeWithoutLabel = 1
if label in self.labelStats["Labels"]:
volumeOfLabel = self.labelStats[label, "Volume (cubic millimeter)"]
totalVolumeWithoutLabel = totalVolume - volumeOfLabel
for i in self.labelStats["Labels"]:
self.labelStats[i, "Percentage of total volume"] = self.labelStats[i,"Volume (cubic millimeter)"] / totalVolume
self.labelStats[i, "Percentage of label volume"] = self.labelStats[i,"Volume (cubic millimeter)"] / volumeOfLabel
self.labelStats[i, "Percentage of total volume except label volume"] = self.labelStats[i,"Volume (cubic millimeter)"] / totalVolumeWithoutLabel
# this.InvokeEvent(vtkLabelStatisticsLogic::EndLabelStats, (void*)"end label stats")
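# Editorial sketch (not part of the original module): after the loop above,
# self.labelStats is a flat dict keyed by label value and column name, e.g.
#   self.labelStats["Labels"]                        -> [0, 1, 2, ...]
#   self.labelStats[1, "Count"]                      -> number of voxels with label 1
#   self.labelStats[1, "Volume (cubic millimeter)"]  -> Count * cubicMMPerVoxel
# plus per-label "Minimum", "Maximum", "Mean" and "Standard deviation" entries
# when a grayscale volume was given, and the "Percentage ..." entries when a
# reference label was passed in.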
def createStatsChart(self, labelNode, valueToPlot, ignoreZero=False):
"""Make a MRML chart of the current stats
"""
layoutNodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLLayoutNode')
layoutNodes.SetReferenceCount(layoutNodes.GetReferenceCount()-1)
layoutNodes.InitTraversal()
layoutNode = layoutNodes.GetNextItemAsObject()
layoutNode.SetViewArrangement(slicer.vtkMRMLLayoutNode.SlicerLayoutConventionalQuantitativeView)
chartViewNodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLChartViewNode')
chartViewNodes.SetReferenceCount(chartViewNodes.GetReferenceCount()-1)
chartViewNodes.InitTraversal()
chartViewNode = chartViewNodes.GetNextItemAsObject()
arrayNode = slicer.mrmlScene.AddNode(slicer.vtkMRMLDoubleArrayNode())
array = arrayNode.GetArray()
samples = len(self.labelStats["Labels"])
tuples = samples
if ignoreZero and 0 in self.labelStats["Labels"]:
tuples -= 1
array.SetNumberOfTuples(tuples)
tuple = 0
for i in xrange(samples):
index = self.labelStats["Labels"][i]
if not (ignoreZero and index == 0):
array.SetComponent(tuple, 0, index)
array.SetComponent(tuple, 1, self.labelStats[index,valueToPlot])
array.SetComponent(tuple, 2, 0)
tuple += 1
chartNode = slicer.mrmlScene.AddNode(slicer.vtkMRMLChartNode())
chartNode.AddArray(valueToPlot, arrayNode.GetID())
chartViewNode.SetChartNodeID(chartNode.GetID())
chartNode.SetProperty('default', 'title', 'Label Statistics')
chartNode.SetProperty('default', 'xAxisLabel', 'Label')
chartNode.SetProperty('default', 'yAxisLabel', valueToPlot)
chartNode.SetProperty('default', 'type', 'Bar')
chartNode.SetProperty('default', 'xAxisType', 'categorical')
chartNode.SetProperty('default', 'showLegend', 'off')
# series level properties
if labelNode.GetDisplayNode() != None and labelNode.GetDisplayNode().GetColorNode() != None:
chartNode.SetProperty(valueToPlot, 'lookupTable', labelNode.GetDisplayNode().GetColorNodeID());
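# Illustrative call (editorial; assumes `logic` is an instance of this class and
# that the label statistics above have already been computed):
#
#   logic.createStatsChart(labelmapNode, "Volume (cubic millimeter)", ignoreZero=True)
#
# which switches to the quantitative layout and draws one bar per label, using
# the labelmap's color node as the lookup table when one is available.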
def statsAsCSV(self, grayscaleStats, label):
"""
Return the statistics as comma-separated values, with header keys in quotes.
"""
csv = ""
header = ""
statisticsType = self.statistics.keys()
for key in statisticsType:
if self.isKeyValid(key, grayscaleStats, self.label):
header += "\"%s\"" % key + ","
header = header[:-1] + "\n"
csv = header
for i in self.labelStats["Labels"]:
line = ""
for key in statisticsType:
if self.isKeyValid(key, grayscaleStats, self.label):
line += str(self.labelStats[i,key]) + ","
line = line[:-1] + "\n"
csv += line
return csv
def saveStats(self,fileName, grayscaleStats, label):
fp = open(fileName, "w")
try:
fp.write(self.statsAsCSV(grayscaleStats, label))
finally:
fp.close()
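# Illustrative usage (editorial; assumes `logic` is an instance of this class,
# that labelStats has been filled in above, and that grayscaleStats/label carry
# the same flags used when the statistics were computed):
#
#   logic.saveStats("/tmp/label_stats.csv", grayscaleStats, label)
#
# which writes one quoted-header CSV row per label via statsAsCSV().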
class Slicelet(object):
"""A slicer slicelet is a module widget that comes up in stand alone mode
implemented as a python class.
This class provides common wrapper functionality used by all slicer modlets.
"""
# TODO: put this in a SliceletLib
# TODO: parse command line args
def __init__(self, widgetClass=None):
self.parent = qt.QFrame()
self.parent.setLayout( qt.QVBoxLayout() )
# TODO: should have way to pop up python interactor
self.buttons = qt.QFrame()
self.buttons.setLayout( qt.QHBoxLayout() )
self.parent.layout().addWidget(self.buttons)
self.addDataButton =
"""The WaveBlocks Project
IOM plugin providing functions for handling
homogeneous Hagedorn wavepacket data.
@author: <NAME>
@copyright: Copyright (C) 2010, 2011, 2012, 2013, 2016 <NAME>
@license: Modified BSD License
"""
import numpy as np
def add_inhomogwavepacket(self, parameters, timeslots=None, blockid=0, key=("q", "p", "Q", "P", "S")):
r"""Add storage for the inhomogeneous wavepackets.
:param parameters: An :py:class:`ParameterProvider` instance with at
least the keys ``dimension`` and ``ncomponents``.
:param timeslots: The number of time slots we need. Can be set to ``None``
to get automatically growing datasets.
:param key: Specify which parameters to save. All are independent.
:type key: Tuple of valid identifier strings that are ``q``, ``p``, ``Q``, ``P``, ``S`` and ``adQ``.
Default is ``("q", "p", "Q", "P", "S")``.
"""
N = parameters["ncomponents"]
D = parameters["dimension"]
if timeslots is None:
T = 0
Ts = None
else:
T = timeslots
Ts = timeslots
# The overall group containing all wavepacket data
grp_wp = self._srf[self._prefixb + str(blockid)].require_group("wavepacket_inhomog")
# The group for storing the basis shapes
grp_wp.create_group("basisshapes")
# The group for storing the parameter set Pi
grp_pi = grp_wp.create_group("Pi")
grp_pi.attrs["number_parameters"] = len(key)
# The group for storing the coefficients
grp_ci = grp_wp.create_group("coefficients")
# Create the dataset with appropriate parameters
grp_wp.create_dataset("timegrid", (T,), dtype=np.integer, chunks=True, maxshape=(None,), fillvalue=-1)
grp_wp.create_dataset("basis_shape_hash", (T, N), dtype=np.integer, chunks=True, maxshape=(None, N))
grp_wp.create_dataset("basis_size", (T, N), dtype=np.integer, chunks=True, maxshape=(None, N))
# Parameters
for i in range(N):
if "q" in key and "q" not in grp_pi.keys():
grp_pi.create_dataset("q_" + str(i), (T, D, 1), dtype=np.complexfloating, chunks=True, maxshape=(Ts, D, 1))
if "p" in key and "p" not in grp_pi.keys():
grp_pi.create_dataset("p_" + str(i), (T, D, 1), dtype=np.complexfloating, chunks=True, maxshape=(Ts, D, 1))
if "Q" in key and "Q" not in grp_pi.keys():
grp_pi.create_dataset("Q_" + str(i), (T, D, D), dtype=np.complexfloating, chunks=True, maxshape=(Ts, D, D))
if "P" in key and "P" not in grp_pi.keys():
grp_pi.create_dataset("P_" + str(i), (T, D, D), dtype=np.complexfloating, chunks=True, maxshape=(Ts, D, D))
if "S" in key and "S" not in grp_pi.keys():
grp_pi.create_dataset("S_" + str(i), (T, 1, 1), dtype=np.complexfloating, chunks=True, maxshape=(Ts, 1, 1))
if "adQ" in key and "adQ" not in grp_pi.keys():
grp_pi.create_dataset("adQ_" + str(i), (T, 1, 1), dtype=np.complexfloating, chunks=True, maxshape=(Ts, 1, 1))
# Coefficients
for i in range(N):
grp_ci.create_dataset("c_" + str(i), (T, 1), dtype=np.complexfloating, chunks=(1, 8), maxshape=(Ts, None))
# Attach pointer to data groups instead of to the timegrid
grp_pi.attrs["pointer"] = 0
grp_ci.attrs["pointer"] = 0
def delete_inhomogwavepacket(self, blockid=0):
r"""Remove the stored wavepackets.
"""
try:
del self._srf[self._prefixb + str(blockid) + "/wavepacket_inhomog"]
except KeyError:
pass
def has_inhomogwavepacket(self, blockid=0):
r"""Ask if the specified data block has the desired data tensor.
"""
return "wavepacket_inhomog" in self._srf[self._prefixb + str(blockid)].keys()
def save_inhomogwavepacket_description(self, descr, blockid=0):
pathd = "/" + self._prefixb + str(blockid) + "/wavepacket_inhomog"
# Save the description
for key, value in descr.items():
self._srf[pathd].attrs[key] = self._save_attr_value(value)
def save_inhomogwavepacket_parameters(self, parameters, timestep=None, blockid=0, key=("q", "p", "Q", "P", "S")):
r"""Save the parameter set :math:`\Pi` of the Hagedorn wavepacket :math:`\Psi` to a file.
:param parameters: The parameter set of the Hagedorn wavepacket.
:type parameters: A ``list`` containing the five ``ndarrays`` like :math:`(q,p,Q,P,S)`
:param key: Specify which parameters to save. All are independent.
:type key: Tuple of valid identifier strings that are ``q``, ``p``, ``Q``, ``P``, ``S`` and ``adQ``.
Default is ``("q", "p", "Q", "P", "S")``.
"""
pathtg = "/" + self._prefixb + str(blockid) + "/wavepacket_inhomog/timegrid"
pathd = "/" + self._prefixb + str(blockid) + "/wavepacket_inhomog/Pi/"
timeslot = self._srf[pathd].attrs["pointer"]
# Write the data
for i, piset in enumerate(parameters):
for k, item in zip(key, piset):
self.must_resize(pathd + k + "_" + str(i), timeslot)
self._srf[pathd + k + "_" + str(i)][timeslot, :, :] = item
# Write the timestep to which the stored values belong into the timegrid
self.must_resize(pathtg, timeslot)
self._srf[pathtg][timeslot] = timestep
# Update the pointer
self._srf[pathd].attrs["pointer"] += 1
def save_inhomogwavepacket_coefficients(self, coefficients, basisshapes, timestep=None, blockid=0):
r"""Save the coefficients of the Hagedorn wavepacket to a file.
Warning: we only save the hash of the basis shapes here!
You have to save the basis shape with the corresponding function too.
:param coefficients: The coefficients of the Hagedorn wavepacket.
:type coefficients: A ``list`` with :math:`N` suitable ``ndarrays``.
:param basisshapes: The corresponding basis shapes of the Hagedorn wavepacket.
:type basisshapes: A ``list`` with :math:`N` :py:class:`BasisShape` subclass instances.
"""
pathtg = "/" + self._prefixb + str(blockid) + "/wavepacket_inhomog/timegrid"
pathbs = "/" + self._prefixb + str(blockid) + "/wavepacket_inhomog/basis_shape_hash"
pathbsi = "/" + self._prefixb + str(blockid) + "/wavepacket_inhomog/basis_size"
pathd = "/" + self._prefixb + str(blockid) + "/wavepacket_inhomog/coefficients/"
timeslot = self._srf[pathd].attrs["pointer"]
# Write the data
self.must_resize(pathbs, timeslot)
self.must_resize(pathbsi, timeslot)
for index, (bs, ci) in enumerate(zip(basisshapes, coefficients)):
self.must_resize(pathd + "c_" + str(index), timeslot)
size = bs.get_basis_size()
# Do we have to resize due to changed number of coefficients
if self._srf[pathd + "c_" + str(index)].shape[1] < size:
self._srf[pathd + "c_" + str(index)].resize(size, axis=1)
self._srf[pathbsi][timeslot, index] = size
self._srf[pathbs][timeslot, index] = hash(bs)
self._srf[pathd + "c_" + str(index)][timeslot, :size] = np.squeeze(ci)
# Write the timestep to which the stored values belong into the timegrid
self.must_resize(pathtg, timeslot)
self._srf[pathtg][timeslot] = timestep
# Update the pointer
self._srf[pathd].attrs["pointer"] += 1
def save_inhomogwavepacket_basisshapes(self, basisshape, blockid=0):
r"""Save the basis shapes of the Hagedorn wavepacket to a file.
:param basisshape: The basis shape of the Hagedorn wavepacket.
"""
pathd = "/" + self._prefixb + str(blockid) + "/wavepacket_inhomog/basisshapes/"
ha = hash(basisshape)
name = "basis_shape_" + str(ha)
# Check if we already stored this basis shape
if name not in self._srf[pathd].keys():
# Create new data set
daset = self._srf[pathd].create_dataset("basis_shape_" + str(ha), (1,), dtype=np.integer)
daset[0] = ha
# Save the description
descr = basisshape.get_description()
for key, value in descr.items():
daset.attrs[key] = self._save_attr_value(value)
# TODO: Consider to save the mapping. Do we want or need this?
def load_inhomogwavepacket_description(self, blockid=0):
pathd = "/" + self._prefixb + str(blockid) + "/wavepacket_inhomog"
# Load and return all descriptions available
descr = {}
for key, value in self._srf[pathd].attrs.items():
descr[key] = self._load_attr_value(value)
return descr
def load_inhomogwavepacket_timegrid(self, blockid=0):
pathtg = "/" + self._prefixb + str(blockid) + "/wavepacket_inhomog/timegrid"
return self._srf[pathtg][:]
def load_inhomogwavepacket_parameters(self, timestep=None, component=None, blockid=0, key=("q", "p", "Q", "P", "S")):
r"""Load the wavepacket parameters.
:param timestep: Load only the data of this timestep.
:param blockid: The ID of the data block to operate on.
:param key: Specify which parameters to load. All are independent.
:type key: Tuple of valid identifier strings that are ``q``, ``p``, ``Q``, ``P``, ``S`` and ``adQ``.
Default is ``("q", "p", "Q", "P", "S")``.
"""
pathtg = "/" + self._prefixb + str(blockid) + "/wavepacket_inhomog/timegrid"
pathd = "/" + self._prefixb + str(blockid) + "/wavepacket_inhomog/Pi/"
if timestep is not None:
index = self.find_timestep_index(pathtg, timestep)
data = []
for i in range(len(self._srf[pathd].keys()) // int(self._srf[pathd].attrs["number_parameters"])):
if timestep is not None:
data.append(tuple([self._srf[pathd + k + "_" + str(i)][index, :, :] for k in key]))
else:
data.append(tuple([self._srf[pathd + k + "_" + str(i)][..., :, :] for k in key]))
return tuple(data)
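# Illustrative read-back sketch (editorial; assumes the data were written with
# the save_* methods above):
#
#   tg = iom.load_inhomogwavepacket_timegrid(blockid=0)
#   Pi = iom.load_inhomogwavepacket_parameters(timestep=int(tg[0]), blockid=0)
#   # Pi is a tuple with one (q, p, Q, P, S) tuple per wavepacket component.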
def load_inhomogwavepacket_coefficients(self, timestep=None, get_hashes=False, component=None, blockid=0):
pathtg = "/" + self._prefixb + str(blockid) + "/wavepacket_inhomog/timegrid"
pathbs = "/" + self._prefixb + str(blockid) + "/wavepacket_inhomog/basis_shape_hash"
pathbsi = "/" + self._prefixb + str(blockid) + "/wavepacket_inhomog/basis_size"
pathd = "/" + self._prefixb + str(blockid) + "/wavepacket_inhomog/coefficients/"
if timestep is not None:
index = self.find_timestep_index(pathtg, timestep)
else:
index = slice(None)
if get_hashes is True:
hashes = self._srf[pathbs][index, ...]
# Number of components
N = self._srf[pathbs].shape[1]
hashes = np.hsplit(hashes, N)
data = []
for i in range(len(list(self._srf[pathd].keys()))):
if timestep is not None:
size = self._srf[pathbsi][index, i]
data.append(self._srf[pathd + "c_" + str(i)][index, :size])
else:
data.append(self._srf[pathd + "c_" + str(i)][index, ...])
if get_hashes is True:
return (hashes, data)
else:
return data
def load_inhomogwavepacket_basisshapes(self, the_hash=None, blockid=0):
r"""Load the basis shapes by hash.
"""
pathd = "/" + self._prefixb + str(blockid) + "/wavepacket_inhomog/basisshapes/"
if the_hash is None:
# Load and return all descriptions available
descrs = {}
for ahash in self._srf[pathd].keys():
# TODO: What data exactly do we want to return?
descr = {}
for key, value in self._srf[pathd + ahash].attrs.items():
descr[key] = self._load_attr_value(value)
# 'ahash' is "basis_shape_..." and we want only the "..." part
descrs[int(ahash[12:])] = descr
return descrs
else:
# Be sure the hash is a plain number and not something
# else like a numpy array with one element.
the_hash = int(the_hash)
name = "basis_shape_" + str(the_hash)
# Check if
# Copyright 2002-2011 <NAME>. See LICENSE for licensing information.
"""mixminion.server.PacketHandler: Code to process mixminion packets"""
import binascii
import threading
import types
from mixminion.Common import encodeBase64, formatBase64, LOG
import mixminion.Crypto as Crypto
import mixminion.Packet as Packet
import mixminion.BuildMessage
from mixminion.ServerInfo import PACKET_KEY_BYTES
from mixminion.Common import MixError, MixFatalError, isPrintingAscii
__all__ = [ 'PacketHandler', 'ContentError', 'DeliveryPacket', 'RelayedPacket']
class ContentError(MixError):
"""Exception raised when a packed is malformatted or unacceptable."""
pass
class PacketHandler:
"""Class to handle processing packets. Given an incoming packet,
it removes one layer of encryption, does all necessary integrity
checks, swaps headers if necessary, re-pads, and decides whether
to drop the packet, relay the packet, or send the packet to
an exit handler."""
## Fields:
# privatekeys: a list of 2-tuples of
# (1) an RSA private key that we accept
# (2) a HashLog object corresponding to the given key
def __init__(self, privatekeys=(), hashlogs=()):
"""Constructs a new packet handler, given a sequence of
private key objects for header encryption, and a sequence of
corresponding hashlog objects to prevent replays.
The lists must be equally long. When a new packet is
processed, we try each of the private keys in sequence. If
the packet is decodeable with one of the keys, we log it in
the corresponding entry of the hashlog list.
"""
self.privatekeys = []
self.lock = threading.Lock()
assert type(privatekeys) in (types.ListType, types.TupleType)
assert type(hashlogs) in (types.ListType, types.TupleType)
self.setKeys(privatekeys, hashlogs)
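# Illustrative construction sketch (editorial); the two sequences are parallel
# and must be equally long:
#
#   ph = PacketHandler(privatekeys=[key0, key1], hashlogs=[hlog0, hlog1])
#
# where each key is an RSA private key with a PACKET_KEY_BYTES-byte modulus and
# each hashlog object provides the seenHash()/logHash() replay-prevention API.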
def setKeys(self, keys, hashlogs):
"""Change the keys and hashlogs used by this PacketHandler.
Arguments are as to PacketHandler.__init__
"""
self.lock.acquire()
newKeys = {}
try:
# Build a set of asn.1-encoded public keys in *new* set.
for k in keys:
newKeys[k.encode_key(1)] = 1
if k.get_modulus_bytes() != PACKET_KEY_BYTES:
raise MixFatalError("Incorrect packet key length")
# For all old public keys, if they aren't in the new set, close
# their hashlogs.
for k, h in self.privatekeys:
if not newKeys.get(k.encode_key(1)):
h.close()
# Now, set the keys.
self.privatekeys = zip(keys, hashlogs)
finally:
self.lock.release()
def syncLogs(self):
"""Sync all this PacketHandler's hashlogs."""
try:
self.lock.acquire()
for _, h in self.privatekeys:
h.sync()
finally:
self.lock.release()
def close(self):
"""Close all this PacketHandler's hashlogs."""
try:
self.lock.acquire()
for _, h in self.privatekeys:
h.close()
finally:
self.lock.release()
def processPacket(self, msg):
"""Given a 32K mixminion packet, processes it completely.
Return one of:
None [if the packet should be dropped.]
a DeliveryPacket object
a RelayedPacket object
May raise CryptoError, ParseError, or ContentError if the packet
is malformatted, misencrypted, unparseable, repeated, or otherwise
unhandleable.
WARNING: This implementation does nothing to prevent timing
attacks: dropped packets, packets with bad digests, replayed
packets, and exit packets are all processed faster than
forwarded packets. You must prevent timing attacks elsewhere."""
# Break into headers and payload
pkt = Packet.parsePacket(msg)
header1 = Packet.parseHeader(pkt.header1)
encSubh = header1[:Packet.ENC_SUBHEADER_LEN]
header1 = header1[Packet.ENC_SUBHEADER_LEN:]
assert len(header1) == Packet.HEADER_LEN - Packet.ENC_SUBHEADER_LEN
assert len(header1) == (128*16) - 256 == 1792
# Try to decrypt the first subheader. Try each private key in
# order. Only fail if all private keys fail.
subh = None
e = None
self.lock.acquire()
try:
for pk, hashlog in self.privatekeys:
try:
subh = Crypto.pk_decrypt(encSubh, pk)
break
except Crypto.CryptoError, err:
e = err
finally:
self.lock.release()
if not subh:
# Nobody managed to get us the first subheader. Raise the
# most-recently-received error.
raise e
if len(subh) != Packet.MAX_SUBHEADER_LEN:
raise ContentError("Bad length in RSA-encrypted part of subheader")
subh = Packet.parseSubheader(subh) #may raise ParseError
# Check the version: can we read it?
if subh.major != Packet.MAJOR_NO or subh.minor != Packet.MINOR_NO:
raise ContentError("Invalid protocol version")
# Check the digest of all of header1 but the first subheader.
if subh.digest != Crypto.sha1(header1):
raise ContentError("Invalid digest")
# Get ready to generate packet keys.
keys = Crypto.Keyset(subh.secret)
# Replay prevention
replayhash = keys.get(Crypto.REPLAY_PREVENTION_MODE, Crypto.DIGEST_LEN)
if hashlog.seenHash(replayhash):
raise ContentError("Duplicate packet detected.")
else:
hashlog.logHash(replayhash)
# If we're meant to drop, drop now.
rt = subh.routingtype
if rt == Packet.DROP_TYPE:
return None
# Prepare the key to decrypt the header in counter mode. We'll be
# using this more than once.
header_sec_key = Crypto.aes_key(keys.get(Crypto.HEADER_SECRET_MODE))
# Prepare key to generate padding
junk_key = Crypto.aes_key(keys.get(Crypto.RANDOM_JUNK_MODE))
# Pad the rest of header 1
header1 += Crypto.prng(junk_key,
Packet.OAEP_OVERHEAD + Packet.MIN_SUBHEADER_LEN
+ subh.routinglen)
assert len(header1) == (Packet.HEADER_LEN - Packet.ENC_SUBHEADER_LEN
+ Packet.OAEP_OVERHEAD+Packet.MIN_SUBHEADER_LEN
+ subh.routinglen)
assert len(header1) == 1792 + 42 + 42 + subh.routinglen == \
1876 + subh.routinglen
# Decrypt the rest of header 1, encrypting the padding.
header1 = Crypto.ctr_crypt(header1, header_sec_key)
# If the subheader says that we have extra routing info that didn't
# fit in the RSA-encrypted part, get it now.
overflowLength = subh.getOverflowLength()
if overflowLength:
subh.appendOverflow(header1[:overflowLength])
header1 = header1[overflowLength:]
assert len(header1) == (
1876 + subh.routinglen
- max(0,subh.routinglen-Packet.MAX_ROUTING_INFO_LEN))
header1 = subh.underflow + header1
assert len(header1) == Packet.HEADER_LEN
# Decrypt the payload.
payload = Crypto.lioness_decrypt(pkt.payload,
keys.getLionessKeys(Crypto.PAYLOAD_ENCRYPT_MODE))
# If we're an exit node, there's no need to process the headers
# further.
if rt >= Packet.MIN_EXIT_TYPE:
return DeliveryPacket(rt, subh.getExitAddress(0),
keys.get(Crypto.APPLICATION_KEY_MODE),
payload)
# If we're not an exit node, make sure that we recognize the
# routing type.
if rt not in (Packet.SWAP_FWD_IPV4_TYPE, Packet.FWD_IPV4_TYPE,
Packet.SWAP_FWD_HOST_TYPE, Packet.FWD_HOST_TYPE):
raise ContentError("Unrecognized Mixminion routing type")
# Decrypt header 2.
header2 = Crypto.lioness_decrypt(pkt.header2,
keys.getLionessKeys(Crypto.HEADER_ENCRYPT_MODE))
# If we're the swap node, (1) decrypt the payload with a hash of
# header2... (2) decrypt header2 with a hash of the payload...
# (3) and swap the headers.
if Packet.typeIsSwap(rt):
hkey = Crypto.lioness_keys_from_header(header2)
payload = Crypto.lioness_decrypt(payload, hkey)
hkey = Crypto.lioness_keys_from_payload(payload)
header2 = Crypto.lioness_decrypt(header2, hkey)
header1, header2 = header2, header1
# Build the address object for the next hop
address = Packet.parseRelayInfoByType(rt, subh.routinginfo)
# Construct the packet for the next hop.
pkt = Packet.Packet(header1, header2, payload).pack()
return RelayedPacket(address, pkt)
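# Illustrative dispatch on the result of processPacket (editorial sketch;
# handle_exit() and send() are placeholders, not part of this module):
#
#   result = ph.processPacket(msg)
#   if result is None:
#       pass                                           # DROP packet
#   elif result.isDelivery():
#       handle_exit(result)                            # DeliveryPacket
#   else:
#       send(result.getAddress(), result.getPacket())  # RelayedPacket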
class RelayedPacket:
"""A packet that is to be relayed to another server; returned by
PacketHandler.processPacket."""
## Fields:
# address -- an instance of IPV4Info DOCDOC
# msg -- a 32K packet.
def __init__(self, address, msg):
"""Create a new packet, given an instance of IPV4Info or
MMTPHostInfo and a 32K packet."""
assert isinstance(address, Packet.IPV4Info) or isinstance(address, Packet.MMTPHostInfo)
assert len(msg) == 1<<15
self.address = address
self.msg = msg
def isDelivery(self):
"""Return true iff this packet is a delivery (non-relay) packet."""
return 0
def getAddress(self):
"""Return an instance of IPV4Info or MMTPHostInfo indicating
the address where this packet is to be delivered."""
return self.address
def getPacket(self):
"""Returns the 32K contents of this packet."""
return self.msg
class DeliveryPacket:
"""A packet that is to be delivered via some exit module; returned by
PacketHandler.processPacket"""
##Fields:
# exitType -- a 2-byte integer indicating which exit module to use.
# address -- a string encoding the address to deliver to.
# key -- the 16-byte application key
# tag -- the 20-byte delivery handle
# payload -- the unencoded 28K payload
# contents -- until decode is called, None. After decode is called,
# the actual contents of this message as delivered.
# type -- until decode is called, None. After decode is called,
# one of 'plain' (plaintext message), 'long' (overcompressed message),
# 'enc' (encrypted message), or 'err' (malformed message).
# headers -- a map from key to value for the delivery headers in
# this message's payload. In the case of a fragment, or a
# non-plaintext message, the map is empty.
# isfrag -- Is this packet a fragment of a complete message? If so, the
# type must be 'plain'.
# dPayload -- An instance of mixminion.Packet.Payload for this object.
# error -- None, or a string containing an error encountered while trying
# to decode the payload.
def __init__(self, routingType, routingInfo, applicationKey, payload):
"""Construct a new DeliveryPacket."""
assert 0 <= routingType <= 0xFFFF
assert len(applicationKey) == 16
assert len(payload) == 28*1024
self.exitType = routingType
self.address = routingInfo
self.key = applicationKey
self.tag = ""
self.payload = payload
self.contents = None
self.type = None
self.headers = None
self.isfrag = 0
self.dPayload = None
self.error = None
def setTagged(self,tagged=1):
"""Re-frame the routingInfo in this packet. If 'tagged' is true,
then the routingInfo starts with TAG_LEN bytes of decoding
"""
Test JADN Schema transformations
Transformation -> Reduce Complexity
"""
from unittest import main, TestCase
import jadn
from jadn.definitions import EXTENSIONS
class Resolve(TestCase):
schema = {} # TODO: test Merge imported definitions
# def test_resolve(self):
class StripComments(TestCase):
schema = {
'types': [
['Person', 'Record', [], 'JADN equivalent of structure from https://developers.google.com/protocol-buffers', [
[1, 'name', 'String', [], 'The person\'s name.'],
[2, 'id', 'Integer', [], 'A person\'s unique id'],
[3, 'email', 'String', ['[0', '/email'], 'An email address for the person.']
]]
]
}
stripped_schema = {
'types': [
['Person', 'Record', [], '', [
[1, 'name', 'String', [], ''],
[2, 'id', 'Integer', [], ''],
[3, 'email', 'String', ['[0', '/email'], '']
]
]]
}
c20_schema = {
'types': [
['Person', 'Record', [], 'JADN equivalent of..', [
[1, 'name', 'String', [], 'The person\'s name.'],
[2, 'id', 'Integer', [], 'A person\'s unique id'],
[3, 'email', 'String', ['[0', '/email'], 'An email address f..']
]]
]
}
def test_strip_comments(self):
jadn.check(self.schema)
jadn.check(self.stripped_schema)
ss = jadn.transform.strip_comments(self.schema)
self.assertEqual(ss['types'], self.stripped_schema['types'])
def test_truncate_comments(self):
jadn.check(self.schema)
jadn.check(self.c20_schema)
ss = jadn.transform.strip_comments(self.schema, width=20)
self.assertEqual(ss['types'], self.c20_schema['types'])
class SimplifyExtensions(TestCase):
def do_simplify_test(self, extension_schema, simplified_schema, extensions=EXTENSIONS):
jadn.check(extension_schema)
jadn.check(simplified_schema)
ss = jadn.transform.simplify(extension_schema, extensions)
self.assertEqual(ss['types'], simplified_schema['types'])
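# Editorial note: these TestCase classes are collected by the standard unittest
# runner (the `main` imported above), e.g. run as
#   python -m unittest <this_module> -v
# where <this_module> is this file's module name (not shown in this excerpt).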
"""
Type Definition in Fields Extension
"""
schema_anon_extension = { # id, vtype, ktype, enum, pointer, format, pattern, minv, maxv, unique
'types': [
['Color', 'Map', [], '', [
[1, 'red', 'Integer', [], ''],
[2, 'green', 'Integer', [], ''],
[3, 'blue', 'Integer', [], '']
]],
['Dir', 'Record', [], '', [
[1, 'a', 'String', [], ''],
[2, 'b', 'Subdir', ['<'], '']
]],
['Subdir', 'Map', [], '', [
[1, 'foo', 'Number', [], ''],
[2, 'bar', 'String', [], '']
]],
['T-anon', 'Record', [], '', [
[1, 'id', 'Enumerated', ['#Color', '='], ''],
[2, 'enum', 'Enumerated', ['#Color', '[0'], ''],
[3, 'vtype', 'ArrayOf', ['*#Color'], ''],
[4, 'kvtype', 'MapOf', ['+#Color', '*String'], ''],
[5, 'pointer', 'Enumerated', ['>Dir'], ''],
[6, 'format', 'String', ['/idn-email', '[0'], ''],
[7, 'pattern', 'String', ['%\\d+'], ''],
[8, 'mult', 'ArrayOf', ['*Color', '{2', '}5'], ''],
[9, 'unique', 'ArrayOf', ['*String', 'q'], '']
]]
]
}
schema_anon_simplified = {
'types': [
['Color', 'Map', [], '', [
[1, 'red', 'Integer', [], ''],
[2, 'green', 'Integer', [], ''],
[3, 'blue', 'Integer', [], '']
]],
['Dir', 'Record', [], '', [
[1, 'a', 'String', [], ''],
[2, 'b', 'Subdir', ['<'], '']
]],
['Subdir', 'Map', [], '', [
[1, 'foo', 'Number', [], ''],
[2, 'bar', 'String', [], '']
]],
['T-anon', 'Record', [], '', [
[1, 'id', 'Color$Enum-Id', [], ''],
[2, 'enum', 'Color$Enum', ['[0'], ''],
[3, 'vtype', 'T-anon$vtype', [], ''],
[4, 'kvtype', 'T-anon$kvtype', [], ''],
[5, 'pointer', 'Dir$Pointer', [], ''],
[6, 'format', 'T-anon$format', ['[0'], ''],
[7, 'pattern', 'T-anon$pattern', [], ''],
[8, 'mult', 'T-anon$mult', [], ''],
[9, 'unique', 'T-anon$unique', [], '']
]],
['Color$Enum-Id', 'Enumerated', ['#Color', '='], ''],
['Color$Enum', 'Enumerated', ['#Color'], ''],
['T-anon$vtype', 'ArrayOf', ['*#Color'], ''],
['T-anon$kvtype', 'MapOf', ['+#Color', '*String'], ''],
['Dir$Pointer', 'Enumerated', ['>Dir'], ''],
['T-anon$format', 'String', ['/idn-email'], ''],
['T-anon$pattern', 'String', ['%\\d+'], ''],
['T-anon$mult', 'ArrayOf', ['*Color', '{2', '}5'], ''],
['T-anon$unique', 'ArrayOf', ['*String', 'q'], ''],
]
}
schema_all_simplified = {
'types': [
['Color', 'Map', [], '', [
[1, 'red', 'Integer', [], ''],
[2, 'green', 'Integer', [], ''],
[3, 'blue', 'Integer', [], '']
]],
['Dir', 'Record', [], '', [
[1, 'a', 'String', [], ''],
[2, 'b', 'Subdir', ['<'], '']
]],
['Subdir', 'Map', [], '', [
[1, 'foo', 'Number', [], ''],
[2, 'bar', 'String', [], '']
]],
['T-anon', 'Record', [], '', [
[1, 'id', 'Color$Enum-Id', [], ''],
[2, 'enum', 'Color$Enum', ['[0'], ''],
[3, 'vtype', 'T-anon$vtype', [], ''],
[4, 'kvtype', 'T-anon$kvtype', [], ''],
[5, 'pointer', 'Dir$Pointer', [], ''],
[6, 'format', 'T-anon$format', ['[0'], ''],
[7, 'pattern', 'T-anon$pattern', [], ''],
[8, 'mult', 'T-anon$mult', [], ''],
[9, 'unique', 'T-anon$unique', [], '']
]],
['Color$Enum-Id', 'Enumerated', ['='], '', [
[1, 'red', ''],
[2, 'green', ''],
[3, 'blue', '']
]],
['Color$Enum', 'Enumerated', [], '', [
[1, 'red', ''],
[2, 'green', ''],
[3, 'blue', '']
]],
['T-anon$vtype', 'ArrayOf', ['*Color$Enum'], ''],
['T-anon$kvtype', 'Map', [], '', [
[1, 'red', 'String', [], ''],
[2, 'green', 'String', [], ''],
[3, 'blue', 'String', [], '']
]],
['Dir$Pointer', 'Enumerated', [], '', [
[1, 'a', ''],
[2, 'b/foo', ''],
[3, 'b/bar', '']
]],
['T-anon$format', 'String', ['/idn-email'], ''],
['T-anon$pattern', 'String', ['%\\d+'], ''],
['T-anon$mult', 'ArrayOf', ['*Color', '{2', '}5'], ''],
['T-anon$unique', 'ArrayOf', ['*String', 'q'], ''],
]
}
def test_anon_type_definitions(self):
self.do_simplify_test(self.schema_anon_extension, self.schema_anon_simplified, {'AnonymousType'})
def test_all_extensions(self):
self.do_simplify_test(self.schema_anon_extension, self.schema_all_simplified)
"""
Field Multiplicity Extension
"""
schema_mult_extension = { # JADN schema for fields with cardinality > 1 (e.g., list of x)
'types': [
['T-opt-list1', 'Record', [], '', [
[1, 'string', 'String', [], ''],
[2, 'list', 'T-array1', ['[0'], ''] # Min = 0, Max default = 1 (Undefined type OK for Extension tests)
]],
['T-list-1-2', 'Record', [], '', [
[1, 'string', 'String', [], ''],
[2, 'list', 'String', [']2'], ''] # Min default = 1, Max = 2
]],
['T-list-0-2', 'Record', [], '', [
[1, 'string', 'String', [], ''],
[2, 'list', 'String', ['[0', ']2'], ''] # Min = 0, Max = 2 (Array is optional, empty is invalid)
]],
['T-list-2-3', 'Record', [], '', [
[1, 'string', 'String', [], ''],
[2, 'list', 'String', ['[2', ']3'], ''] # Min = 2, Max = 3
]],
['T-list-1-n', 'Record', [], '', [
[1, 'string', 'String', [], ''],
[2, 'list', 'String', [']0'], ''] # Min default = 1, Max = 0 -> n
]]
]}
schema_mult_simplified = { # JADN schema for fields with cardinality > 1 (e.g., list of x)
'types': [
['T-opt-list1', 'Record', [], '', [
[1, 'string', 'String', [], ''],
[2, 'list', 'T-array1', ['[0'], ''] # Min = 0, Max default = 1 (Undefined type OK for Extension tests)
]],
['T-list-1-2', 'Record', [], '', [
[1, 'string', 'String', [], ''],
[2, 'list', 'T-list-1-2$list', [], ''] # Min default = 1 required
]],
['T-list-0-2', 'Record', [], '', [
[1, 'string', 'String', [], ''],
[2, 'list', 'T-list-0-2$list', ['[0'], ''] # Min = 0 optional
]],
['T-list-2-3', 'Record', [], '', [
[1, 'string', 'String', [], ''],
[2, 'list', 'T-list-2-3$list', [], ''] # Min default = 1 required
]],
['T-list-1-n', 'Record', [], '', [
[1, 'string', 'String', [], ''],
[2, 'list', 'T-list-1-n$list', [], '']
]],
['T-list-1-2$list', 'ArrayOf', ['*String', '{1', '}2'], ''], # Min = 1, Max = 2 (options are unordered)
['T-list-0-2$list', 'ArrayOf', ['*String', '{1', '}2'], ''], # Min = 1, Max = 2
['T-list-2-3$list', 'ArrayOf', ['*String', '{2', '}3'], ''], # Min = 2, Max = 3
['T-list-1-n$list', 'ArrayOf', ['*String', '{1'], ''] # Min = 1, Max default *
]}
def test_multiplicity(self):
self.do_simplify_test(self.schema_mult_extension, self.schema_mult_simplified, {'Multiplicity'})
"""
Derived Enumeration Extension
"""
schema_enum_extension = {
'types': [
['Pixel', 'Record', [], '', [
[1, 'red', 'Integer', [], 'rojo'],
[2, 'green', 'Integer', [], 'verde'],
[3, 'blue', 'Integer', [], '']
]],
['Channel', 'Enumerated', ['#Pixel'], '', []], # Derived enumeration (explicitly named)
['ChannelId', 'Enumerated', ['#Pixel', '='], '', []], # Derived enumeration with ID option
['ChannelMask', 'ArrayOf', ['*#Pixel'], '', []], # Array of items from named derived enum
['Pixel2', 'Map', ['='], '', [
[1, 'yellow', 'Integer', [], ''],
[2, 'orange', 'Integer', [], ''],
[3, 'purple', 'Integer', [], '']
]],
['ChannelMask2', 'ArrayOf', ['*#Pixel2'], '', []], # Array of items from generated derived enum
]
}
schema_enum_simplified = {
'types': [
['Pixel', 'Record', [], '', [
[1, 'red', 'Integer', [], 'rojo'],
[2, 'green', 'Integer', [], 'verde'],
[3, 'blue', 'Integer', [], '']
]],
['Channel', 'Enumerated', [], '', [
[1, 'red', 'rojo'],
[2, 'green', 'verde'],
[3, 'blue', '']
]],
['ChannelId', 'Enumerated', ['='], '', [
[1, 'red', 'rojo'],
[2, 'green', 'verde'],
[3, 'blue', '']
]],
['ChannelMask', 'ArrayOf', ['*Channel'], '', []],
['Pixel2', 'Map', ['='], '', [
[1, 'yellow', 'Integer', [], ''],
[2, 'orange', 'Integer', [], ''],
[3, 'purple', 'Integer', [], '']
]],
['ChannelMask2', 'ArrayOf', ['*Pixel2$Enum'], '', []], # Array of items from generated derived enum
['Pixel2$Enum', 'Enumerated', [], '', [ # Generated derived enum - Id not propagated
[1, 'yellow', ''],
[2, 'orange', ''],
[3, 'purple', '']
]],
]
}
def test_derived_enum(self):
self.do_simplify_test(self.schema_enum_extension, self.schema_enum_simplified, {'DerivedEnum'})
"""
MapOf Enumerated Key Extension
"""
schema_mapof_extension = {
'types': [
['Colors-Enum', 'Enumerated', [], '', [
[1, 'red', 'rojo'],
[2, 'green', 'verde'],
[3, 'blue', '']
]],
['Colors-Map', 'MapOf', ['+Colors-Enum', '*Number'], '']
]
}
schema_mapof_simplified =
+= err
y_prob_test[idx] = pred
test_batches += 1
pred_y_test = np.argmax(y_prob_test, axis=1)
acc_test = accuracy_score(pred_y_test, y_test)
roc_auc_test = ROC_AUC(y_test, pred_y_test)
# Then we print the results for this epoch:
print(
"Epoch {} of {}\ttraining loss:\t{:.6f}\tvalidation accuracy:\t{:.2f}\tvalidation AUC: \t{:.2f}\ttest accuracy:\t{:.2f}\ttest AUC:\t{:.2f} \tnumber of ones in val:\t{:d}\tLearning rate: {:.1e} took {:.3f}s".format(
epoch + 1, num_epochs, train_err / train_batches, accuracy * 100, roc_auc_val * 100, acc_test * 100,
roc_auc_test * 100, sum(pred_y_val), learning_rate, time.time() - start_time))
# print(" \ttest accuracy:\t\t{:.2f}\ttest AUC:\t\t{:.2f}\tnumber of ones:\t\t{:d}".format(test_err / test_batches,
# acc_test * 100,
# roc_auc * 100, sum(pred_y_test)))
test_err = 0
test_batches = 0
set_all_param_values(network, parameters[-1])
for batch in iterate_minibatches(X_test, y_test, minibatch_size, shuffle=False):
inputs, targets, idx = batch
err, pred = test_fn(inputs, targets)
test_err += err
y_prob_test[idx] = pred
test_batches += 1
pred_y = np.argmax(y_prob_test, axis=1)
acc_test = accuracy_score(pred_y, y_test)
roc_auc = ROC_AUC(y_test, pred_y)
# Then we print the results for this epoch:
print(" test loss:\t\t{:.6f}\ttest accuracy:\t\t{:.2f}\ttest AUC:\t\t{:.2f}".format(test_err / test_batches,
acc_test * 100, roc_auc * 100))
test_accuracies.append(acc_test)
test_roc_aucs.append(roc_auc)
val_accuracies.append(best_acc)
val_roc_aucs.append(best_roc_auc)
def train_forward(X_train, X_train_masses, X_train_mass_masks, y_train, X_val, y_val, X_test, y_test, parameters,
num_epochs, val_accuracies, val_roc_aucs,
test_accuracies, test_roc_aucs, network, minibatch_size, train_fn, test_fn, lr, num_classes,
balance_flag,
occlusion_flag):
y_prob_val = np.zeros((X_val.shape[0], 2))
y_prob_test = np.zeros((X_test.shape[0], 2))
best_roc_auc = 0
best_acc = 0
parameters.append(get_all_param_values(network))
for epoch in range(num_epochs):
# lr.set_value(lr.get_value() * (np.float32(num_epochs - epoch) / np.float32(num_epochs)))
# In each epoch, we do a full pass over the training data:
train_err = 0
train_batches = 0
start_time = time.time()
occlusion_size = 50
for batch in iterate_minibatches(X_train, y_train, minibatch_size, shuffle=True):
inputs, targets, idx = batch
input_masses = X_train_masses[idx]
input_mass_masks = X_train_mass_masks[idx]
# Random occlusion
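# Editorial note on the augmentation below: for roughly half of the samples a
# 50x50 patch is zeroed out, and the while-loop keeps redrawing the patch
# position until more than half of the occluded pixels fall outside the mass
# mask, so the lesion itself is never mostly covered.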
if occlusion_flag:
for i in range(len(targets)):
occlusion_mask = np.zeros(inputs[i, 0].shape)
if np.random.randint(2):
redo_occ_flag = 1
while redo_occ_flag:
height = np.random.randint(inputs.shape[2] - occlusion_size)
width = np.random.randint(inputs.shape[3] - occlusion_size)
occlusion_mask[height:height + occlusion_size, width:width + occlusion_size] = 1
combination = occlusion_mask + input_mass_masks[i]
combination[np.where(combination > 1)] = 1
if (float(np.sum(combination) - np.sum(input_mass_masks[i])) /
np.sum(occlusion_mask)) > 0.5:
redo_occ_flag = 0
inputs[i, 0][height:height + occlusion_size, width:width + occlusion_size] = 0
err = train_fn(inputs, targets)
train_err += err
train_batches += 1
# And a full pass over the validation data:
val_err = 0
val_pred = 0
val_batches = 0
for batch in iterate_minibatches(X_val, y_val, minibatch_size, shuffle=False):
inputs, targets, idx = batch
err, pred = test_fn(inputs, targets)
val_err += err
y_prob_val[idx] = pred
val_batches += 1
pred_y_val = np.argmax(y_prob_val, axis=1)
# Then we print the results for this epoch:
learning_rate = float(lr.get_value())
accuracy = accuracy_score(pred_y_val, y_val)
roc_auc_val = ROC_AUC(y_val, pred_y_val)
if roc_auc_val > best_roc_auc:
print('New best here!')
best_roc_auc = roc_auc_val
best_acc = accuracy
parameters.pop()
parameters.append(get_all_param_values(network))
# print(
# "Epoch {} of {} took {:.3f}s\tLearning rate: {:.6f}\ttraining loss:\t\t{:.6f}\tvalidation loss:\t\t{:.6f}\tvalidation accuracy:\t\t{:.2f}\tvalidation AUC:\t\t{:.2f}\tnumber of ones:\t\t{:d}".format(
#
# epoch + 1, num_epochs, time.time() - start_time, learning_rate, train_err / train_batches,
# val_err / val_batches, accuracy * 100, roc_auc * 100, sum(pred_y_val)))
#
test_err = 0
test_batches = 0
for batch in iterate_minibatches(X_test, y_test, minibatch_size, shuffle=False):
inputs, targets, idx = batch
err, pred = test_fn(inputs, targets)
test_err += err
y_prob_test[idx] = pred
test_batches += 1
pred_y_test = np.argmax(y_prob_test, axis=1)
acc_test = accuracy_score(pred_y_test, y_test)
roc_auc_test = ROC_AUC(y_test, pred_y_test)
# Then we print the results for this epoch:
print(
"Epoch {} of {}\ttraining loss:\t{:.6f}\tvalidation accuracy:\t{:.2f}\tvalidation AUC: \t{:.2f}\ttest accuracy:\t{:.2f}\ttest AUC:\t{:.2f} \tnumber of ones in val:\t{:d}\tLearning rate: {:.8f} took {:.3f}s".format(
epoch + 1, num_epochs, train_err / train_batches, accuracy * 100, roc_auc_val * 100, acc_test * 100,
roc_auc_test * 100, sum(pred_y_val), learning_rate, time.time() - start_time))
# print(" \ttest accuracy:\t\t{:.2f}\ttest AUC:\t\t{:.2f}\tnumber of ones:\t\t{:d}".format(test_err / test_batches,
# acc_test * 100,
# roc_auc * 100, sum(pred_y_test)))
test_err = 0
test_batches = 0
set_all_param_values(network, parameters[-1])
for batch in iterate_minibatches(X_test, y_test, minibatch_size, shuffle=False):
inputs, targets, idx = batch
err, pred = test_fn(inputs, targets)
test_err += err
y_prob_test[idx] = pred
test_batches += 1
pred_y = np.argmax(y_prob_test, axis=1)
acc_test = accuracy_score(pred_y, y_test)
roc_auc = ROC_AUC(y_test, pred_y)
# Then we print the results for this epoch:
print(" test loss:\t\t{:.6f}\ttest accuracy:\t\t{:.2f}\ttest AUC:\t\t{:.2f}".format(test_err / test_batches,
acc_test * 100, roc_auc * 100))
test_accuracies.append(acc_test)
test_roc_aucs.append(roc_auc)
val_accuracies.append(best_acc)
val_roc_aucs.append(best_roc_auc)
def train(X_train, X_train_masses, X_train_mass_masks, y_train, X_val, y_val, X_test, y_test, parameters, num_epochs,
val_accuracies, val_roc_aucs,
test_accuracies, test_roc_aucs, network, minibatch_size, train_fn, test_fn, lr, num_classes, balance_flag,
occlusion_flag):
y_prob_val = np.zeros((X_val.shape[0], 2))
y_prob_test = np.zeros((X_test.shape[0], 2))
best_roc_auc = 0
best_acc = 0
parameters.append(get_all_param_values(network))
for epoch in range(num_epochs):
# lr.set_value(lr.get_value() * (np.float32(num_epochs - epoch) / np.float32(num_epochs)))
# In each epoch, we do a full pass over the training data:
train_err = 0
train_err_recons = 0
train_err_classification = 0
train_batches = 0
start_time = time.time()
occlusion_size = 50
for batch in iterate_minibatches(X_train, y_train, minibatch_size, shuffle=True):
inputs, targets, idx = batch
input_masses = X_train_masses[idx]
input_mass_masks = X_train_mass_masks[idx]
# Random occlusion
if occlusion_flag:
for i in range(len(targets)):
occlusion_mask = np.zeros(inputs[i, 0].shape)
if np.random.randint(2):
redo_occ_flag = 1
while redo_occ_flag:
height = np.random.randint(inputs.shape[2] - occlusion_size)
width = np.random.randint(inputs.shape[3] - occlusion_size)
occlusion_mask[height:height + occlusion_size, width:width + occlusion_size] = 1
combination = occlusion_mask + input_mass_masks[i]
combination[np.where(combination > 1)] = 1
if (float(np.sum(combination) - np.sum(input_mass_masks[i])) /
np.sum(occlusion_mask)) > 0.5:
redo_occ_flag = 0
inputs[i, 0][height:height + occlusion_size, width:width + occlusion_size] = 0
err, err_recons, err_classification = train_fn(inputs, input_masses, targets)
train_err += err
train_err_recons += err_recons
train_err_classification += err_classification
train_batches += 1
# And a full pass over the validation data:
val_err = 0
val_pred = 0
val_batches = 0
for batch in iterate_minibatches(X_val, y_val, minibatch_size, shuffle=False):
inputs, targets, idx = batch
err, pred = test_fn(inputs, targets)
val_err += err
y_prob_val[idx] = pred
val_batches += 1
pred_y_val = np.argmax(y_prob_val, axis=1)
# Then we print the results for this epoch:
learning_rate = float(lr.get_value())
accuracy = accuracy_score(pred_y_val, y_val)
roc_auc_val = ROC_AUC(y_val, pred_y_val, num_classes)
if roc_auc_val > best_roc_auc:
print('New best here!')
best_roc_auc = roc_auc_val
best_acc = accuracy
parameters.pop()
parameters.append(get_all_param_values(network))
# print(
# "Epoch {} of {} took {:.3f}s\tLearning rate: {:.6f}\ttraining loss:\t\t{:.6f}\tvalidation loss:\t\t{:.6f}\tvalidation accuracy:\t\t{:.2f}\tvalidation AUC:\t\t{:.2f}\tnumber of ones:\t\t{:d}".format(
#
# epoch + 1, num_epochs, time.time() - start_time, learning_rate, train_err / train_batches,
# val_err / val_batches, accuracy * 100, roc_auc * 100, sum(pred_y_val)))
#
test_err = 0
test_batches = 0
for batch in iterate_minibatches(X_test, y_test, minibatch_size, shuffle=False):
inputs, targets, idx = batch
err, pred = test_fn(inputs, targets)
test_err += err
y_prob_test[idx] = pred
test_batches += 1
pred_y_test = np.argmax(y_prob_test, axis=1)
acc_test = accuracy_score(pred_y_test, y_test)
roc_auc_test = ROC_AUC(y_test, pred_y_test, num_classes)
# Then we print the results for this epoch:
print(
"Epoch {} of {}\ttraining loss:\t{:.6f}\tvalidation accuracy:\t{:.2f}\tvalidation AUC: \t{:.2f}\ttest accuracy:\t{:.2f}\ttest AUC:\t{:.2f} \tnumber of ones in val:\t{:d}\tLearning rate: {:.6f} took {:.3f}s".format(
epoch + 1, num_epochs, train_err / train_batches, accuracy * 100, roc_auc_val * 100, acc_test * 100,
roc_auc_test * 100, sum(pred_y_val), learning_rate, time.time() - start_time))
# print(" \ttest accuracy:\t\t{:.2f}\ttest AUC:\t\t{:.2f}\tnumber of ones:\t\t{:d}".format(test_err / test_batches,
# acc_test * 100,
# roc_auc * 100, sum(pred_y_test)))
test_err = 0
test_batches = 0
set_all_param_values(network, parameters[-1])
for batch in iterate_minibatches(X_test, y_test, minibatch_size, shuffle=False):
inputs, targets, idx = batch
err, pred = test_fn(inputs, targets)
test_err += err
y_prob_test[idx] = pred
test_batches += 1
pred_y = np.argmax(y_prob_test, axis=1)
acc_test = accuracy_score(pred_y, y_test)
roc_auc = ROC_AUC(y_test, pred_y, num_classes)
# Then we print the results for this epoch:
print(" test loss:\t\t{:.6f}\ttest accuracy:\t\t{:.2f}\ttest AUC:\t\t{:.2f}".format(test_err / test_batches,
acc_test * 100, roc_auc * 100))
test_accuracies.append(acc_test)
test_roc_aucs.append(roc_auc)
val_accuracies.append(best_acc)
val_roc_aucs.append(best_roc_auc)
def ROC_AUC(y, pred_y, num_classes=None):
# Compute ROC curve and ROC area for each class
fpr, tpr, _ = roc_curve(y, pred_y)
roc_auc = auc(fpr, tpr)
return roc_auc
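# Worked example (editorial, assuming scikit-learn's roc_curve/auc as imported
# elsewhere in this file): with hard 0/1 predictions the curve has one interior
# point, e.g.
#   ROC_AUC(np.array([0, 0, 1, 1]), np.array([0, 1, 1, 1]))
#   # fpr = [0, 0.5, 1], tpr = [0, 1, 1]  ->  returns 0.75
# The optional num_classes argument (accepted for compatibility with the call
# sites in train()) is not used by this binary computation.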
def balancing(X, X_masses, X_mass_masks, y, balance_flag):
if balance_flag:
num_positives = sum(y)
num_negatives = len(y) - num_positives
num_positives_temp = num_positives
num_negatives_temp = num_negatives
inputs = X.copy()
input_masses = X_masses.copy()
input_mass_masks = X_mass_masks.copy()
targets = y.copy()
while inputs.shape[0] != num_negatives * 2:
if num_negatives_temp - num_positives_temp > num_positives_temp:
inputs = np.concatenate((inputs, X[np.where(y == 1)]), axis=0)
input_masses = np.concatenate((input_masses, X_masses[np.where(y == 1)]), axis=0)
input_mass_masks = np.concatenate((input_mass_masks, X_mass_masks[np.where(y == 1)]), axis=0)
targets = np.concatenate((targets, np.ones(np.where(y == 1)[0].shape, dtype='int32')), axis=0)
else:
inputs = np.concatenate((inputs, X[
np.where(targets == 1)[0][
np.random.randint(num_positives, size=num_negatives_temp - num_positives_temp)]]), axis=0)
input_masses = np.concatenate((input_masses, X_masses[
np.where(targets == 1)[0][
np.random.randint(num_positives, size=num_negatives_temp - num_positives_temp)]]), axis=0)
input_mass_masks = np.concatenate((input_mass_masks, X_mass_masks[
np.where(targets == 1)[0][
np.random.randint(num_positives, size=num_negatives_temp - num_positives_temp)]]),
if d[s0][None] is not False:
populate_selection(
d[s0], selection[1:], os.path.join(root, s0)
)
if d[s0][None] is False:
# At best, the root is not all true
d[None] = None
def split_all(x):
head, tail = os.path.split(x)
if (len(head) == 0) or (len(tail) == 0):
return [x]
else:
return split_all(head) + [tail]
for selection in selections:
selection_parts = split_all(selection)
populate_selection(d, selection_parts, root)
finally:
self.module_panel.SetCursor(wx.NullCursor)
dlg = _tree_checkbox_dialog.TreeCheckboxDialog(
self.module_panel, d, size=(320, 480)
)
dlg.set_parent_reflects_child(False)
dlg.Title = "Select folders"
if dlg.ShowModal() == wx.ID_OK:
def collect_state(prefix, d):
if d is None:
return []
if hasattr(d, "__call__") or d[None]:
return []
elif d[None] is False:
return [prefix]
result = []
for key in list(d.keys()):
if key is None:
continue
result += collect_state(os.path.join(prefix, key), d[key])
return result
selections = []
for object_name in [x for x in list(d.keys()) if x is not None]:
selections += collect_state(object_name, d[object_name])
proposed_value = v.get_value_string(selections)
setting_edited_event = SettingEditedEvent(
v, self.__module, proposed_value, event
)
self.notify(setting_edited_event)
self.reset_view()
control.Bind(wx.EVT_BUTTON, on_press)
else:
control.Show()
return control
def make_multichoice_control(self, v, control_name, control):
selections = v.selections
assert isinstance(v, MultiChoice)
if isinstance(v, SubscriberMultiChoice):
# Get the choices from the providers
v.load_choices(self.__pipeline)
choices = v.choices + [
selection for selection in selections if selection not in v.choices
]
if not control:
control = wx.ListBox(
self.__module_panel,
-1,
choices=choices,
style=wx.LB_EXTENDED,
name=control_name,
)
for selection in selections:
index = choices.index(selection)
control.SetSelection(index)
if selection not in v.choices:
control.SetItemForegroundColour(index, get_error_color())
def callback(event, setting=v, control=control):
self.__on_multichoice_change(event, setting, control)
self.__module_panel.Bind(wx.EVT_LISTBOX, callback, control)
else:
old_choices = control.Items
if len(choices) != len(old_choices) or not all(
[x == y for x, y in zip(choices, old_choices)]
):
control.Items = choices
for i in range(len(choices)):
if control.IsSelected(i):
if choices[i] not in selections:
control.Deselect(i)
elif choices[i] in selections:
control.Select(i)
if choices[i] not in v.choices:
control.SetItemForegroundColour(i, get_error_color())
return control
def make_colormap_control(self, v, control_name, control):
"""Make a combo-box that shows colormap choices
v - the setting
choices - the possible values for the setting
control_name - assign this name to the control
style - one of the CB_ styles
"""
try:
if v.value == "Default":
cmap_name = get_default_colormap()
else:
cmap_name = v.value
cm = matplotlib.cm.get_cmap(cmap_name)
sm = matplotlib.cm.ScalarMappable(cmap=cm)
i, j = numpy.mgrid[0:12, 0:128]
if cm.N < 128:
j *= int((cm.N + 128) / 128)
image = (sm.to_rgba(j) * 255).astype(numpy.uint8)
bitmap = wx.Bitmap.FromBufferRGBA(128, 12, image.tostring())
except:
logging.warning("Failed to create the %s colorbar" % cmap_name)
bitmap = None
if not control:
control = wx.Panel(self.__module_panel, -1, name=control_name)
sizer = wx.BoxSizer(wx.VERTICAL)
control.SetSizer(sizer)
colorbar = wx.StaticBitmap(control, -1, name=colorbar_ctrl_name(v))
if bitmap is not None:
colorbar.SetBitmap(bitmap)
sizer.Add(colorbar, 0, wx.EXPAND | wx.BOTTOM, 2)
combo = wx.ComboBox(
control,
-1,
v.value,
choices=v.choices,
style=wx.CB_READONLY,
name=combobox_ctrl_name(v),
)
sizer.Add(combo, 1, wx.EXPAND)
def callback(event, setting=v, control=combo):
self.__on_combobox_change(event, setting, combo)
def ignore_mousewheel(event):
return
combo.Bind(wx.EVT_MOUSEWHEEL, ignore_mousewheel)
self.__module_panel.Bind(wx.EVT_COMBOBOX, callback, combo)
else:
combo = control.FindWindowByName(combobox_ctrl_name(v))
colorbar = control.FindWindowByName(colorbar_ctrl_name(v))
old_choices = combo.Items
if len(v.choices) != len(old_choices) or not all(
[x == y for x, y in zip(v.choices, old_choices)]
):
combo.Items = v.choices
if combo.Value != v.value:
combo.Value = v.value
if bitmap is not None:
colorbar.SetBitmap(bitmap)
return control
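# Editorial sketch of the preview-strip trick used above (names from
# matplotlib/numpy only; standalone, not part of the original class):
#
#   sm = matplotlib.cm.ScalarMappable(cmap=matplotlib.cm.get_cmap("jet"))
#   j = numpy.mgrid[0:12, 0:128][1]                       # 12 rows, values 0..127
#   strip = (sm.to_rgba(j) * 255).astype(numpy.uint8)     # shape (12, 128, 4) RGBA
#   # wx.Bitmap.FromBufferRGBA(128, 12, strip.tostring()) then yields the colorbar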
def make_color_control(self, v, control_name, control):
try:
color = wx.Colour()
color.Set(v.value)
except:
color = wx.BLACK
if (
not hasattr(control, "bad_color_name")
or control.bad_color_name != v.value
):
logging.warn("Failed to set color to %s" % v.value)
control.bad_color_name = v.value
if control is None:
control = wx.lib.colourselect.ColourSelect(
self.__module_panel, colour=color
)
control.SetName(control_name)
def on_press(event, v=v, control=control):
proposed_value = control.GetColour().GetAsString(
wx.C2S_NAME | wx.C2S_HTML_SYNTAX
)
setting_edited_event = SettingEditedEvent(
v, self.__module, proposed_value, event
)
self.notify(setting_edited_event)
self.reset_view()
#
# There's a little display bugginess that, when the window's
# size changes, the colored bitmap does not resize with it.
#
def on_size(event, control=control):
control.SetBitmap(control.MakeBitmap())
control.Bind(wx.lib.colourselect.EVT_COLOURSELECT, on_press)
control.Bind(wx.EVT_SIZE, on_size)
else:
control.SetColour(color)
return control
def make_tree_choice_control(self, v, control_name, control):
new_label = ">".join(v.get_path_parts())
def make_bitmap(control, flags):
assert isinstance(control, wx.BitmapButton)
text_width, text_height = control.GetFullTextExtent(new_label)
gap = 4
drop_width = wx.SystemSettings.GetMetric(wx.SYS_VSCROLL_ARROW_X)
drop_height = wx.SystemSettings.GetMetric(wx.SYS_VSCROLL_ARROW_Y)
width = text_width + 2 * gap + drop_width
height = max(text_height, drop_height) + 4
bitmap = wx.Bitmap(width, height)
dc = wx.MemoryDC(bitmap)
dc.SetFont(control.GetFont())
brush = wx.Brush(control.GetBackgroundColour())
dc.SetBackground(brush)
dc.Clear()
wx.RendererNative.Get().DrawComboBox(
control, dc, wx.Rect(0, 0, width, height), flags
)
dc.DrawText(new_label, 2, 2)
return bitmap
if control is None:
control = wx.BitmapButton(self.module_panel, style=wx.BU_EXACTFIT)
control.label_text = None
def on_press(event, v=v, control=control):
id_dict = {}
def on_event(event, v=v, control=control, id_dict=id_dict):
new_path = v.encode_path_parts(id_dict[event.GetId()])
self.on_value_change(v, control, new_path, event)
def make_menu(tree, id_dict=id_dict, path=None):
if path is None:
path = []
if path is None:
path = []
menu = wx.Menu()
for node in tree:
text, subtree = node[:2]
subpath = path + [text]
if v.fn_is_leaf(node):
item = menu.Append(-1, text)
id_dict[item.GetId()] = subpath
if wx.VERSION >= (2, 9) and sys.platform != "win32":
wx.EVT_MENU(menu, item.GetId(), on_event)
if subtree is not None and len(subtree) > 0:
submenu = make_menu(subtree, path=subpath)
menu.Append(-1, text, submenu)
return menu
menu = make_menu(v.get_tree())
assert isinstance(control, wx.Window)
if wx.VERSION < (2, 9) or sys.platform == "win32":
menu.Bind(wx.EVT_MENU, on_event)
control.PopupMenu(menu, 0, control.GetSize()[1])
menu.Destroy()
control.Bind(wx.EVT_BUTTON, on_press)
old_label = control.label_text
if old_label != new_label:
control.label_text = new_label
for getter, setter, flags in (
(control.GetBitmapLabel, control.SetBitmapLabel, 0),
(control.GetBitmapFocus, control.SetBitmapFocus, wx.CONTROL_FOCUSED),
(
control.GetBitmapSelected,
control.SetBitmapSelected,
wx.CONTROL_SELECTED,
),
):
old_bitmap = getter()
setter(make_bitmap(control, flags))
if old_bitmap is not None:
old_bitmap.Destroy()
return control
def make_callback_control(self, v, control_name, control):
"""Make a control that calls back using the callback buried in the setting"""
if not control:
control = wx.Button(self.module_panel, -1, v.label, name=control_name)
def callback(event, setting=v):
self.__on_do_something(event, setting)
self.module_panel.Bind(wx.EVT_BUTTON, callback, control)
else:
control.Label = v.label
return control
def make_callback_controls(self, v, control_name, control):
"""Make a panel of buttons for each of the setting's actions
v - a DoThings setting
control_name - the name that we apply to the panel
control - either None or the panel containing the buttons
"""
assert isinstance(v, DoThings)
if not control:
control = wx.Panel(self.module_panel, name=control_name)
control.SetSizer(wx.BoxSizer(wx.HORIZONTAL))
for i in range(v.count):
if i != 0:
control.Sizer.AddSpacer(2)
button = wx.Button(control, name=button_control_name(v, i))
control.Sizer.Add(button, 0, wx.ALIGN_LEFT)
def callback(event, index=i):
v.on_event_fired(index)
setting_edited_event = SettingEditedEvent(
v, self.__module, None, event
)
self.notify(setting_edited_event)
self.__module.on_setting_changed(v, self.__pipeline)
self.reset_view()
button.Bind(wx.EVT_BUTTON, callback)
for i in range(v.count):
button = control.FindWindowByName(button_control_name(v, i))
button.Label = v.get_label(i)
return control
def make_regexp_control(self, v, control):
"""Make a textbox control + regular expression button"""
if not control:
panel = wx.Panel(self.__module_panel, -1, name=edit_control_name(v))
control = panel
sizer = wx.BoxSizer(wx.HORIZONTAL)
panel.SetSizer(sizer)
text_ctrl = wx.TextCtrl(panel, -1, str(v.value), name=text_control_name(v))
sizer.Add(text_ctrl, 1, wx.EXPAND | wx.RIGHT, 1)
bitmap = wx.ArtProvider.GetBitmap(wx.ART_FIND, wx.ART_TOOLBAR, (16, 16))
bitmap_button = wx.BitmapButton(
panel, bitmap=bitmap, name=button_control_name(v)
)
sizer.Add(bitmap_button, 0, wx.EXPAND)
def on_cell_change(event, setting=v, control=text_ctrl):
self.__on_cell_change(event, setting, control, timeout=False)
def on_button_pressed(event, setting=v, control=text_ctrl):
#
# Find a file in the image directory
#
filename = "plateA-2008-08-06_A12_s1_w1_[89A882DE-E675-4C12-9F8E-46C9976C4ABE].tif"
try:
if setting.get_example_fn is None:
path = get_default_image_directory()
filenames = [
x
for x in os.listdir(path)
if x.find(".") != -1
and os.path.splitext(x)[1].upper()
in (".TIF", ".JPG", ".PNG", ".BMP")
]
if len(filenames):
filename = filenames[0]
else:
filename = setting.get_example_fn()
except:
pass
if v.guess == RegexpText.GUESS_FOLDER:
guesses = regexp_editor.RE_FOLDER_GUESSES
else:
guesses = regexp_editor.RE_FILENAME_GUESSES
new_value = regexp_editor.edit_regexp(
panel, control.GetValue(), filename, guesses
)
if new_value:
control.SetValue(new_value)
self.__on_cell_change(event, setting, control)
def on_kill_focus(event, setting=v, control=text_ctrl):
# Make sure not to call set_selection again if a set_selection is already
# in process. Doing so may have adverse effects (e.g. disappearing textboxes)
if self.__module is not None and self.__handle_change:
self.set_selection(self.__module.module_num)
event.Skip()
self.__module_panel.Bind(wx.EVT_TEXT, on_cell_change, text_ctrl)
self.__module_panel.Bind(wx.EVT_BUTTON, on_button_pressed, bitmap_button)
#
# http://www.velocityreviews.com/forums/t359823-textctrl-focus-events-in-wxwidgets.html
# explains why bind is to control itself
#
text_ctrl.Bind(wx.EVT_KILL_FOCUS, on_kill_focus)
else:
text_control = control.FindWindow(text_control_name(v))
if v.value != text_control.Value:
text_control.Value = v.value
return control
def make_filename_text_control(self, v, control):
"""Make a filename text control"""
edit_name = subedit_control_name(v)
control_name = edit_control_name(v)
button_name = button_control_name(v)
if control is None:
control = wx.Panel(self.module_panel, -1, name=control_name)
sizer = wx.BoxSizer(wx.HORIZONTAL)
control.SetSizer(sizer)
if v.metadata_display:
edit_control = metadatactrl.MetadataControl(
self.__pipeline,
self.__module,
control,
value=v.value,
name=edit_name,
)
else:
edit_control = wx.TextCtrl(control, -1, str(v.value), name=edit_name)
sizer.Add(edit_control, 1, wx.ALIGN_LEFT | wx.ALIGN_TOP)
def on_cell_change(event, setting=v, control=edit_control):
self.__on_cell_change(event, setting, control)
self.__module_panel.Bind(wx.EVT_TEXT, on_cell_change, edit_control)
bitmap = wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN, wx.ART_BUTTON, (16, 16))
button_control = wx.BitmapButton(control, bitmap=bitmap, name=button_name)
def on_press(event):
"""Open a file browser"""
if v.mode == Filename.MODE_OPEN:
mode = wx.FD_OPEN
elif v.mode == Filename.MODE_APPEND:
mode = wx.FD_SAVE
else:
mode = wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT
the data. This profile has a radius unit of {r} and a value unit of "
"{v}".format(r=self.radii_unit.to_string(), v=self.values_unit.to_string()))
# I don't think I'm going to allow any fits without value uncertainties - just seems daft
if self._values_err is None:
raise XGAFitError("You cannot fit to a profile that doesn't have value uncertainties.")
# Checking that the method passed is valid
if method not in self._fit_methods:
allowed = ", ".join(self._fit_methods)
raise XGAFitError("{me} is not a valid fitting method, please use one of these; {a}".format(me=method,
a=allowed))
# Check whether a good fit result already exists for this model. We use the storage_key property that
# XGA model objects generate from their name and their start parameters
if model.name in self._good_model_fits[method]:
warn("{m} already has a successful fit result for this profile using {me}, with those start "
"parameters".format(m=model.name, me=method))
already_done = True
elif model.name in self._bad_model_fits[method]:
warn("{m} already has a failed fit result for this profile using {me} with those start "
"parameters".format(m=model.name, me=method))
already_done = False
else:
already_done = False
# Running the requested fitting method
if not already_done and method == 'mcmc':
model, success = self.emcee_fit(model, num_steps, num_walkers, progress_bar, show_warn, num_samples)
elif not already_done and method == 'curve_fit':
model, success = self.nlls_fit(model, num_samples, show_warn)
elif not already_done and method == 'odr':
model, success = self._odr_fit(model, show_warn)
else:
model = self.get_model_fit(model.name, method)
# Storing the model in the internal dictionaries depending on whether the fit was successful or not
if not already_done and success:
self._good_model_fits[method][model.name] = model
elif not already_done and not success:
self._bad_model_fits[method][model.name] = model
# This method means that a change has happened to the model, so it should be re-saved
self.save()
return model
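    # A hedged usage sketch (the profile instance and the 'beta' model key below are
    # assumptions for illustration, not taken from this module):
    #
    #     model_inst = PROF_TYPE_MODELS[prof._prof_type]['beta']()   # hypothetical model key
    #     fitted = prof.fit(model_inst, method='mcmc', num_steps=20000, num_walkers=20)
    #     same = prof.get_model_fit('beta', 'mcmc')                  # retrieve the stored fit
    #
    # Calling fit again with the same model and start parameters does not re-run the
    # sampler; the stored result is returned instead.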
def allowed_models(self, table_format: str = 'fancy_grid'):
"""
This is a convenience function to tell the user what models can be used to fit a profile
of the current type, what parameters are expected, and what the defaults are.
:param str table_format: The desired format of the allowed models table. This is passed to the
tabulate module (allowed formats can be found here - https://pypi.org/project/tabulate/), and
alters the way the printed table looks.
"""
        # Base profiles don't have any models associated with them, so just warn the user
if self._prof_type == "base":
warn("There are no implemented models for this profile type")
else:
allowed = list(PROF_TYPE_MODELS[self._prof_type].keys())
# These just roll through the available models for this type of profile and construct strings of
# parameter names and start parameters to put in the table
model_par_names = []
model_par_starts = []
for m in allowed:
exp_pars = ""
par_len = 0
def_starts = ""
def_len = 0
# This chunk of code tries to make sure that the strings aren't too long to display nicely
# in the table
mod_inst = PROF_TYPE_MODELS[self._prof_type][m]()
for p_ind, p in enumerate(list(inspect.signature(mod_inst.model).parameters.values())[1:]):
if par_len > 35:
exp_pars += ' \n'
par_len = 0
next_par = '{}, '.format(p.name)
par_len += len(next_par)
exp_pars += next_par
if def_len > 35:
def_starts += ' \n'
def_len = 0
next_def = '{}, '.format(str(mod_inst.start_pars[p_ind]))
def_len += len(next_def)
def_starts += next_def
                # We slice off the trailing ', ' separator that the loop above always appends
model_par_names.append(exp_pars[:-2])
model_par_starts.append(def_starts[:-2])
# Construct the table data and display it using tabulate module
tab_dat = [[allowed[i], model_par_names[i], model_par_starts[i]] for i in range(0, len(allowed))]
print(tabulate(tab_dat, ["MODEL NAME", "EXPECTED PARAMETERS", "DEFAULT START VALUES"],
tablefmt=table_format))
def get_model_fit(self, model: str, method: str) -> BaseModel1D:
"""
A get method for fitted model objects associated with this profile. Models for which the fit failed will
also be returned, but a warning will be shown to inform the user that the fit failed.
:param str model: The name of the model to retrieve.
:param str method: The method which was used to fit the model.
:return: An instance of an XGA model object that was fitted to this profile and updated with the
parameter values.
:rtype: BaseModel1D
"""
if model not in PROF_TYPE_MODELS[self._prof_type]:
allowed = list(PROF_TYPE_MODELS[self._prof_type].keys())
prof_name = self._y_axis_name.lower()
raise XGAInvalidModelError("{m} is not a valid model for a {p} profile, please choose from "
"one of these; {a}".format(m=model, a=", ".join(allowed), p=prof_name))
elif model in self._bad_model_fits[method]:
warn("An attempt was made to fit {m} with {me} but it failed, so treat the model with "
"suspicion".format(m=model, me=method))
ret_model = self._bad_model_fits[method][model]
elif model not in self._good_model_fits[method]:
raise ModelNotAssociatedError("{m} is valid for this profile, but hasn't been fit with {me} "
"yet".format(m=model, me=method))
else:
ret_model = self._good_model_fits[method][model]
return ret_model
def add_model_fit(self, model: BaseModel1D, method: str):
"""
There are rare circumstances where XGA processes might wish to add a model to a profile from the outside,
which is what this method allows you to do.
:param BaseModel1D model: The XGA model object to add to the profile.
:param str method: The method used to fit the model.
"""
# Checking that the method passed is valid
if method not in self._fit_methods:
allowed = ", ".join(self._fit_methods)
raise XGAFitError("{me} is not a valid fitting method, please use one of these; {a}".format(me=method,
a=allowed))
# Checking that the model is valid for this particular profile
allowed = ", ".join(PROF_TYPE_MODELS[self._prof_type])
if model.name not in PROF_TYPE_MODELS[self._prof_type]:
raise XGAInvalidModelError("{p} is not valid for this type of profile, please use one of the "
"following models {a}".format(p=model.name, a=allowed))
elif model.x_unit != self.radii_unit or model.y_unit != self.values_unit:
raise UnitConversionError("The model instance passed to the fit method has units that are incompatible, "
"with the data. This profile has an radius unit of {r} and a value unit of "
"{v}".format(r=self.radii_unit.to_string(), v=self.values_unit.to_string()))
elif not model.success:
raise ValueError("Please only add successful models to this profile.")
else:
self._good_model_fits[method][model.name] = model
# This method means that a change has happened to the model, so it should be re-saved
self.save()
def get_sampler(self, model: str) -> em.EnsembleSampler:
"""
A get method meant to retrieve the MCMC ensemble sampler used to fit a particular
model (supplied by the user). Checks are applied to the supplied model, to make
sure that it is valid for the type of profile, that a good fit has actually been
performed, and that the fit was performed with Emcee and not another method.
:param str model: The name of the model for which to retrieve the sampler.
:return: The Emcee sampler used to fit the user supplied model.
:rtype: em.EnsembleSampler
"""
model = self.get_model_fit(model, 'mcmc')
return model.emcee_sampler
def get_chains(self, model: str, discard: Union[bool, int] = True, flatten: bool = True,
thin: int = 1) -> np.ndarray:
"""
        Get method for the sampler chains of an MCMC fit to the user supplied model. get_model_fit is
        called to retrieve the fitted model (and validate the model name) before its sampler chains are read.
:param str model: The name of the model for which to retrieve the chains.
:param bool/int discard: Whether steps should be discarded for burn-in. If True then the cut off decided
using the auto-correlation time will be used. If an integer is passed then this will be used as the
number of steps to discard, and if False then no steps will be discarded.
:param bool flatten: Should the chains of the multiple walkers be flattened into one chain per parameter.
:param int thin: The thinning that should be applied to the chains. The default is 1, which means no
thinning is applied.
:return: The requested chains.
:rtype: np.ndarray
"""
model = self.get_model_fit(model, 'mcmc')
if isinstance(discard, bool) and discard:
chains = model.emcee_sampler.get_chain(discard=model.cut_off, flat=flatten, thin=thin)
elif isinstance(discard, int):
chains = model.emcee_sampler.get_chain(discard=discard, flat=flatten, thin=thin)
else:
chains = model.emcee_sampler.get_chain(flat=flatten, thin=thin)
return chains
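    # A hedged post-processing sketch (assumes numpy is available as np and that a
    # model named 'beta' was successfully fitted with MCMC - both are assumptions):
    #
    #     chains = prof.get_chains('beta')            # burn-in discarded, walkers flattened
    #     medians = np.median(chains, axis=0)         # one value per model parameter
    #     lo, hi = np.percentile(chains, [15.9, 84.1], axis=0)
    #
    # With flatten=False the returned array keeps its (step, walker, parameter) shape.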
def view_chains(self, model: str, discard: Union[bool, int] = True, thin: int = 1, figsize: Tuple = None):
"""
Simple view method to quickly look at the MCMC chains for a given model fit.
:param str model: The name of the model for | |
db_procedure_not_implemented.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 244
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_protocol_error'):
continue
db_protocol_error = _lib.db_protocol_error
db_protocol_error.argtypes = []
db_protocol_error.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 245
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_read_dbmscap'):
continue
db_read_dbmscap = _lib.db_read_dbmscap
db_read_dbmscap.argtypes = []
db_read_dbmscap.restype = POINTER(dbDbmscap)
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 246
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_realloc'):
continue
db_realloc = _lib.db_realloc
db_realloc.argtypes = [POINTER(None), c_int]
db_realloc.restype = POINTER(c_ubyte)
db_realloc.errcheck = lambda v,*a : cast(v, c_void_p)
break
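# Note: every block in this generated module follows the same ctypesgen pattern -- scan
# each loaded shared library for the symbol, set argtypes/restype on the first match,
# then break. A hedged sketch of calling such a binding (illustrative only; it assumes
# the GRASS libraries actually loaded into _libs):
#
#     buf = db_realloc(None, 64)     # errcheck above casts the result to c_void_p
#     rc = db__send_int(42)          # plain int in, int return code out
#
# Symbols absent from every library are simply never defined at module level.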
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 247
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_char'):
continue
db__recv_char = _lib.db__recv_char
db__recv_char.argtypes = [String]
db__recv_char.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 248
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_column_default_value'):
continue
db__recv_column_default_value = _lib.db__recv_column_default_value
db__recv_column_default_value.argtypes = [POINTER(dbColumn)]
db__recv_column_default_value.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 249
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_column_definition'):
continue
db__recv_column_definition = _lib.db__recv_column_definition
db__recv_column_definition.argtypes = [POINTER(dbColumn)]
db__recv_column_definition.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 250
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_column_value'):
continue
db__recv_column_value = _lib.db__recv_column_value
db__recv_column_value.argtypes = [POINTER(dbColumn)]
db__recv_column_value.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 251
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_datetime'):
continue
db__recv_datetime = _lib.db__recv_datetime
db__recv_datetime.argtypes = [POINTER(dbDateTime)]
db__recv_datetime.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 252
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_double'):
continue
db__recv_double = _lib.db__recv_double
db__recv_double.argtypes = [POINTER(c_double)]
db__recv_double.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 253
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_double_array'):
continue
db__recv_double_array = _lib.db__recv_double_array
db__recv_double_array.argtypes = [POINTER(POINTER(c_double)), POINTER(c_int)]
db__recv_double_array.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 254
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_float'):
continue
db__recv_float = _lib.db__recv_float
db__recv_float.argtypes = [POINTER(c_float)]
db__recv_float.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 255
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_float_array'):
continue
db__recv_float_array = _lib.db__recv_float_array
db__recv_float_array.argtypes = [POINTER(POINTER(c_float)), POINTER(c_int)]
db__recv_float_array.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 256
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_handle'):
continue
db__recv_handle = _lib.db__recv_handle
db__recv_handle.argtypes = [POINTER(dbHandle)]
db__recv_handle.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 257
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_index'):
continue
db__recv_index = _lib.db__recv_index
db__recv_index.argtypes = [POINTER(dbIndex)]
db__recv_index.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 258
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_index_array'):
continue
db__recv_index_array = _lib.db__recv_index_array
db__recv_index_array.argtypes = [POINTER(POINTER(dbIndex)), POINTER(c_int)]
db__recv_index_array.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 259
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_int'):
continue
db__recv_int = _lib.db__recv_int
db__recv_int.argtypes = [POINTER(c_int)]
db__recv_int.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 260
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_int_array'):
continue
db__recv_int_array = _lib.db__recv_int_array
db__recv_int_array.argtypes = [POINTER(POINTER(c_int)), POINTER(c_int)]
db__recv_int_array.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 261
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_procnum'):
continue
db__recv_procnum = _lib.db__recv_procnum
db__recv_procnum.argtypes = [POINTER(c_int)]
db__recv_procnum.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 262
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_return_code'):
continue
db__recv_return_code = _lib.db__recv_return_code
db__recv_return_code.argtypes = [POINTER(c_int)]
db__recv_return_code.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 263
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_short'):
continue
db__recv_short = _lib.db__recv_short
db__recv_short.argtypes = [POINTER(c_short)]
db__recv_short.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 264
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_short_array'):
continue
db__recv_short_array = _lib.db__recv_short_array
db__recv_short_array.argtypes = [POINTER(POINTER(c_short)), POINTER(c_int)]
db__recv_short_array.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 265
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_string'):
continue
db__recv_string = _lib.db__recv_string
db__recv_string.argtypes = [POINTER(dbString)]
db__recv_string.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 266
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_string_array'):
continue
db__recv_string_array = _lib.db__recv_string_array
db__recv_string_array.argtypes = [POINTER(POINTER(dbString)), POINTER(c_int)]
db__recv_string_array.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 267
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_table_data'):
continue
db__recv_table_data = _lib.db__recv_table_data
db__recv_table_data.argtypes = [POINTER(dbTable)]
db__recv_table_data.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 268
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_table_definition'):
continue
db__recv_table_definition = _lib.db__recv_table_definition
db__recv_table_definition.argtypes = [POINTER(POINTER(dbTable))]
db__recv_table_definition.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 269
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_token'):
continue
db__recv_token = _lib.db__recv_token
db__recv_token.argtypes = [POINTER(dbToken)]
db__recv_token.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 270
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_value'):
continue
db__recv_value = _lib.db__recv_value
db__recv_value.argtypes = [POINTER(dbValue), c_int]
db__recv_value.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 271
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_Cstring'):
continue
db__send_Cstring = _lib.db__send_Cstring
db__send_Cstring.argtypes = [String]
db__send_Cstring.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 272
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_char'):
continue
db__send_char = _lib.db__send_char
db__send_char.argtypes = [c_int]
db__send_char.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 273
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_column_default_value'):
continue
db__send_column_default_value = _lib.db__send_column_default_value
db__send_column_default_value.argtypes = [POINTER(dbColumn)]
db__send_column_default_value.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 274
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_column_definition'):
continue
db__send_column_definition = _lib.db__send_column_definition
db__send_column_definition.argtypes = [POINTER(dbColumn)]
db__send_column_definition.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 275
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_column_value'):
continue
db__send_column_value = _lib.db__send_column_value
db__send_column_value.argtypes = [POINTER(dbColumn)]
db__send_column_value.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 276
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_datetime'):
continue
db__send_datetime = _lib.db__send_datetime
db__send_datetime.argtypes = [POINTER(dbDateTime)]
db__send_datetime.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 277
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_double'):
continue
db__send_double = _lib.db__send_double
db__send_double.argtypes = [c_double]
db__send_double.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 278
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_double_array'):
continue
db__send_double_array = _lib.db__send_double_array
db__send_double_array.argtypes = [POINTER(c_double), c_int]
db__send_double_array.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 279
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_failure'):
continue
db__send_failure = _lib.db__send_failure
db__send_failure.argtypes = []
db__send_failure.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 280
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_float'):
continue
db__send_float = _lib.db__send_float
db__send_float.argtypes = [c_float]
db__send_float.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 281
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_float_array'):
continue
db__send_float_array = _lib.db__send_float_array
db__send_float_array.argtypes = [POINTER(c_float), c_int]
db__send_float_array.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 282
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_handle'):
continue
db__send_handle = _lib.db__send_handle
db__send_handle.argtypes = [POINTER(dbHandle)]
db__send_handle.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 283
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_index'):
continue
db__send_index = _lib.db__send_index
db__send_index.argtypes = [POINTER(dbIndex)]
db__send_index.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 284
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_index_array'):
continue
db__send_index_array = _lib.db__send_index_array
db__send_index_array.argtypes = [POINTER(dbIndex), c_int]
db__send_index_array.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 285
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_int'):
continue
db__send_int = _lib.db__send_int
db__send_int.argtypes = [c_int]
db__send_int.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 286
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_int_array'):
continue
db__send_int_array = _lib.db__send_int_array
db__send_int_array.argtypes = [POINTER(c_int), c_int]
db__send_int_array.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 287
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_procedure_not_implemented'):
continue
db__send_procedure_not_implemented = _lib.db__send_procedure_not_implemented
db__send_procedure_not_implemented.argtypes = [c_int]
db__send_procedure_not_implemented.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 288
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_procedure_ok'):
continue
db__send_procedure_ok = _lib.db__send_procedure_ok
db__send_procedure_ok.argtypes = [c_int]
db__send_procedure_ok.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 289
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_short'):
continue
db__send_short = _lib.db__send_short
db__send_short.argtypes = [c_int]
db__send_short.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 290
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_short_array'):
continue
db__send_short_array = _lib.db__send_short_array
db__send_short_array.argtypes = [POINTER(c_short), c_int]
db__send_short_array.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 291
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_string'):
continue
db__send_string = _lib.db__send_string
db__send_string.argtypes = [POINTER(dbString)]
db__send_string.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 292
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_string_array'):
continue
db__send_string_array = _lib.db__send_string_array
db__send_string_array.argtypes = [POINTER(dbString), c_int]
db__send_string_array.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 293
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_success'):
continue
db__send_success = _lib.db__send_success
db__send_success.argtypes = []
db__send_success.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 294
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_table_data'):
continue
db__send_table_data = _lib.db__send_table_data
db__send_table_data.argtypes = [POINTER(dbTable)]
db__send_table_data.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 295
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_table_definition'):
continue
db__send_table_definition = _lib.db__send_table_definition
db__send_table_definition.argtypes = [POINTER(dbTable)]
db__send_table_definition.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 296
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_token'):
continue
db__send_token = _lib.db__send_token
db__send_token.argtypes = [POINTER(dbToken)]
db__send_token.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 297
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_value'):
continue
db__send_value = _lib.db__send_value
db__send_value.argtypes = [POINTER(dbValue), c_int]
db__send_value.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 298
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_select_CatValArray'):
continue
db_select_CatValArray = _lib.db_select_CatValArray
db_select_CatValArray.argtypes = [POINTER(dbDriver), String, String, String, String, POINTER(dbCatValArray)]
db_select_CatValArray.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 301
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_select_int'):
continue
db_select_int = _lib.db_select_int
db_select_int.argtypes = [POINTER(dbDriver), String, String, String, POINTER(POINTER(c_int))]
db_select_int.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 303
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_select_value'):
continue
db_select_value = _lib.db_select_value
db_select_value.argtypes = [POINTER(dbDriver), String, String, c_int, String, POINTER(dbValue)]
db_select_value.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 305
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_column_description'):
continue
db_set_column_description = _lib.db_set_column_description
db_set_column_description.argtypes = [POINTER(dbColumn), String]
db_set_column_description.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 306
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_column_has_defined_default_value'):
continue
db_set_column_has_defined_default_value = _lib.db_set_column_has_defined_default_value
db_set_column_has_defined_default_value.argtypes = [POINTER(dbColumn)]
db_set_column_has_defined_default_value.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 307
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_column_has_undefined_default_value'):
continue
db_set_column_has_undefined_default_value = _lib.db_set_column_has_undefined_default_value
db_set_column_has_undefined_default_value.argtypes = [POINTER(dbColumn)]
db_set_column_has_undefined_default_value.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 308
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_column_host_type'):
continue
db_set_column_host_type = _lib.db_set_column_host_type
db_set_column_host_type.argtypes = [POINTER(dbColumn), c_int]
db_set_column_host_type.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 309
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_column_length'):
continue
db_set_column_length = _lib.db_set_column_length
db_set_column_length.argtypes = [POINTER(dbColumn), c_int]
db_set_column_length.restype | |
not None:
if self.tools.is_clang:
with open(self.export_map_file, 'wt') as fh:
for export_entry in actual_export_list:
print('_{}'.format(export_entry), file=fh)
argv += [ '-Wl,-exported_symbols_list,{}'.format(self.export_map_file) ]
else:
with open(self.export_map_file, 'wt') as fh:
print("{", file=fh)
print(" global:", file=fh)
for export_entry in actual_export_list:
print(" {};".format(export_entry), file=fh)
print("\n local: *;", file=fh)
print("};", file=fh)
argv += [ '-Wl,--version-script={}'.format(self.export_map_file) ]
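                # For reference, the version script written above follows the standard
                # GNU ld form (symbol names are placeholders):
                #
                #     {
                #       global:
                #         my_export_a;
                #         my_export_b;
                #
                #       local: *;
                #     };
                #
                # i.e. only the listed symbols stay exported, everything else is hidden.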
with open(os.path.join(self.link_private_dir, 'symbols.json'), 'wt') as fh:
print("[", file=fh)
exp_idx = 0
for export_entry in sorted(actual_export_list):
exp_idx += 1
exp_tail = ',' if exp_idx < len(actual_export_list) else ''
print(' "{}"{}'.format(export_entry, exp_tail), file=fh)
print("]", file=fh)
else:
if self.tools.is_mingw:
if self.use_wmain:
argv += ['-municode']
if self.win_stack_size:
argv += ['-Wl,--stack,{}'.format(self.win_stack_size)]
else:
if not self.tools.is_clang:
argv += ['-pie']
if self.tools.is_clang:
argv += ['-Wl,-dead_strip', '-Wl,-dead_strip_dylibs', '-Wl,-no_dead_strip_inits_and_terms' ]
else:
argv += ['-Wl,--gc-sections']
if self.tools.is_mingw:
argv += ['-Wl,-strip-all']
if not self.tools.is_mingw and not self.tools.is_clang:
argv += ['-Wl,-z,noexecstack']
if not self.tools.is_clang:
argv += ['-Wl,--as-needed']
argv += [ '-o', self.bin_path_private ]
if self.is_dll and self.tools.is_mingw:
if self.export_def_file and not self.export_map_file:
argv += [ self.export_def_file ]
if self.tools.is_mingw:
if self.res_file is not None:
argv += [ self.res_file ]
if self.manifest_res_file is not None:
argv += [ self.manifest_res_file ]
argv += self.obj_fnames
if not self.tools.is_clang:
argv += ['-static-libgcc']
wrap_libs_in_group = False
if self.link_libstatic_names or self.link_libshared_names:
if not self.tools.is_clang:
wrap_libs_in_group = True
if wrap_libs_in_group:
argv += [ '-Wl,--start-group' ]
if self.link_libstatic_names:
argv += [ '-L{}'.format(self.lib_directory) ]
for libname in self.link_libstatic_names:
argv += [ '-l{}'.format(libname) ]
if self.link_libshared_names:
argv += [ '-L{}'.format(self.sharedlib_directory) ]
for libname in self.link_libshared_names:
argv += [ '-l{}'.format(libname) ]
if wrap_libs_in_group:
argv += [ '-Wl,--end-group' ]
for libname in self.prebuilt_lib_names:
argv += [ '-l{}'.format(libname) ]
if self.tools.is_clang:
argv += [ '-Wl,-install_name,{}'.format(self.bin_basename) ]
if self.macosx_framework_list:
for framework_name in self.macosx_framework_list:
argv += [ '-framework', framework_name ]
argv = argv_to_rsp(argv, self.rsp_file)
ctx.subprocess_communicate(output, argv, issuer=self.bin_path_private, env=self.tools.env, cwd=self.obj_directory)
if self.macosx_install_name_options:
install_name_tool = 'install_name_tool'
if self.tools.bin_prefix:
install_name_tool = self.tools.bin_prefix + install_name_tool
if sys.platform == 'win32':
install_name_tool = install_name_tool + '.exe'
if self.tools.dir_prefix:
install_name_tool = os.path.join(self.tools.dir_prefix, install_name_tool)
argv = [install_name_tool]
argv += self.macosx_install_name_options
argv += [ self.bin_path_private ]
ctx.subprocess_communicate(output, argv, issuer=self.bin_path_private, env=self.tools.env)
if self.zip_section is not None:
if not os.path.isfile(self.zip_section):
raise BuildSystemException("File '{}' for zip-section not found".format(self.zip_section))
if ctx.verbose:
output.report_message("BUILDSYS: EXEC: {} << {}".format(self.bin_path_private, self.zip_section))
with open(self.bin_path_private, 'ab') as fhbin:
with open(self.zip_section, 'rb') as fhzip:
shutil.copyfileobj(fhzip, fhbin)
os.rename(self.bin_path_private, self.bin_path_public)
os.rename(link_stamp_file_tmp, self.link_stamp_file)
os.utime(self.link_stamp_file, None)
os.utime(self.bin_path_public, None)
return ToolsetActionResult(rebuilt=True, artifacts=build_result)
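    # Note on the zip-section handling above: the payload is appended verbatim to the
    # linked binary. Since zip readers locate the central directory from the end of the
    # file, such a binary can usually still be opened as an archive, e.g. (sketch only,
    # not part of this build system):
    #
    #     import zipfile
    #     with zipfile.ZipFile(path_to_built_binary) as zf:
    #         print(zf.namelist())
    #
    # How (or whether) the payload is read back is left to the embedding application.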
class GccModel(ToolsetModel):
def __init__(self, model_name, toolset_version, is_native, target_os, target_os_alias, arch_name, arch_flags, arch_link_flags, os_version=None, crosstool=False):
ToolsetModel.__init__(self)
self._model_name = model_name
self._toolset_version = toolset_version
self._is_native = is_native
self._target_os = target_os
self._target_os_alias = target_os_alias
self._arch_name = arch_name
self._arch_compile_flags = arch_flags
self._arch_link_flags = arch_link_flags
self._os_version = os_version
self._crosstool = crosstool
@property
def model_name(self):
return self._model_name
@property
def platform_name(self):
return self._target_os
@property
def platform_alias(self):
return self._target_os_alias
@property
def architecture_abi_name(self):
return self._arch_name
@property
def toolset_version(self):
return self._toolset_version
def is_native(self):
return self._is_native
def is_crosstool(self):
return self._crosstool
def get_arch_compile_flags(self):
return self._arch_compile_flags
def get_arch_link_flags(self, description):
if self._target_os == TAG_PLATFORM_WINDOWS:
flags = self._arch_link_flags[:]
if description.win_console:
flags.append('-Wl,-subsystem:console:{}'.format(self._os_version))
else:
flags.append('-Wl,-subsystem:windows:{}'.format(self._os_version))
return flags
else:
return self._arch_link_flags
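    # For example (values illustrative): a MinGW x86 console target whose os_version
    # resolved to '5.01' would link with roughly ['-m32', '-Wl,-subsystem:console:5.01'],
    # while non-Windows models just return their arch_link_flags unchanged.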
class ToolsetGCC(ToolsetBase):
def __init__(self, name, tools, sysinfo, loader, toolset_custom_models):
ToolsetBase.__init__(self)
self._name = name
self._platform_name = None
self._sysinfo = sysinfo
self._loader = loader
self._tools = tools
self._nasm_checked = False
models = []
toolset_version = tools.eval_version_info()
if self._tools.is_mingw:
self._platform_name = TAG_PLATFORM_WINDOWS
if TAG_ARCH_X86 in self._tools.arch_list:
winapi_level_x86 = self._tools.api_levels[TAG_ARCH_X86]
ntddi_level_x86 = IMPLIED_NTDDI_VALUES[winapi_level_x86]
os_version_x86 = IMPLIED_WINDOWS_SUBSYSTEM_VALUES[winapi_level_x86]
mingw_x86_compile_flags = ['-m32',
'-D_WIN32_WINNT={}'.format(winapi_level_x86),
'-DWINVER={}'.format(winapi_level_x86),
'-DNTDDI_VERSION={}'.format(ntddi_level_x86)]
mingw_x86_link_flags = ['-m32']
mingw_x86_model_name = GCC_MODEL_MINGW32 if toolset_custom_models is None else toolset_custom_models[TAG_ARCH_X86]
model_win32 = GccModel(
model_name=mingw_x86_model_name, toolset_version=toolset_version, is_native=is_windows_32bit(),
target_os=TAG_PLATFORM_WINDOWS, target_os_alias=None, arch_name=TAG_ARCH_X86,
arch_flags=mingw_x86_compile_flags, arch_link_flags=mingw_x86_link_flags, os_version=os_version_x86)
models.append(model_win32)
if TAG_ARCH_X86_64 in self._tools.arch_list:
winapi_level_x86_64 = self._tools.api_levels[TAG_ARCH_X86_64]
ntddi_level_x86_64 = IMPLIED_NTDDI_VALUES[winapi_level_x86_64]
os_version_x86_64 = IMPLIED_WINDOWS_SUBSYSTEM_VALUES[winapi_level_x86_64]
mingw_x86_64_compile_flags = ['-m64',
'-D_WIN32_WINNT={}'.format(winapi_level_x86_64),
'-DWINVER={}'.format(winapi_level_x86_64),
'-DNTDDI_VERSION={}'.format(ntddi_level_x86_64)]
mingw_x86_64_link_flags = ['-m64']
mingw_x86_64_model_name = GCC_MODEL_MINGW64 if toolset_custom_models is None else toolset_custom_models[TAG_ARCH_X86_64]
model_win64 = GccModel(
model_name=mingw_x86_64_model_name, toolset_version=toolset_version, is_native=is_windows_64bit(),
target_os=TAG_PLATFORM_WINDOWS, target_os_alias=None, arch_name=TAG_ARCH_X86_64,
arch_flags=mingw_x86_64_compile_flags, arch_link_flags=mingw_x86_64_link_flags, os_version=os_version_x86_64)
models.append(model_win64)
elif self._tools.is_crosstool:
if self._tools.crosstool_target_platform == TAG_PLATFORM_LINUX:
self._platform_name = TAG_PLATFORM_LINUX
for x_arch in self._tools.arch_list:
x_model_name = CROSSTOOL_MODEL_NAMES[x_arch] if toolset_custom_models is None else toolset_custom_models[x_arch]
x_is_native = CROSSTOOL_NATIVE_STATUS[x_arch]
x_model = GccModel(
model_name=x_model_name, toolset_version=toolset_version, is_native=x_is_native,
target_os=TAG_PLATFORM_LINUX, target_os_alias=TAG_PLATFORM_ALIAS_POSIX, arch_name=x_arch,
arch_flags=[], arch_link_flags=[], crosstool=True)
models.append(x_model)
elif self._tools.crosstool_target_platform == TAG_PLATFORM_MACOSX and self._tools.is_clang and self._tools.arch_list == [TAG_ARCH_X86_64]:
self._platform_name = TAG_PLATFORM_MACOSX
x_model_name = CLANG_CROSSTOOL_MODEL_MACOSX_X86_64 if toolset_custom_models is None else toolset_custom_models[TAG_ARCH_X86_64]
x_is_native = is_macosx_x86_64()
sdk_path = self._tools.sysroot
osxapi_level_x86_64 = self._tools.api_levels[TAG_ARCH_X86_64]
if not osxapi_level_x86_64:
osxapi_level_x86_64 = MACOSX_API_DEFAULT_LEVEL[TAG_ARCH_X86_64]
x_arch_flags = [
'-target', 'x86_64-apple-darwin',
'-mmacosx-version-min=' + osxapi_level_x86_64,
]
x_arch_link_flags = [
'-target', 'x86_64-apple-darwin',
'-mmacosx-version-min=' + osxapi_level_x86_64,
'-Wl,-syslibroot,' + sdk_path,
'-L' + os.path.normpath(os.path.join(sdk_path, 'usr/lib/system')),
'-F' + os.path.normpath(os.path.join(sdk_path, 'System/Library/Frameworks')),
]
x_model = GccModel(
model_name=x_model_name, toolset_version=toolset_version, is_native=x_is_native,
target_os=TAG_PLATFORM_MACOSX, target_os_alias=TAG_PLATFORM_ALIAS_POSIX, arch_name=TAG_ARCH_X86_64,
arch_flags=x_arch_flags, arch_link_flags=x_arch_link_flags, crosstool=True)
models.append(x_model)
else:
if is_linux_x86_64():
self._platform_name = TAG_PLATFORM_LINUX
model_name_native_x86 = GCC_MODEL_LINUX_X86 if toolset_custom_models is None else toolset_custom_models[TAG_ARCH_X86]
model_name_native_x86_64 = GCC_MODEL_LINUX_X86_64 if toolset_custom_models is None else toolset_custom_models[TAG_ARCH_X86_64]
model_linux_x86 = GccModel(
model_name=model_name_native_x86, toolset_version=toolset_version, is_native=False,
target_os=TAG_PLATFORM_LINUX, target_os_alias=TAG_PLATFORM_ALIAS_POSIX, arch_name=TAG_ARCH_X86,
arch_flags=['-m32'], arch_link_flags=['-m32'])
model_linux_x86_64 = GccModel(
model_name=model_name_native_x86_64, toolset_version=toolset_version, is_native=True,
target_os=TAG_PLATFORM_LINUX, target_os_alias=TAG_PLATFORM_ALIAS_POSIX, arch_name=TAG_ARCH_X86_64,
arch_flags=[], arch_link_flags=[])
models.extend([model_linux_x86, model_linux_x86_64])
elif is_linux_x86():
self._platform_name = TAG_PLATFORM_LINUX
model_name_native = GCC_MODEL_LINUX_X86 if toolset_custom_models is None else toolset_custom_models[TAG_ARCH_X86]
model_linux_x86 = GccModel(
model_name=model_name_native, toolset_version=toolset_version, is_native=True,
target_os=TAG_PLATFORM_LINUX, target_os_alias=TAG_PLATFORM_ALIAS_POSIX, arch_name=TAG_ARCH_X86,
arch_flags=[], arch_link_flags=[])
models.append(model_linux_x86)
elif is_linux_arm():
self._platform_name = TAG_PLATFORM_LINUX
model_name_native = GCC_MODEL_LINUX_ARM if toolset_custom_models is None else toolset_custom_models[TAG_ARCH_ARM]
model_linux_arm = GccModel(
model_name=model_name_native, toolset_version=toolset_version, is_native=True,
target_os=TAG_PLATFORM_LINUX, target_os_alias=TAG_PLATFORM_ALIAS_POSIX, arch_name=TAG_ARCH_ARM,
arch_flags=[], arch_link_flags=[])
models.append(model_linux_arm)
elif is_linux_arm64():
self._platform_name = TAG_PLATFORM_LINUX
model_name_native = GCC_MODEL_LINUX_ARM64 if toolset_custom_models is None else toolset_custom_models[TAG_ARCH_ARM64]
model_linux_arm = GccModel(
model_name=model_name_native, toolset_version=toolset_version, is_native=True,
target_os=TAG_PLATFORM_LINUX, target_os_alias=TAG_PLATFORM_ALIAS_POSIX, arch_name=TAG_ARCH_ARM64,
arch_flags=[], arch_link_flags=[])
models.append(model_linux_arm)
elif is_macosx_x86_64():
if self._name == 'clang':
self._platform_name = TAG_PLATFORM_MACOSX
model_name_native = CLANG_MODEL_MACOSX_X86_64 if toolset_custom_models is None else toolset_custom_models[TAG_ARCH_X86_64]
osxapi_level_x86_64 = self._tools.api_levels[TAG_ARCH_X86_64]
osx_arch_flags = []
osx_arch_link_flags = []
if osxapi_level_x86_64:
osx_arch_flags += [
'-mmacosx-version-min=' + osxapi_level_x86_64,
]
osx_arch_link_flags += [
'-mmacosx-version-min=' + osxapi_level_x86_64,
]
model_macosx_x86_64 = GccModel(
model_name=model_name_native, toolset_version=toolset_version, is_native=True,
target_os=TAG_PLATFORM_MACOSX, target_os_alias=TAG_PLATFORM_ALIAS_POSIX, arch_name=TAG_ARCH_X86_64,
arch_flags=osx_arch_flags, arch_link_flags=osx_arch_link_flags)
models.append(model_macosx_x86_64)
if self._platform_name is None:
platform = sys.platform
if platform.startswith('linux'):
platform = 'linux'
if hasattr(os, 'uname'):
platform = platform + ',' + os.uname()[4]
raise BuildSystemException("Unsupported platform: '{}'".format(platform))
self._models = {}
for model in models:
self._models[model.model_name] = model
@property
def supported_models(self):
return self._models
@property
def toolset_name(self):
return self._name
@property
def platform_name(self):
return self._platform_name
def create_cpp_build_action(self, description, cpp_source, obj_directory, obj_name, build_model, build_config):
return SourceBuildActionGCC(self._tools, self._sysinfo, description, cpp_source, BUILD_TYPE_CPP, obj_directory, obj_name, build_model, build_config)
def create_c_build_action(self, description, c_source, obj_directory, obj_name, build_model, build_config):
return SourceBuildActionGCC(self._tools, self._sysinfo, description, c_source, BUILD_TYPE_C, obj_directory, obj_name, build_model, build_config)
def create_asm_build_action(self, description, asm_source, obj_directory, obj_name, build_model, build_config):
if description.nasm:
if not self._tools.nasm_enabled:
raise BuildSystemException("NASM is not enabled for build model '{}', it is required to compile: '{}'".format(build_model.model_name, asm_source))
if not self._nasm_checked:
try:
subprocess.check_output([self._tools.nasm_executable, '-v'], stderr=subprocess.STDOUT)
self._nasm_checked = True
except Exception:
pass
if not self._nasm_checked:
raise BuildSystemException("NASM executable '{}' is not ready, it is required to compile: '{}'".format(self._tools.nasm_executable, asm_source))
return NasmSourceBuildAction(self._tools.nasm_executable, self._sysinfo, description, asm_source, obj_directory, obj_name, build_model, build_config)
else:
return SourceBuildActionGCC(self._tools, self._sysinfo, description, asm_source, BUILD_TYPE_ASM, obj_directory, obj_name, build_model, build_config)
def create_lib_static_link_action(self, description, lib_directory, obj_directory, obj_names, build_model, build_config):
return StaticLibLinkActionGCC(self._tools, self._sysinfo, description, lib_directory, obj_directory, obj_names, build_model, build_config)
def create_exe_link_action(self, description, exe_directory, sharedlib_directory, lib_directory, obj_directory, obj_names, build_model, build_config):
return LinkActionGCC(self._tools, self._sysinfo, self._loader, description, exe_directory, sharedlib_directory, lib_directory, obj_directory, obj_names, build_model, build_config)
def create_lib_shared_link_action(self, description, sharedlib_directory, lib_directory, obj_directory, obj_names, build_model, build_config):
return LinkActionGCC(self._tools, self._sysinfo, self._loader, description, None, sharedlib_directory, lib_directory, obj_directory, obj_names, build_model, build_config)
class ToolsInfoGCC:
def __init__(self, dir_prefix=None, sysroot=None, bin_prefix=None, is_mingw=None, is_clang=None, is_crosstool=None, arch_list=None, nasm=None, api_levels=None, toolset_version=None, crosstool_target_platform=None, env=None):
tool_gcc = 'clang' if is_clang else 'gcc'
tool_gpp = 'clang' if is_clang else 'g++'
tool_ar = 'libtool' if is_clang and not is_crosstool else 'ar'
tool_windres = 'windres' if is_mingw else None
if is_crosstool:
if crosstool_target_platform not in [TAG_PLATFORM_LINUX, TAG_PLATFORM_MACOSX]:
raise BuildSystemException("Got unsupported target platform '{}' for cross build.".format(crosstool_target_platform))
if sys.platform == 'win32':
tool_gcc = tool_gcc + '.exe'
tool_gpp = tool_gpp + '.exe'
tool_ar = tool_ar + '.exe'
if tool_windres is not None:
tool_windres = tool_windres + '.exe'
if bin_prefix is not None:
tool_gcc = bin_prefix + tool_gcc
tool_gpp = bin_prefix + tool_gpp
tool_ar = bin_prefix + tool_ar
if tool_windres is not None:
tool_windres = bin_prefix + tool_windres
if dir_prefix is not None:
tool_gcc = os.path.join(dir_prefix, tool_gcc)
tool_gpp = os.path.join(dir_prefix, tool_gpp)
tool_ar = os.path.join(dir_prefix, tool_ar)
if tool_windres is not None:
tool_windres = os.path.join(dir_prefix, tool_windres)
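        # For example (values illustrative): dir_prefix='/opt/cross/bin' and
        # bin_prefix='arm-linux-gnueabihf-' resolve tool_gcc to
        # '/opt/cross/bin/arm-linux-gnueabihf-gcc'; with neither prefix set the plain
        # tool names are looked up on PATH.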
self.bin_prefix = bin_prefix
self.dir_prefix = dir_prefix
self.env = env
self.is_mingw = is_mingw
| |
# DPC-RNN model definition
import sys
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
sys.path.append('../backbone')
# to extract the features
from select_backbone import select_resnet
# to aggregate the features into one context representation
from convrnn import ConvGRU
# to calculate loss
from uncertainty import process_uncertainty
class DPC_RNN(nn.Module):
'''DPC with RNN'''
def __init__(self, sample_size, num_seq=8, seq_len=5,
pred_step=3, network='resnet18', distance='cosine',
distance_type='uncertain', weighting=True,
margin=10, pool='None', radius_location='Phi',
loss_type='MSE', action_cls_head=False,
dropout=0.5, num_class=101):
super(DPC_RNN, self).__init__()
# to reproduce the experiments
torch.cuda.manual_seed(233)
print('Using DPC-RNN model')
print('[model_3d.py] using loss function: %s' % loss_type)
print('[model_3d.py] using distance type: %s' % distance_type)
print('[model_3d.py] using distance metric: %s' % distance)
# number of dimensions in the image
self.sample_size = sample_size
self.num_seq = num_seq
self.seq_len = seq_len
self.distance = distance
self.distance_type = distance_type
self.weighting = weighting
# how many futures to predict
self.pred_step = pred_step
self.margin = margin
self.pool = pool # TODO verify pooling settings
self.radius_location = radius_location
self.loss_type = loss_type
        self.action_cls_head = action_cls_head  # If True, add a linear classification (FC) head at the end
# 2 if seq_len is 5
if network == 'resnet8' or network == 'resnet10':
self.last_duration = int(math.ceil(seq_len / 2))
else:
self.last_duration = int(math.ceil(seq_len / 4))
# 4 if size of the image is 128
# change for toy experiment
#self.last_size = 1
if self.pool in ['avg', 'max']:
self.last_size = 1
else:
self.last_size = int(math.ceil(sample_size / 32))
print('final feature map has size %dx%d' %
(self.last_size, self.last_size))
        # f - choose an appropriate feature extractor. In this case, a resnet
if self.radius_location == 'Phi':
self.backbone, self.param = select_resnet(
network, track_running_stats=False)
elif self.radius_location == 'F':
self.backbone, self.param = select_resnet(
network, track_running_stats=False)
self.param['num_layers'] = 1 # param for GRU
self.param['hidden_size'] = self.param['feature_size'] # param for GRU
self.agg = ConvGRU(input_size=self.param['feature_size'],
hidden_size=self.param['hidden_size'],
kernel_size=1,
num_layers=self.param['num_layers'])
# two layered network \phi
if self.radius_location == 'Phi':
if self.distance_type == 'certain':
output_size = self.param['feature_size']
elif self.distance_type == 'uncertain':
output_size = self.param['feature_size'] + 1
self.network_pred = nn.Sequential(
nn.Conv2d(self.param['feature_size'],
self.param['feature_size'], kernel_size=1, padding=0),
nn.ReLU(inplace=True),
nn.Conv2d(self.param['feature_size'],
output_size, kernel_size=1, padding=0)
)
elif self.radius_location == 'F':
self.network_pred = nn.Sequential(
nn.Conv2d(self.param['feature_size'],
self.param['feature_size'], kernel_size=1, padding=0),
nn.ReLU(inplace=True),
nn.Conv2d(self.param['feature_size'],
self.param['feature_size'], kernel_size=1, padding=0)
)
self.avg_pool = nn.AdaptiveAvgPool3d(
(1, self.last_size, self.last_size))
self.max_pool = nn.AdaptiveMaxPool3d(
(1, self.last_size, self.last_size))
# mask can be used to retrieve positive and negative distance
self.mask = None
self.relu = nn.ReLU(inplace=False)
self._initialize_weights(self.agg)
self._initialize_weights(self.network_pred)
self.action_cls_head = action_cls_head
self.num_class = num_class
self.dropout = dropout
if action_cls_head:
print('Using FC head for action classification')
# See eval/model_3d_lc.py
self.num_class = num_class
self.final_bn = nn.BatchNorm1d(self.param['feature_size'])
self.final_bn.weight.data.fill_(1)
self.final_bn.bias.data.zero_()
self.final_fc = nn.Sequential(nn.Dropout(dropout),
nn.Linear(self.param['feature_size'], self.num_class))
self._initialize_weights(self.final_fc)
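        # A hedged shape sketch of how this module is typically driven (all values are
        # illustrative, not taken from any config in this repository):
        #
        #     model = DPC_RNN(sample_size=128, num_seq=8, seq_len=5, pred_step=3,
        #                     network='resnet18').cuda()
        #     block = torch.randn(4, 8, 3, 5, 128, 128).cuda()   # [B, N, C, SL, H, W]
        #     score, mask, radius, raw = model(block)
        #
        # i.e. each sample is a batch of N short clips; see forward() below.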
def forward(self, block):
        # block: [B, N, C, SL, H, W]
### extract feature ###
        # [ Batch, Number of sequences, Channels, Sequence Length, Height, Width ]
# print(block.shape)
(B, N, C, SL, H, W) = block.shape
# [ 4, 8, 3, 256/257, 128, 128 ]
# batch and number of sequences can be combined
block = block.view(B * N, C, SL, H, W)
# [ 32, 3, 256/257, 128, 128 ]
# pass through backbone (f)
feature = self.backbone(block)
#[32, 256/257, 2, 4, 4]
del block
# pool{2} as denoted in the paper
feature = F.avg_pool3d(
feature, (self.last_duration, 1, 1), stride=(1, 1, 1))
# [32, 256/257, 1, 4, 4]
if self.pool == 'avg':
feature = self.avg_pool(feature)
elif self.pool == 'max':
feature = self.max_pool(feature)
feature_inf_all = feature.view(
B, N, self.param['feature_size'], self.last_size, self.last_size) # before ReLU, (-inf, +inf)
# [4, 8, 256/257, 4, 4]
feature = self.relu(feature) # [0, +inf)
# [32, 256/257, 1, 4, 4]
# [B,N,D,6,6], [0, +inf)
feature = feature.view(
B, N, self.param['feature_size'], self.last_size, self.last_size)
# [4, 8, 256/257, 4, 4]
        # keep only the last pred_step ground-truth feature maps (contiguous copy for later reshaping)
feature_inf = feature_inf_all[:, N - self.pred_step::, :].contiguous()
# [4, 3, 256/257, 4, 4]
del feature_inf_all
### aggregate, predict future ###
# [4, 5, 256/257, 4, 4]
context, hidden = self.agg(
feature[:, 0:N - self.pred_step, :].contiguous())
# [4, 1, 256/257, 4, 4]
#print (context[:,-1,:]==hidden)
# after tanh, (-1,1). get the hidden state of last layer, last time step
hidden = hidden[:, -1, :]
# [4, 256/257, 4, 4]
        # autoregressively predict the next pred_step feature maps,
        # feeding each prediction back through the aggregator
pred = []
for i in range(self.pred_step):
# sequentially pred future for pred_step number of times
p_tmp = self.network_pred(hidden)
# print(p_tmp.shape)
pred.append(p_tmp)
if self.distance_type == 'uncertain' and self.radius_location == 'Phi':
# remove radius channel before passing to agg
p_tmp = p_tmp[:, :-1, :, :]
# print(p_tmp.shape)
context, hidden = self.agg(
self.relu(p_tmp).unsqueeze(1), hidden.unsqueeze(0))
hidden = hidden[:, -1, :]
if self.action_cls_head:
# Supervised operation
# Classify last context into action, see model_lc_future.py
context = context[:, -1, :].unsqueeze(1)
context = F.avg_pool3d(
context, (1, self.last_size, self.last_size), stride=1).squeeze(-1).squeeze(-1)
context = self.final_bn(
context.transpose(-1, -2)).transpose(-1, -2)
action_output = self.final_fc(context).view(B, -1, self.num_class)
# DEBUG:
# print('action_output:', action_output.shape) # TODO: we expect & want second dimension to be 1
result = (action_output, context)
return result
#[4, 256/257, 4, 4]
pred = torch.stack(pred, 1) # B, pred_step, xxx
#[4, 3, 256/257, 4, 4]
del hidden
### Get similarity score ###
# pred: [B, pred_step, D, last_size, last_size]
# GT: [B, N, D, last_size, last_size]
N = self.pred_step
# dot product D dimension in pred-GT pair, get a 6d tensor. First 3 dims are from pred, last 3 dims are from GT.
# predicted
if self.distance_type == 'certain':
pred = pred.permute(0, 1, 3, 4, 2).contiguous().view(
B * self.pred_step * self.last_size**2, self.param['feature_size'])
elif self.distance_type == 'uncertain':
pred = pred.permute(0, 1, 3, 4, 2).contiguous().view(
B * self.pred_step * self.last_size**2, self.param['feature_size'] + 1)
# GT
feature_inf = feature_inf.permute(0, 1, 3, 4, 2).contiguous().view(
B * N * self.last_size**2, self.param['feature_size']) # .transpose(0, 1)
if self.distance_type == 'uncertain':
pred_embedding = pred[:, :-1]
pred_radius = pred[:, -1].expand(1, -1)
elif self.distance_type == 'certain':
pred_embedding = pred
gt_embedding = feature_inf
#########################################Similarity Score#########################################
if self.distance == 'dot':
gt_embedding = gt_embedding.transpose(0, 1)
score = torch.matmul(pred_embedding, gt_embedding)
# print(score)
elif self.distance == 'cosine':
pred_norm = torch.norm(pred_embedding, dim=1)
gt_norm = torch.norm(gt_embedding, dim=1)
gt_embedding = gt_embedding.transpose(0, 1)
score = torch.matmul(pred_embedding, gt_embedding)
# row-wise division
score = torch.div(score, pred_norm.expand(1, -1).T)
# column-wise division
score = torch.div(score, gt_norm)
# score = 1 - (score + 1) / 2
# print(score[:10, :10])
del pred_embedding, gt_embedding
# division by the magnitude of respective vectors
elif self.distance == 'L2':
pred_embedding_mult = pred_embedding.reshape(
pred_embedding.shape[0], 1, pred_embedding.shape[1])
difference = pred_embedding_mult - gt_embedding
score = torch.sqrt(torch.einsum(
'ijk,ijk->ij', difference, difference))
# print(score)
del pred_embedding_mult, gt_embedding, difference
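            # The einsum above is a batched dot product of each difference row with
            # itself, so score[i, j] is the Euclidean distance between prediction i and
            # ground-truth feature j. An equivalent, more explicit form (sketch only,
            # not used here) would be:
            #
            #     score = torch.cdist(pred_embedding, gt_embedding, p=2)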
if self.mask is None: # only compute mask once
# mask meaning:
# -2: omit,
# -1: temporal neg (hard),
# 0: easy neg,
# 1: pos,
# -3: spatial neg
# easy negatives (do not take gradient here)
mask = torch.zeros((B, self.pred_step, self.last_size**2, B, N, self.last_size**2),
dtype=torch.int8, requires_grad=False).detach().cuda()
# spatial negative (mark everything in the same batch as spatial negative)
mask[torch.arange(B), :, :, torch.arange(B),
:, :] = -3 # spatial neg
# Uncertainty#############################################\
if self.distance_type == 'uncertain':
pred_radius_matrix = pred_radius.expand(score.shape).T
# print('here')
[final_score, pred_radius, score, final_radius] = process_uncertainty(score, pred_radius,
weighting=self.weighting,
distance=self.distance,
margin=self.margin,
distance_type=self.distance_type,
loss_type=self.loss_type)
elif self.distance_type == 'certain':
# .view(B, self.pred_step, self.last_size**2, B, N, self.last_size**2)
[final_score, pred_radius, score, final_radius] = process_uncertainty(score, None,
weighting=self.weighting,
distance=self.distance,
margin=self.margin,
distance_type=self.distance_type,
loss_type=self.loss_type)
# temporal negative
for k in range(B):
mask[k, :, torch.arange(
self.last_size**2), k, :, torch.arange(self.last_size**2)] = -1 # temporal neg
# positive
tmp = mask.permute(0, 2, 1, 3, 5, 4).contiguous().view(
B * self.last_size**2, self.pred_step, B * self.last_size**2, N)
for j in range(B * self.last_size**2):
tmp[j, torch.arange(self.pred_step), j, torch.arange(
N - self.pred_step, N)] = 1 # pos
mask = tmp.view(B, self.last_size**2, self.pred_step,
B, self.last_size**2, N).permute(0, 2, 1, 3, 5, 4)
self.mask = mask
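# Illustrative note on shapes (assumed example values B=4, pred_step=3,
# last_size=4, so N=3 and last_size**2=16): self.mask ends up with shape
# (B, pred_step, last_size**2, B, N, last_size**2) = (4, 3, 16, 4, 3, 16),
# holding 1 for positives, -1 for temporal negatives, -3 for spatial
# negatives and 0 for easy negatives, matching the legend above.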
if self.distance_type == 'uncertain':
return [final_score, self.mask, pred_radius, score]
return [final_score, self.mask, torch.zeros(len(final_score)).cuda(), score]
def _initialize_weights(self, module):
for name, param in module.named_parameters():
if 'bias' in name:
nn.init.constant_(param, 0.0)
elif 'weight' in name:
nn.init.orthogonal_(param, 1)
# other resnet weights have been initialized in resnet itself
def reset_mask(self):
self.mask = None
def apply_weight(self, score, mask, criterion='MSE'):
score_view = score.view(mask.shape).cuda()
weight = mask
weight = weight.type(torch.DoubleTensor).cuda()
if criterion == 'MSE':
weight_value = 1.0 / math.sqrt(len(score))
elif criterion == 'CE':
| |
from dataclasses import dataclass
from . import B
class Keyboard:
WRAP_TOP = "wrap_top"
WRAP_BOTTOM = "wrap_bottom"
WRAP_LEFT = "wrap_left"
WRAP_RIGHT = "wrap_right"
EXIT_TOP = "exit_top"
EXIT_BOTTOM = "exit_bottom"
EXIT_LEFT = "exit_left"
EXIT_RIGHT = "exit_right"
EXIT_DIRECTIONS = [EXIT_TOP, EXIT_BOTTOM, EXIT_LEFT, EXIT_RIGHT]
ENTER_TOP = "enter_top"
ENTER_BOTTOM = "enter_bottom"
ENTER_LEFT = "enter_left"
ENTER_RIGHT = "enter_right"
REGULAR_KEY_FONT = "regular"
COMPACT_KEY_FONT = "compact"
KEY_BACKSPACE = {
"letter": "del",
"font": COMPACT_KEY_FONT,
"size": 2,
}
KEY_PREVIOUS_PAGE = {
"letter": "prev"
}
ADDITIONAL_KEYS = {
KEY_BACKSPACE["letter"]: KEY_BACKSPACE,
KEY_PREVIOUS_PAGE["letter"]: KEY_PREVIOUS_PAGE,
}
@dataclass
class Key:
"""
Simple python3.x dataclass (akin to a struct) to store info about each
individual key in the keyboard and its state. Attrs with defaults must be
listed last.
"""
letter: str
screen_x: int
screen_y: int
keyboard: any
size: int = 1
is_active: bool = True
is_selected: bool = False
is_additional_key: bool = False
def render_key(self):
font = self.keyboard.font
if self.letter in Keyboard.ADDITIONAL_KEYS:
if Keyboard.ADDITIONAL_KEYS[self.letter]["font"] == Keyboard.COMPACT_KEY_FONT:
font = self.keyboard.additonal_key_compact_font
outline_color = "#333"
if not self.is_active:
rect_color = self.keyboard.background_color
font_color = "#666" # Show the letter but render as gray
if self.is_selected:
# Inactive, selected just gets highlighted outline
outline_color = self.keyboard.highlight_color
elif self.is_selected:
rect_color = self.keyboard.highlight_color # Render solid background with the UI's hero color
font_color = self.keyboard.background_color
else:
rect_color = self.keyboard.background_color
font_color = self.keyboard.highlight_color
self.keyboard.draw.rectangle((self.screen_x, self.screen_y, self.screen_x + self.keyboard.x_width * self.size, self.screen_y + self.keyboard.y_height), outline=outline_color, fill=rect_color)
tw, th = self.keyboard.draw.textsize(self.letter, font=font)
self.keyboard.draw.text((self.screen_x + int((self.keyboard.x_width * self.size - tw) / 2), self.screen_y + int((self.keyboard.y_height - th)/2)), self.letter, fill=font_color, font=font)
def __init__(self,
draw,
charset="1234567890abcdefghijklmnopqrstuvwxyz",
selected_char="a",
rows=4,
cols=10,
rect=(0,40, 240,240),
font=None,
additional_keys=[KEY_BACKSPACE],
auto_wrap=[WRAP_TOP, WRAP_BOTTOM, WRAP_LEFT, WRAP_RIGHT],
render_now=True):
"""
`auto_wrap` specifies the edges at which the keyboard selection is allowed to
wrap around when the user navigates past them.
"""
# Import here to avoid circular import problems
from seedsigner.views import View
self.draw = draw
self.charset = charset
self.rows = rows
self.cols = cols
self.rect = rect
if font:
self.font = font
else:
self.font = View.ROBOTOCONDENSED_REGULAR_24
self.auto_wrap = auto_wrap
self.background_color = "black"
self.highlight_color = View.color
# Does the specified layout work?
additional_key_spaces = 0
for additional_key in additional_keys:
additional_key_spaces += additional_key["size"] # e.g. backspace takes up 2 slots
if rows * cols < len(charset) + additional_key_spaces:
raise Exception(f"charset will not fit in a {rows}x{cols} layout | additional_keys: {additional_keys}")
if not selected_char:
raise Exception("`selected_char` cannot be None")
# Set up the rendering and state params
self.active_keys = list(self.charset)
self.additonal_key_compact_font = View.ROBOTOCONDENSED_BOLD_18
self.x_start = rect[0]
self.y_start = rect[1]
self.x_gap = 1
self.x_width = int((rect[2] - rect[0]) / cols) - self.x_gap
self.y_gap = 6
self.y_height = int((rect[3] - rect[1]) / rows) - self.y_gap
# Two-dimensional list of Key obj row data
self.keys = []
self.selected_key = {"x": 0, "y": 0} # Indices in the `keys` 2D list
cur_y = self.y_start
for i in range(0, rows):
cur_row = []
cur_x = self.x_start
for j, letter in enumerate(charset[i*cols:(i+1)*cols]):
is_selected = False
if letter == selected_char:
is_selected = True
self.selected_key["y"] = i
self.selected_key["x"] = j
cur_row.append(self.Key(
letter=letter,
screen_x=cur_x,
screen_y=cur_y,
is_selected=is_selected,
keyboard=self
))
cur_x += self.x_width + self.x_gap
self.keys.append(cur_row)
if i < rows -1:
# increment to the next row and continue
cur_y += self.y_height + self.y_gap
else:
# It's the last row; add the additional keys at the end
for additional_key in additional_keys:
self.keys[-1].append(self.Key(
letter=additional_key["letter"],
screen_x=cur_x,
screen_y=cur_y,
keyboard=self,
size=additional_key["size"],
is_additional_key=True,
))
cur_x += self.x_width + self.x_gap
if render_now:
# Render the keys
self.render_keys()
# Render the initial highlighted character
self.update_from_input(input=None)
def update_active_keys(self, active_keys):
self.active_keys = active_keys
for i, row_keys in enumerate(self.keys):
for j, key in enumerate(row_keys):
if key.letter not in self.active_keys and key.letter not in Keyboard.ADDITIONAL_KEYS:
# Note: ADDITIONAL_KEYS are never deactivated.
key.is_active = False
else:
key.is_active = True
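# Illustrative usage sketch (assumed letters): e.g.
#   keyboard.update_active_keys(active_keys=["a", "b", "c"])
# renders every other character key grayed out and unselectable-looking,
# while any additional keys (e.g. "del") always stay active.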
def render_keys(self, selected_letter=None):
"""
Renders just the keys of the keyboard. Useful when you need to redraw just
that section, as in when changing `active_keys` or swapping to alternate
charsets (e.g. alpha to special symbols).
Does NOT call View.DispShowImage to avoid multiple calls on the same screen.
"""
# Start with a clear screen
self.draw.rectangle(self.rect, outline=0, fill=0)
for i, row_keys in enumerate(self.keys):
for j, key in enumerate(row_keys):
if selected_letter and key.letter == selected_letter:
key.is_selected = True
self.selected_key["y"] = i
self.selected_key["x"] = j
key.render_key()
def update_from_input(self, input, enter_from=None):
"""
Managing code must handle its own input/update loop since other action buttons
will be active on the same screen outside of the keyboard rect (e.g. "Ok",
"Back", etc). Pass relevant input here to update the keyboard.
`enter_from` tells the keyboard that the external UI has caused a loop back
navigation.
(e.g. pressing up from a submit button below the keyboard = ENTER_BOTTOM)
Returns the character currently highlighted or one of the EXIT_* codes if the
user has navigated off the keyboard past an edge that is not in `auto_wrap`.
Does NOT call View.DispShowImage to avoid multiple calls on the same screen.
"""
key = self.keys[self.selected_key["y"]][self.selected_key["x"]]
# Before we update, undo our previously self.selected_key key
key.is_selected = False
key.render_key()
if input == B.KEY_RIGHT:
self.selected_key["x"] += 1
if self.selected_key["x"] == len(self.keys[self.selected_key["y"]]):
if Keyboard.WRAP_RIGHT in self.auto_wrap:
# Loop it back to the right side
self.selected_key["x"] = 0
else:
# Undo selection change and notify controlling loop that we've left
# the keyboard
self.selected_key["x"] -= 1
return Keyboard.EXIT_RIGHT
elif input == B.KEY_LEFT:
self.selected_key["x"] -= 1
if self.selected_key["x"] < 0:
if Keyboard.WRAP_LEFT in self.auto_wrap:
# Loop it back to the left side
self.selected_key["x"] = len(self.keys[self.selected_key["y"]]) - 1
else:
# Undo selection change and notify controlling loop that we've left
# the keyboard
self.selected_key["x"] += 1
return Keyboard.EXIT_LEFT
elif input == B.KEY_DOWN:
self.selected_key["y"] += 1
if self.selected_key["y"] == len(self.keys):
if Keyboard.WRAP_BOTTOM in self.auto_wrap:
# Loop it back to the top
self.selected_key["y"] = 0
else:
# Undo selection change and notify controlling loop that we've left
# the keyboard
self.selected_key["y"] -= 1
return Keyboard.EXIT_BOTTOM
elif self.selected_key["x"] >= len(self.keys[self.selected_key["y"]]):
# We're moving into the bottom line but there's no key directly below.
if self.selected_key["x"] - 1 == len(self.keys[self.selected_key["y"]]) - 1 and \
self.keys[self.selected_key["y"]][-1].size == 2:
# The last, adjacent key in this row is a double. Go ahead and select it
self.selected_key["x"] = self.selected_key["x"] - 1
else:
if Keyboard.WRAP_BOTTOM in self.auto_wrap:
# This line is too short to land here
self.selected_key["y"] = 0
else:
# Undo selection change and notify controlling loop that we've left
# the keyboard
self.selected_key["y"] -= 1
return Keyboard.EXIT_BOTTOM
elif input == B.KEY_UP:
self.selected_key["y"] -= 1
if self.selected_key["y"] < 0:
if Keyboard.WRAP_TOP in self.auto_wrap:
# Loop it back to the bottom
self.selected_key["y"] = len(self.keys) - 1
else:
# Undo selection change and notify controlling loop that we've left
# the keyboard
self.selected_key["y"] += 1
return Keyboard.EXIT_TOP
if self.selected_key["x"] >= len(self.keys[self.selected_key["y"]]):
# We're moving into the bottom line but there's no key directly below.
if self.selected_key["x"] - 1 == len(self.keys[self.selected_key["y"]]) - 1 and \
self.keys[self.selected_key["y"]][-1].size == 2:
# The last, adjacent key in this row is a double. Go ahead and select it
self.selected_key["x"] = self.selected_key["x"] - 1
else:
if Keyboard.WRAP_TOP in self.auto_wrap:
# This line is too short to land here
self.selected_key["y"] -= 1
else:
# Undo selection change and notify controlling loop that we've left
# the keyboard
self.selected_key["y"] += 1
return Keyboard.EXIT_TOP
elif input == Keyboard.ENTER_LEFT:
# User has returned to the keyboard along the left edge
# Keep the last y position that was selected.
self.selected_key["x"] = 0
elif input == Keyboard.ENTER_RIGHT:
# User has returned to the keyboard along the right edge
# Keep the last y position that was selected.
self.selected_key["x"] = len(self.keys[self.selected_key["y"]]) - 1
elif input == Keyboard.ENTER_TOP:
# User has returned to the keyboard along the top edge
# Keep the last x position that was selected.
self.selected_key["y"] = 0
elif input == Keyboard.ENTER_BOTTOM:
# User has returned to the keyboard along the bottom edge
# Keep the last x position that was selected.
self.selected_key["y"] = len(self.keys) - 1
if self.selected_key["x"] > len(self.keys[self.selected_key["y"]]) - 1:
if self.selected_key["x"] - 1 | |
5-6 of the Microscan MS3 manual for reference
"""
Disabled = b'0'
Enabled = b'1'
class EAN128Status(Enum):
"""Enables/disables/requires the EAN-128 subset of the Code 128 symbology
EAN-128 is commonly used in shipping applications, defining a wide variety
of application-specific extensions while using a subset of the possible
symbols of the Code 128 symbology.
See page 5-7 of the Microscan MS3 manual for reference
"""
Disabled = b'0'
Enabled = b'1'
Required = b'2'
class Code128OutputFormat(Enum):
"""When EAN-128 is enabled, this setting controls the format of the output
This setting only takes effect when EAN128Status is set to Enabled or
Required.
When this setting is set to ApplicationRecord, the following settings may
be used for further configuration of the output format:
- ApplicationRecordSeparatorStatus
- ApplicationRecordSeparatorCharacter
- ApplicationRecordBrackets
- ApplicationRecordPadding
See page 5-7 of the Microscan MS3 manual for reference
"""
Standard = b'0'
ApplicationRecord = b'1'
class ApplicationRecordSeparatorStatus(Enum):
"""Used in conjunction with the Code128OutputFormat setting
See page 5-8 of the Microscan MS3 manual for reference
"""
Disabled = b'0'
Enabled = b'1'
class ApplicationRecordBrackets(Enum):
"""Used in conjunction with the Code128OutputFormat setting
See page 5-8 of the Microscan MS3 manual for reference
"""
Disabled = b'0'
Enabled = b'1'
class ApplicationRecordPadding(Enum):
"""Used in conjunction with the Code128OutputFormat setting
See page 5-8 of the Microscan MS3 manual for reference
"""
Disabled = b'0'
Enabled = b'1'
class Code128(KSetting):
"""See page 5-6 of Microscan MS3 manual for reference
Code128 is a family of high density symbologies that can encode
all ASCII characters. The three variants (Code 128-A to C) differ
in the table of characters, trading off character set with
density. 128-B allows for all 127 ASCII characters while, while
128-C is numeric only but encodes two digits in the same space as
128-B needs for one character.
Wikipedia: https://en.wikipedia.org/wiki/Code_128
Properties available in this configuration setting:
- status (enable/disable Code 128)
- fixed_symbol_length_status
- symbol_length
- ean128_status
- output_format
- application_record_separator_status
- application_record_separator_character
- application_record_brackets
- application_record_padding
"""
K_CODE = b'K474'
K_PATTERN = (
b'^<%s,([0-1])?,([0-1])?,([\d]{1,2})?,([0-2])?,([0-1])?,([0-1])?,'
b'(%s)?,([0-1])?,([0-1])?>$' % (K_CODE, ASCII_CHAR))
def __init__(
self,
status=Code128Status.Disabled,
fixed_symbol_length_status=FixedSymbolLengthStatus.Disabled,
symbol_length=10,
ean128_status=EAN128Status.Disabled,
output_format=Code128OutputFormat.Standard,
application_record_separator_status=(
ApplicationRecordSeparatorStatus.Disabled),
application_record_separator_character=b',',
application_record_brackets=ApplicationRecordBrackets.Disabled,
application_record_padding=ApplicationRecordPadding.Disabled):
self.status = status
self.fixed_symbol_length_status = fixed_symbol_length_status
self.symbol_length = symbol_length
self.ean128_status = ean128_status
self.output_format = output_format
self.application_record_separator_status = (
application_record_separator_status)
self.application_record_separator_character = (
application_record_separator_character)
self.application_record_brackets = application_record_brackets
self.application_record_padding = application_record_padding
def is_valid(self):
return all([
isinstance(self.status, Code128Status),
isinstance(
self.fixed_symbol_length_status, FixedSymbolLengthStatus),
isinstance(self.symbol_length, int),
self.symbol_length >= 1,
self.symbol_length <= 64,
isinstance(self.ean128_status, EAN128Status),
isinstance(self.output_format, Code128OutputFormat),
isinstance(
self.application_record_brackets, ApplicationRecordBrackets),
isinstance(
self.application_record_padding, ApplicationRecordPadding),
])
def to_config_string(self):
return super().to_config_string([
self.status.value,
self.fixed_symbol_length_status.value,
self.symbol_length,
self.ean128_status.value,
self.output_format.value,
self.application_record_separator_status.value,
self.application_record_separator_character,
self.application_record_brackets.value,
self.application_record_padding.value,
])
@classmethod
def from_config_string(cls, str_):
"""Create Code128 object from string returned by the device
The str_ argument should be the device response to the <K474?>
command, for example '<K474,1,0,10,1,0,0,,,0,0>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
(
status,
fixed_symbol_length_status,
symbol_length,
ean128_status,
output_format,
application_record_separator_status,
application_record_separator_character,
application_record_brackets,
application_record_padding
) = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
status=Code128Status(status),
fixed_symbol_length_status=FixedSymbolLengthStatus(
fixed_symbol_length_status),
symbol_length=int(symbol_length),
ean128_status=EAN128Status(ean128_status),
output_format=Code128OutputFormat(output_format),
application_record_separator_status=(
ApplicationRecordSeparatorStatus(
application_record_separator_status)),
application_record_separator_character=(
application_record_separator_character),
application_record_brackets=ApplicationRecordBrackets(
application_record_brackets),
application_record_padding=ApplicationRecordPadding(
application_record_padding)
)
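# Illustrative usage sketch (serialisation of empty/None sub-settings is
# handled by the KSetting base class, which is defined elsewhere in this
# module):
#   cfg = Code128(status=Code128Status.Enabled,
#                 ean128_status=EAN128Status.Required)
#   cfg.is_valid()          # -> True
#   cfg.to_config_string()  # -> a '<K474,...>' configuration string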
# === Interleaved 2 of 5 setting and corresponding enums ===
class Interleaved2Of5Status(Enum):
Disabled = b'0'
Enabled = b'1'
class Interleaved2Of5(KSetting):
"""See page 5-10 of Microscan MS3 manual for reference
"""
K_CODE = b'K472'
# TODO
# === Codabar setting and corresponding enums ===
class CodabarStatus(Enum):
Disabled = b'0'
Enabled = b'1'
class Codabar(KSetting):
"""See page 5-13 of Microscan MS3 manual for reference
"""
K_CODE = b'K471'
# TODO
# === EAN/UPC setting and corresponding enums ===
class UPCStatus(Enum):
Disabled = b'0'
Enabled = b'1'
class EANStatus(Enum):
Disabled = b'0'
Enabled = b'1'
class SupplementalsStatus(Enum):
Disabled = b'0'
Enabled = b'1'
Required = b'2'
class SeparatorStatus(Enum):
Disabled = b'0'
Enabled = b'1'
class UPC_EoutputAsUPC_A(Enum):
Disabled = b'0'
Enabled = b'1'
class UPC_EAN(KSetting):
"""See page 5-16 of Microscan MS3 manual for reference
"""
K_CODE = b'K473'
# K-codes for this setting can be tricky to read because the last five
# characters before the closing ">" are likely to be commas:
# - the second to last sub-setting is unused, i.e. empty
# - the last and third to last sub-settings default to ","
K_PATTERN = (
b'^<%s,([0-1])?,([0-1])?,([0-2])?,([0-1])?,(.)?,,([0-1])?,([0-1])?>$'
% K_CODE)
def __init__(
self,
upc_status=UPCStatus.Disabled,
ean_status=EANStatus.Disabled,
supplementals_status=SupplementalsStatus.Disabled,
separator_status=SeparatorStatus.Disabled,
separator_character=',',
upc_e_output_to_upc_a=UPC_EoutputAsUPC_A.Disabled, # docs wrong
undocumented_field=0):
self.upc_status = upc_status
self.ean_status = ean_status
self.supplementals_status = supplementals_status
self.separator_status = separator_status
self.separator_character = separator_character
self.upc_e_output_to_upc_a = upc_e_output_to_upc_a
self.undocumented_field = undocumented_field
def is_valid(self):
return all([
isinstance(self.upc_status, UPCStatus),
isinstance(self.ean_status, EANStatus),
isinstance(self.supplementals_status, SupplementalsStatus),
isinstance(self.separator_status, SeparatorStatus),
isinstance(self.separator_character, str),
isinstance(self.upc_e_output_to_upc_a, UPC_EoutputAsUPC_A),
])
def to_config_string(self):
return super().to_config_string([
self.upc_status.value,
self.ean_status.value,
self.supplementals_status.value,
self.separator_status.value,
self.separator_character,
None, # accommodates the "unused" sub-setting
self.upc_e_output_to_upc_a.value,
self.undocumented_field,
])
@classmethod
def from_config_string(cls, str_):
"""Create UPC_EAN object from string returned by the device
The str_ argument should be the device response to the <K473?>
command, for example '<K473,1,0,0,0,,,,>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
(
upc_status, ean_status, supplementals_status, separator_status,
separator_character, upc_e_output_to_upc_a, undocumented_field
) = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
upc_status=UPCStatus(upc_status),
ean_status=EANStatus(ean_status),
supplementals_status=SupplementalsStatus(supplementals_status),
separator_status=SeparatorStatus(separator_status),
separator_character=separator_character,
upc_e_output_to_upc_a=UPC_EoutputAsUPC_A(upc_e_output_to_upc_a),
undocumented_field=int(undocumented_field),
)
# === Code 93 setting and corresponding enums ===
class Code93Status(Enum):
Disabled = b'0'
Enabled = b'1'
class Code93(KSetting):
"""See page 5-19 of Microscan MS3 manual for reference
"""
K_CODE = b'K475'
K_PATTERN = (
b'^<%s,([0-1])?,([0-1])?,([\d]{1,2})?>$' % K_CODE)
def __init__(
self,
status=Code93Status.Disabled,
fixed_symbol_length_status=FixedSymbolLengthStatus.Disabled,
fixed_symbol_length=10,):
self.status = status
self.fixed_symbol_length_status = fixed_symbol_length_status
self.fixed_symbol_length = fixed_symbol_length
def is_valid(self):
return all([
isinstance(self.status, Code93Status),
isinstance(
self.fixed_symbol_length_status, FixedSymbolLengthStatus),
isinstance(self.fixed_symbol_length, int),
self.fixed_symbol_length >= 1,
self.fixed_symbol_length <= 64,
])
def to_config_string(self):
return super().to_config_string([
self.status.value,
self.fixed_symbol_length_status.value,
self.fixed_symbol_length,
])
@classmethod
def from_config_string(cls, str_):
"""Create Code93 object from string returned by the device
The str_ argument should be the device response to the <K475?>
command, for example '<K475,1,0,10>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
status, fsl_status, fsl = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
status=Code93Status(status),
fixed_symbol_length_status=FixedSymbolLengthStatus(fsl_status),
fixed_symbol_length=int(fsl),
)
# === Pharmacode setting and corresponding enums ===
class PharmacodeStatus(Enum):
Disabled = b'0'
Enabled = b'1'
class Pharmacode(KSetting):
"""See page 5-19 of Microscan MS3 manual for reference
"""
K_CODE = b'K475'
# TODO
# === Narrow Margins and Symbology ID setting and corresponding enums ===
class NarrowMarginsStatus(Enum):
Disabled = b'0'
Enabled = b'1'
class SymbologyIDStatus(Enum):
Disabled = b'0'
Enabled = b'1'
class NarrowMarginsAndSymbologyID(KSetting):
"""See page 5-22 of Microscan MS3 manual for reference
"""
K_CODE = b'K450'
K_PATTERN = b'^<%s,([0-1])?,([0-1])?>$' % K_CODE
def __init__(
self,
narrow_margins_status=NarrowMarginsStatus.Disabled,
symbology_id_status=SymbologyIDStatus.Disabled):
self.narrow_margins_status = narrow_margins_status
self.symbology_id_status = symbology_id_status
def is_valid(self):
return all([
isinstance(self.narrow_margins_status, NarrowMarginsStatus),
isinstance(self.symbology_id_status, SymbologyIDStatus)
])
def to_config_string(self):
return super().to_config_string([
self.narrow_margins_status.value,
self.symbology_id_status.value,
])
@classmethod
def from_config_string(cls, str_):
"""Create NarrowMargins object from string returned by the device
The str_ argument should be the device response to the <K450?>
command, for example '<K450,1,0>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
narrow_margins_status, symbology_id_status = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
narrow_margins_status=NarrowMarginsStatus(narrow_margins_status),
symbology_id_status=SymbologyIDStatus(symbology_id_status)
)
# === Background Color setting and corresponding enums ===
class Color(Enum):
White = b'0'
Black = b'1'
class BackgroundColor(KSetting):
"""See page 5-24 of Microscan MS3 manual for reference
"""
K_CODE = b'K451'
K_PATTERN = b'^<%s,([0-1])?>$' % K_CODE
def __init__(self, color=Color.White):
self.color = color
def is_valid(self):
return all([
isinstance(self.color, Color),
])
def to_config_string(self):
return super().to_config_string([
self.color.value,
])
@classmethod
def from_config_string(cls, str_):
"""Create BackgroundColor object from string returned by the device
The str_ argument should be the device response to the <K451?>
command, for example '<K451,1>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
color, = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
color=Color(color),
)
# === Symbol Ratio Mode setting and corresponding enums ===
class SymbolRatio(Enum):
Tight = b'0'
Standard = b'1'
Aggressive = b'2'
class SymbolRatioMode(KSetting):
"""See page 5-25 of Microscan MS3 manual for reference
"""
K_CODE = b'K452'
K_PATTERN = b'^<%s,([0-2])?,([0-2])?,([0-2])?,([0-2])?>$' % K_CODE
def __init__(
self,
code39=SymbolRatio.Standard,
codabar=SymbolRatio.Standard,
interleaved_2_of_5=SymbolRatio.Standard,
code93=SymbolRatio.Standard):
self.code39 = code39
self.codabar = codabar
self.interleaved_2_of_5 = interleaved_2_of_5
self.code93 = code93
def is_valid(self):
return all([
isinstance(self.code39, | |
# From here on, modules need certain
# libraries, are platform-specific, or present other surprises.
#
# Multimedia modules
# These don't work for 64-bit platforms!!!
# These represent audio samples or images as strings:
# Operations on audio samples
# According to #993173, this one should actually work fine on
# 64-bit platforms.
exts.append( Extension('audioop', ['audioop.c']) )
# readline
do_readline = self.compiler.find_library_file(lib_dirs, 'readline')
readline_termcap_library = ""
curses_library = ""
# Cannot use os.popen here in py3k.
tmpfile = os.path.join(self.build_temp, 'readline_termcap_lib')
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
# Determine if readline is already linked against curses or tinfo.
if do_readline:
if cross_compiling:
ret = os.system("%s -d %s | grep '(NEEDED)' > %s" \
% (sysconfig.get_config_var('READELF'),
do_readline, tmpfile))
elif find_executable('ldd'):
ret = os.system("ldd %s > %s" % (do_readline, tmpfile))
else:
ret = 256
if ret >> 8 == 0:
with open(tmpfile) as fp:
for ln in fp:
if 'curses' in ln:
readline_termcap_library = re.sub(
r'.*lib(n?cursesw?)\.so.*', r'\1', ln
).rstrip()
break
# termcap interface split out from ncurses
if 'tinfo' in ln:
readline_termcap_library = 'tinfo'
break
if os.path.exists(tmpfile):
os.unlink(tmpfile)
# Issue 7384: If readline is already linked against curses,
# use the same library for the readline and curses modules.
if 'curses' in readline_termcap_library:
curses_library = readline_termcap_library
elif self.compiler.find_library_file(lib_dirs, 'ncursesw'):
curses_library = 'ncursesw'
elif self.compiler.find_library_file(lib_dirs, 'ncurses'):
curses_library = 'ncurses'
elif self.compiler.find_library_file(lib_dirs, 'curses'):
curses_library = 'curses'
if host_platform == 'darwin':
os_release = int(os.uname()[2].split('.')[0])
dep_target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
if (dep_target and
(tuple(int(n) for n in dep_target.split('.')[0:2])
< (10, 5) ) ):
os_release = 8
if os_release < 9:
# MacOSX 10.4 has a broken readline. Don't try to build
# the readline module unless the user has installed a fixed
# readline package
if find_file('readline/rlconf.h', inc_dirs, []) is None:
do_readline = False
if do_readline:
if host_platform == 'darwin' and os_release < 9:
# In every directory on the search path search for a dynamic
# library and then a static library, instead of first looking
# for dynamic libraries on the entire path.
# This way a statically linked custom readline gets picked up
# before the (possibly broken) dynamic library in /usr/lib.
readline_extra_link_args = ('-Wl,-search_paths_first',)
else:
readline_extra_link_args = ()
readline_libs = ['readline']
if readline_termcap_library:
pass # Issue 7384: Already linked against curses or tinfo.
elif curses_library:
readline_libs.append(curses_library)
elif self.compiler.find_library_file(lib_dirs +
['/usr/lib/termcap'],
'termcap'):
readline_libs.append('termcap')
exts.append( Extension('readline', ['readline.c'],
library_dirs=['/usr/lib/termcap'],
extra_link_args=readline_extra_link_args,
libraries=readline_libs) )
else:
missing.append('readline')
# crypt module.
if self.compiler.find_library_file(lib_dirs, 'crypt'):
libs = ['crypt']
else:
libs = []
exts.append( Extension('_crypt', ['_cryptmodule.c'], libraries=libs) )
# CSV files
exts.append( Extension('_csv', ['_csv.c']) )
# POSIX subprocess module helper.
exts.append( Extension('_posixsubprocess', ['_posixsubprocess.c']) )
# socket(2)
exts.append( Extension('_socket', ['socketmodule.c'],
depends = ['socketmodule.h']) )
# Detect SSL support for the socket module (via _ssl)
search_for_ssl_incs_in = [
'/usr/local/ssl/include',
'/usr/contrib/ssl/include/'
]
ssl_incs = find_file('openssl/ssl.h', inc_dirs,
search_for_ssl_incs_in
)
if ssl_incs is not None:
krb5_h = find_file('krb5.h', inc_dirs,
['/usr/kerberos/include'])
if krb5_h:
ssl_incs += krb5_h
ssl_libs = find_library_file(self.compiler, 'ssl',lib_dirs,
['/usr/local/ssl/lib',
'/usr/contrib/ssl/lib/'
] )
if (ssl_incs is not None and
ssl_libs is not None):
exts.append( Extension('_ssl', ['_ssl.c'],
include_dirs = ssl_incs,
library_dirs = ssl_libs,
libraries = ['ssl', 'crypto'],
depends = ['socketmodule.h']), )
else:
missing.append('_ssl')
# find out which version of OpenSSL we have
openssl_ver = 0
openssl_ver_re = re.compile(
'^\s*#\s*define\s+OPENSSL_VERSION_NUMBER\s+(0x[0-9a-fA-F]+)' )
# look for the openssl version header on the compiler search path.
opensslv_h = find_file('openssl/opensslv.h', [],
inc_dirs + search_for_ssl_incs_in)
if opensslv_h:
name = os.path.join(opensslv_h[0], 'openssl/opensslv.h')
if host_platform == 'darwin' and is_macosx_sdk_path(name):
name = os.path.join(macosx_sdk_root(), name[1:])
try:
with open(name, 'r') as incfile:
for line in incfile:
m = openssl_ver_re.match(line)
if m:
openssl_ver = int(m.group(1), 16)
break
except IOError as msg:
print("IOError while reading opensslv.h:", msg)
#print('openssl_ver = 0x%08x' % openssl_ver)
min_openssl_ver = 0x00907000
have_any_openssl = ssl_incs is not None and ssl_libs is not None
have_usable_openssl = (have_any_openssl and
openssl_ver >= min_openssl_ver)
if have_any_openssl:
if have_usable_openssl:
# The _hashlib module wraps optimized implementations
# of hash functions from the OpenSSL library.
exts.append( Extension('_hashlib', ['_hashopenssl.c'],
depends = ['hashlib.h'],
include_dirs = ssl_incs,
library_dirs = ssl_libs,
libraries = ['ssl', 'crypto']) )
else:
print("warning: openssl 0x%08x is too old for _hashlib" %
openssl_ver)
missing.append('_hashlib')
# We always compile these even when OpenSSL is available (issue #14693).
# It's harmless and the object code is tiny (40-50 KB per module,
# only loaded when actually used).
exts.append( Extension('_sha256', ['sha256module.c'],
depends=['hashlib.h']) )
exts.append( Extension('_sha512', ['sha512module.c'],
depends=['hashlib.h']) )
exts.append( Extension('_md5', ['md5module.c'],
depends=['hashlib.h']) )
exts.append( Extension('_sha1', ['sha1module.c'],
depends=['hashlib.h']) )
# Modules that provide persistent dictionary-like semantics. You will
# probably want to arrange for at least one of them to be available on
# your machine, though none are defined by default because of library
# dependencies. The Python module dbm/__init__.py provides an
# implementation independent wrapper for these; dbm/dumb.py provides
# similar functionality (but slower of course) implemented in Python.
# Sleepycat^WOracle Berkeley DB interface.
# http://www.oracle.com/database/berkeley-db/db/index.html
#
# This requires the Sleepycat^WOracle DB code. The supported versions
# are set below. Visit the URL above to download
# a release. Most open source OSes come with one or more
# versions of BerkeleyDB already installed.
max_db_ver = (5, 3)
min_db_ver = (3, 3)
db_setup_debug = False # verbose debug prints from this script?
def allow_db_ver(db_ver):
"""Returns a boolean if the given BerkeleyDB version is acceptable.
Args:
db_ver: A tuple of the version to verify.
"""
if not (min_db_ver <= db_ver <= max_db_ver):
return False
return True
def gen_db_minor_ver_nums(major):
if major == 4:
for x in range(max_db_ver[1]+1):
if allow_db_ver((4, x)):
yield x
elif major == 3:
for x in (3,):
if allow_db_ver((3, x)):
yield x
else:
raise ValueError("unknown major BerkeleyDB version", major)
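# Illustrative note: with max_db_ver == (5, 3) and min_db_ver == (3, 3) as set
# above, list(gen_db_minor_ver_nums(4)) yields [0, 1, 2, 3] and
# list(gen_db_minor_ver_nums(3)) yields [3].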
# construct a list of paths to look for the header file in on
# top of the normal inc_dirs.
db_inc_paths = [
'/usr/include/db4',
'/usr/local/include/db4',
'/opt/sfw/include/db4',
'/usr/include/db3',
'/usr/local/include/db3',
'/opt/sfw/include/db3',
# Fink defaults (http://fink.sourceforge.net/)
'/sw/include/db4',
'/sw/include/db3',
]
# 4.x minor number specific paths
for x in gen_db_minor_ver_nums(4):
db_inc_paths.append('/usr/include/db4%d' % x)
db_inc_paths.append('/usr/include/db4.%d' % x)
db_inc_paths.append('/usr/local/BerkeleyDB.4.%d/include' % x)
db_inc_paths.append('/usr/local/include/db4%d' % x)
db_inc_paths.append('/pkg/db-4.%d/include' % x)
db_inc_paths.append('/opt/db-4.%d/include' % x)
# MacPorts default (http://www.macports.org/)
db_inc_paths.append('/opt/local/include/db4%d' % x)
# 3.x minor number specific paths
for x in gen_db_minor_ver_nums(3):
db_inc_paths.append('/usr/include/db3%d' % x)
db_inc_paths.append('/usr/local/BerkeleyDB.3.%d/include' % x)
db_inc_paths.append('/usr/local/include/db3%d' % x)
db_inc_paths.append('/pkg/db-3.%d/include' % x)
db_inc_paths.append('/opt/db-3.%d/include' % x)
if cross_compiling:
db_inc_paths = []
# Add some common subdirectories for Sleepycat DB to the list,
# based on the standard include directories. This way DB3/4 gets
# picked up when it is installed in a non-standard prefix and
# the user has added that prefix into inc_dirs.
std_variants = []
for dn in inc_dirs:
std_variants.append(os.path.join(dn, 'db3'))
std_variants.append(os.path.join(dn, 'db4'))
for x in gen_db_minor_ver_nums(4):
std_variants.append(os.path.join(dn, "db4%d"%x))
std_variants.append(os.path.join(dn, "db4.%d"%x))
for x in gen_db_minor_ver_nums(3):
std_variants.append(os.path.join(dn, "db3%d"%x))
std_variants.append(os.path.join(dn, "db3.%d"%x))
db_inc_paths = std_variants + db_inc_paths
db_inc_paths = [p for p in db_inc_paths if os.path.exists(p)]
db_ver_inc_map = {}
if host_platform == 'darwin':
sysroot = macosx_sdk_root()
class db_found(Exception): pass
try:
# See whether there is a Sleepycat header in the standard
# search path.
for d in inc_dirs + db_inc_paths:
f = os.path.join(d, "db.h")
if host_platform == 'darwin' and is_macosx_sdk_path(d):
f = os.path.join(sysroot, d[1:], "db.h")
if db_setup_debug: print("db: looking for db.h in", f)
if os.path.exists(f):
with open(f, 'rb') as file:
f = file.read()
m = re.search(br"#define\WDB_VERSION_MAJOR\W(\d+)", f)
if m:
db_major = int(m.group(1))
m = re.search(br"#define\WDB_VERSION_MINOR\W(\d+)", f)
db_minor = int(m.group(1))
db_ver = (db_major, db_minor)
# Avoid 4.6 prior to 4.6.21 due to a BerkeleyDB bug
if db_ver == (4, 6):
m = re.search(br"#define\WDB_VERSION_PATCH\W(\d+)", f)
db_patch = int(m.group(1))
if db_patch < 21:
print("db.h:", db_ver, "patch", db_patch,
"being ignored (4.6.x must be >= 4.6.21)")
continue
if ( (db_ver not in db_ver_inc_map) and
allow_db_ver(db_ver) ):
# save the include directory with the db.h version
# (first occurrence only)
db_ver_inc_map[db_ver] = d
if db_setup_debug:
print("db.h: found", db_ver, "in", d)
else:
# we already found a header for this library version
if db_setup_debug: print("db.h: ignoring", d)
else:
# ignore this header, it didn't contain a version number
if db_setup_debug:
print("db.h: no version number in", d)
| |
import boto3
import botocore
import configparser
import json
import logging
import os
import re
import uuid
# namedtuple lives in `collections` (collections.abc only provides the
# abstract base classes)
from collections import namedtuple
from ..config import get_config_file, rlock
__all__ = [
"clients",
"get_tags",
"get_ecr_repo",
"set_ecr_repo",
"get_s3_params",
"set_s3_params",
"get_region",
"set_region",
"list_profiles",
"get_user",
"get_profile",
"set_profile",
"refresh_clients",
"ResourceExistsException",
"ResourceDoesNotExistException",
"ResourceClobberedException",
"CannotDeleteResourceException",
"CannotCreateResourceException",
"RegionException",
"ProfileException",
"CKTimeoutError",
"BatchJobFailedError",
"CloudknotConfigurationError",
"CloudknotInputError",
"NamedObject",
]
mod_logger = logging.getLogger(__name__)
def get_tags(name, additional_tags=None):
tag_list = []
if additional_tags is not None:
if isinstance(additional_tags, list):
if not all(
[set(item.keys()) == set(["Key", "Value"]) for item in additional_tags]
):
raise ValueError(
"If additional_tags is a list, it must be a list of "
"dictionaries of the form {'Key': key_val, 'Value': "
"value_val}."
)
tag_list += additional_tags
elif isinstance(additional_tags, dict):
if "Key" in additional_tags.keys() or "Value" in additional_tags.keys():
raise ValueError(
"If additional_tags is a dict, it cannot contain keys named 'Key' or "
"'Value'. It looks like you are trying to pass in tags of the form "
"{'Key': key_val, 'Value': value_val}. If that's the case, please put "
"it in a list, i.e. [{'Key': key_val, 'Value': value_val}]."
)
tag_list = [{"Key": k, "Value": v} for k, v in additional_tags.items()]
else:
raise ValueError(
"additional_tags must be a dictionary or a list of dictionaries."
)
if not [tag for tag in tag_list if tag["Key"] == "Name"]:
tag_list.append({"Key": "Name", "Value": name})
if not [tag for tag in tag_list if tag["Key"] == "Owner"]:
tag_list.append({"Key": "Owner", "Value": get_user()})
if not [tag for tag in tag_list if tag["Key"] == "Environment"]:
tag_list.append({"Key": "Environment", "Value": "cloudknot"})
return tag_list
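# Illustrative usage (assumed values):
#   get_tags("my-bucket", additional_tags={"Project": "demo"})
#   # -> [{'Key': 'Project', 'Value': 'demo'},
#   #     {'Key': 'Name', 'Value': 'my-bucket'},
#   #     {'Key': 'Owner', 'Value': <output of get_user()>},
#   #     {'Key': 'Environment', 'Value': 'cloudknot'}]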
def get_ecr_repo():
"""Get the cloudknot ECR repository
First, check the cloudknot config file for the ecr-repo option.
If that fails, check for the CLOUDKNOT_ECR_REPO environment variable.
If that fails, use 'cloudknot'
Returns
-------
repo : string
Cloudknot ECR repository name
"""
config_file = get_config_file()
config = configparser.ConfigParser()
with rlock:
config.read(config_file)
option = "ecr-repo"
if config.has_section("aws") and config.has_option("aws", option):
repo = config.get("aws", option)
else:
# Set `repo`, the fallback repo in case the cloudknot
# repo environment variable is not set
try:
# Get the region from an environment variable
repo = os.environ["CLOUDKNOT_ECR_REPO"]
except KeyError:
repo = "cloudknot"
# Use set_ecr_repo to check for name availability
# and write to config file
set_ecr_repo(repo)
return repo
def set_ecr_repo(repo):
"""Set the cloudknot ECR repo
Set repo by modifying the cloudknot config file
Parameters
----------
repo : string
Cloudknot ECR repo name
"""
# Update the config file
config_file = get_config_file()
config = configparser.ConfigParser()
with rlock:
config.read(config_file)
if not config.has_section("aws"): # pragma: nocover
config.add_section("aws")
config.set("aws", "ecr-repo", repo)
with open(config_file, "w") as f:
config.write(f)
# Flake8 will see that repo_arn is set in the try/except clauses
# and claim that we are referencing it before assignment below
# so we predefine it here. Also, it should be predefined as a
# string to pass parameter validation by boto.
repo_arn = "test"
try:
# If repo exists, retrieve its info
response = clients["ecr"].describe_repositories(repositoryNames=[repo])
repo_arn = response["repositories"][0]["repositoryArn"]
except clients["ecr"].exceptions.RepositoryNotFoundException:
# If it doesn't exists already, then create it
response = clients["ecr"].create_repository(repositoryName=repo)
repo_arn = response["repository"]["repositoryArn"]
except botocore.exceptions.ClientError as e:
error_code = e.response["Error"]["Code"]
if error_code == "RepositoryNotFoundException":
# If it doesn't exist already, then create it
response = clients["ecr"].create_repository(repositoryName=repo)
repo_arn = response["repository"]["repositoryArn"]
try:
clients["ecr"].tag_resource(
resourceArn=repo_arn,
tags=get_tags(
name=repo, additional_tags={"Project": "Cloudknot global config"}
),
)
except NotImplementedError as e:
moto_msg = "The tag_resource action has not been implemented"
if moto_msg in e.args:
# This exception is here for compatibility with moto
# testing since the tag_resource action has not been
# implemented in moto. Simply move on.
pass
else:
raise e
def get_s3_params():
"""Get the cloudknot S3 bucket and corresponding access policy
For the bucket name, first check the cloudknot config file for the bucket
option. If that fails, check for the CLOUDKNOT_S3_BUCKET environment
variable. If that fails, use
'cloudknot-' + get_user().lower() + '-' + uuid4()
For the policy name, first check the cloudknot config file. If that fails,
use 'cloudknot-bucket-access-' + str(uuid.uuid4())
For the region, first check the cloudknot config file. If that fails,
use the current cloudknot region
Returns
-------
bucket : NamedTuple
A namedtuple with fields ['bucket', 'policy', 'policy_arn', 'sse']
"""
config_file = get_config_file()
config = configparser.ConfigParser()
BucketInfo = namedtuple("BucketInfo", ["bucket", "policy", "policy_arn", "sse"])
with rlock:
config.read(config_file)
option = "s3-bucket-policy"
if config.has_section("aws") and config.has_option("aws", option):
# Get policy name from the config file
policy = config.get("aws", option)
else:
# or set policy to None to create it in the call to
# set_s3_params()
policy = None
option = "s3-bucket"
if config.has_section("aws") and config.has_option("aws", option):
bucket = config.get("aws", option)
else:
try:
# Get the bucket name from an environment variable
bucket = os.environ["CLOUDKNOT_S3_BUCKET"]
except KeyError:
# Use the fallback bucket b/c the cloudknot
# bucket environment variable is not set
bucket = "cloudknot-" + get_user().lower() + "-" + str(uuid.uuid4())
if policy is not None:
# In this case, the bucket name is new, but the policy is not.
# Update the policy to reflect the new bucket name.
update_s3_policy(policy=policy, bucket=bucket)
option = "s3-sse"
if config.has_section("aws") and config.has_option("aws", option):
sse = config.get("aws", option)
if sse not in ["AES256", "aws:kms", "None"]:
raise CloudknotInputError(
'The server-side encryption option "sse" must must be '
'one of ["AES256", "aws:kms", "None"]'
)
else:
sse = None
if sse == "None":
sse = None
# Use set_s3_params to check for name availability
# and write to config file
set_s3_params(bucket=bucket, policy=policy, sse=sse)
if policy is None:
config.read(config_file)
policy = config.get("aws", "s3-bucket-policy")
# Get all local policies with cloudknot prefix
paginator = clients["iam"].get_paginator("list_policies")
response_iterator = paginator.paginate(Scope="Local", PathPrefix="/cloudknot/")
# response_iterator is a list of dicts. First convert to list of lists
# and then flatten to a single list
response_policies = [response["Policies"] for response in response_iterator]
policies = [lst for sublist in response_policies for lst in sublist]
aws_policies = {d["PolicyName"]: d["Arn"] for d in policies}
policy_arn = aws_policies[policy]
return BucketInfo(bucket=bucket, policy=policy, policy_arn=policy_arn, sse=sse)
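# Illustrative usage: the returned namedtuple is accessed by field name, e.g.
#   info = get_s3_params()
#   info.bucket, info.policy, info.policy_arn, info.sse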
def set_s3_params(bucket, policy=None, sse=None):
"""Set the cloudknot S3 bucket
Set bucket by modifying the cloudknot config file
Parameters
----------
bucket : string
Cloudknot S3 bucket name
policy : string
Cloudknot S3 bucket access policy name
Default: None means that cloudknot will create a new policy
sse : string
S3 server side encryption method. If provided, must be one of
['AES256', 'aws:kms'].
Default: None
"""
if sse is not None and sse not in ["AES256", "aws:kms"]:
raise CloudknotInputError(
'The server-side encryption option "sse" '
'must be one of ["AES256", "aws:kms"]'
)
# Update the config file
config_file = get_config_file()
config = configparser.ConfigParser()
def test_bucket_put_get(bucket_, sse_):
key = "cloudnot-test-permissions-key"
try:
if sse_:
clients["s3"].put_object(
Bucket=bucket_, Body=b"test", Key=key, ServerSideEncryption=sse_
)
else:
clients["s3"].put_object(Bucket=bucket_, Body=b"test", Key=key)
clients["s3"].get_object(Bucket=bucket_, Key=key)
except clients["s3"].exceptions.ClientError:
raise CloudknotInputError(
"The requested bucket name already "
"exists and you do not have permission "
"to put or get objects in it."
)
try:
clients["s3"].delete_object(Bucket=bucket_, Key=key)
except Exception:
pass
with rlock:
config.read(config_file)
if not config.has_section("aws"): # pragma: nocover
config.add_section("aws")
config.set("aws", "s3-bucket", bucket)
# Create the bucket
try:
if get_region() == "us-east-1":
clients["s3"].create_bucket(Bucket=bucket)
else:
clients["s3"].create_bucket(
Bucket=bucket,
CreateBucketConfiguration={"LocationConstraint": get_region()},
)
except clients["s3"].exceptions.BucketAlreadyOwnedByYou:
pass
except clients["s3"].exceptions.BucketAlreadyExists:
test_bucket_put_get(bucket, sse)
except clients["s3"].exceptions.ClientError as e:
# Check for Illegal Location Constraint
error_code = e.response["Error"]["Code"]
if error_code in [
"IllegalLocationConstraintException",
"InvalidLocationConstraint",
]:
response = clients["s3"].get_bucket_location(Bucket=bucket)
location = response.get("LocationConstraint")
try:
if location == "us-east-1" or location is None:
clients["s3"].create_bucket(Bucket=bucket)
else:
clients["s3"].create_bucket(
Bucket=bucket,
CreateBucketConfiguration={"LocationConstraint": location},
)
except clients["s3"].exceptions.BucketAlreadyOwnedByYou:
pass
except clients["s3"].exceptions.BucketAlreadyExists:
test_bucket_put_get(bucket, sse)
else:
# Pass exception to user
raise e
# Add the cloudknot tags to the bucket
clients["s3"].put_bucket_tagging(
Bucket=bucket,
Tagging={
"TagSet": get_tags(
name=bucket, additional_tags={"Project": "Cloudknot global config"}
)
},
)
if policy is None:
policy = "cloudknot-bucket-access-" + str(uuid.uuid4())
try:
# Create the policy
s3_policy_doc = bucket_policy_document(bucket)
clients["iam"].create_policy(
PolicyName=policy,
Path="/cloudknot/",
PolicyDocument=json.dumps(s3_policy_doc),
Description="Grants access to S3 bucket {0:s}" "".format(bucket),
)
except clients["iam"].exceptions.EntityAlreadyExistsException:
# Policy already exists, do nothing
pass
config.set("aws", "s3-bucket-policy", policy)
config.set("aws", "s3-sse", str(sse))
with open(config_file, "w") as f:
config.write(f)
def bucket_policy_document(bucket):
"""Return the policy document to access an S3 bucket
Parameters
----------
bucket: string
An Amazon S3 bucket name
Returns
-------
s3_policy_doc: dict
A dictionary | |
<gh_stars>0
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Finetuning Callback
^^^^^^^^^^^^^^^^^^^^
Freeze and unfreeze models for finetuning purposes
"""
import logging
from typing import Any, Callable, Dict, Generator, Iterable, List, Optional, Union
import torch
from torch.nn import Module
from torch.nn.modules.batchnorm import _BatchNorm
from torch.optim.optimizer import Optimizer
import pytorch_lightning as pl
from pytorch_lightning.callbacks.base import Callback
from pytorch_lightning.utilities import rank_zero_warn
from pytorch_lightning.utilities.exceptions import MisconfigurationException
log = logging.getLogger(__name__)
def multiplicative(epoch):
return 2
class BaseFinetuning(Callback):
r"""
This class implements the base logic for writing your own Finetuning Callback.
Override ``freeze_before_training`` and ``finetune_function`` methods with your own logic.
``freeze_before_training``: This method is called before ``configure_optimizers``
and should be used to freeze any modules parameters.
``finetune_function``: This method is called on every train epoch start and should be used to
``unfreeze`` any parameters. Those parameters needs to be added in a new ``param_group``
within the optimizer.
.. note:: Make sure to filter the parameters based on ``requires_grad``.
Example::
class MyModel(LightningModule):
...
def configure_optimizers(self):
# Make sure to filter the parameters based on `requires_grad`
return Adam(filter(lambda p: p.requires_grad, self.parameters()))
class FeatureExtractorFreezeUnfreeze(BaseFinetuning):
def __init__(self, unfreeze_at_epoch=10):
self._unfreeze_at_epoch = unfreeze_at_epoch
def freeze_before_training(self, pl_module):
# freeze any module you want
# Here, we are freezing ``feature_extractor``
self.freeze(pl_module.feature_extractor)
def finetune_function(self, pl_module, current_epoch, optimizer, optimizer_idx):
# When `current_epoch` is 10, feature_extractor will start training.
if current_epoch == self._unfreeze_at_epoch:
self.unfreeze_and_add_param_group(
modules=pl_module.feature_extractor,
optimizer=optimizer,
train_bn=True,
)
"""
def __init__(self):
self._internal_state: Dict[int, List[Dict[str, Any]]] = {}
def on_save_checkpoint(
self,
trainer: 'pl.Trainer',
pl_module: 'pl.LightningModule',
checkpoint: Dict[str, Any],
) -> Dict[int, List[Dict[str, Any]]]:
return self._internal_state
def on_load_checkpoint(
self, trainer: 'pl.Trainer', pl_module: 'pl.LightningModule', callback_state: Dict[int, List[Dict[str, Any]]]
) -> None:
self._internal_state = callback_state
# restore the param_groups created during the previous training.
named_parameters = dict(pl_module.named_parameters())
for opt_idx, optimizer in enumerate(trainer.optimizers):
param_groups = self.__apply_mapping_to_param_groups(self._internal_state[opt_idx], named_parameters)
optimizer.param_groups = param_groups
@staticmethod
def flatten_modules(modules: Union[Module, Iterable[Union[Module, Iterable]]]) -> List[Module]:
"""
This function is used to flatten a module or an iterable of modules into a list of its leaf modules (modules
with no children) and parent modules that have parameters directly themselves.
Args:
modules: A given module or an iterable of modules
Returns:
List of modules
"""
if isinstance(modules, Iterable):
_modules = []
for m in modules:
_modules.extend(BaseFinetuning.flatten_modules(m))
else:
_modules = modules.modules()
# Capture all leaf modules as well as parent modules that have parameters directly themselves
return [m for m in _modules if not list(m.children()) or m._parameters]
@staticmethod
def filter_params(
modules: Union[Module, Iterable[Union[Module, Iterable]]],
train_bn: bool = True,
requires_grad: bool = True
) -> Generator:
"""Yields the `requires_grad` parameters of a given module or list of modules.
Args:
modules: A given module or an iterable of modules
train_bn: Whether to train BatchNorm module
requires_grad: Whether to create a generator for trainable or non-trainable parameters.
Returns:
Generator
"""
modules = BaseFinetuning.flatten_modules(modules)
for mod in modules:
if isinstance(mod, _BatchNorm) and not train_bn:
continue
# recursion could yield duplicate parameters for parent modules w/ parameters so disabling it
for param in mod.parameters(recurse=False):
if param.requires_grad == requires_grad:
yield param
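# Illustrative usage sketch (``model`` is an arbitrary nn.Module):
#   trainable = list(BaseFinetuning.filter_params(model, train_bn=False,
#                                                 requires_grad=True))
# yields the trainable parameters of all leaf modules, skipping BatchNorm.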
@staticmethod
def make_trainable(modules: Union[Module, Iterable[Union[Module, Iterable]]]) -> None:
"""
Unfreezes the parameters of the provided modules
Args:
modules: A given module or an iterable of modules
"""
modules = BaseFinetuning.flatten_modules(modules)
for module in modules:
# recursion could yield duplicate parameters for parent modules w/ parameters so disabling it
for param in module.parameters(recurse=False):
param.requires_grad = True
@staticmethod
def freeze(modules: Union[Module, Iterable[Union[Module, Iterable]]], train_bn: bool = True) -> None:
"""
Freezes the parameters of the provided modules
Args:
modules: A given module or an iterable of modules
train_bn: If True, leave the BatchNorm layers in training mode
Returns:
None
"""
modules = BaseFinetuning.flatten_modules(modules)
for mod in modules:
if isinstance(mod, _BatchNorm) and train_bn:
BaseFinetuning.make_trainable(mod)
else:
# recursion could yield duplicate parameters for parent modules w/ parameters so disabling it
for param in mod.parameters(recurse=False):
param.requires_grad = False
@staticmethod
def filter_on_optimizer(optimizer: Optimizer, params: Iterable) -> List:
"""
This function is used to exclude any parameter which already exists in
this optimizer
Args:
optimizer: Optimizer used for parameter exclusion
params: Iterable of parameters used to check against the provided optimizer
Returns:
List of parameters not contained in this optimizer param groups
"""
out_params = []
removed_params = []
for param in params:
if not any(torch.equal(p, param) for group in optimizer.param_groups for p in group["params"]):
out_params.append(param)
else:
removed_params.append(param)
if removed_params:
rank_zero_warn(
"The provided params to be freezed already exist within another group of this optimizer."
" Those parameters will be skipped.\n"
"HINT: Did you init your optimizer in `configure_optimizer` as such:\n"
f" {type(optimizer)}(filter(lambda p: p.requires_grad, self.parameters()), ...) ", UserWarning
)
return out_params
@staticmethod
def unfreeze_and_add_param_group(
modules: Union[Module, Iterable[Union[Module, Iterable]]],
optimizer: Optimizer,
lr: Optional[float] = None,
initial_denom_lr: float = 10.,
train_bn: bool = True,
) -> None:
"""
Unfreezes a module and adds its parameters to an optimizer.
Args:
modules: A module or iterable of modules to unfreeze.
Their parameters will be added to an optimizer as a new param group.
optimizer: The provided optimizer will receive new parameters and will add them to
`add_param_group`
lr: Learning rate for the new param group.
initial_denom_lr: If no lr is provided, the learning rate from the first param group will be used
and divided by initial_denom_lr.
train_bn: Whether to train the BatchNormalization layers.
Returns:
None
"""
BaseFinetuning.make_trainable(modules)
params_lr = optimizer.param_groups[0]['lr'] if lr is None else float(lr)
denom_lr = initial_denom_lr if lr is None else 1.
params = BaseFinetuning.filter_params(modules, train_bn=train_bn, requires_grad=True)
params = BaseFinetuning.filter_on_optimizer(optimizer, params)
if params:
optimizer.add_param_group({
'params': params,
'lr': params_lr / denom_lr,
})
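# Illustrative sketch inside a `finetune_function` hook (the attribute name
# `feature_extractor` is an assumption borrowed from the class docstring
# above, not a required interface):
#   BaseFinetuning.unfreeze_and_add_param_group(
#       modules=pl_module.feature_extractor,
#       optimizer=optimizer,
#       lr=1e-4,  # omit to use optimizer.param_groups[0]['lr'] / initial_denom_lr
#       train_bn=False,
#   )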
def on_before_accelerator_backend_setup(self, trainer, pl_module):
self.freeze_before_training(pl_module)
@staticmethod
def __apply_mapping_to_param_groups(param_groups: List[Dict[str, Any]], mapping: dict) -> List[Dict[str, Any]]:
output = []
for g in param_groups:
# skip params to save memory
group_state = {k: v for k, v in g.items() if k != 'params'}
group_state['params'] = [mapping[p] for p in g['params']]
output.append(group_state)
return output
def _store(
self,
pl_module: 'pl.LightningModule',
opt_idx: int,
num_param_groups: int,
current_param_groups: List[Dict[str, Any]],
) -> None:
mapping = {p: n for n, p in pl_module.named_parameters()}
if opt_idx not in self._internal_state:
self._internal_state[opt_idx] = self.__apply_mapping_to_param_groups(current_param_groups, mapping)
elif num_param_groups != len(current_param_groups):
# save new param_groups possibly created by the users.
self._internal_state[opt_idx].extend(
self.__apply_mapping_to_param_groups(current_param_groups[num_param_groups:], mapping)
)
def on_train_epoch_start(self, trainer, pl_module):
"""Called when the epoch begins."""
for opt_idx, optimizer in trainer.fit_loop.get_active_optimizers():
num_param_groups = len(optimizer.param_groups)
self.finetune_function(pl_module, trainer.current_epoch, optimizer, opt_idx)
current_param_groups = optimizer.param_groups
self._store(pl_module, opt_idx, num_param_groups, current_param_groups)
def finetune_function(self, pl_module: 'pl.LightningModule', epoch: int, optimizer: Optimizer, opt_idx: int):
"""
Override to add your unfreeze logic
"""
raise NotImplementedError
def freeze_before_training(self, pl_module: 'pl.LightningModule'):
"""
Override to add your freeze logic
"""
raise NotImplementedError
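# --- Illustrative sketch (not part of the original callback module) ----------
# A minimal custom finetuning callback built on BaseFinetuning, showing how
# `freeze_before_training` and `finetune_function` are typically overridden.
# The `feature_extractor` attribute on the LightningModule and the availability
# of a `BaseFinetuning.freeze` helper are assumptions for this example.
class ExampleFeatureExtractorFinetuning(BaseFinetuning):

    def __init__(self, unfreeze_at_epoch: int = 10):
        super().__init__()
        self._unfreeze_at_epoch = unfreeze_at_epoch

    def freeze_before_training(self, pl_module: 'pl.LightningModule'):
        # Freeze the (assumed) backbone before training starts.
        self.freeze(pl_module.feature_extractor)

    def finetune_function(self, pl_module: 'pl.LightningModule', epoch: int, optimizer: Optimizer, opt_idx: int):
        # Once the target epoch is reached, unfreeze the backbone and register
        # its parameters as a new param group with a scaled-down learning rate.
        if epoch == self._unfreeze_at_epoch:
            self.unfreeze_and_add_param_group(
                modules=pl_module.feature_extractor,
                optimizer=optimizer,
                initial_denom_lr=10.,
            )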
class BackboneFinetuning(BaseFinetuning):
r"""
Finetune a backbone model based on a user-defined learning rate scheduling.
When the backbone learning rate reaches the current model learning rate
and ``should_align`` is set to True, it will align with it for the rest of the training.
Args:
unfreeze_backbone_at_epoch: Epoch at which the backbone will be unfrozen.
lambda_func: Scheduling function for increasing the backbone learning rate.
backbone_initial_ratio_lr:
    Used to scale down the backbone learning rate compared to the rest of the model.
backbone_initial_lr: Optional, initial learning rate for the backbone.
    By default, we will use current_learning / backbone_initial_ratio_lr
should_align: Whether to align with the current learning rate when the backbone learning rate
    reaches it.
initial_denom_lr: When unfreezing the backbone, the initial learning rate will be
    current_learning_rate / initial_denom_lr.
train_bn: Whether to make Batch Normalization trainable.
verbose: Display current learning rate for model and backbone.
round: Precision for displaying learning rate.
Example::
>>> from pytorch_lightning import Trainer
>>> from pytorch_lightning.callbacks import BackboneFinetuning
>>> multiplicative = lambda epoch: 1.5
>>> backbone_finetuning = BackboneFinetuning(200, multiplicative)
>>> trainer = Trainer(callbacks=[backbone_finetuning])
"""
def __init__(
self,
unfreeze_backbone_at_epoch: int = 10,
lambda_func: Callable,
# repository: Sotilrac/shared_playlist
# -*- coding:utf-8 -*-
#
# Copyright (C) 2012, <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import contextlib
import hashlib
import json
import pickle
import random
import time
import uuid
if sys.version_info.major == 3:
import urllib.request as urllib
else:
import urllib2 as urllib
import grooveshark.const
from grooveshark.classes import *
from grooveshark.version import *
# time in seconds until a new communication token is needed
TOKEN_TIMEOUT = 1200
# user agent used to access grooveshark
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.2; rv:9.0.1) Gecko/20100101 Firefox/9.0.1'
# url where album covers are located
ALBUM_COVER_URL = 'http://images.grooveshark.com/static/albums/'
# url where playlists covers are located
PLAYLIST_COVER_URL = 'http://images.grooveshark.com/static/playlists/'
# no cover url
NO_COVER_URL = 'http://images.grooveshark.com/static/albums/70_album.png'
# the grooveshark clients - using API tokens from SciLor (scilor.com) now
# because the ones I extracted out of Grooveshark do not work for some reason
# (I met SciLor; he told me that they do some type of encryption, but he
# doesn't know more) - thanks.
CLIENTS = {'htmlshark' : {'version' : '20130520',
'token' : '<PASSWORD>'},
'jsqueue' : {'version' : '20130520',
'token' : '<PASSWORD>'}}
# grooveshark country settings
COUNTRY = {'ID': 221, 'CC1': 0, 'CC2': 0, 'CC3': 0, 'CC4': 0, 'DMA': 0, 'IPR': 0}
__all__ = ['Session', 'Client', 'Connection', 'GroovesharkError', 'RequestError', 'UnknownError']
class GroovesharkError(Exception): pass
class RequestError(GroovesharkError): pass
class UnknownError(GroovesharkError): pass
class Session():
def __init__(self):
self.user = str(uuid.uuid4())
self.session = hashlib.md5(self.user.encode('utf-8')).hexdigest()
self.secret = hashlib.md5(self.session.encode('utf-8')).hexdigest()
self.country = COUNTRY
self.queue = None
self.token = None
self.time = None
def __repr__(self):
return '<Session user="{}", session="{}", secret="{}", country="{}">'.format(self.user, self.session, self.secret, self.country)
@classmethod
def open(cls, filename):
with open(filename, 'rb') as input:
return pickle.load(input)
def save(self, filename):
with open(filename, 'wb') as output:
pickle.dump(self, output)
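# Illustrative sketch (not part of the original module): persisting a Session
# between runs so the same uuid/session/secret triple is reused. The file name
# is a hypothetical example.
def _example_session_roundtrip():
    session = Session()
    session.save('grooveshark_session.pkl')            # pickle the session to disk
    restored = Session.open('grooveshark_session.pkl')  # load it back
    return restored.user == session.user                # same user uuid after reload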
class Connection():
'''
Lowlevel API communication.
:param session: a :class:`Session` object with session information
:param proxies: dictionary mapping protocol to proxy
'''
def __init__(self, session=None, proxies=None):
self.session = Session() if session is None else session
self.urlopen = urllib.build_opener(urllib.ProxyHandler(proxies)).open
def _random_hex(self):
'''
generates a random hex string
'''
return ''.join([random.choice('0123456789abcdef') for i in range(6)])
def _json_request_header(self):
'''
generates json http request headers
'''
return {'Cookie' : 'PHPSESSID=' + self.session.session, 'Content-Type' : 'application/json',
        'User-Agent' : USER_AGENT}
def _get_token(self):
'''
requests a communication token from Grooveshark
'''
self.session.token = self.request('getCommunicationToken', {'secretKey' : self.session.secret},
{'uuid' :self.session.user,
'session' : self.session.session,
'clientRevision' : CLIENTS['htmlshark']['version'],
'country' : self.session.country,
'privacy' : 0,
'client' : 'htmlshark'})[1]
self.session.time = time.time()
def _request_token(self, method, client):
'''
generates a request token
'''
if time.time() - self.session.time > TOKEN_TIMEOUT:
self._get_token()
random_value = self._random_hex()
return random_value + hashlib.sha1((method + ':' + self.session.token + ':' + CLIENTS[client]['token'] + ':' + random_value).encode('utf-8')).hexdigest()
def init(self):
'''
initiate token and queue.
'''
return self.init_token(), self.init_queue()
def init_token(self):
'''
initiate token
'''
self._get_token()
def init_queue(self):
'''
request queue id
'''
self.session.queue = self.request('initiateQueue', None, self.header('initiateQueue', 'jsqueue'))[1]
def request(self, method, parameters, header):
'''
Grooveshark API request
'''
data = json.dumps({'parameters' : parameters, 'method' : method, 'header' : header})
request = urllib.Request('https://grooveshark.com/more.php?%s' % (method),
data=data.encode('utf-8'), headers=self._json_request_header())
with contextlib.closing(self.urlopen(request)) as response:
result = json.loads(response.read().decode('utf-8'))
if 'result' in result:
return response.info(), result['result']
elif 'fault' in result:
raise RequestError(result['fault']['message'], result['fault']['code'])
else:
raise UnknownError(result)
def header(self, method, client='htmlshark'):
'''
generates Grooveshark API Json header
'''
return {'token' : self._request_token(method, client),
'privacy' : 0,
'uuid' : self.session.user,
'clientRevision' : CLIENTS[client]['version'],
'session' : self.session.session,
'client' : client,
'country' : self.session.country}
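# Illustrative sketch (not part of the original module): the low-level request
# flow used throughout Client below - initialise a communication token, build a
# signed header for the method, then POST the JSON payload. The method name and
# parameters mirror Client.popular().
def _example_connection_request():
    connection = Connection()
    connection.init_token()
    header = connection.header('popularGetSongs')
    info, result = connection.request('popularGetSongs', {'type': 'daily'}, header)
    return result['Songs']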
class Client(object):
'''
A client for Grooveshark's API which supports:
* radio (songs by genre)
* search for songs, artists and albums
* popular songs
:param session: a :class:`Session` object with session information
:param proxies: dictionary mapping protocol to proxy
'''
DAILY = 'daily'
MONTHLY = 'monthly'
SONGS = 'Songs'
ARTISTS = 'Artists'
ALBUMS = 'Albums'
PLAYLISTS = 'Playlists'
def __init__(self, session=None, proxies=None):
self.connection = Connection(session, proxies)
def init(self):
'''
Fetch Grooveshark's communication token and queue id.
:rtype: tuple: (:meth:`init_token()`, :meth:`init_queue()`)
'''
return self.connection.init()
def init_token(self):
'''
Fetch Grooveshark's communication token.
'''
return self.connection.init_token()
def init_queue(self):
'''
Initiate queue.
Make sure to call :meth:`init_token()` first.
'''
return self.connection.init_queue()
def radio(self, radio):
'''
Get songs belonging to a specific genre.
:param radio: genre to listen to
:rtype: a :class:`Radio` object
Genres:
This list is incomplete because there isn't an English translation for some genres.
Please look at the sources for all possible Tags.
+-------------------------------------+---------------------------------+
| Constant | Genre |
+=====================================+=================================+
| :const:`Radio.GENRE_RNB` | R and B |
+-------------------------------------+---------------------------------+
| :const:`Radio.GENRE_JAZZ` | Jazz |
+-------------------------------------+---------------------------------+
| :const:`Radio.GENRE_ROCK` | Rock |
+-------------------------------------+---------------------------------+
| :const:`Radio.GENRE_CLASSICAL` | Classical |
+-------------------------------------+---------------------------------+
| :const:`Radio.GENRE_DUBSTEP` | Dubstep |
+-------------------------------------+---------------------------------+
| :const:`Radio.GENRE_BLUES` | Blues |
+-------------------------------------+---------------------------------+
| :const:`Radio.GENRE_FOLK` | Folk |
+-------------------------------------+---------------------------------+
| :const:`Radio.GENRE_ELECTRONICA` | Electronica |
+-------------------------------------+---------------------------------+
| :const:`Radio.GENRE_CHRISTMAS` | Christmas |
+-------------------------------------+---------------------------------+
| :const:`Radio.GENRE_OLDIES` | Oldies |
+-------------------------------------+---------------------------------+
| :const:`Radio.GENRE_COUNTRY` | Country |
+-------------------------------------+---------------------------------+
| :const:`Radio.GENRE_EXPERIMENTAL` | Experimental |
+-------------------------------------+---------------------------------+
| :const:`Radio.GENRE_POP` | Pop |
+-------------------------------------+---------------------------------+
| :const:`Radio.GENRE_INDIE` | Indie |
+-------------------------------------+---------------------------------+
| :const:`Radio.GENRE_BLUEGRASS` | Bluegrass |
+-------------------------------------+---------------------------------+
| :const:`Radio.GENRE_RAP` | Rap |
+-------------------------------------+---------------------------------+
| :const:`Radio.GENRE_AMBIENT` | Ambient |
+-------------------------------------+---------------------------------+
| :const:`Radio.GENRE_TRANCE` | Trance |
+-------------------------------------+---------------------------------+
| :const:`Radio.GENRE_REGGAE` | Reggae |
+-------------------------------------+---------------------------------+
| :const:`Radio.GENRE_METAL` | Metal |
+-------------------------------------+---------------------------------+
'''
artists = self.connection.request('getArtistsForTagRadio', {'tagID' : radio},
self.connection.header('getArtistsForTagRadio', 'jsqueue'))[1]
return Radio(artists, radio, self.connection)
def _parse_album(self, album):
'''
Parse search json-data and create an :class:`Album` object.
'''
if album['CoverArtFilename']:
cover_url = '%sm%s' % (ALBUM_COVER_URL, album['CoverArtFilename'])
else:
cover_url = None
return Album(album['AlbumID'], album['Name'], album['ArtistID'], album['ArtistName'], cover_url, self.connection)
def _parse_playlist(self, playlist):
'''
Parse search json-data and create a :class:`Playlist` object.
'''
if playlist['Picture']:
cover_url = '%s70_%s' % (PLAYLIST_COVER_URL, playlist['Picture'])
else:
cover_url = None
return Playlist(playlist['PlaylistID'], playlist['Name'], cover_url, self.connection)
def search(self, query, type=SONGS):
'''
Search for songs, artists and albums.
:param query: search string
:param type: type to search for
:rtype: a generator that yields :class:`Song`, :class:`Artist` and :class:`Album` objects
Search Types:
+---------------------------------+---------------------------------+
| Constant | Meaning |
+=================================+=================================+
| :const:`Client.SONGS` | Search for songs |
+---------------------------------+---------------------------------+
| :const:`Client.ARTISTS` | Search for artists |
+---------------------------------+---------------------------------+
| :const:`Client.ALBUMS` | Search for albums |
+---------------------------------+---------------------------------+
| :const:`Client.PLAYLISTS` | Search for playlists |
+---------------------------------+---------------------------------+
'''
result = self.connection.request('getResultsFromSearch', {'query' : query, 'type' : type, 'guts' : 0, 'ppOverride' : False},
self.connection.header('getResultsFromSearch'))[1]['result']
if type == self.SONGS:
return (Song.from_response(song, self.connection) for song in result)
elif type == self.ARTISTS:
return (Artist(artist['ArtistID'], artist['Name'], self.connection) for artist in result)
elif type == self.ALBUMS:
return (self._parse_album(album) for album in result)
elif type == self.PLAYLISTS:
return (self._parse_playlist(playlist) for playlist in result)
def popular(self, period=DAILY):
'''
Get popular songs.
:param period: time period
:rtype: a generator that yields :class:`Song` objects
Time periods:
+---------------------------------+-------------------------------------+
| Constant | Meaning |
+=================================+=====================================+
| :const:`Client.DAILY` | Popular songs of this day |
+---------------------------------+-------------------------------------+
| :const:`Client.MONTHLY` | Popular songs of this month |
+---------------------------------+-------------------------------------+
'''
songs = self.connection.request('popularGetSongs', {'type' : period}, self.connection.header('popularGetSongs'))[1]['Songs']
return (Song.from_response(song, self.connection) for song in songs)
def playlist(self, playlist_id):
'''
Get a playlist from its ID
:param playlist_id: ID of the playlist
:rtype: a :class:`Playlist` object
'''
playlist = self.connection.request('getPlaylistByID', {'playlistID' : playlist_id}, self.connection.header('getPlaylistByID'))[1]
return self._parse_playlist(playlist)
def collection(self, user_id):
"""
Get the song collection of a user.
:param user_id: ID of a user.
:rtype: list of :class:`Song`
"""
# TODO further evaluation of the page param, I don't know where the limit is.
dct = {'userID' : user_id, 'page' : 0}
r = 'userGetSongsInLibrary'
result = self.connection.request(r, dct, self.connection.header(r))
songs = result[1]['Songs']
return [Song.from_response(song, self.connection) for song in songs]
def favorites(self, user_id):
"""
Get the favorite songs of a user.
:param user_id: ID of a user.
:rtype: list of :class:`Song`
"""
dct = {'userID' : user_id, "ofWhat" : "Songs"}
r = 'getFavorites'
result = self.connection.request(r, dct, self.connection.header(r))
songs = result[1]
return [Song.from_response(song, self.connection) for song in songs]
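# Illustrative sketch (not part of the original module): typical Client usage.
# The search query string is a hypothetical example.
def _example_client_usage():
    client = Client()
    client.init()                                    # fetch token and queue id
    daily_popular = list(client.popular(Client.DAILY))
    matches = list(client.search('some artist', type=Client.SONGS))
    return daily_popular, matches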
stopping")
return
self._send_queue.put((priority, (message, callback)))
def _start_greenlets(self):
"""Start standard greenlets that should always run, and put them in a dict indexed by their name,
to allow special operations to refer to them specifically."""
self._named_greenlets = {
name: self._group.spawn(getattr(self, name))
for name in ('_send_loop', '_recv_loop', '_idle_watchdog')
}
def start(self):
if self.stopped:
self.logger.info("Ignoring start() - already stopped (please create a new Client instead)")
return
if self.started:
self.logger.info("Ignoring start() - already started")
return
self.started = True
self.logger.info("Starting client for {self.nick} on {self.hostname}:{self.port}".format(self=self))
try:
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self.ssl:
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) if self.ssl == 'insecure' else ssl.create_default_context()
self._socket = context.wrap_socket(self._socket, server_hostname=self.hostname)
self.stop_handlers.add(lambda self: self._socket.close())
self._socket.connect((self.hostname, self.port))
except Exception as ex:
self.logger.exception("Error while connecting client")
self.stop(ex)
raise
# registration is a delicate dance...
with self._nick_lock, self._send_queue.limit_to(-1):
# by limiting to -1, we block all messages except pongs and registration
reg_done = gevent.event.Event()
reg_handlers = set()
@self.handler(command=replycodes.replies.WELCOME, sync=True)
def reg_got_welcome(client, msg):
reg_done.set()
for handler in reg_handlers:
handler.unregister(self)
reg_handlers.add(reg_got_welcome)
# Some anal servers require sending registration messages in a precise order
# and/or can't handle PINGs being sent during registration. This makes the standard
# nick-setting behaviour unsuitable. We're pretty sure we won't get a NICK
# forced change from the server during registration, so we only need to special-case
# handle a NICKNAMEINUSE message, and send the Nick() message manually.
@self.handler(command=replycodes.errors.NICKNAMEINUSE, sync=True)
def reg_nick_in_use(client, msg):
self._nick = self.increment_nick(self._nick)
message.Nick(self, self._nick).send(priority=-2)
reg_handlers.add(reg_nick_in_use)
if self.password:
message.Message(self, 'PASS', self.password).send(priority=-2)
message.Nick(self, self._nick).send(priority=-2)
message.User(self, self.ident, self.real_name).send(priority=-2)
self._start_greenlets()
if not reg_done.wait(self.REGISTRATION_TIMEOUT):
ex = Exception("Registration timeout")
self.stop(ex)
raise ex
self.logger.debug("Registration complete")
def _idle_watchdog(self):
"""Sends a ping if no activity for PING_IDLE_TIME seconds.
Disconnect if there is no response within PING_TIMEOUT seconds."""
try:
while True:
if self._activity.wait(self.PING_IDLE_TIME):
self._activity.clear()
continue
self.logger.info("No activity for {}s, sending PING".format(self.PING_IDLE_TIME))
if not self.wait_for_messages(self.PING_TIMEOUT, priority=0):
self.logger.error("No response to watchdog PING after {}s".format(self.PING_TIMEOUT))
self.stop(ConnectionClosed())
return
except Exception as ex:
self.logger.exception("error in _idle_watchdog")
self.stop(ex)
def _recv_loop(self):
error = None
try:
while True:
if self._kill_recv:
return
self._recv_waiting = True
try:
data = self._socket.recv(4096)
except socket.error as ex:
if ex.errno == errno.EINTR: # retry on EINTR
continue
raise
finally:
self._recv_waiting = False
if not data:
self.logger.info("no data from recv, socket closed")
break
lines = (self._recv_buf + data).split('\r\n')
self._recv_buf = lines.pop() # everything after final \r\n
if lines:
self._activity.set()
for line in lines:
self._process(line)
except Exception as ex:
self.logger.exception("error in _recv_loop")
error = ex
if self._recv_buf:
self.logger.warning("recv stream cut off mid-line, unused data: {!r}".format(self._recv_buf))
self.stop(error or ConnectionClosed())
def _send_loop(self):
send_queue = self._send_queue
try:
while True:
priority, (message, callback) = send_queue.get()
line = "{}\r\n".format(message.encode())
self.logger.debug("Sending message: {!r}".format(line))
try:
self._socket.sendall(line)
except socket.error as ex:
if ex.errno == errno.EPIPE:
self.logger.info("failed to send, socket closed")
self.stop(ConnectionClosed())
return
raise
self._activity.set()
if callback is not None:
self._group.spawn(callback, self, message)
if message.command == 'QUIT':
self.logger.info("QUIT sent, client shutting down")
self.stop()
return
except Exception as ex:
self.logger.exception("error in _send_loop")
self.stop(ex)
def _process(self, line):
self.logger.debug("Received message: {!r}".format(line))
line = line.strip()
if not line:
return
try:
msg = message.decode(line, self)
except message.InvalidMessage:
self.logger.warning("Could not decode message from server: {!r}".format(line), exc_info=True)
return
self.logger.debug("Getting handlers for message: {}".format(msg))
self._dispatch_handlers(msg)
def _dispatch_handlers(self, msg):
"""Carefully builds a set of greenlets for all message handlers, obeying ordering metadata for each handler.
Returns when all sync=True handlers have been executed."""
def normalize(handler):
# handler might be a Handler, BoundHandler or "sync"
return handler.handler if isinstance(handler, BoundHandler) else handler
# build dependency graph
graph = {handler: set() for handler in self.message_handlers}
graph['sync'] = set()
for handler in self.message_handlers:
for other in map(normalize, handler.after):
if other in graph:
graph[handler].add(other)
for other in map(normalize, handler.before):
if other in graph:
graph[other].add(handler)
# check for cycles
def check_cycles(handler, chain=()):
if handler in chain:
chain_text = " -> ".join(map(str, chain + (handler,)))
raise ValueError("Dependency cycle in handlers: {}".format(chain_text))
chain += handler,
for dep in graph[handler]:
check_cycles(dep, chain)
for handler in graph:
check_cycles(handler)
# set up the greenlets
greenlets = {}
def wait_and_handle(handler):
for dep in graph[handler]:
greenlets[dep].join()
return handler.handle(self, msg)
def wait_for_sync():
for dep in graph['sync']:
greenlets[dep].join()
for handler in self.message_handlers:
greenlets[handler] = self._group.spawn(wait_and_handle, handler)
greenlets['sync'] = self._group.spawn(wait_for_sync)
# wait for sync to finish
greenlets['sync'].get()
def stop(self, ex=None):
if self._stopping:
return self.wait_for_stop()
self._stopping = True
# we spawn a child greenlet so things don't screw up if current greenlet is in self._group
def _stop():
self._group.kill()
for fn in self.stop_handlers:
fn(self)
# post-stop: we clear a few structures to break reference loops
# since they no longer make sense.
for channel in self._channels.values():
channel.client = None
for user in self._users.values():
user.client = None
for handler in self.message_handlers.copy():
handler.unregister_for_client(self)
# queues might contain some final messages
self._send_queue = None
self._recv_queue = None
# act of setting _stopped will make wait_for_stop()s fire
if ex:
self._stopped.set_exception(ex)
else:
self._stopped.set(None)
gevent.spawn(_stop).join()
def msg(self, to, content, priority=16, block=False):
"""Shortcut to send a Privmsg. See Message.send()"""
message.Privmsg(self, to, content).send(priority=priority, block=block)
def quit(self, msg=None, priority=16, block=True):
"""Shortcut to send a Quit. See Message.send().
Note that sending a quit automatically stops the client."""
message.Quit(self, msg).send(priority=priority)
if block:
self.wait_for_stop()
def channel(self, name):
"""Fetch a channel object, or create it if it doesn't exist.
Note that the channel is not joined automatically."""
name = self.normalize_channel(name)
if name not in self._channels:
Channel(self, name) # this will register itself into _channels
return self._channels[name]
@property
def joined_channels(self):
"""Returns a list of channels we are currently joined to"""
return set(channel for channel in self._channels.values() if channel.joined)
def wait_for(self, **match_args):
"""Block until a message matching given args is received.
The matching message is returned.
See geventirc.message.match() for match_args"""
result = gevent.event.AsyncResult()
@self.handler(**match_args)
def wait_callback(self, msg):
result.set(msg)
return True # unregister
return result.get()
def wait_for_stop(self):
"""Wait for client to exit, raising if it failed"""
self._stopped.get()
def wait_for_messages(self, timeout=None, priority=16):
"""This function will attempt to block until the server has received and processed
all current messages. We rely on the fact that servers will generally react to messages
in order, and so we queue up a Ping and wait for the corresponding Pong."""
# We're conservative here with our payload - 8 characters only, letters and digits,
# and we assume it's case insensitive. This still gives us about 40 bits of information.
# Also, some servers set the payload to their server name in the reply
# and attach the payload as a second arg. Finally, we just dump a reasonable timeout
# over the whole thing, just in case.
payload = ''.join(random.choice(string.lowercase + string.digits) for x in range(8))
received = gevent.event.Event()
def match_payload(params):
return any(value.lower() == payload for value in params)
@self.handler(command=message.Pong, params=match_payload)
def on_pong(client, msg):
received.set()
return True # unregister
message.Ping(self, payload).send()
if received.wait(self.WAIT_FOR_MESSAGES_TIMEOUT if timeout is None else timeout):
return True
self.logger.warning("Timed out while waiting for matching pong in wait_for_messages()")
return False
# aliases - the wait_for_* names are more descriptive, but they map to common async concepts:
join = wait_for_stop
sync = wait_for_messages
def normalize_channel(self, name):
"""Ensures that a channel name has a correct prefix, defaulting to the first entry in CHANTYPES."""
if not name:
raise ValueError("Channel name cannot be empty")
if name[0] in self.server_properties.CHANTYPES:
return name
return "{prefix}{name}".format(name=name, prefix=self.server_properties.CHANTYPES[0])
@Handler(command=message.ISupport, sync=True)
def recv_support(self, client, msg):
self.server_properties.update(msg.properties)
@Handler(command=message.Ping)
def on_ping(self, client, msg):
message.Pong(client, msg.payload).send(priority=-1)
@Handler(command=replycodes.errors.NICKNAMEINUSE, sync=True)
def nick_in_use(self, client, msg):
server_nick, bad_nick = msg.params[:2] # server_nick is what server thinks our nick is right now
self.logger.debug("Nick {!r} in use (our nick: {!r} -> {!r})".format(bad_nick, self._nick, self._new_nick))
if self._new_nick:
# if we're changing nicks, ignore it unless it matches the new one
if bad_nick == self._new_nick:
# cancel current change
self.logger.debug("New nick in use, cancelling")
self._new_nick = self._nick
return
# if we aren't changing nicks, something has gone wrong.
self.logger.warning("Got nick-in-use while not changing nicks, _nick={!r}, params={!r}".format(self._nick, msg.params))
if bad_nick != self._nick:
return # this is some kind of weird race, but ok to ignore
# if we've reached here, we must have messed up earlier and thought we got a nick when we didn't.
# easiest way to recover: force our nick back to whatever the server thinks it is right now.
self.logger.warning("Recovering from nick-in-use confusion by forcing nick {!r} -> {!r}".format(self._nick, server_nick))
self._nick = server_nick
@Handler(command='NICK', sender=matches_nick, sync=True)
def forced_nick_change(self, client, msg):
if msg.sender == self._new_nick:
# we are changing, and this was sent after our change was received so we must respect it.
self._new_nick = msg.nickname
elif msg.sender == self._nick:
# either we aren't changing and everything is fine, or we are changing but this was
# sent before the NICK command was processed by the server, so we change our old value
# so further forced_nick_changes and matches_nick() still works.
self._nick = msg.nickname
@Handler(command='JOIN', sender=matches_nick, sync=True)
def forced_join(self, client, msg):
for name in msg.channels:
channel = self.channel(name)
channel._join()
@Handler(command='PRIVMSG', ctcp=lambda v: v and v[0].upper() == 'VERSION')
def ctcp_version(self, client, msg):
if self.version:
message.Notice(self, msg.sender, ('VERSION', self.version)).send()
@Handler(command='PRIVMSG', ctcp=lambda v: v and v[0].upper() == 'TIME')
def ctcp_time(self, client, msg):
if self.time == 'utc':
now = time.gmtime()
elif self.time == 'local':
now = time.localtime()
else:
return
now = time.strftime('%s|%F %T', now)
message.Notice(self, msg.sender, ('TIME', now)).send()
@Handler(command='PRIVMSG', ctcp=lambda v: v and v[0].upper() == 'PING')
def ctcp_ping(self, client, msg):
cmd, arg = msg.ctcp
message.Notice(self, msg.sender, ('PING', arg)).send()
def _get_handoff_data(self):
"""Collect all data needed for a connection handoff and return as dict.
Make sure _prepare_for_handoff has been called first."""
return dict(
recv_buf = b64encode(self._recv_buf),
channels = [channel.name for channel in self._channels.values() if channel.joined],
hostname = self.hostname,
nick = self._nick,
port = self.port,
password = <PASSWORD>,
ident = self.ident,
real_name = self.real_name,
)
def _prepare_for_handoff(self):
"""Stop operations and prepare for a connection handoff.
Note that, among other things, this stops the client from responding to PINGs from the server,
and so effectively begins a timeout until the server drops the connection."""
if self.ssl:
raise ValueError("Handing off of an ssl connection is not supported")
# wait until we aren't changing nick, then permanently acquire the lock to prevent further changes
# (note that forced_nick_change could still change it, but that's ok because we're stopping recv_loop)
self._nick_lock.acquire()
self._named_greenlets['_idle_watchdog'].kill(block=True)
self._kill_recv = True # recv_loop will exit after processing current lines
if self._recv_waiting:
# recv_loop is stuck in a socket.recv call and should be interrupted so it can exit promptly
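# Illustrative sketch (not part of the original module; the method above is cut
# off in this excerpt): typical high-level usage of the Client defined above.
# The constructor arguments (hostname, nick, port) are assumed from the
# attributes the class uses; the channel and message text are hypothetical.
def _example_irc_client_usage():
    client = Client('irc.example.net', 'examplebot', port=6667)

    @client.handler(command='PRIVMSG')
    def on_privmsg(client, msg):
        # react to every PRIVMSG the server delivers
        client.logger.info("got a message from {}".format(msg.sender))

    client.start()                     # connect, register, spawn worker greenlets
    client.channel('#example')         # create (but do not join) a channel object
    client.msg('#example', 'hello')    # shortcut for a Privmsg
    client.quit('bye', block=True)     # sending QUIT also stops the client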
'y': 1104, 'width': 542, 'height': 108}, # Left bank top-right button
'Joy_8': {'Type': 'Digital', 'x': 1124, 'y': 1220, 'width': 542, 'height': 108}, # Left bank bottom-right button
'Joy_9': {'Type': 'Digital', 'x': 574, 'y': 1220, 'width': 542, 'height': 108}, # Left bank bottom-middle button
'Joy_10': {'Type': 'Digital', 'x': 24, 'y': 1220, 'width': 542, 'height': 108}, # Left bank bottom-left button
'Joy_11': {'Type': 'Digital', 'x': 3264, 'y': 1104, 'width': 542, 'height': 108}, # Right bank top-right button
'Joy_12': {'Type': 'Digital', 'x': 2714, 'y': 1104, 'width': 542, 'height': 108}, # Right bank top-middle button
'Joy_13': {'Type': 'Digital', 'x': 2164, 'y': 1104, 'width': 542, 'height': 108}, # Right bank top-left button
'Joy_14': {'Type': 'Digital', 'x': 2164, 'y': 1220, 'width': 542, 'height': 108}, # Right bank bottom-left button
'Joy_15': {'Type': 'Digital', 'x': 2714, 'y': 1220, 'width': 542, 'height': 108}, # Right bank bottom-middle button
'Joy_16': {'Type': 'Digital', 'x': 3264, 'y': 1220, 'width': 542, 'height': 108}, # Right bank bottom-right button
'Joy_POV1Up': {'Type': 'Digital', 'x': 1542, 'y': 214, 'width': 1532}, # PoV hat up
'Joy_POV1Right': {'Type': 'Digital', 'x': 1542, 'y': 270, 'width': 1532}, # PoV hat right
'Joy_POV1Down': {'Type': 'Digital', 'x': 1542, 'y': 326, 'width': 1532}, # PoV hat down
'Joy_POV1Left': {'Type': 'Digital', 'x': 1542, 'y': 382, 'width': 1532}, # PoV hat left
'Joy_RZAxis': {'Type': 'Analogue', 'x': 2357, 'y': 980, 'width': 1132}, # Stick twist
'Joy_UAxis': {'Type': 'Analogue', 'x': 2584, 'y': 1750, 'width': 832}, # Stick throttle slider
'Joy_XAxis': {'Type': 'Analogue', 'x': 2357, 'y': 924, 'width': 1132}, # Stick pitch
'Joy_YAxis': {'Type': 'Analogue', 'x': 2357, 'y': 868, 'width': 1132}, # Stick roll
},
'T16000MFCS': {
'Joy_1': {'Type': 'Digital', 'x': 1804, 'y': 844, 'width': 642, 'height': 108}, # Primary trigger
'Joy_2': {'Type': 'Digital', 'x': 2764, 'y': 604, 'width': 992}, # Bottom button
'Joy_3': {'Type': 'Digital', 'x': 1754, 'y': 484, 'width': 692}, # Left-hand button
'Joy_4': {'Type': 'Digital', 'x': 2764, 'y': 504, 'width': 992}, # Right-hand button
'Joy_5': {'Type': 'Digital', 'x': 1254, 'y': 1244, 'width': 292, 'height': 108}, # Left bank top-left button
'Joy_6': {'Type': 'Digital', 'x': 1554, 'y': 1244, 'width': 292, 'height': 108}, # Left bank top-middle button
'Joy_7': {'Type': 'Digital', 'x': 1854, 'y': 1244, 'width': 292, 'height': 108}, # Left bank top-right button
'Joy_8': {'Type': 'Digital', 'x': 1854, 'y': 1360, 'width': 292, 'height': 108}, # Left bank bottom-right button
'Joy_9': {'Type': 'Digital', 'x': 1554, 'y': 1360, 'width': 292, 'height': 108}, # Left bank bottom-middle button
'Joy_10': {'Type': 'Digital', 'x': 1254, 'y': 1360, 'width': 292, 'height': 108}, # Left bank bottom-left button
'Joy_11': {'Type': 'Digital', 'x': 3484, 'y': 992, 'width': 292, 'height': 108}, # Right bank top-right button
'Joy_12': {'Type': 'Digital', 'x': 3184, 'y': 992, 'width': 292, 'height': 108}, # Right bank top-middle button
'Joy_13': {'Type': 'Digital', 'x': 2884, 'y': 992, 'width': 292, 'height': 108}, # Right bank top-left button
'Joy_14': {'Type': 'Digital', 'x': 2884, 'y': 1108, 'width': 292, 'height': 108}, # Right bank bottom-left button
'Joy_15': {'Type': 'Digital', 'x': 3184, 'y': 1108, 'width': 292, 'height': 108}, # Right bank bottom-middle button
'Joy_16': {'Type': 'Digital', 'x': 3484, 'y': 1108, 'width': 292, 'height': 108}, # Right bank bottom-right button
'Joy_POV1Up': {'Type': 'Digital', 'x': 2524, 'y': 214, 'width': 1192}, # PoV hat up
'Joy_POV1Right': {'Type': 'Digital', 'x': 2524, 'y': 270, 'width': 1192}, # PoV hat right
'Joy_POV1Down': {'Type': 'Digital', 'x': 2524, 'y': 326, 'width': 1192}, # PoV hat down
'Joy_POV1Left': {'Type': 'Digital', 'x': 2524, 'y': 382, 'width': 1192}, # PoV hat left
'Joy_RZAxis': {'Type': 'Analogue', 'x': 2824, 'y': 856, 'width': 932}, # Stick twist
'Joy_UAxis': {'Type': 'Analogue', 'x': 2944, 'y': 1593, 'width': 832}, # Stick throttle slider
'Joy_XAxis': {'Type': 'Analogue', 'x': 2824, 'y': 800, 'width': 932}, # Stick pitch
'Joy_YAxis': {'Type': 'Analogue', 'x': 2824, 'y': 744, 'width': 932}, # Stick roll
},
'T16000MTHROTTLE': {
'Joy_1': {'Type': 'Digital', 'x': 294, 'y': 1244, 'width': 792}, # Thumb button
'Joy_2': {'Type': 'Digital', 'x': 1774, 'y': 1774, 'width': 892}, # Pinky button
'Joy_3': {'Type': 'Digital', 'x': 1714, 'y': 1664, 'width': 892}, # Ring finger button
'Joy_4': {'Type': 'Digital', 'x': 1484, 'y': 1514, 'width': 692}, # Middle finger rocker up
'Joy_5': {'Type': 'Digital', 'x': 1484, 'y': 1570, 'width': 692}, # Middle finger rocker down
'Joy_6': {'Type': 'Digital', 'x': 244, 'y': 1866, 'width': 732}, # Index finger mouse push
'Joy_7': {'Type': 'Digital', 'x': 1024, 'y': 654, 'width': 692}, # Middle hat up
'Joy_8': {'Type': 'Digital', 'x': 1024, 'y': 710, 'width': 692}, # Middle hat right
'Joy_9': {'Type': 'Digital', 'x': 1024, 'y': 766, 'width': 692}, # Middle hat down
'Joy_10': {'Type': 'Digital', 'x': 1024, 'y': 820, 'width': 692}, # Middle hat left
'Joy_11': {'Type': 'Digital', 'x': 1034, 'y': 929, 'width': 692}, # Castle hat up
'Joy_12': {'Type': 'Digital', 'x': 1034, 'y': 985, 'width': 692}, # Castle hat right
'Joy_13': {'Type': 'Digital', 'x': 1034, 'y': 1041, 'width': 692}, # Castle hat down
'Joy_14': {'Type': 'Digital', 'x': 1034, 'y': 1097, 'width': 692}, # Castle hat left
'Joy_POV1Up': {'Type': 'Digital', 'x': 1014, 'y': 364, 'width': 692}, # PoV hat up
'Joy_POV1Right': {'Type': 'Digital', 'x': 1014, 'y': 420, 'width': 692}, # PoV hat right
'Joy_POV1Down': {'Type': 'Digital', 'x': 1014, 'y': 476, 'width': 692}, # PoV hat down
'Joy_POV1Left': {'Type': 'Digital', 'x': 1014, 'y': 532, 'width': 692}, # PoV hat left
'Joy_XAxis': {'Type': 'Analogue', 'x': 244, 'y': 1810, 'width': 732}, # Index finger mouse X
'Joy_YAxis': {'Type': 'Analogue', 'x': 244, 'y': 1754, 'width': 732}, # Index finger mouse Y
'Joy_ZAxis': {'Type': 'Analogue', 'x': 84, 'y': 555, 'width': 572}, # Throttle
'Joy_UAxis': {'Type': 'Analogue', 'x': 1874, 'y': 1914, 'width': 832}, # Pinky dial
'Joy_RZAxis': {'Type': 'Analogue', 'x': 1954, 'y': 2054, 'width': 832}, # Paddle
},
'SaitekFLY5': {
'Joy_1': {'Type': 'Digital', 'x': 684, 'y': 794, 'width': 1092}, # Primary trigger
'Joy_2': {'Type': 'Digital', 'x': 734, 'y': 874, 'width': 1092}, # Bottom-left button
'Joy_3': {'Type': 'Digital', 'x': 2414, 'y': 744, 'width': 1092}, # Bottom-right button
'Joy_4': {'Type': 'Digital', 'x': 624, 'y': 704, 'width': 1092}, # Top-left button
'Joy_5': {'Type': 'Digital', 'x': 2414, 'y': 594, 'width': 1092}, # Top-right button
'Joy_6': {'Type': 'Digital', 'x': 584, 'y': 1847, 'width': 1092}, # Bottom bank first button
'Joy_7': {'Type': 'Digital', 'x': 734, 'y': 1927, 'width': 1092}, # Bottom bank second button
'Joy_8': {'Type': 'Digital', 'x': 884, 'y': 2007, 'width': 1092}, # Bottom bank third button
'Joy_9': {'Type': 'Digital', 'x': 1034, 'y': 2090, 'width': 1092}, # Bottom bank fourth button
'Joy_10': {'Type': 'Digital', 'x': 304, 'y': 1604, 'width': 1092}, # Rear left button
'Joy_11': {'Type': 'Digital', 'x': 664, 'y': 1204, 'width': 1092}, # Rear right button
'Joy_12': {'Type': 'Digital', 'x': 2474, 'y': 864, 'width': 1132}, # Scroll wheel up
'Joy_13': {'Type': 'Digital', 'x': 2474, 'y': 920, 'width': 1132}, # Scroll wheel down
'Joy_14': {'Type': 'Digital', 'x': 2594, 'y': 1504, 'width': 1092}, # Base button
'Joy_POV1Up': {'Type': 'Digital', 'x': 990, 'y': 304, 'width': 1932}, # PoV hat up
'Joy_POV1Right': {'Type': 'Digital', 'x': 990, 'y': 360, 'width': 1932}, # PoV hat right
'Joy_POV1Down': {'Type': 'Digital', 'x': 990, 'y': 416, 'width': 1932}, # PoV hat down
'Joy_POV1Left': {'Type': 'Digital', 'x': 990, 'y': 472, 'width': 1932}, # PoV hat left
'Joy_RZAxis': {'Type': 'Analogue', 'x': 2484, 'y': 1136, 'width': 1132}, # Stick twist
'Joy_XAxis': {'Type': 'Analogue', 'x': 2484, 'y': 1080, 'width': 1132}, # Stick roll
'Joy_YAxis': {'Type': 'Analogue', 'x': 2484, 'y': 1024, 'width': 1132}, # Stick pitch
'Joy_ZAxis': {'Type': 'Analogue', 'x': 264, 'y': 1404, 'width': 1032}, # Throttle
},
'06A30836': { # TODO collapse alias
'displayName': 'SaitekFLY5',
'Joy_1': {'Type': 'Digital', 'x': 684, 'y': 794, 'width': 1092}, # Primary trigger
'Joy_2': {'Type': 'Digital', 'x': 734, 'y': 874, 'width': 1092}, # Bottom-left button
'Joy_3': {'Type': 'Digital', 'x': 2414, 'y': 744, 'width': 1092}, # Bottom-right button
'Joy_4': {'Type': 'Digital', 'x': 624, 'y': 704, 'width': 1092}, # Top-left button
'Joy_5': {'Type': 'Digital', 'x': 2414, 'y': 594, 'width': 1092}, # Top-right button
'Joy_6': {'Type': 'Digital', 'x': 584, 'y': 1847, 'width': 1092}, # Bottom bank first button
'Joy_7': {'Type': 'Digital', 'x': 734, 'y': 1927, 'width': 1092}, # Bottom bank second | |
if k == 0:
return []
map_fit_ind = defaultdict(list)
for ind in individuals:
map_fit_ind[ind.fitness].append(ind)
fits = list(map_fit_ind)
current_front = []
next_front = []
dominating_fits = defaultdict(int)
dominated_fits = defaultdict(list)
# Rank first Pareto front
for i, fit_i in enumerate(fits):
for fit_j in fits[i + 1 :]:
if self.__dominated(fit_i, fit_j):
dominating_fits[fit_j] += 1
dominated_fits[fit_i].append(fit_j)
elif self.__dominated(fit_j, fit_i):
dominating_fits[fit_i] += 1
dominated_fits[fit_j].append(fit_i)
if dominating_fits[fit_i] == 0:
current_front.append(fit_i)
fronts = [[]]
for fit in current_front:
fronts[-1].extend(map_fit_ind[fit])
pareto_sorted = len(fronts[-1])
# Rank the next front until all individuals are sorted or
# the given number of individual are sorted.
if not first_front_only:
N = min(len(individuals), k)
while pareto_sorted < N:
fronts.append([])
for fit_p in current_front:
for fit_d in dominated_fits[fit_p]:
dominating_fits[fit_d] -= 1
if dominating_fits[fit_d] == 0:
next_front.append(fit_d)
pareto_sorted += len(map_fit_ind[fit_d])
fronts[-1].extend(map_fit_ind[fit_d])
current_front = next_front
next_front = []
return fronts
def __dominated(self, ind1, ind2):
"""TTaken from deap and modified slightly to make pareto sorting less strict.
Return true if each objective of *self* is not strictly worse than
the corresponding objective of *other* and at least one objective is
strictly better.
:param obj: Slice indicating on which objectives the domination is
tested. The default value is `slice(None)`, representing
every objectives.
"""
not_equal = False
mean1 = np.mean(ind1.wvalues)
mean2 = np.mean(ind2.wvalues)
std1 = np.std(ind1.wvalues)
if mean1 > mean2:
not_equal = True
elif mean1 < mean2:
return False
return not_equal
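# Worked example of the relaxed domination above (illustrative, not from the
# original source): with ind1.wvalues == (0.9, 0.7) and ind2.wvalues == (0.6, 0.8),
# mean1 == 0.8 > mean2 == 0.7, so ind1 is treated as dominating ind2 even though
# ind2 is better on the second objective. Strict NSGA-II domination would call
# this pair mutually non-dominated. Note that std1 is computed above but not
# used in the comparison as written.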
def __assignCrowdingDist(self, individuals):
"""taken from deap. Assign a crowding distance to each individual's fitness. The
crowding distance can be retrieved via the :attr:`crowding_dist`
attribute of each individual's fitness.
"""
if len(individuals) == 0:
return
distances = [0.0] * len(individuals)
crowd = [(ind.fitness.values, i) for i, ind in enumerate(individuals)]
nobj = len(individuals[0].fitness.values)
for i in range(nobj):
crowd.sort(key=lambda element: element[0][i])
distances[crowd[0][1]] = float("inf")
distances[crowd[-1][1]] = float("inf")
if crowd[-1][0][i] == crowd[0][0][i]:
continue
norm = nobj * float(crowd[-1][0][i] - crowd[0][0][i])
for prev, cur, next in zip(crowd[:-2], crowd[1:-1], crowd[2:]):
distances[cur[1]] += 1.0 * (next[0][i] - prev[0][i]) / norm
for i, dist in enumerate(distances):
individuals[i].fitness.crowding_dist = dist
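# The loop above implements the NSGA-II crowding distance with an extra 1/nobj
# factor folded into the normalisation: for each objective m and each interior
# individual i (sorted by objective m),
#     dist(i) += (f_m(i+1) - f_m(i-1)) / (nobj * (f_m_max - f_m_min)),
# while the two boundary individuals get an infinite distance so they are always
# kept during truncation selection.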
def __selNSGA2(self, individuals, k):
"""Calculate fitness for an individual. NSGA2 selection taken from deap
Apply NSGA-II selection operator on the *individuals*. Usually, the
size of *individuals* will be larger than *k* because any individual
present in *individuals* will appear in the returned list at most once.
Having the size of *individuals* equal to *k* will have no effect other
than sorting the population according to their front rank. The
list returned contains references to the input *individuals*. For more
details on the NSGA-II operator see [Deb2002]_.
:param individuals: A list of individuals to select from.
:param k: The number of individuals to select.
:returns: A list of selected individuals.
.. [Deb2002] Deb, Pratab, Agarwal, and Meyarivan, "A fast elitist
non-dominated sorting genetic algorithm for multi-objective
optimization: NSGA-II", 2002.
"""
pareto_fronts = self.__sortNondominatedAdapt(individuals, k)
for front in pareto_fronts:
self.__assignCrowdingDist(front)
chosen = list(chain(*pareto_fronts[:-1]))
k = k - len(chosen)
if k > 0:
sorted_front = sorted(
pareto_fronts[-1], key=attrgetter("fitness.crowding_dist"), reverse=True
)
chosen.extend(sorted_front[:k])
return chosen
def __bitList(self, n, x):
templist = [1 if digit == "1" else 0 for digit in bin(n)[::-1]]
while len(templist) < x:
templist.append(0)
while (len(templist)) > x:
templist.pop()
return templist
def writeModel(self, individual, model):
"""iterate over nodes to generate a BooleanNet representation for the entire model"""
addString = ""
for i in range(0, len(model.nodePositions)):
addString = addString + model._ruleMaker__writeNode(
i,
individual[model.individualParse[i] : model.individualParse[i + 1]],
model,
)
addString = addString + "\n"
return addString[:-1]
def __findInEdges(self, model, node):
"""find the incoming edges to each 'and' connection for a given node"""
inEdges = []
for lister in model.andNodeList[node]:
tempTup = tuple(lister)
inEdges.append(set(tempTup))
return inEdges
def __simplifyRule(self, rule, inEdges):
"""find the simplest form of a rule"""
for i in range(len(rule)):
if rule[i] == 1:
for j in range(len(rule)):
if rule[j] == 1 and not i == j:
if inEdges[i].issubset(inEdges[j]):
rule[j] = 0
return rule
def __writeNode(self, currentNode, nodeIndividual, model):
"""write out evaluation instructions in BooleanNet format. This follows the exact same code as updateNode (for switch=0), but writes a string instead of actually updating the values of the nodes"""
andNodes = model.andNodeList[
currentNode
] # find the list of shadow and nodes we must compute before computing value of current nodes
andNodeInvertList = model.andNodeInvertList[
currentNode
] # find list of lists of whether input nodes need to be inverted (corresponds to inputOrder)
writenode = (
"" + model.nodeList[currentNode] + " *= "
) # set up the initial string to use to write node
inEdges = self.__findInEdges(model, currentNode)
nodeIndividual = self.__simplifyRule(nodeIndividual, inEdges)
if model.andLenList[currentNode] == 0 or sum(nodeIndividual) == 0:
# print(writenode + ' ' + model.nodeList[currentNode])
return (
writenode + " " + model.nodeList[currentNode]
) # if no inputs, maintain value
elif len(andNodes) == 1:
# if only one input, then can either affect or not affect the node. so either keep the value or update to the single input's value
value = ""
# if only one input, then set to that number
if andNodeInvertList[0][0] == 0:
value = value + model.nodeList[andNodes[0][0]]
else:
value = value + "not " + model.nodeList[andNodes[0][0]]
print(writenode + value)
return writenode + value
else:
# update nodes with more than one input
# first deal with case of simple logic without need of linear regression
orset = []
# go through list of possible shadow and nodes to see which ones actually contribute
for andindex in range(len(nodeIndividual)):
newval = "("
if nodeIndividual[andindex] == 1:
# if a shadow and contributes, compute its value using its upstream nodes
if andNodeInvertList[andindex][0]:
newval = newval + "not "
newval = newval + self.nodeList[andNodes[andindex][0]]
for addnode in range(1, len(andNodes[andindex])):
newval = newval + " and "
if andNodeInvertList[andindex][addnode]:
newval = newval + " not "
newval = newval + self.nodeList[andNodes[andindex][addnode]]
orset.append(newval + ")")
# combine the shadow and nodes with or operations
writenode = writenode + orset.pop()
for val in orset:
writenode = writenode + " or " + val
# print(writenode)
return writenode
def __writeNode_BoolNet(self, currentNode, nodeIndividual, model):
"""write out evaluation instructions in BoolNet format.
This follows the exact same code as updateNode (for switch=0), but writes a string instead of actually updating the values of the nodes"""
andNodes = model.andNodeList[
currentNode
] # find the list of shadow and nodes we must compute before computing value of current nodes
andNodeInvertList = model.andNodeInvertList[
currentNode
] # find list of lists of whether input nodes need to be inverted (corresponds to inputOrder)
writenode = (
"" + model.nodeList[currentNode] + " , "
) # set up the initial string to use to write node
inEdges = self.__findInEdges(model, currentNode)
nodeIndividual = self.__simplifyRule(nodeIndividual, inEdges)
if model.andLenList[currentNode] == 0 or sum(nodeIndividual) == 0:
return (
writenode + " " + model.nodeList[currentNode]
) # if no inputs, maintain value
elif len(andNodes) == 1:
# if only one input, then can either affect or not affect the node. so either keep the value or update to the single input's value
value = ""
# if only one input, then set to that number
if andNodeInvertList[0][0] == 0:
value = value + model.nodeList[andNodes[0][0]]
else:
value = value + "!" + model.nodeList[andNodes[0][0]]
print(writenode + value)
return writenode + value
else:
# update nodes with more than one input
# first deal with case of simple logic without need of linear regression
orset = []
# go through list of possible shadow and nodes to see which ones actually contribute
for andindex in range(len(nodeIndividual)):
newval = ""
if nodeIndividual[andindex] == 1:
# if a shadow and contributes, compute its value using its upstream nodes
if andNodeInvertList[andindex][0]:
newval = newval + "!"
newval = newval + self.nodeList[andNodes[andindex][0]]
for addnode in range(1, len(andNodes[andindex])):
newval = newval + " & "
if andNodeInvertList[andindex][addnode]:
newval = newval + " !"
newval = newval + self.nodeList[andNodes[andindex][addnode]]
orset.append(newval)
# combine the shadow and nodes with or operations
writenode = writenode + orset.pop()
for val in orset:
    writenode = writenode + " | " + val
return writenode
radius, in the deflected configuration); please note that this clearance is only approximate because the calculation assumes that the blade is a line with no volume (however, the calculation does use the local tower radius); when B2N2 is above the tower top (or below the tower base), the absolute distance to the tower top (or base) minus the local tower radius, in the deflected configuration, is output
self.B2N3Clrnc = False #Tower clearance at Blade 2, Node 3 (based on the absolute distance to the nearest point in the tower from B2N3 minus the local tower radius, in the deflected configuration); please note that this clearance is only approximate because the calculation assumes that the blade is a line with no volume (however, the calculation does use the local tower radius); when B2N3 is above the tower top (or below the tower base), the absolute distance to the tower top (or base) minus the local tower radius, in the deflected configuration, is output
self.B2N4Clrnc = False #Tower clearance at Blade 2, Node 4 (based on the absolute distance to the nearest point in the tower from B2N4 minus the local tower radius, in the deflected configuration); please note that this clearance is only approximate because the calculation assumes that the blade is a line with no volume (however, the calculation does use the local tower radius); when B2N4 is above the tower top (or below the tower base), the absolute distance to the tower top (or base) minus the local tower radius, in the deflected configuration, is output
self.B2N5Clrnc = False #Tower clearance at Blade 2, Node 5 (based on the absolute distance to the nearest point in the tower from B2N5 minus the local tower radius, in the deflected configuration); please note that this clearance is only approximate because the calculation assumes that the blade is a line with no volume (however, the calculation does use the local tower radius); when B2N5 is above the tower top (or below the tower base), the absolute distance to the tower top (or base) minus the local tower radius, in the deflected configuration, is output
self.B2N6Clrnc = False #Tower clearance at Blade 2, Node 6 (based on the absolute distance to the nearest point in the tower from B2N6 minus the local tower radius, in the deflected configuration); please note that this clearance is only approximate because the calculation assumes that the blade is a line with no volume (however, the calculation does use the local tower radius); when B2N6 is above the tower top (or below the tower base), the absolute distance to the tower top (or base) minus the local tower radius, in the deflected configuration, is output
self.B2N7Clrnc = False #Tower clearance at Blade 2, Node 7 (based on the absolute distance to the nearest point in the tower from B2N7 minus the local tower radius, in the deflected configuration); please note that this clearance is only approximate because the calculation assumes that the blade is a line with no volume (however, the calculation does use the local tower radius); when B2N7 is above the tower top (or below the tower base), the absolute distance to the tower top (or base) minus the local tower radius, in the deflected configuration, is output
self.B2N8Clrnc = False #Tower clearance at Blade 2, Node 8 (based on the absolute distance to the nearest point in the tower from B2N8 minus the local tower radius, in the deflected configuration); please note that this clearance is only approximate because the calculation assumes that the blade is a line with no volume (however, the calculation does use the local tower radius); when B2N8 is above the tower top (or below the tower base), the absolute distance to the tower top (or base) minus the local tower radius, in the deflected configuration, is output
self.B2N9Clrnc = False #Tower clearance at Blade 2, Node 9 (based on the absolute distance to the nearest point in the tower from B2N9 minus the local tower radius, in the deflected configuration); please note that this clearance is only approximate because the calculation assumes that the blade is a line with no volume (however, the calculation does use the local tower radius); when B2N9 is above the tower top (or below the tower base), the absolute distance to the tower top (or base) minus the local tower radius, in the deflected configuration, is output
self.B3N1Clrnc = False #Tower clearance at Blade 3, Node 1 (based on the absolute distance to the nearest point in the tower from B3N1 minus the local tower radius, in the deflected configuration); please note that this clearance is only approximate because the calculation assumes that the blade is a line with no volume (however, the calculation does use the local tower radius); when B3N1 is above the tower top (or below the tower base), the absolute distance to the tower top (or base) minus the local tower radius, in the deflected configuration, is output
self.B3N2Clrnc = False #Tower clearance at Blade 3, Node 2 (based on the absolute distance to the nearest point in the tower from B3N2 minus the local tower radius, in the deflected configuration); please note that this clearance is only approximate because the calculation assumes that the blade is a line with no volume (however, the calculation does use the local tower radius); when B3N2 is above the tower top (or below the tower base), the absolute distance to the tower top (or base) minus the local tower radius, in the deflected configuration, is output
self.B3N3Clrnc = False #Tower clearance at Blade 3, Node 3 (based on the absolute distance to the nearest point in the tower from B3N3 minus the local tower radius, in the deflected configuration); please note that this clearance is only approximate because the calculation assumes that the blade is a line with no volume (however, the calculation does use the local tower radius); when B3N3 is above the tower top (or below the tower base), the absolute distance to the tower top (or base) minus the local tower radius, in the deflected configuration, is output
self.B3N4Clrnc = False #Tower clearance at Blade 3, Node 4 (based on the absolute distance to the nearest point in the tower from B3N4 minus the local tower radius, in the deflected configuration); please note that this clearance is only approximate because the calculation assumes that the blade is a line with no volume (however, the calculation does use the local tower radius); when B3N4 is above the tower top (or below the tower base), the absolute distance to the tower top (or base) minus the local tower radius, in the deflected configuration, is output
self.B3N5Clrnc = False #Tower clearance at Blade 3, Node 5 (based on the absolute distance to the nearest point in the tower from B3N5 minus the local tower radius, in the deflected configuration); please note that this clearance is only approximate because the calculation assumes that the blade is a line with no volume (however, the calculation does use the local tower radius); when B3N5 is above the tower top (or below the tower base), the absolute distance to the tower top (or base) minus the local tower radius, in the deflected configuration, is output
self.B3N6Clrnc = False #Tower clearance at Blade 3, Node 6 (based on the absolute distance to the nearest point in the tower from B3N6 minus the local tower radius, in the deflected configuration); please note that this clearance is only approximate because the calculation assumes that the blade is a line with no volume (however, the calculation does use the local tower radius); when B3N6 is above the tower top (or below the tower base), the absolute distance to the tower top (or base) minus the | |
dictionary."""
args = {}
if 'id' in _dict:
args['id'] = _dict.get('id')
else:
raise ValueError('Required property \'id\' not present in FirewallRuleUpdateInputFilter JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a FirewallRuleUpdateInputFilter object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'id') and self.id is not None:
_dict['id'] = self.id
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this FirewallRuleUpdateInputFilter object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'FirewallRuleUpdateInputFilter') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'FirewallRuleUpdateInputFilter') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class FirewallRulesUpdateInputItem():
"""
FirewallRulesUpdateInputItem.
:attr str id: Identifier of the firewall rule.
:attr str action: The firewall action to perform, "log" action is only available
for enterprise plan instances.
:attr bool paused: (optional) Indicates if the firewall rule is active.
:attr str description: (optional) To briefly describe the firewall rule, omitted
from object if empty.
:attr FirewallRulesUpdateInputItemFilter filter: (optional) An existing filter.
"""
def __init__(self,
id: str,
action: str,
*,
paused: bool = None,
description: str = None,
filter: 'FirewallRulesUpdateInputItemFilter' = None) -> None:
"""
Initialize a FirewallRulesUpdateInputItem object.
:param str id: Identifier of the firewall rule.
:param str action: The firewall action to perform, "log" action is only
available for enterprise plan instances.
:param bool paused: (optional) Indicates if the firewall rule is active.
:param str description: (optional) To briefly describe the firewall rule,
omitted from object if empty.
:param FirewallRulesUpdateInputItemFilter filter: (optional) An existing
filter.
"""
self.id = id
self.action = action
self.paused = paused
self.description = description
self.filter = filter
@classmethod
def from_dict(cls, _dict: Dict) -> 'FirewallRulesUpdateInputItem':
"""Initialize a FirewallRulesUpdateInputItem object from a json dictionary."""
args = {}
if 'id' in _dict:
args['id'] = _dict.get('id')
else:
raise ValueError('Required property \'id\' not present in FirewallRulesUpdateInputItem JSON')
if 'action' in _dict:
args['action'] = _dict.get('action')
else:
raise ValueError('Required property \'action\' not present in FirewallRulesUpdateInputItem JSON')
if 'paused' in _dict:
args['paused'] = _dict.get('paused')
if 'description' in _dict:
args['description'] = _dict.get('description')
if 'filter' in _dict:
args['filter'] = FirewallRulesUpdateInputItemFilter.from_dict(_dict.get('filter'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a FirewallRulesUpdateInputItem object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'id') and self.id is not None:
_dict['id'] = self.id
if hasattr(self, 'action') and self.action is not None:
_dict['action'] = self.action
if hasattr(self, 'paused') and self.paused is not None:
_dict['paused'] = self.paused
if hasattr(self, 'description') and self.description is not None:
_dict['description'] = self.description
if hasattr(self, 'filter') and self.filter is not None:
_dict['filter'] = self.filter.to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this FirewallRulesUpdateInputItem object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'FirewallRulesUpdateInputItem') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'FirewallRulesUpdateInputItem') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ActionEnum(str, Enum):
"""
The firewall action to perform, "log" action is only available for enterprise plan
instances.
"""
LOG = 'log'
ALLOW = 'allow'
CHALLENGE = 'challenge'
JS_CHALLENGE = 'js_challenge'
BLOCK = 'block'
class FirewallRulesUpdateInputItemFilter():
"""
An existing filter.
:attr str id: Identifier of the filter.
"""
def __init__(self,
id: str) -> None:
"""
Initialize a FirewallRulesUpdateInputItemFilter object.
:param str id: Identifier of the filter.
"""
self.id = id
@classmethod
def from_dict(cls, _dict: Dict) -> 'FirewallRulesUpdateInputItemFilter':
"""Initialize a FirewallRulesUpdateInputItemFilter object from a json dictionary."""
args = {}
if 'id' in _dict:
args['id'] = _dict.get('id')
else:
raise ValueError('Required property \'id\' not present in FirewallRulesUpdateInputItemFilter JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a FirewallRulesUpdateInputItemFilter object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'id') and self.id is not None:
_dict['id'] = self.id
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this FirewallRulesUpdateInputItemFilter object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'FirewallRulesUpdateInputItemFilter') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'FirewallRulesUpdateInputItemFilter') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
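# Hypothetical usage sketch (not from the generated SDK itself): build one update
# item for an existing firewall rule and serialize it into the JSON-ready dict the
# service expects. The identifier strings below are made-up placeholders.
#
#   item = FirewallRulesUpdateInputItem(
#       id='f2d427378e7542acb295380d352e2ebd',
#       action='js_challenge',
#       paused=False,
#       filter=FirewallRulesUpdateInputItemFilter(id='b7ff25282d394be7b945e23c7106ce8a'),
#   )
#   payload = [item.to_dict()]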
class ListFirewallRulesRespResultInfo():
"""
Statistics of results.
:attr int page: Page number.
:attr int per_page: Number of results per page.
:attr int count: Number of results.
:attr int total_count: Total number of results.
"""
def __init__(self,
page: int,
per_page: int,
count: int,
total_count: int) -> None:
"""
Initialize a ListFirewallRulesRespResultInfo object.
:param int page: Page number.
:param int per_page: Number of results per page.
:param int count: Number of results.
:param int total_count: Total number of results.
"""
self.page = page
self.per_page = per_page
self.count = count
self.total_count = total_count
@classmethod
def from_dict(cls, _dict: Dict) -> 'ListFirewallRulesRespResultInfo':
"""Initialize a ListFirewallRulesRespResultInfo object from a json dictionary."""
args = {}
if 'page' in _dict:
args['page'] = _dict.get('page')
else:
raise ValueError('Required property \'page\' not present in ListFirewallRulesRespResultInfo JSON')
if 'per_page' in _dict:
args['per_page'] = _dict.get('per_page')
else:
raise ValueError('Required property \'per_page\' not present in ListFirewallRulesRespResultInfo JSON')
if 'count' in _dict:
args['count'] = _dict.get('count')
else:
raise ValueError('Required property \'count\' not present in ListFirewallRulesRespResultInfo JSON')
if 'total_count' in _dict:
args['total_count'] = _dict.get('total_count')
else:
raise ValueError('Required property \'total_count\' not present in ListFirewallRulesRespResultInfo JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ListFirewallRulesRespResultInfo object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'page') and self.page is not None:
_dict['page'] = self.page
if hasattr(self, 'per_page') and self.per_page is not None:
_dict['per_page'] = self.per_page
if hasattr(self, 'count') and self.count is not None:
_dict['count'] = self.count
if hasattr(self, 'total_count') and self.total_count is not None:
_dict['total_count'] = self.total_count
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ListFirewallRulesRespResultInfo object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'ListFirewallRulesRespResultInfo') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ListFirewallRulesRespResultInfo') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class DeleteFirewallRuleResp():
"""
DeleteFirewallRuleResp.
:attr bool success: Operation success flag.
:attr List[List[str]] errors: Array of errors encountered.
:attr List[List[str]] messages: Array of messages returned.
:attr DeleteFirewallRuleRespResult result: Container for response information.
"""
def __init__(self,
success: bool,
errors: List[List[str]],
messages: List[List[str]],
result: 'DeleteFirewallRuleRespResult') -> None:
"""
Initialize a DeleteFirewallRuleResp object.
:param bool success: Operation success flag.
:param List[List[str]] errors: Array of errors encountered.
:param List[List[str]] messages: Array of messages returned.
:param DeleteFirewallRuleRespResult result: Container for response
information.
"""
self.success = success
self.errors = errors
self.messages = messages
self.result = result
@classmethod
def from_dict(cls, _dict: Dict) -> 'DeleteFirewallRuleResp':
"""Initialize a DeleteFirewallRuleResp object from a json dictionary."""
args = {}
if 'success' in _dict:
args['success'] = _dict.get('success')
else:
raise ValueError('Required property \'success\' not present in DeleteFirewallRuleResp JSON')
if 'errors' in _dict:
args['errors'] = _dict.get('errors')
else:
raise ValueError('Required property \'errors\' not present in DeleteFirewallRuleResp JSON')
if 'messages' in _dict:
args['messages'] = _dict.get('messages')
else:
raise ValueError('Required property \'messages\' not present in DeleteFirewallRuleResp JSON')
if 'result' in _dict:
args['result'] = DeleteFirewallRuleRespResult.from_dict(_dict.get('result'))
else:
raise ValueError('Required property \'result\' not present in DeleteFirewallRuleResp JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a DeleteFirewallRuleResp object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
        _dict = {}
        if hasattr(self, 'success') and self.success is not None:
            _dict['success'] = self.success
        if hasattr(self, 'errors') and self.errors is not None:
            _dict['errors'] = self.errors
        if hasattr(self, 'messages') and self.messages is not None:
            _dict['messages'] = self.messages
        if hasattr(self, 'result') and self.result is not None:
            _dict['result'] = self.result.to_dict()
        return _dict
then its path_id will be a/b/abc/temp_file.py.
path_id: str = models.TextField(db_index=True, unique=True)
owner: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
realm: Optional[Realm] = models.ForeignKey(Realm, blank=True, null=True, on_delete=CASCADE)
create_time: datetime.datetime = models.DateTimeField(
default=timezone_now,
db_index=True,
)
# Size of the uploaded file, in bytes
size: int = models.IntegerField()
    # The two fields below let us avoid looking up the corresponding
# messages/streams to check permissions before serving these files.
# Whether this attachment has been posted to a public stream, and
# thus should be available to all non-guest users in the
# organization (even if they weren't a recipient of a message
# linking to it).
is_realm_public: bool = models.BooleanField(default=False)
# Whether this attachment has been posted to a web-public stream,
# and thus should be available to everyone on the internet, even
# if the person isn't logged in.
is_web_public: bool = models.BooleanField(default=False)
class Meta:
abstract = True
def __str__(self) -> str:
return f"<{self.__class__.__name__}: {self.file_name}>"
class ArchivedAttachment(AbstractAttachment):
"""Used as a temporary holding place for deleted Attachment objects
before they are permanently deleted. This is an important part of
a robust 'message retention' feature.
"""
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
messages: Manager = models.ManyToManyField(ArchivedMessage)
class Attachment(AbstractAttachment):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
messages: Manager = models.ManyToManyField(Message)
def is_claimed(self) -> bool:
return self.messages.count() > 0
def to_dict(self) -> Dict[str, Any]:
return {
"id": self.id,
"name": self.file_name,
"path_id": self.path_id,
"size": self.size,
# convert to JavaScript-style UNIX timestamp so we can take
# advantage of client timezones.
"create_time": int(time.mktime(self.create_time.timetuple()) * 1000),
"messages": [
{
"id": m.id,
"date_sent": int(time.mktime(m.date_sent.timetuple()) * 1000),
}
for m in self.messages.all()
],
}
post_save.connect(flush_used_upload_space_cache, sender=Attachment)
post_delete.connect(flush_used_upload_space_cache, sender=Attachment)
def validate_attachment_request(user_profile: UserProfile, path_id: str) -> Optional[bool]:
try:
attachment = Attachment.objects.get(path_id=path_id)
except Attachment.DoesNotExist:
return None
if user_profile == attachment.owner:
# If you own the file, you can access it.
return True
if (
attachment.is_realm_public
and attachment.realm == user_profile.realm
and user_profile.can_access_public_streams()
):
# Any user in the realm can access realm-public files
return True
messages = attachment.messages.all()
if UserMessage.objects.filter(user_profile=user_profile, message__in=messages).exists():
# If it was sent in a private message or private stream
# message, then anyone who received that message can access it.
return True
# The user didn't receive any of the messages that included this
# attachment. But they might still have access to it, if it was
# sent to a stream they are on where history is public to
# subscribers.
# These are subscriptions to a stream one of the messages was sent to
relevant_stream_ids = Subscription.objects.filter(
user_profile=user_profile,
active=True,
recipient__type=Recipient.STREAM,
recipient__in=[m.recipient_id for m in messages],
).values_list("recipient__type_id", flat=True)
if len(relevant_stream_ids) == 0:
return False
return Stream.objects.filter(
id__in=relevant_stream_ids, history_public_to_subscribers=True
).exists()
def get_old_unclaimed_attachments(weeks_ago: int) -> Sequence[Attachment]:
# TODO: Change return type to QuerySet[Attachment]
delta_weeks_ago = timezone_now() - datetime.timedelta(weeks=weeks_ago)
old_attachments = Attachment.objects.filter(messages=None, create_time__lt=delta_weeks_ago)
return old_attachments
class Subscription(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
recipient: Recipient = models.ForeignKey(Recipient, on_delete=CASCADE)
# Whether the user has since unsubscribed. We mark Subscription
# objects as inactive, rather than deleting them, when a user
# unsubscribes, so we can preserve user customizations like
# notification settings, stream color, etc., if the user later
# resubscribes.
active: bool = models.BooleanField(default=True)
# This is a denormalization designed to improve the performance of
    # bulk queries of Subscription objects. Whether the subscribed user
# is active tends to be a key condition in those queries.
# We intentionally don't specify a default value to promote thinking
# about this explicitly, as in some special cases, such as data import,
# we may be creating Subscription objects for a user that's deactivated.
is_user_active: bool = models.BooleanField()
ROLE_STREAM_ADMINISTRATOR = 20
ROLE_MEMBER = 50
ROLE_TYPES = [
ROLE_STREAM_ADMINISTRATOR,
ROLE_MEMBER,
]
role: int = models.PositiveSmallIntegerField(default=ROLE_MEMBER, db_index=True)
# Whether this user had muted this stream.
is_muted: Optional[bool] = models.BooleanField(null=True, default=False)
DEFAULT_STREAM_COLOR = "#c2c2c2"
color: str = models.CharField(max_length=10, default=DEFAULT_STREAM_COLOR)
pin_to_top: bool = models.BooleanField(default=False)
# These fields are stream-level overrides for the user's default
# configuration for notification, configured in UserProfile. The
# default, None, means we just inherit the user-level default.
desktop_notifications: Optional[bool] = models.BooleanField(null=True, default=None)
audible_notifications: Optional[bool] = models.BooleanField(null=True, default=None)
push_notifications: Optional[bool] = models.BooleanField(null=True, default=None)
email_notifications: Optional[bool] = models.BooleanField(null=True, default=None)
wildcard_mentions_notify: Optional[bool] = models.BooleanField(null=True, default=None)
class Meta:
unique_together = ("user_profile", "recipient")
indexes = [
models.Index(
fields=("recipient", "user_profile"),
name="zerver_subscription_recipient_id_user_profile_id_idx",
condition=Q(active=True, is_user_active=True),
),
]
def __str__(self) -> str:
return f"<Subscription: {self.user_profile} -> {self.recipient}>"
@property
def is_stream_admin(self) -> bool:
return self.role == Subscription.ROLE_STREAM_ADMINISTRATOR
# Subscription fields included whenever a Subscription object is provided to
# Zulip clients via the API. A few details worth noting:
# * These fields will generally be merged with Stream.API_FIELDS
# data about the stream.
# * "user_profile" is usually implied as full API access to Subscription
# is primarily done for the current user; API access to other users'
# subscriptions is generally limited to boolean yes/no.
# * "id" and "recipient_id" are not included as they are not used
# in the Zulip API; it's an internal implementation detail.
# Subscription objects are always looked up in the API via
# (user_profile, stream) pairs.
# * "active" is often excluded in API use cases where it is implied.
# * "is_muted" often needs to be copied to not "in_home_view" for
# backwards-compatibility.
API_FIELDS = [
"color",
"is_muted",
"pin_to_top",
"audible_notifications",
"desktop_notifications",
"email_notifications",
"push_notifications",
"wildcard_mentions_notify",
"role",
]
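    # Minimal sketch (not part of the model; the helper name is illustrative):
    # API_FIELDS is typically used to build the client-facing dict, with the legacy
    # "in_home_view" field derived by negating "is_muted".
    #
    #   def subscription_to_api_dict(sub: "Subscription") -> Dict[str, Any]:
    #       data = {field: getattr(sub, field) for field in Subscription.API_FIELDS}
    #       data["in_home_view"] = not sub.is_muted
    #       return data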
@cache_with_key(user_profile_by_id_cache_key, timeout=3600 * 24 * 7)
def get_user_profile_by_id(uid: int) -> UserProfile:
return UserProfile.objects.select_related().get(id=uid)
def get_user_profile_by_email(email: str) -> UserProfile:
"""This function is intended to be used for
manual manage.py shell work; robust code must use get_user or
get_user_by_delivery_email instead, because Zulip supports
multiple users with a given (delivery) email address existing on a
single server (in different realms).
"""
return UserProfile.objects.select_related().get(delivery_email__iexact=email.strip())
@cache_with_key(user_profile_by_api_key_cache_key, timeout=3600 * 24 * 7)
def maybe_get_user_profile_by_api_key(api_key: str) -> Optional[UserProfile]:
try:
return UserProfile.objects.select_related().get(api_key=api_key)
except UserProfile.DoesNotExist:
# We will cache failed lookups with None. The
# use case here is that broken API clients may
# continually ask for the same wrong API key, and
# we want to handle that as quickly as possible.
return None
def get_user_profile_by_api_key(api_key: str) -> UserProfile:
user_profile = maybe_get_user_profile_by_api_key(api_key)
if user_profile is None:
raise UserProfile.DoesNotExist()
return user_profile
def get_user_by_delivery_email(email: str, realm: Realm) -> UserProfile:
"""Fetches a user given their delivery email. For use in
authentication/registration contexts. Do not use for user-facing
views (e.g. Zulip API endpoints) as doing so would violate the
EMAIL_ADDRESS_VISIBILITY_ADMINS security model. Use get_user in
those code paths.
"""
return UserProfile.objects.select_related().get(
delivery_email__iexact=email.strip(), realm=realm
)
def get_users_by_delivery_email(emails: Set[str], realm: Realm) -> QuerySet:
"""This is similar to get_user_by_delivery_email, and
it has the same security caveats. It gets multiple
users and returns a QuerySet, since most callers
will only need two or three fields.
If you are using this to get large UserProfile objects, you are
probably making a mistake, but if you must,
then use `select_related`.
"""
"""
Django doesn't support delivery_email__iexact__in, so
we simply OR all the filters that we'd do for the
one-email case.
"""
email_filter = Q()
for email in emails:
email_filter |= Q(delivery_email__iexact=email.strip())
return UserProfile.objects.filter(realm=realm).filter(email_filter)
@cache_with_key(user_profile_cache_key, timeout=3600 * 24 * 7)
def get_user(email: str, realm: Realm) -> UserProfile:
"""Fetches the user by its visible-to-other users username (in the
`email` field). For use in API contexts; do not use in
authentication/registration contexts as doing so will break
authentication in organizations using
EMAIL_ADDRESS_VISIBILITY_ADMINS. In those code paths, use
get_user_by_delivery_email.
"""
return UserProfile.objects.select_related().get(email__iexact=email.strip(), realm=realm)
def get_active_user(email: str, realm: Realm) -> UserProfile:
"""Variant of get_user_by_email that excludes deactivated users.
See get_user docstring for important usage notes."""
user_profile = get_user(email, realm)
if not user_profile.is_active:
raise UserProfile.DoesNotExist()
return user_profile
def get_user_profile_by_id_in_realm(uid: int, realm: Realm) -> UserProfile:
return UserProfile.objects.select_related().get(id=uid, realm=realm)
def get_active_user_profile_by_id_in_realm(uid: int, realm: Realm) -> UserProfile:
user_profile = get_user_profile_by_id_in_realm(uid, realm)
if not user_profile.is_active:
raise UserProfile.DoesNotExist()
return user_profile
def get_user_including_cross_realm(email: str, realm: Realm) -> UserProfile:
if is_cross_realm_bot_email(email):
return get_system_bot(email, realm.id)
assert realm is not None
return get_user(email, realm)
@cache_with_key(bot_profile_cache_key, timeout=3600 * 24 * 7)
def get_system_bot(email: str, realm_id: int) -> UserProfile:
"""
This function doesn't use the realm_id argument yet, but requires
    passing it
"""
Run to generate figures for presentation.
Requires TeX; may need to install texlive-extra-utils on linux
Requires xppy and Py_XPPCall
the main() function at the end calls the preceding individual figure functions.
figures are saved as both png and pdf.
Copyright (c) 2016, <NAME>, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# last compiled using python 2.7.6
# numpy version 1.8.2
# scipy version 0.13.3
# matplotlib version 1.3.1
import os
from sys import stdout
import numpy as np
import scipy as sp
import matplotlib
import copy
#from matplotlib.ticker import MultipleLocator
#import matplotlib.ticker as mticker
import matplotlib.colors as colors
from matplotlib import pyplot as plt
import matplotlib.pylab as mp
#import matplotlib.gridspec as gridspec
from mpl_toolkits.mplot3d import proj3d
import matplotlib.gridspec as gridspec
import matplotlib.patches as patches
from mpl_toolkits.axes_grid1.inset_locator import inset_axes, zoomed_inset_axes
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from mpl_toolkits.mplot3d.art3d import Line3DCollection
from matplotlib.collections import LineCollection
from mpl_toolkits.mplot3d import axes3d
# 3d plotting is generated in twod_full_square.py, then beautified in this file.
from matplotlib import rc
rc('text', usetex=True)
rc('font', family='serif', serif=['Computer Modern Roman'])
matplotlib.rcParams['text.latex.preamble'] = [r'\boldmath \usepackage{bm} \usepackage{xcolor} \setlength{\parindent}{0pt}']
matplotlib.rcParams.update({'figure.autolayout': True})
sizeOfFont = 20
fontProperties = {'weight' : 'bold', 'size' : sizeOfFont}
from scipy.interpolate import interp1d
import oned_simple
import fourier_2d as f2d
import twod_full as twod
import twod_phase as twodp
from xppy.utils import diagram
from xppcall import xpprun
from generate_figures import beautify_phase
cos = np.cos
sin = np.sin
pi = np.pi;Pi=pi
sqrt = np.sqrt
Sqrt = np.sqrt
exp = np.exp
erfc = sp.special.erfc;Erfc=erfc
erf = sp.special.erf;Erf=erf
E = np.exp(1)#2.7182818284590452353602874713527
cosh = np.cosh;Cosh=cosh
class MyAxes3D(axes3d.Axes3D):
def __init__(self, baseObject, sides_to_draw):
self.__class__ = type(baseObject.__class__.__name__,
(self.__class__, baseObject.__class__),
{})
self.__dict__ = baseObject.__dict__
self.sides_to_draw = list(sides_to_draw)
self.mouse_init()
def set_some_features_visibility(self, visible):
for t in self.w_zaxis.get_ticklines() + self.w_zaxis.get_ticklabels():
t.set_visible(visible)
self.w_zaxis.line.set_visible(visible)
self.w_zaxis.pane.set_visible(visible)
self.w_zaxis.label.set_visible(visible)
def draw(self, renderer):
# set visibility of some features False
self.set_some_features_visibility(False)
# draw the axes
super(MyAxes3D, self).draw(renderer)
# set visibility of some features True.
# This could be adapted to set your features to desired visibility,
# e.g. storing the previous values and restoring the values
self.set_some_features_visibility(True)
zaxis = self.zaxis
draw_grid_old = zaxis.axes._draw_grid
# disable draw grid
zaxis.axes._draw_grid = False
tmp_planes = zaxis._PLANES
if 'l' in self.sides_to_draw :
# draw zaxis on the left side
zaxis._PLANES = (tmp_planes[2], tmp_planes[3],
tmp_planes[0], tmp_planes[1],
tmp_planes[4], tmp_planes[5])
zaxis.draw(renderer)
if 'r' in self.sides_to_draw :
# draw zaxis on the right side
zaxis._PLANES = (tmp_planes[3], tmp_planes[2],
tmp_planes[1], tmp_planes[0],
tmp_planes[4], tmp_planes[5])
zaxis.draw(renderer)
zaxis._PLANES = tmp_planes
        # restore the previous draw grid setting
zaxis.axes._draw_grid = draw_grid_old
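# Hypothetical usage sketch for MyAxes3D (figure and data below are placeholders):
# wrap an existing 3D axes so the z-axis is drawn only on the requested side.
#
#   fig = plt.figure()
#   ax = fig.add_subplot(111, projection='3d')
#   fig.add_axes(MyAxes3D(ax, 'l'))  # draw the z-axis on the left side only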
def collect_disjoint_branches(diagram,all_sv=True,return_eval=False,sv_tol=.1,remove_isolated=True,isolated_number=2,remove_redundant=True,redundant_threshold=.01,N=20,fix_reverse=True):
"""
collect all disjoint branches into disjoint arrays in a dict.
diagram.dat: all_info.dat from xppauto version 8. currently not compatible with info.dat.
recall org for xpp version 8:
type, branch, 0, par1, par2, period, uhigh[1..n], ulow[1..n], evr[1] evm[1] ... evr[n] evm[n]
yes there is a zero there as of xpp version 8. I don't know why.
for more information on how diagram is organized, see tree.pdf in the xpp source home directory.
all_sv: True or False. in each branch, return all state variables (to be implemented)
return_eval: return eigenvalues (to be implemented)
sv_tol: difference in consecutive state variables. If above this value, break branch.
remove_isolated: True/False. If a branch has fewer than isolated_number of points, do not include.
remove_redundant: if branches overlap, remove. we require the max diff to be above redundant_threshold
by default, we keep branches with a longer arc length.
N: number of points to check for redundancy
fix_reverse: True/False. some branches are computed backwards as a function of the parameter. If so, reverse.
"""
# get number of state variables (both hi and lo values, hence the 2*)
varnum = 2*len(diagram[0,6:])/4
    # number of preceding entries (tbparper stands for xpp type xpp branch parameter period)
# diagram[:,6] is the first state variable over all parameter values
# diagram[:,:6] are all the xpp types, xpp branches, parameters, periods for all parameter values
tbparper = 6
# column index of xpp branch type
typeidx = 0
# column index of xpp branch number
bridx = 1
# column index of 0 guy
zeroidx = 2
# column index of bifurcation parameters
par1idx = 3
par2idx = 4
    # set up array values for retrieval
c1 = []
c2 = []
c1.append(typeidx)
c1.append(bridx)
c2.append(par1idx)
c2.append(par2idx)
for i in range(varnum):
c2.append(tbparper+i)
c1 = np.array(c1,dtype=int)
c2 = np.array(c2,dtype=int)
# store various branches to dictionary
# this dict is for actual plotting values
val_dict = {}
# this dict is for type and xpp branch values
type_dict = {}
# loop over each coordinate. begin new branch if type, branch change values
# or if parval, period, sv1, sv2, .. svn change discontinuously.
# first set of comparisons is called c1
# second set of comparisons is called c2
brnum = 0
val_dict['br'+str(brnum)] = np.zeros((1,2+varnum)) # branches are named in order they are created
type_dict['br'+str(brnum)] = np.zeros((1,2))
# initialize
c1v_prev = np.array([list(diagram[0,c1])])
c1v = np.array([list(diagram[1,c1])])
c2v_prev = np.array([list(diagram[0,c2])])
c2v = np.array([list(diagram[1,c2])])
# val_dict has entries [par1, par2, sv1hi, sv1lo, ..., svnhi, svnlo]
# type_dict has entries [type, br]
# for a given xpp branch, consecutive terms are appended as a new row
val_dict['br'+str(brnum)] = c2v_prev
type_dict['br'+str(brnum)] = c1v_prev
for i in range(2,len(diagram[:,0])):
# get values for type and branch
c1v_prev = np.array([list(diagram[i-1,c1])])
c1v = np.array([list(diagram[i,c1])])
# get values for svs and parameters
c2v_prev = np.array([list(diagram[i-1,c2])])
c2v = np.array([list(diagram[i,c2])])
# append above values to current branch
val_dict['br'+str(brnum)] = np.append(val_dict['br'+str(brnum)],c2v_prev,axis=0)
type_dict['br'+str(brnum)] = np.append(type_dict['br'+str(brnum)],c1v_prev,axis=0)
# if either above threshold, start new key.
if np.any( np.abs((c1v - c1v_prev))>=1):
brnum += 1
val_dict['br'+str(brnum)] = c2v
type_dict['br'+str(brnum)] = c1v
elif np.any( np.abs((c2v - c2v_prev))>=sv_tol):
brnum += 1
val_dict['br'+str(brnum)] = c2v
type_dict['br'+str(brnum)] = c1v
# remove isolated points
if remove_isolated:
keyvals = val_dict.keys()
for i in range(len(keyvals)):
if len(val_dict[keyvals[i]]) <= isolated_number:
val_dict.pop(keyvals[i])
type_dict.pop(keyvals[i])
# remove redundant branches
# a python branch is removed if it shares multiple points (N) with another xpp branch.
if remove_redundant:
val_dict_final = {}
type_dict_final = {}
# get all xpp branch numbers
brlist = np.unique(diagram[:,1])
# collect all branches for each xpp branch number
keyvals = val_dict.keys()
keyignorelist = []
keysavelist = []
# loop over keys of python branches
for i in range(len(keyvals)):
key = keyvals[i]
if not(key in keyignorelist):
# get xpp branch
xppbrnum = type_dict[key][0,1]
for j in range(i+1,len(keyvals)):
key2 = keyvals[j]
if not(key2 in keyignorelist) and (key2 != key):
# make sure xpp branches are different
if xppbrnum != type_dict[key2][0,1]:
# loop over N different values
N = 20
belowthresholdcount = 0
dN = len(val_dict[key][:,0])/N
for i in range(N):
# check if 2 points in val_dict[key] are in val_dict[key2]
# first point
par1diff = np.amin(np.abs(val_dict[key][dN*i,0]-val_dict[key2][:,0]))
par2diff = np.amin(np.abs(val_dict[key][dN*i,1]-val_dict[key2][:,1]))
sv1diff = np.amin(np.abs(val_dict[key][dN*i,2]-val_dict[key2][:,2]))
sv2diff = np.amin(np.abs(val_dict[key][dN*i,3]-val_dict[key2][:,3]))
diff1 = par1diff + par2diff + sv1diff + sv2diff
if diff1 <= redundant_threshold:
#print 'delete', key2
belowthresholdcount += 1
if belowthresholdcount >= 4:
keyignorelist.append(key2)
#print 'del', key2
else:
if not(key2 in keysavelist):
#print 'keep', key2
val_dict_final[key2] = val_dict[key2]
type_dict_final[key2] = type_dict[key2]
keysavelist.append(key2)
for key in keyignorelist:
if key in keysavelist:
val_dict_final.pop(key)
type_dict_final.pop(key)
else:
        val_dict_final = val_dict
        type_dict_final = type_dict
from enum import Enum, unique
@unique
class WordType(Enum):
UNKNOWN = 0
SYMBOL = 1
TOGGLE = 2
VALUE = 3
FLAG = 4
DESTINATION = 5
words = {
"'": WordType.SYMBOL,
"-": WordType.SYMBOL,
"*": WordType.SYMBOL,
":": WordType.SYMBOL,
"\\": WordType.SYMBOL,
"_": WordType.SYMBOL,
"{": WordType.SYMBOL,
"|": WordType.SYMBOL,
"}": WordType.SYMBOL,
"~": WordType.SYMBOL,
"ab": WordType.TOGGLE,
"absh": WordType.VALUE,
"abslock": WordType.FLAG,
"absnoovrlp": WordType.TOGGLE,
"absw": WordType.VALUE,
"acaps": WordType.TOGGLE,
"acccircle": WordType.TOGGLE,
"acccomma": WordType.TOGGLE,
"accdot": WordType.TOGGLE,
"accnone": WordType.TOGGLE,
"accunderdot": WordType.TOGGLE,
"acf": WordType.VALUE,
"adeff": WordType.VALUE,
"additive": WordType.FLAG,
"adeflang": WordType.VALUE,
"adjustright": WordType.FLAG,
"adn": WordType.VALUE,
"aenddoc": WordType.FLAG,
"aendnotes": WordType.FLAG,
"aexpnd": WordType.VALUE,
"af": WordType.VALUE,
"afelev": WordType.FLAG,
"afs": WordType.VALUE,
"aftnbj": WordType.FLAG,
"aftncn": WordType.DESTINATION,
"aftnnalc": WordType.FLAG,
"aftnnar": WordType.FLAG,
"aftnnauc": WordType.FLAG,
"aftnnchi": WordType.FLAG,
"aftnnchosung": WordType.FLAG,
"aftnncnum": WordType.FLAG,
"aftnndbar": WordType.FLAG,
"aftnndbnum": WordType.FLAG,
"aftnndbnumd": WordType.FLAG,
"aftnndbnumk": WordType.FLAG,
"aftnndbnumt": WordType.FLAG,
"aftnnganada": WordType.FLAG,
"aftnngbnum": WordType.FLAG,
"aftnngbnumd": WordType.FLAG,
"aftnngbnumk": WordType.FLAG,
"aftnngbnuml": WordType.FLAG,
"aftnnrlc": WordType.FLAG,
"aftnnruc": WordType.FLAG,
"aftnnzodiac": WordType.FLAG,
"aftnnzodiacd": WordType.FLAG,
"aftnnzodiacl": WordType.FLAG,
"aftnrestart": WordType.FLAG,
"aftnrstcont": WordType.FLAG,
"aftnsep": WordType.DESTINATION,
"aftnsepc": WordType.DESTINATION,
"aftnstart": WordType.VALUE,
"aftntj": WordType.FLAG,
"ai": WordType.TOGGLE,
"alang": WordType.VALUE,
"allowfieldendsel": WordType.FLAG,
"allprot": WordType.FLAG,
"alntblind": WordType.FLAG,
"alt": WordType.FLAG,
"animtext": WordType.VALUE,
"annotation": WordType.DESTINATION,
"annotprot": WordType.FLAG,
"ansi": WordType.FLAG,
"ansicpg": WordType.VALUE,
"aoutl": WordType.TOGGLE,
"ApplyBrkRules": WordType.FLAG,
"ascaps": WordType.TOGGLE,
"ashad": WordType.TOGGLE,
"asianbrkrule": WordType.FLAG,
"aspalpha": WordType.TOGGLE,
"aspnum": WordType.TOGGLE,
"astrike": WordType.TOGGLE,
"atnauthor": WordType.DESTINATION,
"atndate": WordType.DESTINATION,
"atnicn": WordType.DESTINATION,
"atnid": WordType.DESTINATION,
"atnparent": WordType.DESTINATION,
"atnref": WordType.DESTINATION,
"atntime": WordType.DESTINATION,
"atrfend": WordType.DESTINATION,
"atrfstart": WordType.DESTINATION,
"aul": WordType.TOGGLE,
"auld": WordType.TOGGLE,
"auldb": WordType.TOGGLE,
"aulnone": WordType.TOGGLE,
"aulw": WordType.TOGGLE,
"aup": WordType.VALUE,
"author": WordType.DESTINATION,
"autofmtoverride": WordType.FLAG,
"b": WordType.TOGGLE,
"background": WordType.DESTINATION,
"bdbfhdr": WordType.FLAG,
"bdrrlswsix": WordType.FLAG,
"bgbdiag": WordType.FLAG,
"bgcross": WordType.FLAG,
"bgdcross": WordType.FLAG,
"bgdkbdiag": WordType.FLAG,
"bgdkcross": WordType.FLAG,
"bgdkdcross": WordType.FLAG,
"bgdkfdiag": WordType.FLAG,
"bgdkhoriz": WordType.FLAG,
"bgdkvert": WordType.FLAG,
"bgfdiag": WordType.FLAG,
"bghoriz": WordType.FLAG,
"bgvert": WordType.FLAG,
"bin": WordType.VALUE,
"binfsxn": WordType.VALUE,
"binsxn": WordType.VALUE,
"bkmkcolf": WordType.VALUE,
"bkmkcoll": WordType.VALUE,
"bkmkend": WordType.DESTINATION,
"bkmkpub": WordType.FLAG,
"bkmkstart": WordType.DESTINATION,
"bliptag": WordType.VALUE,
"blipuid": WordType.DESTINATION,
"blipupi": WordType.VALUE,
"blue": WordType.VALUE,
"bookfold": WordType.FLAG,
"bookfoldrev": WordType.FLAG,
"bookfoldsheets": WordType.VALUE,
"box": WordType.FLAG,
"brdrart": WordType.VALUE,
"brdrb": WordType.FLAG,
"brdrbar": WordType.FLAG,
"brdrbtw": WordType.FLAG,
"brdrcf": WordType.VALUE,
"brdrdash": WordType.FLAG,
"brdrdashd": WordType.FLAG,
"brdrdashdd": WordType.FLAG,
"brdrdashdot": WordType.FLAG,
"brdrdashdotdot": WordType.FLAG,
"brdrdashdotstr": WordType.FLAG,
"brdrdashsm": WordType.FLAG,
"brdrdb": WordType.FLAG,
"brdrdot": WordType.FLAG,
"brdremboss": WordType.FLAG,
"brdrengrave": WordType.FLAG,
"brdrframe": WordType.FLAG,
"brdrhair": WordType.FLAG,
"brdrinset": WordType.FLAG,
"brdrl": WordType.FLAG,
"brdrnil": WordType.FLAG,
"brdrnone": WordType.FLAG,
"brdroutset": WordType.FLAG,
"brdrr": WordType.FLAG,
"brdrs": WordType.FLAG,
"brdrsh": WordType.FLAG,
"brdrt": WordType.FLAG,
"brdrtbl": WordType.FLAG,
"brdrth": WordType.FLAG,
"brdrthtnlg": WordType.FLAG,
"brdrthtnmg": WordType.FLAG,
"brdrthtnsg": WordType.FLAG,
"brdrtnthlg": WordType.FLAG,
"brdrtnthmg": WordType.FLAG,
"brdrtnthsg": WordType.FLAG,
"brdrtnthtnlg": WordType.FLAG,
"brdrtnthtnmg": WordType.FLAG,
"brdrtnthtnsg": WordType.FLAG,
"brdrtriple": WordType.FLAG,
"brdrw": WordType.VALUE,
"brdrwavy": WordType.FLAG,
"brdrwavydb": WordType.FLAG,
"brkfrm": WordType.FLAG,
"brsp": WordType.VALUE,
"bullet": WordType.SYMBOL,
"buptim": WordType.DESTINATION,
"bxe": WordType.FLAG,
"caccentfive": WordType.FLAG,
"caccentfour": WordType.FLAG,
"caccentone": WordType.FLAG,
"caccentsix": WordType.FLAG,
"caccentthree": WordType.FLAG,
"caccenttwo": WordType.FLAG,
"cachedcolbal": WordType.FLAG,
"caps": WordType.TOGGLE,
"category": WordType.DESTINATION,
"cb": WordType.VALUE,
"cbackgroundone": WordType.FLAG,
"cbackgroundtwo": WordType.FLAG,
"cbpat": WordType.VALUE,
"cchs": WordType.VALUE,
"cell": WordType.SYMBOL,
"cellx": WordType.VALUE,
"cf": WordType.VALUE,
"cfollowedhyperlink": WordType.FLAG,
"cfpat": WordType.VALUE,
"cgrid": WordType.VALUE,
"charrsid": WordType.VALUE,
"charscalex": WordType.VALUE,
"chatn": WordType.SYMBOL,
"chbgbdiag": WordType.FLAG,
"chbgcross": WordType.FLAG,
"chbgdcross": WordType.FLAG,
"chbgdkbdiag": WordType.FLAG,
"chbgdkcross": WordType.FLAG,
"chbgdkdcross": WordType.FLAG,
"chbgdkfdiag": WordType.FLAG,
"chbgdkhoriz": WordType.FLAG,
"chbgdkvert": WordType.FLAG,
"chbgfdiag": WordType.FLAG,
"chbghoriz": WordType.FLAG,
"chbgvert": WordType.FLAG,
"chbrdr": WordType.FLAG,
"chcbpat": WordType.VALUE,
"chcfpat": WordType.VALUE,
"chdate": WordType.SYMBOL,
"chdpa": WordType.SYMBOL,
"chdpl": WordType.SYMBOL,
"chftn": WordType.SYMBOL,
"chftnsep": WordType.SYMBOL,
"chftnsepc": WordType.SYMBOL,
"chpgn": WordType.SYMBOL,
"chhres": WordType.VALUE,
"chshdng": WordType.VALUE,
"chtime": WordType.SYMBOL,
"chyperlink": WordType.FLAG,
"clbgbdiag": WordType.FLAG,
"clbgcross": WordType.FLAG,
"clbgdcross": WordType.FLAG,
"clbgdkbdiag": WordType.FLAG,
"clbgdkcross": WordType.FLAG,
"clbgdkdcross": WordType.FLAG,
"clbgdkfdiag": WordType.FLAG,
"clbgdkhor": WordType.FLAG,
"clbgdkvert": WordType.FLAG,
"clbgfdiag": WordType.FLAG,
"clbghoriz": WordType.FLAG,
"clbgvert": WordType.FLAG,
"clbrdrb": WordType.FLAG,
"clbrdrl": WordType.FLAG,
"clbrdrr": WordType.FLAG,
"clbrdrt": WordType.FLAG,
"clcbpat": WordType.VALUE,
"clcbpatraw": WordType.VALUE,
"clcfpat": WordType.VALUE,
"clcfpatraw": WordType.VALUE,
"cldel": WordType.FLAG,
"cldelauth": WordType.VALUE,
"cldeldttm": WordType.VALUE,
"cldgll": WordType.FLAG,
"cldglu": WordType.FLAG,
"clFitText": WordType.FLAG,
"clftsWidth": WordType.VALUE,
"clhidemark": WordType.FLAG,
"clins": WordType.FLAG,
"clinsauth": WordType.VALUE,
"clinsdttm": WordType.VALUE,
"clmgf": WordType.FLAG,
"clmrg": WordType.FLAG,
"clmrgd": WordType.FLAG,
"clmrgdauth": WordType.VALUE,
"clmrgddttm": WordType.VALUE,
"clmrgdr": WordType.FLAG,
"clNoWrap": WordType.FLAG,
"clpadb": WordType.VALUE,
"clpadfb": WordType.VALUE,
"clpadfl": WordType.VALUE,
"clpadfr": WordType.VALUE,
"clpadft": WordType.VALUE,
"clpadl": WordType.VALUE,
"clpadr": WordType.VALUE,
"clpadt": WordType.VALUE,
"clspb": WordType.VALUE,
"clspfb": WordType.VALUE,
"clspfl": WordType.VALUE,
"clspfr": WordType.VALUE,
"clspft": WordType.VALUE,
"clspl": WordType.VALUE,
"clspr": WordType.VALUE,
"clspt": WordType.VALUE,
"clshdng": WordType.VALUE,
"clshdngraw": WordType.VALUE,
"clshdrawnil": WordType.FLAG,
"clsplit": WordType.FLAG,
"clsplitr": WordType.FLAG,
"cltxbtlr": WordType.FLAG,
"cltxlrtb": WordType.FLAG,
"cltxlrtbv": WordType.FLAG,
"cltxtbrl": WordType.FLAG,
"cltxtbrlv": WordType.FLAG,
"clvertalb": WordType.FLAG,
"clvertalc": WordType.FLAG,
"clvertalt": WordType.FLAG,
"clvmgf": WordType.FLAG,
"clvmrg": WordType.FLAG,
"clwWidth": WordType.VALUE,
"cmaindarkone": WordType.FLAG,
"cmaindarktwo": WordType.FLAG,
"cmainlightone": WordType.FLAG,
"cmainlighttwo": WordType.FLAG,
"collapsed": WordType.FLAG,
"colno": WordType.VALUE,
"colorschememapping": WordType.DESTINATION,
"colortbl": WordType.DESTINATION,
"cols": WordType.VALUE,
"colsr": WordType.VALUE,
"colsx": WordType.VALUE,
"column": WordType.SYMBOL,
"colw": WordType.VALUE,
"comment": WordType.DESTINATION,
"company": WordType.DESTINATION,
"contextualspace": WordType.FLAG,
"cpg": WordType.VALUE,
"crauth": WordType.VALUE,
"crdate": WordType.VALUE,
"creatim": WordType.DESTINATION,
"cs": WordType.VALUE,
"cshade": WordType.VALUE,
"ctextone": WordType.FLAG,
"ctexttwo": WordType.FLAG,
"ctint": WordType.VALUE,
"ctrl": WordType.FLAG,
"cts": WordType.VALUE,
"cufi": WordType.VALUE,
"culi": WordType.VALUE,
"curi": WordType.VALUE,
"cvmme": WordType.FLAG,
"datafield": WordType.DESTINATION,
"datastore": WordType.DESTINATION,
"date": WordType.FLAG,
"dbch": WordType.FLAG,
"defchp": WordType.DESTINATION,
"deff": WordType.VALUE,
"defformat": WordType.FLAG,
"deflang": WordType.VALUE,
"deflangfe": WordType.VALUE,
"defpap": WordType.DESTINATION,
"defshp": WordType.FLAG,
"deftab": WordType.VALUE,
"deleted": WordType.TOGGLE,
"delrsid": WordType.VALUE,
"dfrauth": WordType.VALUE,
"dfrdate": WordType.VALUE,
"dfrmtxtx": WordType.VALUE,
"dfrmtxty": WordType.VALUE,
"dfrstart": WordType.VALUE,
"dfrstop": WordType.VALUE,
"dfrxst": WordType.VALUE,
"dghorigin": WordType.VALUE,
"dghshow": WordType.VALUE,
"dghspace": WordType.VALUE,
"dgmargin": WordType.FLAG,
"dgsnap": WordType.FLAG,
"dgvorigin": WordType.VALUE,
"dgvshow": WordType.VALUE,
"dgvspace": WordType.VALUE,
"dibitmap": WordType.VALUE,
"disabled": WordType.TOGGLE,
"dn": WordType.VALUE,
"dntblnsbdb": WordType.FLAG,
"do": WordType.DESTINATION,
"dobxcolumn": WordType.FLAG,
"dobxmargin": WordType.FLAG,
"dobxpage": WordType.FLAG,
"dobymargin": WordType.FLAG,
"dobypage": WordType.FLAG,
"dobypara": WordType.FLAG,
"doccomm": WordType.DESTINATION,
"doctemp": WordType.FLAG,
"doctype": WordType.VALUE,
"docvar": WordType.DESTINATION,
"dodhgt": WordType.VALUE,
"dolock": WordType.FLAG,
"donotembedlingdata": WordType.VALUE,
"donotembedsysfont": WordType.VALUE,
"donotshowcomments": WordType.FLAG,
"donotshowinsdel": WordType.FLAG,
"donotshowmarkup": WordType.FLAG,
"donotshowprops": WordType.FLAG,
"dpaendhol": WordType.FLAG,
"dpaendl": WordType.VALUE,
"dpaendsol": WordType.FLAG,
"dpaendw": WordType.VALUE,
"dparc": WordType.FLAG,
"dparcflipx": WordType.FLAG,
"dparcflipy": WordType.FLAG,
"dpastarthol": WordType.FLAG,
"dpastartl": WordType.VALUE,
"dpastartsol": WordType.FLAG,
"dpastartw": WordType.VALUE,
"dpcallout": WordType.FLAG,
"dpcoa": WordType.VALUE,
"dpcoaccent": WordType.FLAG,
"dpcobestfit": WordType.FLAG,
"dpcoborder": WordType.FLAG,
"dpcodabs": WordType.FLAG,
"dpcodbottom": WordType.FLAG,
"dpcodcenter": WordType.FLAG,
"dpcodescent": WordType.VALUE,
"dpcodtop": WordType.FLAG,
"dpcolength": WordType.VALUE,
"dpcominusx": WordType.FLAG,
"dpcominusy": WordType.FLAG,
"dpcooffset": WordType.VALUE,
"dpcosmarta": WordType.FLAG,
"dpcotdouble": WordType.FLAG,
"dpcotright": WordType.FLAG,
"dpcotsingle": WordType.FLAG,
"dpcottriple": WordType.FLAG,
"dpcount": WordType.VALUE,
"dpellipse": WordType.FLAG,
"dpendgroup": WordType.FLAG,
"dpfillbgcb": WordType.VALUE,
"dpfillbgcg": WordType.VALUE,
"dpfillbgcr": WordType.VALUE,
"dpfillbggray": WordType.VALUE,
"dpfillbgpal": WordType.FLAG,
"dpfillfgcb": WordType.VALUE,
"dpfillfgcg": WordType.VALUE,
"dpfillfgcr": WordType.VALUE,
"dpfillfggray": WordType.VALUE,
"dpfillfgpal": WordType.FLAG,
"dpfillpat": WordType.VALUE,
"dpgroup": WordType.FLAG,
"dpline": WordType.FLAG,
"dplinecob": WordType.VALUE,
"dplinecog": WordType.VALUE,
"dplinecor": WordType.VALUE,
"dplinedado": WordType.FLAG,
"dplinedadodo": WordType.FLAG,
"dplinedash": WordType.FLAG,
"dplinedot": WordType.FLAG,
"dplinegray": WordType.VALUE,
"dplinehollow": WordType.FLAG,
"dplinepal": WordType.FLAG,
"dplinesolid": WordType.FLAG,
"dplinew": WordType.VALUE,
"dppolycount": WordType.VALUE,
"dppolygon": WordType.FLAG,
"dppolyline": WordType.FLAG,
"dpptx": WordType.VALUE,
"dppty": WordType.VALUE,
"dprect": WordType.FLAG,
"dproundr": WordType.FLAG,
"dpshadow": WordType.FLAG,
"dpshadx": WordType.VALUE,
"dpshady": WordType.VALUE,
"dptxbtlr": WordType.FLAG,
"dptxbx": WordType.FLAG,
"dptxbxmar": WordType.VALUE,
"dptxbxtext": WordType.DESTINATION,
"dptxlrtb": WordType.FLAG,
"dptxlrtbv": WordType.FLAG,
"dptxtbrl": WordType.FLAG,
"dptxtbrlv": WordType.FLAG,
"dpx": WordType.VALUE,
"dpxsize": WordType.VALUE,
"dpy": WordType.VALUE,
"dpysize": WordType.VALUE,
"dropcapli": WordType.VALUE,
"dropcapt": WordType.VALUE,
"ds": WordType.VALUE,
"dxfrtext": WordType.VALUE,
"dy": WordType.VALUE,
"ebcend": WordType.DESTINATION,
"ebcstart": WordType.DESTINATION,
"edmins": WordType.VALUE,
"embo": WordType.TOGGLE,
"emdash": WordType.SYMBOL,
"emfblip": WordType.FLAG,
"emspace": WordType.SYMBOL,
"endash": WordType.SYMBOL,
"enddoc": WordType.FLAG,
"endnhere": WordType.FLAG,
"endnotes": WordType.FLAG,
"enforceprot": WordType.VALUE,
"enspace": WordType.SYMBOL,
"expnd": WordType.VALUE,
"expndtw": WordType.VALUE,
"expshrtn": WordType.FLAG,
"f": WordType.VALUE,
"faauto": WordType.FLAG,
"facenter": WordType.FLAG,
"facingp": WordType.FLAG,
"factoidname": WordType.DESTINATION,
"fafixed": WordType.FLAG,
"fahang": WordType.FLAG,
"falt": WordType.DESTINATION,
"faroman": WordType.FLAG,
"favar": WordType.FLAG,
"fbias": WordType.VALUE,
"fbidi": WordType.FLAG,
"fbidis": WordType.FLAG,
"fbimajor": WordType.FLAG,
"fbiminor": WordType.FLAG,
"fchars": WordType.DESTINATION,
"fcharset": WordType.VALUE,
"fcs": WordType.VALUE,
"fdbmajor": WordType.FLAG,
"fdbminor": WordType.FLAG,
"fdecor": WordType.FLAG,
"felnbrelev": WordType.FLAG,
"fet": WordType.VALUE,
"fetch": WordType.FLAG,
"ffdefres": WordType.VALUE,
"ffdeftext": WordType.DESTINATION,
"ffentrymcr": WordType.DESTINATION,
"ffexitmcr": WordType.DESTINATION,
"ffformat": WordType.DESTINATION,
"ffhaslistbox": WordType.VALUE,
"ffhelptext": WordType.DESTINATION,
"ffhps": WordType.VALUE,
"ffl": WordType.DESTINATION,
"ffmaxlen": WordType.VALUE,
"ffname": WordType.DESTINATION,
"ffownhelp": WordType.VALUE,
"ffownstat": WordType.VALUE,
"ffprot": WordType.VALUE,
"ffrecalc": WordType.VALUE,
"ffres": WordType.VALUE,
"ffsize": WordType.VALUE,
"ffstattext": WordType.DESTINATION,
"fftype": WordType.VALUE,
"fftypetxt": WordType.VALUE,
"fhimajor": WordType.FLAG,
"fhiminor": WordType.FLAG,
"fi": WordType.VALUE,
"fid": WordType.VALUE,
"field": WordType.DESTINATION,
"file": WordType.DESTINATION,
"filetbl": WordType.DESTINATION,
"fittext": WordType.VALUE,
"fjgothic": WordType.FLAG,
"fjminchou": WordType.FLAG,
"fldalt": WordType.FLAG,
"flddirty": WordType.FLAG,
"fldedit": WordType.FLAG,
"fldinst": WordType.DESTINATION,
"fldlock": WordType.FLAG,
"fldpriv": WordType.FLAG,
"fldrslt": WordType.DESTINATION,
"fldtype": WordType.DESTINATION,
"flomajor": WordType.FLAG,
"flominor": WordType.FLAG,
"fmodern": WordType.FLAG,
"fn": WordType.VALUE,
"fname": WordType.DESTINATION,
"fnetwork": WordType.FLAG,
"fnil": WordType.FLAG,
"fnonfilesys": WordType.FLAG,
"fontemb": WordType.DESTINATION,
"fontfile": WordType.DESTINATION,
"fonttbl": WordType.DESTINATION,
"footer": WordType.DESTINATION,
"footerf": WordType.DESTINATION,
"footerl": WordType.DESTINATION,
"footerr": WordType.DESTINATION,
"footery": WordType.VALUE,
"footnote": WordType.DESTINATION,
"forceupgrade": WordType.FLAG,
"formatConverter": WordType.DESTINATION,
"formdisp": WordType.FLAG,
"formfield": WordType.DESTINATION,
"formprot": WordType.FLAG,
"formshade": WordType.FLAG,
"fosnum": WordType.VALUE,
"fprq": WordType.VALUE,
"fracwidth": WordType.FLAG,
"frelative": WordType.VALUE,
"frmtxbtlr": WordType.FLAG,
"frmtxlrtb": WordType.FLAG,
"frmtxlrtbv": WordType.FLAG,
"frmtxtbrl": WordType.FLAG,
"frmtxtbrlv": WordType.FLAG,
"froman": WordType.FLAG,
"fromhtml": WordType.VALUE,
"fromtext": WordType.FLAG,
"fs": WordType.VALUE,
"fscript": WordType.FLAG,
"fswiss": WordType.FLAG,
"ftech": WordType.FLAG,
"ftnalt": WordType.FLAG,
"ftnbj": WordType.FLAG,
"ftncn": WordType.DESTINATION,
"ftnil": WordType.FLAG,
"ftnlytwnine": WordType.FLAG,
"ftnnalc": WordType.FLAG,
"ftnnar": WordType.FLAG,
"ftnnauc": WordType.FLAG,
"ftnnchi": WordType.FLAG,
"ftnnchosung": WordType.FLAG,
"ftnncnum": WordType.FLAG,
"ftnndbar": WordType.FLAG,
"ftnndbnum": WordType.FLAG,
"ftnndbnumd": WordType.FLAG,
"ftnndbnumk": WordType.FLAG,
"ftnndbnumt": WordType.FLAG,
"ftnnganada": WordType.FLAG,
"ftnngbnum": WordType.FLAG,
"ftnngbnumd": WordType.FLAG,
"ftnngbnumk": WordType.FLAG,
"ftnngbnuml": WordType.FLAG,
"ftnnrlc": WordType.FLAG,
"ftnnruc": WordType.FLAG,
"ftnnzodiac": WordType.FLAG,
"ftnnzodiacd": WordType.FLAG,
"ftnnzodiacl": WordType.FLAG,
"ftnrestart": WordType.FLAG,
"ftnrstcont": WordType.FLAG,
"ftnrstpg": WordType.FLAG,
"ftnsep": WordType.DESTINATION,
"ftnsepc": WordType.DESTINATION,
"ftnstart": WordType.VALUE,
"ftntj": WordType.FLAG,
"fttruetype": WordType.FLAG,
"fvaliddos": WordType.FLAG,
"fvalidhpfs": WordType.FLAG,
"fvalidmac": WordType.FLAG,
"fvalidntfs": WordType.FLAG,
"g": WordType.DESTINATION,
"gcw": WordType.VALUE,
"generator": WordType.DESTINATION,
"green": WordType.VALUE,
"grfdocevents": WordType.VALUE,
"gridtbl": WordType.DESTINATION,
"gutter": WordType.VALUE,
"gutterprl": WordType.FLAG,
    # (table truncated here; the full RTF control-word list continues in the original source)
}
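# Minimal sketch (not part of the original table) of how the lookup above might be
# used by an RTF tokenizer: classify a scanned control word, defaulting to UNKNOWN.
def classify_control_word(name):
    """Return the WordType for an RTF control word, or UNKNOWN if it is not listed."""
    return words.get(name, WordType.UNKNOWN)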
        # As the tree is being populated lazily we create a
# dummy that will be removed when the node is expanded for the
# first time.
cnid._dummy = TreeItem( cnid )
# Return the newly created node:
return cnid
#---------------------------------------------------------------------------
# Deletes a specified tree node and all its children:
#---------------------------------------------------------------------------
def _delete_node ( self, nid ):
""" Deletes a specified tree node and all its children.
"""
for cnid in self._nodes_for( nid ):
self._delete_node( cnid )
if nid is self._tree.getItem( 0 ):
return
# See if it is a dummy.
pnid = nid.parent()
if pnid is not None and getattr(pnid, '_dummy', None) is nid:
pnid.removeItem( nid )
del pnid._dummy
return
expanded, node, object = self._get_node_data( nid )
id_object = id( object )
object_info = self._map[id_object]
for i, info in enumerate( object_info ):
if nid == info[1]:
del object_info[i]
break
if len( object_info ) == 0:
self._remove_listeners( node, object )
del self._map[ id_object ]
if pnid is None:
# self._tree.takeTopLevelItem(self._tree.indexOfTopLevelItem(nid))
# self._tree.removeItem( self._tree.findDeepestOpenChild() )
pass
else:
pnid.removeItem(nid)
# If the deleted node had an active editor panel showing, remove it:
if (self._editor is not None) and (nid == self._editor._editor_nid):
self._clear_editor()
#---------------------------------------------------------------------------
# Expands the contents of a specified node (if required):
#---------------------------------------------------------------------------
def _expand_node ( self, nid ):
""" Expands the contents of a specified node (if required).
"""
expanded, node, object = self._get_node_data( nid )
# Lazily populate the item's children:
if not expanded:
# Remove any dummy node.
dummy = getattr(nid, '_dummy', None)
if dummy is not None:
nid.removeItem( dummy )
del nid._dummy
for child in node.get_children( object ):
child, child_node = self._node_for( child )
if child_node is not None:
self._append_node( nid, child_node, child )
# Indicate the item is now populated:
self._set_node_data( nid, ( True, node, object) )
#---------------------------------------------------------------------------
# Returns each of the child nodes of a specified node id:
#---------------------------------------------------------------------------
def _nodes_for ( self, nid ):
""" Returns all child node ids of a specified node id.
"""
return [nid.child(i) for i in range( nid.childCount() )]
#---------------------------------------------------------------------------
# Return the index of a specified node id within its parent:
#---------------------------------------------------------------------------
def _node_index ( self, nid ):
pnid = nid.parent()
if pnid is None:
return ( None, None, None )
for i in range( pnid.childCount() ):
if pnid.child(i) is nid:
_, pnode, pobject = self._get_node_data( pnid )
return ( pnode, pobject, i )
#---------------------------------------------------------------------------
# Returns whether a specified object has any children:
#---------------------------------------------------------------------------
def _has_children ( self, node, object ):
""" Returns whether a specified object has any children.
"""
return (node.allows_children( object ) and node.has_children( object ))
#---------------------------------------------------------------------------
# Returns the icon index for the specified object:
#---------------------------------------------------------------------------
STD_ICON_MAP = {
'<item>': Image("images/file.png"),
'<group>': Image("images/dir_closed.png"),
'<open>': Image("images/dir_open.png")
}
def _get_icon ( self, node, object, is_expanded = False ):
""" Returns the index of the specified object icon.
"""
if not self.factory.show_icons:
return Image("images/blank.png")
icon_name = node.get_icon(object, is_expanded)
if isinstance(icon_name, basestring):
icon = self.STD_ICON_MAP.get(icon_name)
if icon is not None:
# return self._tree.style().standardIcon(icon)
return icon
# path = node.get_icon_path( object )
# if isinstance( path, basestring ):
# path = [ path, node ]
# else:
# path.append( node )
# reference = resource_manager.locate_image( icon_name, path )
# if reference is None:
# return QtGui.QIcon()
# file_name = reference.filename
# else:
# # Assume it is an ImageResource, and get its file name directly:
# file_name = icon_name.absolute_path
#
# return QtGui.QIcon(pixmap_cache(file_name))
#---------------------------------------------------------------------------
# Adds the event listeners for a specified object:
#---------------------------------------------------------------------------
def _add_listeners ( self, node, object ):
""" Adds the event listeners for a specified object.
"""
if node.allows_children( object ):
node.when_children_replaced( object, self._children_replaced, False)
node.when_children_changed( object, self._children_updated, False)
node.when_label_changed( object, self._label_updated, False )
#---------------------------------------------------------------------------
# Removes any event listeners from a specified object:
#---------------------------------------------------------------------------
def _remove_listeners ( self, node, object ):
""" Removes any event listeners from a specified object.
"""
if node.allows_children( object ):
node.when_children_replaced( object, self._children_replaced, True )
node.when_children_changed( object, self._children_updated, True )
node.when_label_changed( object, self._label_updated, True )
#---------------------------------------------------------------------------
# Returns the tree node data for a specified object in the form
# ( expanded, node, nid ):
#---------------------------------------------------------------------------
def _object_info ( self, object, name = '' ):
""" Returns the tree node data for a specified object in the form
( expanded, node, nid ).
"""
info = self._map[ id( object ) ]
for name2, nid in info:
if name == name2:
break
else:
nid = info[0][1]
expanded, node, ignore = self._get_node_data( nid )
return ( expanded, node, nid )
def _object_info_for ( self, object, name = '' ):
""" Returns the tree node data for a specified object as a list of the
form: [ ( expanded, node, nid ), ... ].
"""
result = []
for name2, nid in self._map[ id( object ) ]:
if name == name2:
expanded, node, ignore = self._get_node_data( nid )
result.append( ( expanded, node, nid ) )
return result
#---------------------------------------------------------------------------
# Returns the TreeNode associated with a specified object:
#---------------------------------------------------------------------------
def _node_for ( self, object ):
""" Returns the TreeNode associated with a specified object.
"""
if ((type( object ) is tuple) and (len( object ) == 2) and
isinstance( object[1], TreeNode )):
return object
# Select all nodes which understand this object:
factory = self.factory
nodes = [ node for node in factory.nodes
if node.is_node_for( object ) ]
# If only one found, we're done, return it:
if len( nodes ) == 1:
return ( object, nodes[0] )
# If none found, give up:
if len( nodes ) == 0:
return ( object, None )
# Use all selected nodes that have the same 'node_for' list as the
# first selected node:
base = nodes[0].node_for
nodes = [ node for node in nodes if base == node.node_for ]
# If only one left, then return that node:
if len( nodes ) == 1:
return ( object, nodes[0] )
# Otherwise, return a MultiTreeNode based on all selected nodes...
# Use the node with no specified children as the root node. If not
# found, just use the first selected node as the 'root node':
root_node = None
for i, node in enumerate( nodes ):
if node.children == '':
root_node = node
del nodes[i]
break
else:
root_node = nodes[0]
# If we have a matching MultiTreeNode already cached, return it:
key = ( root_node, ) + tuple( nodes )
if key in factory.multi_nodes:
return ( object, factory.multi_nodes[ key ] )
# Otherwise create one, cache it, and return it:
factory.multi_nodes[ key ] = multi_node = MultiTreeNode(
root_node = root_node,
nodes = nodes )
return ( object, multi_node )
#---------------------------------------------------------------------------
# Returns the TreeNode associated with a specified class:
#---------------------------------------------------------------------------
def _node_for_class ( self, klass ):
""" Returns the TreeNode associated with a specified class.
"""
for node in self.factory.nodes:
if issubclass( klass, tuple( node.node_for ) ):
return node
return None
#---------------------------------------------------------------------------
# Returns the node and class associated with a specified class name:
#---------------------------------------------------------------------------
def _node_for_class_name ( self, class_name ):
""" Returns the node and class associated with a specified class name.
"""
for node in self.factory.nodes:
for klass in node.node_for:
if class_name == klass.__name__:
return ( node, klass )
return ( None, None )
#---------------------------------------------------------------------------
# Updates the icon for a specified node:
#---------------------------------------------------------------------------
def _update_icon(self, nid):
""" Updates the icon for a specified node.
"""
        raise NotImplementedError("Tree icons not implemented.")
expanded, node, object = self._get_node_data(nid)
# nid.setIcon(0, self._get_icon(node, object, expanded))
#---------------------------------------------------------------------------
# Begins an 'undoable' transaction:
#---------------------------------------------------------------------------
def _begin_undo ( self ):
""" Begins an "undoable" transaction.
"""
ui = self.ui
self._undoable.append( ui._undoable )
if (ui._undoable == -1) and (ui.history is not None):
ui._undoable = ui.history.now
#---------------------------------------------------------------------------
# Ends an 'undoable' transaction:
#---------------------------------------------------------------------------
    def _end_undo ( self ):
        """ Ends an "undoable" transaction.
        """
        if self._undoable.pop() == -1:
            self.ui._undoable = -1
#---------------------------------------------------------------------------
# Gets an 'undo' item for a change made to a node's children:
#---------------------------------------------------------------------------
    def _get_undo_item ( self, object, name, event ):
        """ Gets an 'undo' item for a change made to a node's children.
        """
        return ListUndoItem( object  = object,
                             name    = name,
                             index   = event.index,
                             added   = event.added,
                             removed = event.removed )
<reponame>py-sdl/py-sdl2<filename>sdl2/test/render_test.py
import sys
import copy
import pytest
import ctypes
from ctypes import byref, POINTER, c_int, c_float, sizeof
import sdl2
from sdl2 import SDL_Init, SDL_Quit, SDL_INIT_EVERYTHING, SDL_GetError
import itertools
from sdl2.stdinc import Uint8, Uint32, SDL_TRUE, SDL_FALSE
from sdl2.rect import SDL_FPoint
from sdl2.pixels import SDL_Color
from sdl2 import video, surface, pixels, blendmode, rect
from sdl2.ext.compat import byteify, stringify
from sdl2.ext.pixelaccess import PixelView
# TODO: Write tests for more functions
def _create_window(pos, size, flags=video.SDL_WINDOW_HIDDEN):
    # Convenience function to create a window for tests
    sdl2.SDL_ClearError()
    window = video.SDL_CreateWindow(
        b"Test", pos[0], pos[1], size[0], size[1], flags
    )
assert SDL_GetError() == b""
assert isinstance(window.contents, video.SDL_Window)
return window
def _get_renderflags():
flags = sdl2.SDL_RENDERER_ACCELERATED
if video.SDL_GetCurrentVideoDriver() == b"dummy":
flags = sdl2.SDL_RENDERER_SOFTWARE
return flags
@pytest.fixture
def testsurf(with_sdl):
# Create a solid black surface for tests
sf = surface.SDL_CreateRGBSurface(
0, 100, 100, 32, 0xFF000000, 0x00FF0000, 0x0000FF00, 0x000000FF
)
assert SDL_GetError() == b""
pixfmt = sf.contents.format.contents
fill = pixels.SDL_MapRGBA(pixfmt, 0, 0, 0, 255)
surface.SDL_FillRect(sf, None, fill)
assert SDL_GetError() == b""
yield sf
surface.SDL_FreeSurface(sf)
@pytest.fixture
def sw_renderer(testsurf):
renderer = sdl2.SDL_CreateSoftwareRenderer(testsurf)
assert SDL_GetError() == b""
assert isinstance(renderer.contents, sdl2.SDL_Renderer)
yield (renderer, testsurf)
sdl2.SDL_DestroyRenderer(renderer)
@pytest.fixture
def with_renderer(with_sdl):
flags = _get_renderflags()
sdl2.SDL_ClearError()
window = video.SDL_CreateWindow(
b"Test", 30, 30, 100, 100, video.SDL_WINDOW_HIDDEN
)
assert SDL_GetError() == b""
renderer = sdl2.SDL_CreateRenderer(window, -1, flags)
assert SDL_GetError() == b""
yield (renderer, window)
sdl2.SDL_DestroyRenderer(renderer)
video.SDL_DestroyWindow(window)
@pytest.fixture
def texture(with_renderer):
renderer, win = with_renderer
fmt = pixels.SDL_PIXELFORMAT_ARGB8888
access = sdl2.SDL_TEXTUREACCESS_STREAMING
tx = sdl2.SDL_CreateTexture(renderer, fmt, access, 16, 16)
assert SDL_GetError() == b""
assert isinstance(tx.contents, sdl2.SDL_Texture)
yield tx
sdl2.SDL_DestroyTexture(tx)
# Test structs and classes
def test_SDL_RendererInfo():
# Tested extensively in SDL_GetRenderDriverInfo
info = sdl2.SDL_RendererInfo()
assert isinstance(info, sdl2.SDL_RendererInfo)
def test_SDL_Renderer():
val = sdl2.SDL_Renderer()
assert isinstance(val, sdl2.SDL_Renderer)
def test_SDL_Texture():
val = sdl2.SDL_Texture()
assert isinstance(val, sdl2.SDL_Texture)
class TestSDLVertex(object):
__tags__ = ["sdl"]
def test_init(self):
# Test creating an SDL vertex without any args
vtx = sdl2.SDL_Vertex()
assert type(vtx.position) == rect.SDL_FPoint
assert type(vtx.color) == pixels.SDL_Color
assert type(vtx.tex_coord) == rect.SDL_FPoint
# Test creating a vertex with a custom position and color
pos = rect.SDL_FPoint(20, 30)
col = pixels.SDL_Color(255, 0, 0, 255)
vtx2 = sdl2.SDL_Vertex(pos, col)
assert vtx2.position.x == 20 and vtx2.position.y == 30
assert vtx2.color.r == 255 and vtx2.color.g == 0
# Test creating an SDL vertex using Python types
vtx3 = sdl2.SDL_Vertex([15, 25], [128, 127, 126], [5, 5])
assert vtx3.position.x == 15 and vtx3.position.y == 25
assert vtx3.color.r == 128 and vtx3.color.g == 127
assert vtx3.color.a == 255
assert vtx3.tex_coord.x == 5
# Test exceptions on bad input
with pytest.raises(ValueError):
sdl2.SDL_Vertex(10)
with pytest.raises(ValueError):
sdl2.SDL_Vertex(color="red")
def test_repr(self):
vtx = sdl2.SDL_Vertex([1.5, 4], [0, 0, 0, 255])
assert repr(vtx) == "SDL_Vertex(x=1.5, y=4.0, color=[0, 0, 0, 255])"
def test_copy(self):
vtx = sdl2.SDL_Vertex([15, 25], [128, 127, 126], [5, 5])
vtx2 = copy.copy(vtx)
assert vtx.position == vtx2.position
assert vtx.color == vtx2.color
assert vtx.tex_coord == vtx2.tex_coord
# Make sure editing the new copy doesn't affect the original
vtx2.position.x = 7
vtx2.color.r = 200
vtx2.tex_coord.y = 7
assert vtx.position != vtx2.position
assert vtx.color != vtx2.color
assert vtx.tex_coord != vtx2.tex_coord
# Test SDL2 renderer bindings
def test_SDL_GetNumRenderDrivers(with_sdl):
val = sdl2.SDL_GetNumRenderDrivers()
assert val >= 1
def test_SDL_GetRenderDriverInfo(with_sdl):
renderers = []
errs = []
pxformats = {}
drivers = sdl2.SDL_GetNumRenderDrivers()
for x in range(drivers):
sdl2.SDL_ClearError()
info = sdl2.SDL_RendererInfo()
ret = sdl2.SDL_GetRenderDriverInfo(x, info)
if ret != 0:
err = stringify(sdl2.SDL_GetError())
errs.append("Renderer {0} error: {1}".format(x, err))
continue
rname = stringify(info.name)
renderers.append(rname)
pxformats[rname] = []
for i in range(info.num_texture_formats):
fmt_name = pixels.SDL_GetPixelFormatName(info.texture_formats[i])
pxformats[rname].append(stringify(fmt_name).split("_")[-1])
assert len(renderers)
assert "software" in renderers
print("Render drivers supported by current SDL2 binary:")
print(renderers)
print("\nTexture formats supported by each renderer:")
for rname in renderers:
print(rname)
print(" - " + " ".join(pxformats[rname]))
def test_SDL_CreateWindowAndRenderer(with_sdl):
window = POINTER(video.SDL_Window)()
renderer = POINTER(sdl2.SDL_Renderer)()
ret = sdl2.SDL_CreateWindowAndRenderer(
10, 10, video.SDL_WINDOW_HIDDEN, byref(window), byref(renderer)
)
sdl2.SDL_DestroyRenderer(renderer)
video.SDL_DestroyWindow(window)
assert SDL_GetError() == b""
assert ret == 0
def test_SDL_CreateDestroyRenderer(with_sdl):
flags = _get_renderflags()
errs = {}
rcount = sdl2.SDL_GetNumRenderDrivers()
for i in range(rcount):
window = _create_window((30, 30), (100, 100))
renderer = sdl2.SDL_CreateRenderer(window, i, flags)
if (renderer and renderer.contents):
assert isinstance(renderer.contents, sdl2.SDL_Renderer)
sdl2.SDL_DestroyRenderer(renderer)
else:
name = "Renderer {0}".format(i)
errs[name] = stringify(sdl2.SDL_GetError())
video.SDL_DestroyWindow(window)
assert rcount > len(errs.keys()) # Make sure at least one working renderer
def test_SDL_CreateSoftwareRenderer(with_sdl):
sf = surface.SDL_CreateRGBSurface(
0, 100, 100, 32, 0xFF000000, 0x00FF0000, 0x0000FF00, 0x000000FF
)
renderer = sdl2.SDL_CreateSoftwareRenderer(sf)
assert SDL_GetError() == b""
assert isinstance(renderer.contents, sdl2.SDL_Renderer)
sdl2.SDL_DestroyRenderer(renderer)
surface.SDL_FreeSurface(sf)
def test_SDL_GetRenderer(with_sdl):
flags = _get_renderflags()
usable = 0
rcount = sdl2.SDL_GetNumRenderDrivers()
for i in range(rcount):
window = _create_window((30, 30), (100, 100))
renderer = sdl2.SDL_CreateRenderer(window, i, flags)
if (renderer and renderer.contents):
usable += 1
ren = sdl2.SDL_GetRenderer(window)
assert SDL_GetError() == b""
assert isinstance(ren.contents, sdl2.SDL_Renderer)
sdl2.SDL_DestroyRenderer(renderer)
assert not sdl2.SDL_GetRenderer(window)
video.SDL_DestroyWindow(window)
assert usable > 0
def test_SDL_GetRendererInfo(with_sdl):
renderers = []
max_sizes = {}
errs = []
flags = _get_renderflags()
rcount = sdl2.SDL_GetNumRenderDrivers()
for i in range(rcount):
sdl2.SDL_ClearError()
window = _create_window((30, 30), (100, 100))
renderer = sdl2.SDL_CreateRenderer(window, i, flags)
if not (renderer and renderer.contents):
err = stringify(sdl2.SDL_GetError())
errs.append("Unable to create renderer {0}: {1}".format(i, err))
video.SDL_DestroyWindow(window)
continue
assert isinstance(renderer.contents, sdl2.SDL_Renderer)
info = sdl2.SDL_RendererInfo()
ret = sdl2.SDL_GetRendererInfo(renderer, byref(info))
if ret == 0:
rname = stringify(info.name)
max_size = (info.max_texture_width, info.max_texture_height)
renderers.append(rname)
max_sizes[rname] = max_size
else:
err = stringify(sdl2.SDL_GetError())
errs.append("Renderer {0} error: {1}".format(i, err))
sdl2.SDL_DestroyRenderer(renderer)
video.SDL_DestroyWindow(window)
assert len(renderers)
assert "software" in renderers
print("Render drivers loadable on the current system:")
for rname in renderers:
w, h = max_sizes[rname]
print(" - " + rname + " (max texture size: {0}x{1})".format(w, h))
@pytest.mark.skip("not implemented")
def test_SDL_GetRendererOutputSize():
pass
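# A possible sketch for the skipped output-size test above (assumption: the renderer
# output size of a plain hidden 100x100 window equals the window size; the leading
# underscore keeps pytest from collecting this sketch as a real test).
def _sketch_SDL_GetRendererOutputSize(with_renderer):
    renderer, win = with_renderer
    w, h = c_int(0), c_int(0)
    ret = sdl2.SDL_GetRendererOutputSize(renderer, byref(w), byref(h))
    assert ret == 0
    assert (w.value, h.value) == (100, 100)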
def test_SDL_CreateDestroyTexture(with_renderer):
renderer, win = with_renderer
formats = (
pixels.SDL_PIXELFORMAT_ARGB8888,
pixels.SDL_PIXELFORMAT_RGB555,
pixels.SDL_PIXELFORMAT_RGBA4444,
pixels.SDL_PIXELFORMAT_RGBA8888,
pixels.SDL_PIXELFORMAT_ARGB2101010,
pixels.SDL_PIXELFORMAT_YUY2,
)
access = (
sdl2.SDL_TEXTUREACCESS_STATIC,
sdl2.SDL_TEXTUREACCESS_STREAMING,
sdl2.SDL_TEXTUREACCESS_TARGET,
)
sizes = [(4, 4), (7, 7), (64, 32), (256, 256), (512, 512)]
for fmt in formats:
for acc in access:
for w, h in sizes:
tx = sdl2.SDL_CreateTexture(renderer, fmt, acc, w, h)
assert SDL_GetError() == b""
assert isinstance(tx.contents, sdl2.SDL_Texture)
sdl2.SDL_DestroyTexture(tx)
# Test SDL error on bad input
sdl2.SDL_CreateTexture(
renderer, pixels.SDL_PIXELFORMAT_RGB555, 1, -8, 8
)
assert len(SDL_GetError()) > 0
def test_SDL_CreateTextureFromSurface(with_renderer, testsurf):
renderer, win = with_renderer
tx = sdl2.SDL_CreateTextureFromSurface(renderer, testsurf)
if sdl2.dll.version != 2008: # Weird non-fatal colorkey error on 2.0.8
assert SDL_GetError() == b""
assert isinstance(tx.contents, sdl2.SDL_Texture)
sdl2.SDL_DestroyTexture(tx)
def test_SDL_QueryTexture(with_renderer):
renderer, win = with_renderer
formats = (
pixels.SDL_PIXELFORMAT_ARGB8888,
pixels.SDL_PIXELFORMAT_RGB555,
pixels.SDL_PIXELFORMAT_RGBA4444,
pixels.SDL_PIXELFORMAT_RGBA8888,
pixels.SDL_PIXELFORMAT_ARGB2101010,
pixels.SDL_PIXELFORMAT_YUY2,
)
access = (
sdl2.SDL_TEXTUREACCESS_STATIC,
sdl2.SDL_TEXTUREACCESS_STREAMING,
sdl2.SDL_TEXTUREACCESS_TARGET,
)
sizes = [(4, 4), (7, 7), (64, 32), (256, 256), (512, 512)]
for fmt in formats:
for acc in access:
for w, h in sizes:
tx = sdl2.SDL_CreateTexture(renderer, fmt, acc, w, h)
assert isinstance(tx.contents, sdl2.SDL_Texture)
txf, txa, txw, txh = Uint32(0), c_int(0), c_int(0), c_int(0)
ret = sdl2.SDL_QueryTexture(
tx, byref(txf), byref(txa), byref(txw), byref(txh)
)
assert SDL_GetError() == b""
assert ret == 0
assert txf.value == fmt
assert txa.value == acc
assert txw.value == w
assert txh.value == h
sdl2.SDL_DestroyTexture(tx)
def test_SDL_GetSetTextureColorMod(texture):
colors = [
(16, 22, 185),
(32, 64, 128),
(64, 32, 128),
(64, 32, 255),
(255, 32, 128),
(255, 255, 255),
(128, 128, 128),
(0, 0, 0),
]
for r, g, b in colors:
ret = sdl2.SDL_SetTextureColorMod(texture, r, g, b)
assert SDL_GetError() == b""
assert ret == 0
tr, tg, tb = Uint8(0), Uint8(0), Uint8(0)
ret = sdl2.SDL_GetTextureColorMod(
texture, byref(tr), byref(tg), byref(tb)
)
assert SDL_GetError() == b""
assert ret == 0
assert (tr.value, tg.value, tb.value) == (r, g, b)
def test_SDL_GetSetTextureAlphaMod(texture):
for alpha in range(0, 255, 7):
ret = sdl2.SDL_SetTextureAlphaMod(texture, alpha)
assert SDL_GetError() == b""
assert ret == 0
talpha = Uint8(0)
ret = sdl2.SDL_GetTextureAlphaMod(texture, byref(talpha))
assert SDL_GetError() == b""
assert ret == 0
assert talpha.value == alpha
def test_SDL_GetSetTextureBlendMode(texture):
modes = (
blendmode.SDL_BLENDMODE_NONE,
blendmode.SDL_BLENDMODE_ADD,
blendmode.SDL_BLENDMODE_BLEND,
blendmode.SDL_BLENDMODE_MOD,
)
for mode in modes:
ret = sdl2.SDL_SetTextureBlendMode(texture, mode)
assert SDL_GetError() == b""
assert ret == 0
tmode = blendmode.SDL_BlendMode()
ret = sdl2.SDL_GetTextureBlendMode(texture, byref(tmode))
assert SDL_GetError() == b""
assert ret == 0
assert tmode.value == mode
@pytest.mark.skipif(sdl2.dll.version < 2012, reason="not available")
def test_SDL_GetSetTextureScaleMode(texture):
modes = (
sdl2.SDL_ScaleModeNearest,
sdl2.SDL_ScaleModeLinear,
sdl2.SDL_ScaleModeBest,
)
for mode in modes:
ret = sdl2.SDL_SetTextureScaleMode(texture, mode)
assert SDL_GetError() == b""
assert ret == 0
tmode = sdl2.SDL_ScaleMode()
ret = sdl2.SDL_GetTextureScaleMode(texture, byref(tmode))
assert SDL_GetError() == b""
assert ret == 0
assert tmode.value == mode
@pytest.mark.skipif(sdl2.dll.version < 2018, reason="not available")
def test_SDL_GetSetTextureUserData(texture):
# Create some user data and add it to the texture
dat_raw = ctypes.c_char_p(b"hello!")
dat = ctypes.cast(dat_raw, ctypes.c_void_p)
ret = sdl2.SDL_SetTextureUserData(texture, dat)
assert SDL_GetError() == b""
assert ret == 0
    # Try retrieving the user data pointer back from the texture
    ret = sdl2.SDL_GetTextureUserData(texture)
    assert SDL_GetError() == b""
    assert ctypes.cast(ret, ctypes.c_char_p).value == b"hello!"
"""
Weighted mini-bucket elimination for graphical models
Computes upper or lower bounds on the partition function or MAP/MPE configurations, depending on weights
Supports incremental construction
Supports TRW-based importance sampling
class WMB:
# attributes:
# elimOrder[i]
# priority[Xj] = i if elimOrder[i]=Xj
# bucket[i] = [nodei1 nodei2 ... ]
# matchlist[i] = [matchi1 matchi2 ...]
#
# class Node:
# clique = VarSet
# theta = factor (or list of factors?)
# weight = float
# parent = ref, children = [refs...] or index?
# msgFwd, msgBwd = factor
"""
from .factor import *
from .graphmodel import *
from builtins import range
try:
from itertools import izip
except:
izip = zip
reverse_enumerate = lambda l: izip(range(len(l)-1, -1, -1), reversed(l))
class WMB(object):
'''Class implementing weighted mini-bucket elimination inference'''
# Internal object / structure for representing a mini-bucket
class Node:
"""Internal container object for mini-bucket nodes"""
def __init__(self):
self.clique = VarSet()
self.theta = Factor().log()
self.weight = 1.0
self.parent = None
self.children = []
self.msgFwd = Factor().log()
self.msgBwd = Factor().log()
self.originals = []
def __repr__(self):
return "{}^{}".format(self.clique,self.weight)
def __str__(self):
return "{}".format(self.clique)
def __lt__(self, other):
return False # don't care about ordering nodes
class ConstantList:
def __init__(self, val):
self.val = val
def __getitem__(self, loc):
return self.val
def __init__(self, model, elimOrder=None, iBound=0, sBound=0, weights=1.0, attach=True, **kwargs):
# TODO: check if model isLog() true
# save a reference to our model
self.model = model
self.X = model.X
self.logValue = model.logValue
# create & process elimination ordering of the model:
if elimOrder is None: elimOrder = 'wtminfill'
if type(elimOrder) is str: # auto elim order: check that weights is string or float
if not type(weights) in {float, str}:
raise ValueError("Must specify elimination order or use all-equal weights (float or string)");
elimOrder = eliminationOrder(self.model, orderMethod=elimOrder)[0];
self.elimOrder = elimOrder
self.priority = [-1 for i in range(model.nvar)] # build priority of each var
for i,x in enumerate(elimOrder): self.priority[x] = i
# now build the mini-bucket data structure
self.buckets = [ [] for x in range(model.nvar) ] # bucket for each var: list of minibuckets
self.matches = [ [] for x in range(model.nvar) ] # matching sets for each bucket
self.setWeights(weights) # TODO: duplicate to initialize (!)
for f in model.factors:
if len(f.vars)==0: continue; #TODO: should add anyway (somewhere)
n = self.addClique(f.vars)
if attach: n.theta += f.log() # include log f(x) in node's log-factor
n.originals.append(f) # append (pointer to) original f for later reference
# and set the weights of the buckets:
self.setWeights(weights)
def setWeights(self,weights):
"""Set the weights of the inference problem.
weights = 'max+' or 0.0 => upper bound the MAP configuration
'sum+' or 1.0 => upper bound the partition function
'sum-' or -1.0 => lower bound the partition function
For more general bounds, weights = list of floats (one per variable)
"""
if type(weights) is str:
if weights == 'sum+': weights = 1.0;
elif weights=='sum-': weights = -1.0;
elif weights=='max+': weights = 1e-8;
else: raise ValueError("Unknown weight / task type; must be max+, sum+, sum-, or float / float list")
if type(weights) is float: weights = WMB.ConstantList(weights)
self.weights = weights
for i,xi in enumerate(self.elimOrder): # (TODO?) set mini-bucket weights uniformly
ni = len(self.buckets[i])
for j in range(ni): # set uniformly
self.buckets[i][j].weight = self.weights[xi]/ni
if self.weights[xi] < 0 and ni > 0: # uniform for lower bound:
self.buckets[i][0].weight = 1.0 - self.weights[xi]*(ni-1)/ni
def __nodeID(self,node):
"""Helper function: get identifier (bucket & location) of a given node"""
if not isinstance(node, WMB.Node): return None,None
i = min([self.priority[x] for x in node.clique]) # get bucket
j = self.buckets[i].index(node)
return i,j
def __repr__(self):
to_return = ""
for i,b in enumerate(self.buckets):
to_return += "{:03d}: ".format(int(self.elimOrder[i]))
for j,mb in enumerate(b):
to_return += "{!s}^{:.2f} => {}; ".format(mb,mb.weight, self.__nodeID(mb.parent))
to_return += "\n"
return to_return
# def draw(self):
# import pygraphviz
# G = pygraphviz.AGraph()
# for i,b in enumerate(self.buckets):
# for j,mb in enumerate(b):
# G.add_node(self.__nodeID(mb))
# for i,b in enumerate(self.buckets):
# for j,mb in enumerate(b):
# G.add_edge(self.__nodeID(mb),self.__nodeID(mb.parent))
# G.layout() # layout with default (neato)
# G.draw('wmb.png') # draw png
def draw(self):
import networkx as nx
pos,labels = {},{}
G = nx.DiGraph()
for i,b in enumerate(self.buckets):
for j,mb in enumerate(b):
G.add_node(str(mb))
pos[str(mb)] = (j,-i)
labels[str(mb)] = str(mb)
for i,b in enumerate(self.buckets):
for j,mb in enumerate(b):
if mb.parent is not None: G.add_edge(str(mb),str(mb.parent))
nx.draw(G, pos=pos, labels=labels)
return G
def addClique(self,vars):
"""Add a clique with scope "vars", fixing up structure to be a valid MB tree"""
vs = VarSet(vars)
corder = np.argsort( [self.priority[x] for x in vars] ) # get order in which eliminated
corder = [vars[c] for c in corder]
added = []
found = False
for x in corder:
if found: break
#print "bucket ",x
b = self.buckets[self.priority[x]]
to_remove = []
for mb in b:
#print " check ",mb
if mb.clique < vs:
to_remove.append(mb)
if mb.clique >= vs: # if we found a minibucket we can just join, do:
if len(added) > 0: # if we've added nodes, connect them as descendants
mb.children.append( added[-1] ) # of the found node, and found node as parent of last
added[-1].parent = mb
found = True # now, we don't need to keep generating parents
#print " Found!"
added.append(mb) # not really added, but the end of the added chain
break
# if we didn't find any mini-buckets we can join, we need to add one:
if not found: #
n = WMB.Node()
n.clique = VarSet(vs)
n.weight = -1e-3 if self.weights[x] < 0 else 1e-3; # TODO: small non-zero weights
#print "adding ",n," to ",self.priority[x]
b.append(n)
if len(added) > 0: # then, last added node is the child of this one
n.children.append(added[-1])
added[-1].parent = n
added.append(n) # put in added list
vs -= [x] # next bucket is what's left after x is eliminated
for mb in to_remove:
for c in mb.children: c.parent = n # change children to point to new node, and
n.children.extend(mb.children) # merge with current child list
n.weight += mb.weight # join weights into new node
if mb.parent is not None: # if mb has a parent, shift factors around to preserve bound
mb.theta -= mb.msgFwd
mb.parent.theta += mb.msgFwd
mb.parent.children.remove(mb)
n.theta += mb.theta # join log-factors into new node
n.originals.extend(mb.originals) # move original factor pointers to new node
b.remove(mb)
#n.theta += Factor(n.clique,0.0); # need to do this at some point to ensure correct elim
# TODO: fix up match structure?
# done adding all required cliques; return 1st
return added[0]
def detachFactors(self):
"""Remove factor tables from their associated cliques; speeds up scope-based merging"""
for b in self.buckets:
for mb in b:
mb.theta = Factor([],0.)
def attachFactors(self):
"""Re-attach factor tables to their associated cliques for evaluation"""
for b in self.buckets:
for mb in b:
mb.theta = Factor([],0.)
for f in mb.originals: mb.theta += f.log()
# TODO: check if already in log form???
def memory(self, bucket=None, use_backward=True):
"""Compute the total memory (in MB) required for this mini-bucket approximation"""
mem = 0.
use_buckets = self.buckets if bucket is None else [self.buckets[bucket]]
for b in use_buckets:
for mb in b:
mem += mb.clique.nrStatesDouble() * mb.theta.table.itemsize
# TODO: add forward & backward message costs here also
return mem / 1024. / 1024.
# TODO: convert to external function? pass variable in; check if refinement of another?
# Is score correct, or inverted? check
def scoreByScope(self, ibound=None, sbound=None):
"""Returns a scope-based scoring function for use in merge()"""
def score(m1,m2):
jt = m1.clique | m2.clique
if ibound is not None and len(jt) > ibound: return -1
if sbound is not None and jt.nrStates() > sbound: return -1
# TODO: also disallow if not consistent with some specified scope sets?
mx,mn = max([len(m1.clique),len(m2.clique)]), min([len(m1.clique),len(m2.clique)])
return 1.0/(float(mx)+float(mn)/mx)
# return the scoring function
return score
# score = len(max)+len(min)/len(max) if union < iBound else -1 for scope
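    # Illustrative usage sketch (assumptions: `model` is a graphical model from this
    # package and an elimination order can be found automatically):
    #
    #     wmb = WMB(model, weights='sum+')           # upper bound on the partition function
    #     wmb.merge(wmb.scoreByScope(ibound=8))      # join mini-buckets up to clique size 8
    #     print(wmb.memory(), "MB of clique tables")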
def merge(self, score):
from heapq import heappush,heappop
try:
from itertools import count
tiebreak = count().__next__ # need tiebreaker value for priority queue (?)
except:
tiebreak = lambda: 0
for b in self.buckets:
            priority
import json
from datetime import datetime
import asyncio
import gspread_asyncio
from oauth2client.service_account import ServiceAccountCredentials
class Handlers:
"""
A collection of multiple handlers
"""
class JSON:
"""
Simple JSON handler for reading and dumping JSON data into files
"""
@staticmethod
def read(file):
"""
Reads files and returns their JSON data
:param (str) file: The file to read
:returns (dict) data: JSON data of the file
"""
with open(f"{file}.json", "r", encoding="utf8") as file:
data = json.load(file)
return data
@staticmethod
def dump(file, data):
"""
Dumps JSON data into a file
:param (str) file: The file to dump data to
:param (dict) data: The JSON data to dump into the file
"""
with open(f"{file}.json", "w", encoding="utf8") as file:
json.dump(data, file, indent=4)
class Mojang:
"""
An API handler for Mojang's API
"""
def __init__(self, session):
self.mojang_url = "https://api.mojang.com/"
self.session = session
async def get_player_uuid(self, username: str):
"""
Gets the Minecraft UUID of the player
:param (str) username: The player's username
:returns (str) uuid: The player's Minecraft UUID
"""
async with self.session.get(f"{self.mojang_url}users/profiles/minecraft/{username}") as data:
data = await data.text()
return json.loads(data)['id']
async def get_player_username(self, uuid: str):
"""
Gets the Minecraft username of the player
:param (str) uuid: The player's Minecraft UUID
:returns (str) username: The player's username
"""
async with self.session.get(f"{self.mojang_url}user/profiles/{uuid}/names") as data:
data = await data.text()
return json.loads(data)[-1]['name']
class Spreadsheet:
"""
An API handler for Google Spreadsheets
"""
def __init__(self, key):
self.key = key
self.client_manager = gspread_asyncio.AsyncioGspreadClientManager(self.get_credentials)
self.worksheet = None
@staticmethod
def get_credentials():
"""
Gets the credentials because it is required for the AsyncioGspreadClientManager
:returns (ServiceAccountCredentials): The credentials
"""
return ServiceAccountCredentials.from_json_keyfile_name(
"google_service_account_secret.json",
[
"https://spreadsheets.google.com/feeds",
"https://www.googleapis.com/auth/drive",
"https://www.googleapis.com/auth/spreadsheets",
],
)
async def auth(self):
"""
Authenticates the AsyncioGspreadClientManager for Google Spreadsheets
"""
agc = await self.client_manager.authorize()
spreadsheet = await agc.open_by_key(self.key)
self.worksheet = spreadsheet.sheet1
def get_all_users(self):
"""
Gets all users in the Google Spreadsheets worksheet and gets their 'paid' and 'paid_to' data
:returns (dict) users: The dict with all the users and their 'paid' and 'paid_to' data
"""
users = {}
records = self.worksheet.get_all_records()
for record in records:
paid = record['paid']
if paid == "TRUE":
paid = True
else:
paid = False
users[record['uuid']] = {
"paid": paid,
"paid_to": record['paid_to']
}
return users
def clear(self):
"""
Clears the worksheet
"""
self.worksheet.clear()
def append_row(self, row: list):
"""
Appends a row to the worksheet
:param (list) row: The row to append
"""
self.worksheet.append_row(row)
def insert_rows(self, rows: list, index: int = 1):
"""
Inserts multiple rows in the worksheet
:param (list) rows: The rows to insert
:param (int) index: The index to insert the rows at
"""
self.worksheet.insert_rows(rows, index)
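        # Illustrative usage sketch (assumptions: a valid worksheet key and the
        # service-account file referenced in get_credentials both exist):
        #
        #     sheet = Handlers.Spreadsheet("SHEET_KEY")   # "SHEET_KEY" is a placeholder
        #     await sheet.auth()                          # inside an async coroutine
        #     users = sheet.get_all_users()               # {uuid: {"paid": ..., "paid_to": ...}}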
class SkyBlock:
"""
An API handler for Hypixel's API
"""
def __init__(self, key, session):
self.key = key
self.hypixel_url = "https://api.hypixel.net/"
self.session = session
self.skill_xp = Handlers.JSON.read("skills")
self.skills = {
"experience_skill_combat": "skyblock_combat",
"experience_skill_mining": "skyblock_excavator",
"experience_skill_alchemy": "skyblock_concoctor",
"experience_skill_farming": "skyblock_harvester",
"experience_skill_enchanting": "skyblock_augmentation",
"experience_skill_fishing": "skyblock_angler",
"experience_skill_foraging": "skyblock_gatherer",
"experience_skill_taming": "skyblock_domesticator"
}
self.pet_levels = Handlers.JSON.read("pet_rarity")
async def api_request(self, endpoint: str, params: dict):
"""
Makes an API request to the Hypixel API
:param (str) endpoint: The endpoint to make the request to
:param (dict) params: The parameters of the request
:returns (dict) data: The output of the request
:raises (Exception) error: Raises an Exception based on what cause Hypixel's API returns
"""
async with self.session.get(self.hypixel_url + endpoint, params=params) as data:
data = await data.text()
data = json.loads(data)
if data['success']:
return data
else:
if data['cause'] == "Key throttle":
await asyncio.sleep(10)
await self.api_request(endpoint, params)
else:
raise Exception(data['cause'])
async def get_hypixel_profile(self, uuid):
"""
Gets the Hypixel profile of a user
:param (str) uuid: Player's UUID
:returns (dict) profile: Player's Hypixel profile
"""
data = await self.api_request("player", {"key": self.key, "uuid": uuid})
return data['player']
async def get_profiles(self, uuid: str):
"""
Gets all Hypixel SkyBlock profiles of a user
:param (str) uuid: Player's UUID
:returns (list) profiles: A list of all player's Hypixel SkyBlock profiles
"""
data = await self.api_request("skyblock/profiles", {"key": self.key, "uuid": uuid})
return data['profiles']
async def get_unclaimed_auctions(self, profile_id: str):
"""
Gets all Hypixel SkyBlock unclaimed auctions of a profile
:param (str) profile_id: The ID of the profile
:returns (list) auctions: The list of auctions for that profile ID
"""
data = await self.api_request("skyblock/auction", {"key": self.key, "profile": profile_id})
auctions = []
ahs = data['auctions']
for auction in ahs:
if not auction['claimed']:
auctions.append(auction)
return auctions
async def get_auctions(self, page: int):
"""
Gets Hypixel SkyBlock auctions
:param (int) page: The page number, each page returns a dict with a list which has a 1000 auctions
:returns (dict) data: The auction data
"""
data = await self.api_request("skyblock/auctions", {"key": self.key, "page": page})
return data
async def get_bazaar_product(self, product_id: str):
"""
Gets Hypixel SkyBlock bazaar information about a product
:param (str) product_id: The ID of the product to get the information from
:returns (dict) data: Bazaar product information
"""
data = await self.api_request("skyblock/bazaar", {"key": self.key})
return data['products'][product_id]
async def get_guild(self, guild_id: str):
"""
Gets information about a Hypixel guild
:param (str) guild_id: The ID of the guild
:returns (dict) data: Guild information
"""
data = await self.api_request("guild", {"key": self.key, "id": guild_id})
return data['guild']
@staticmethod
def calculate_latest_profile(profiles: list, uuid: str):
"""
Calculates the latest Hypixel SkyBlock profile
:param (list) profiles: A list of Hypixel SkyBlock profiles
:param (str) uuid: The UUID of the person to check the latest profile
:returns (dict) profile: The most recently used profile
"""
profile_timestamps = {}
profile_data = {}
for profile in profiles:
try:
last_save = profile['members'][uuid]['last_save']
last_save = datetime.fromtimestamp(last_save / 1000)
last_save_diff = (datetime.now() - last_save).total_seconds()
profile_id = profile['profile_id']
profile_timestamps[profile_id] = last_save_diff
profile_data[profile_id] = profile
except KeyError:
pass
profile_id = min(profile_timestamps, key=profile_timestamps.get)
return profile_data[profile_id]
def calculate_profile_skills(self, profile: dict, hypixel_profile: dict, uuid: str):
"""
Calculates Hypixel SkyBlock profile's skills
:param (dict) profile: The profile which should be used to calculate Hypixel SkyBlock skills
:param (dict) hypixel_profile: Hypixel profile of the person
:param (str) uuid: The UUID of the person to calculate the skills
:returns (list) skill_levels: A list of all Hypixel SkyBlock profile's skills and the average skill level
"""
player_profile = profile['members'][uuid]
skill_levels = {}
for skill in self.skills.keys():
try:
xp = player_profile[skill]
if xp < self.skill_xp['1']:
skill_levels[skill] = 0
                    continue
for i in range(50, 0, -1):
required_xp = self.skill_xp[str(i)]
if xp > required_xp:
skill_levels[skill] = i
break
except KeyError:
try:
skill_levels[skill] = hypixel_profile['achievements'][self.skills[skill]]
except KeyError:
skill_levels[skill] = 0
skill_levels['average_skill_level'] = sum(skill_levels.values()) / 8
return skill_levels
def calculate_profile_skill_xp(self, profile: dict, uuid: str):
"""
Calculates Hypixel SkyBlock profile's skill xp
:param (dict) profile: The profile which should be used to calculate Hypixel SkyBlock skill xp
:param (str) uuid: The UUID of the person to calculate the skill xp
:returns (list or None) skill_level_xp: The skill level xp, None if the skill API is off
"""
player_profile = profile['members'][uuid]
skill_level_xp = {}
for skill in self.skills.keys():
try:
skill_level_xp[skill] = player_profile[skill]
except KeyError:
return None
skill_level_xp['experience_skill_catacombs'] = \
player_profile['dungeons']['dungeon_types']['catacombs']['experience']
return skill_level_xp
@staticmethod
def calculate_profile_slayers(profile: dict, uuid: str):
"""
Calculates Hypixel SkyBlock profile's slayers
:param (dict) profile: The profile which should be used to calculate Hypixel SkyBlock slayers
:param (str) uuid: The UUID of the person to calculate the slayers
:returns (list) slayer_bosses: A list of all Hypixel SkyBlock profile's slayers and the total slayer xp
"""
player_profile = profile['members'][uuid]
slayers = player_profile['slayer_bosses']
total_slayer_xp = 0
slayer_bosses = {}
for slayer_boss in slayers:
if 'xp' in slayers[slayer_boss]:
money_spent = 0
tiers = [0, 1, 2, 3]
tier_money = [100, 2000, 10000, 50000]
for tier in tiers:
try:
money_spent += slayers[slayer_boss][f'boss_kills_tier_{tier}'] * tier_money[tier]
except KeyError:
pass
slayer_bosses[slayer_boss] = {"xp": slayers[slayer_boss]['xp'], "money_spent": money_spent}
total_slayer_xp += slayers[slayer_boss]['xp']
else:
slayer_bosses[slayer_boss] = {"xp": 0, "money_spent": 0}
slayer_bosses['total'] = total_slayer_xp
return slayer_bosses
def calculate_profile_pets(self, profile: dict, uuid: str):
"""
Calculates Hypixel SkyBlock profile's pets
:param (dict) profile: The profile which should be used to calculate Hypixel SkyBlock pets
:param (str) uuid: The UUID of the person to calculate the pets
:returns (dict) pets: A dict of pets and the information about them
(level, xp, rarity, active, held_item, candy_used)
"""
player_profile = profile['members'][uuid]
# ollimacp/spacial-boxcounting-cpu-gpu: BoxcountFeatureExtr.py
import numpy as np
from numba import jit # Numba translates Python functions to optimized machine code at runtime and results in significant speedups
import time
#for debugging
import linecache
import sys
def PrintException():
exc_type, exc_obj, tb = sys.exc_info()
f = tb.tb_frame
lineno = tb.tb_lineno
filename = f.f_code.co_filename
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
print('EXCEPTION IN ({}, LINE {} "{}"): {}'.format(filename, lineno, line.strip(), exc_obj))
verbosity = False # False: no text displayed ; True: potentially useful printouts are shown for info/bugfixing.
from PIL import Image #Imagemanipulation via pillow image module
import matplotlib.pyplot as plt # Python plotting libary for visualizing graphs, data and images
import matplotlib.image as img
#Helper-function to show any np.array as a picture with a chosen title and a colormapping
def showNPArrayAsImage(np2ddArray, title, colormap):
plt.figure() #Init figure
plt.imshow(np2ddArray, #Gererate a picture from np.array and add to figure
interpolation='none',
cmap = colormap)
plt.title(title) #Add title to figure
plt.show(block=False) #Show array as picture on screen, but dont block the programm to continue.
'''
class spacialBoxcounting():
def __init__(self):
super(spacialBoxcounting, self).__init__()
print("INIT------ENCODER----------------")
'''
#If you want to compare the jit-compiler to the standard python interpreter just un/comment all @jit(... lines.
@jit(nopython= True) # Set "nopython" mode for best performance, equivalent to @njit
def Z_boxcount(GlidingBox, boxsize,MaxValue):
continualIndexes = GlidingBox/boxsize # tells in which boxindex the given value is in a continual way
Boxindexes = np.floor(continualIndexes) # Round all boxindexes down to ints to get the boxindex of each value in the gliding box
# If a value is in a given box, the boxcount increases by 1, but no further, when another value is in the same box
unique_Boxes = np.unique(Boxindexes) # numpy helper function to create a list of the unique values by discarding doubles
    counted_Boxes = len(unique_Boxes) # the length of the list of all unique indexes is the number of unique counted boxes within the gliding box and the z-range
if verbosity == True:
print(continualIndexes)
print("Boxindex",Boxindexes)
print("counted_Boxes",counted_Boxes)
#CREATE List of SumPixInBox for all boxes to calc lacunarity
InitalEntry = [0.0]
SumPixInBox = np.array(InitalEntry)
    #For tiny boxes this can be computationally expensive
#for every unique counted boxindex in the list of all unique boxindexes
for unique_BoxIndex in unique_Boxes:
#set element-wise to True, when a boxindex is equal to the chosen unique boxindex
ElementsCountedTRUTHTABLE = Boxindexes == unique_BoxIndex
#the sum of the True elements represent the count of datapoints/pixel/voxel within the chosen box
ElementsCounted = np.sum(ElementsCountedTRUTHTABLE)
#Append the list of ElementsCounted to the list of all Elementcounted-lists to calc lacunarity later
SumPixInBox = np.append(SumPixInBox, ElementsCounted)
if verbosity == True:
print("unique_BoxIndex",unique_BoxIndex)
print("ElementsCountedTRUTHTABLE",ElementsCountedTRUTHTABLE)
print("ElementsCounted",ElementsCounted)
# Because the lacunarity is calculated with the standard deviation of all elementscounted
# and the not counted boxes have a value of 0 boxes counted in this box
# we have to calc the number of empty boxes by subtracting all counted boxes from the total amount of possible boxes
Max_Num_Boxes = int(MaxValue/ boxsize)
Num_empty_Boxes = Max_Num_Boxes- counted_Boxes
if Num_empty_Boxes <1:
pass
# if there is are no empty boxes, just pass
else:
EmptyBoxes = np.zeros(Num_empty_Boxes)
SumPixInBox = np.append(SumPixInBox, EmptyBoxes)
# calcs the mean of the list of all counted datapoints/pixel/voxel within chosen boxes and then...
mean = np.mean(SumPixInBox) # ...calcs the standard deviation of the same
standardDeviation = np.std(SumPixInBox)
#The lacunarity or spacial heterogenity = (standard deviation/mean)^2
Lacunarity=np.power(standardDeviation/mean,2)
if verbosity == True:
print("Max_Num_Boxes",Max_Num_Boxes)
print("Num_empty_Boxes",Num_empty_Boxes)
print("mean",mean)
print("standardDeviation",standardDeviation)
print("die Lacunarity ist", Lacunarity)
return counted_Boxes, Lacunarity
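# A minimal illustrative sketch (not part of the original module): calls Z_boxcount
# on a hand-made 2x2 gliding box of 8-bit style values. The numbers are assumptions
# chosen only to make the counting easy to follow by hand.
def _demo_z_boxcount():
    toy_box = np.array([[0.0, 10.0], [120.0, 130.0]])
    # With boxsize 2 the values fall into z-boxes 0, 5, 60 and 65 -> 4 counted boxes
    # out of 256/2 = 128 possible; the remaining empty boxes enter the lacunarity.
    counted, lacunarity = Z_boxcount(toy_box, 2, 256)
    print("counted boxes:", counted, "lacunarity:", lacunarity)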
@jit(nopython= True) #False,forceobj=True) # Set "nopython" mode for best performance, equivalent to @njit
def spacialBoxcount(npOutputFile, iteration,MaxValue):
'''
    This function takes in a 2D np.array, the iteration which determines the boxsize,
    and the maximum possible value to set up the value range (8-bit -> 256, hexadecimal -> 16).
    The function returns a 2-channel 2D array containing the spacial boxcount ratio and the
    spacial lacunarity, scaled down in size by 1/Boxsize[iteration].
'''
Boxsize=[2,4,8,16,32,64,128,256,512,1024] #All boxsizes
boxsize = Boxsize[iteration] #specified boxsize
#Init counting box at x=0,y=0 und z=0
BoxBoundriesX = np.array([0,Boxsize[iteration]])
BoxBoundriesY = np.array([0,Boxsize[iteration]])
Boxcount = 0
YRange, XRange = npOutputFile.shape
#The maximum index of box with given boxsize in x and y direction
maxIndexY = YRange / boxsize
maxIndexY = int(maxIndexY)+1
maxIndexX = XRange / boxsize
maxIndexX = int(maxIndexX)+1
if verbosity == True:
print( "XRange, YRange", XRange, YRange)
print("maxIndexX: ",maxIndexX,"maxIndexY: ",maxIndexY)
#Initialize the BoxcountRatio_map and spacial_lacunarity_map with zeros in correct shape
BoxCountR_map = np.zeros((maxIndexY,maxIndexX))
spa_Lac_map = np.zeros((maxIndexY,maxIndexX))
while BoxBoundriesY[1]<=YRange:
while BoxBoundriesX[1]<=XRange:
#Set up Boxindex with boxsize
indexY = int(BoxBoundriesY[0]/boxsize)
indexX = int(BoxBoundriesX[0]/boxsize)
            #Define gliding box with boundaries ... e.g. boxsize 4 -> box boundaries [0,4],[4,8] -> works out evenly for n² sizes as in images etc. and stitching is possible
GlidingBox = npOutputFile[BoxBoundriesY[0]:BoxBoundriesY[1],BoxBoundriesX[0]:BoxBoundriesX[1]]
counted_Boxes, Lacunarity = Z_boxcount(GlidingBox, boxsize, MaxValue)
#Despite counting the Boxes like in the original algorithm, the counts are normalized from 0...1
#0 means there was nothing counted inside and 1 means every possible box is filled in
Max_Num_Boxes = int(MaxValue/ boxsize)
counted_Box_Ratio = counted_Boxes / Max_Num_Boxes
BoxCountR_map[indexY,indexX] = counted_Box_Ratio
spa_Lac_map[indexY,indexX] = Lacunarity
#move box into x direction, while boxboundriesx are <= xrange
BoxBoundriesX[0]+=Boxsize[iteration]
BoxBoundriesX[1]+=Boxsize[iteration]
if verbosity == True:
print("indexX: ",indexX)
print("indexY: ",indexY)
print("BoxBoundriesX: ",BoxBoundriesX)
print("BoxBoundriesY: ",BoxBoundriesY)
print("GlidingBox: ", GlidingBox)
print("counted_Boxes, Lacunarity.: ",counted_Boxes, Lacunarity)
#By exit inner while loop, box has reached end of x axis in array, so reset boxboundriesX to start
BoxBoundriesX[0]=0
BoxBoundriesX[1]=Boxsize[iteration]
#and increase the counting box in y direction by a boxsize to scan the next line
BoxBoundriesY[0]+=Boxsize[iteration]
BoxBoundriesY[1]+=Boxsize[iteration]
BoxCountR_SpacialLac_map = [BoxCountR_map, spa_Lac_map]
if verbosity == True:
print(BoxCountR_map)
print(spa_Lac_map)
print("Iteration ", iteration, "calculation done")
return BoxCountR_SpacialLac_map
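# Illustrative sketch (assumption: an 8-bit style grayscale array with values 0..255):
# computes the boxcount-ratio and lacunarity maps for boxsize 2 (iteration 0).
def _demo_spacial_boxcount():
    toy_image = (np.arange(16 * 16).reshape(16, 16) % 256).astype(np.float64)
    boxcount_map, lacunarity_map = spacialBoxcount(toy_image, 0, 256)
    print(boxcount_map.shape, lacunarity_map.shape)  # both (16 // 2 + 1, 16 // 2 + 1) = (9, 9)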
def MultithreadBoxcount(npOutputFile):
'''
    To gain another speedup over sequentially generated output, multithreading is used
    to calculate the spacial boxcount ratios/lacunarities for each boxsize in its own thread.
'''
#MULTICORE APROACH
#print("Beginn Multithread Boxcount Lacunarity feature extraction")
BoxsizeDict={"2":0 ,"4":1,"8":2,"16":3,"32":4,"64":5,"128":6,"256":7,"512":8,"1024":9}
#Cut to lenght
Height , width = npOutputFile.shape
Height , width = int(Height) , int(width)
BaseITERMinVal = min(16,Height , width )
BaseIteration = BoxsizeDict[str(int(BaseITERMinVal))] #without 0 there are 1 more processes
    maxiteration = BaseIteration +1 # to calc lacunarity there has to be more than just one box in the z direction
#source: [17] https://stackoverflow.com/questions/6893968/how-to-get-the-return-value-from-a-thread-in-python
def BoxcountBoxsizeWorker(npOutputFile, iteration):
        maxvalue = 256 # because the z-height is 0...255: an 8-bit grayscale picture
#adjust for every specific input
BoxCountR_SpacialLac_map = spacialBoxcount(npOutputFile, iteration,maxvalue )
return BoxCountR_SpacialLac_map
from threading import Thread
#Create thread-class with ability to return a value, which is not possible in threading
class ThreadWithReturnValue(Thread):
def __init__(self, group=None, target=None, name=None,
args=(), kwargs={}, Verbose=None):
Thread.__init__(self, group, target, name, args, kwargs)
self._return = None
def run(self):
#print(type(self._target))
if self._target is not None:
self._return = self._target(*self._args,
**self._kwargs)
def join(self, *args):
Thread.join(self, *args)
return self._return
#Init number of needed threads
threads = [None] * maxiteration
if verbosity: print("Generate ",maxiteration,"threads")
start = time.time()
for i in range(len(threads)):
threads[i] = ThreadWithReturnValue(target=BoxcountBoxsizeWorker, args=(npOutputFile, i))
threads[i].start()
if verbosity == True:
print("thread ",i+1," has started")
BoxCountR_SpacialLac_map_Dict = {"iteration": np.array(["BoxcountRatio","spacialLacunarity"]) }
for i in range(len(threads)):
BoxCountR_SpacialLac_map = np.array(threads[i].join())
BoxCountR_SpacialLac_map_Dict[i]= BoxCountR_SpacialLac_map
if verbosity == True :
#print(BoxCountR_SpacialLac_map)
print(BoxCountR_SpacialLac_map.shape)
print(type(BoxCountR_SpacialLac_map))
print("Thread ",i," JOINED")
end = time.time()
print(round(end - start,3),"seconds for spacial boxcounting with ",i+1, "iterations/scalings")
if verbosity:
input("Press any key to continue with next file. \n Attention: verbosity adds much size to the output of jupyter notebook. If the file > 120'ish MB, jupyter notebook crashes. So use just for debugging for beware. ")
return BoxCountR_SpacialLac_map_Dict
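# Illustrative usage sketch (assumptions: "example.png" is a placeholder path to an
# 8-bit grayscale image; the integer keys of the returned dict hold one
# [boxcount-ratio, lacunarity] map pair per box size, the "iteration" key holds labels).
def _demo_multithread_boxcount():
    gray = np.array(Image.open("example.png").convert("L"), dtype=np.float64)
    maps_per_boxsize = MultithreadBoxcount(gray)
    for iteration in range(len(maps_per_boxsize) - 1):  # skip the "iteration" label entry
        boxcount_map, lacunarity_map = maps_per_boxsize[iteration]
        print(iteration, boxcount_map.shape, float(np.mean(lacunarity_map)))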
class Visualize():
def __init__(self):
#super(Encoder, self).__init__()
print("INIT------Visualizer----------------")
#FUNCTION FOR ITERATING OVER A FOLDER, EXECUTING BOXCOUNTING AND DISPLAY ESULTS
self.foldername = "Images" # The foldername where the images are taken from
#self.foldername = "MISC" # for saving special fotos iterate over MISC
self.whereTObreakIteration = 100 #abort after 100 pictures for time testing
import pathlib #Import pathlib to create a link to the directory where the file is at.
        #just has to
import numpy as np
import pytest
import psyneulink as pnl
import psyneulink.core.llvm as pnlvm
from psyneulink.core.compositions.composition import Composition
from psyneulink.core.components.functions.nonstateful.combinationfunctions import Reduce
from psyneulink.core.components.functions.nonstateful.distributionfunctions import NormalDist
from psyneulink.core.components.functions.function import FunctionError, get_matrix
from psyneulink.core.components.functions.nonstateful.learningfunctions import Reinforcement
from psyneulink.core.components.functions.stateful.integratorfunctions import AccumulatorIntegrator
from psyneulink.core.components.functions.nonstateful.transferfunctions import Linear, Logistic
from psyneulink.core.components.mechanisms.mechanism import MechanismError
from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferError, TransferMechanism
from psyneulink.core.globals.keywords import MATRIX_KEYWORD_VALUES, RANDOM_CONNECTIVITY_MATRIX, RESULT
from psyneulink.core.globals.preferences.basepreferenceset import REPORT_OUTPUT_PREF, VERBOSE_PREF
from psyneulink.core.globals.parameters import ParameterError
from psyneulink.core.scheduling.condition import Never
from psyneulink.library.components.mechanisms.processing.transfer.recurrenttransfermechanism import \
RecurrentTransferError, RecurrentTransferMechanism
from psyneulink.library.components.projections.pathway.autoassociativeprojection import AutoAssociativeProjection
class TestMatrixSpec:
def test_recurrent_mech_matrix(self):
T = TransferMechanism(default_variable=[[0.0, 0.0, 0.0]])
recurrent_mech = RecurrentTransferMechanism(default_variable=[[0.0, 0.0, 0.0]],
matrix=[[1.0, 2.0, 3.0],
[2.0, 1.0, 2.0],
[3.0, 2.0, 1.0]])
c = Composition(pathways=[T, recurrent_mech])
results = []
def record_trial():
results.append(recurrent_mech.parameters.value.get(c))
c.run(inputs=[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]],
call_after_trial=record_trial)
assert True
def test_recurrent_mech_auto_associative_projection(self):
T = TransferMechanism(default_variable=[[0.0, 0.0, 0.0]])
recurrent_mech = RecurrentTransferMechanism(default_variable=[[0.0, 0.0, 0.0]],
matrix=AutoAssociativeProjection)
c = Composition(pathways=[T, recurrent_mech])
results = []
def record_trial():
results.append(recurrent_mech.parameters.value.get(c))
c.run(inputs=[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]],
call_after_trial=record_trial)
def test_recurrent_mech_auto_auto_hetero(self):
T = TransferMechanism(default_variable=[[0.0, 0.0, 0.0]])
recurrent_mech = RecurrentTransferMechanism(default_variable=[[0.0, 0.0, 0.0]],
auto=3.0,
hetero=-7.0)
c = Composition(pathways=[T, recurrent_mech])
results = []
def record_trial():
results.append(recurrent_mech.parameters.value.get(c))
c.run(inputs=[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]],
call_after_trial=record_trial)
class TestRecurrentTransferMechanismInputs:
def test_recurrent_mech_empty_spec(self):
R = RecurrentTransferMechanism(auto=1.0)
np.testing.assert_allclose(R.value, R.defaults.value)
np.testing.assert_allclose(R.defaults.variable, [[0]])
np.testing.assert_allclose(R.matrix.base, [[1]])
def test_recurrent_mech_check_attrs(self):
R = RecurrentTransferMechanism(
name='R',
size=3,
auto=1.0
)
print("matrix = ", R.matrix.base)
print("auto = ", R.auto)
print("hetero = ", R.hetero)
# np.testing.assert_allclose(R.value, R.defaults.value)
# np.testing.assert_allclose(R.defaults.variable, [[0., 0., 0.]])
# np.testing.assert_allclose(R.matrix.base, [[1., 1., 1.], [1., 1., 1.], [1., 1., 1.]])
def test_recurrent_mech_check_proj_attrs(self):
R = RecurrentTransferMechanism(
name='R',
size=3
)
np.testing.assert_allclose(R.recurrent_projection.matrix.base, R.matrix.base)
assert R.recurrent_projection.sender is R.output_port
assert R.recurrent_projection.receiver is R.input_port
@pytest.mark.mechanism
@pytest.mark.recurrent_transfer_mechanism
@pytest.mark.benchmark(group="RecurrentTransferMechanism")
def test_recurrent_mech_inputs_list_of_ints(self, benchmark, mech_mode):
R = RecurrentTransferMechanism(
name='R',
default_variable=[0, 0, 0, 0]
)
EX = pytest.helpers.get_mech_execution(R, mech_mode)
val1 = EX([10, 12, 0, -1])
val2 = EX([1, 2, 3, 0])
# The outputs match inputs because recurrent projection is
# not used when executing: mech is reset each time
np.testing.assert_allclose(val1, [[10.0, 12.0, 0, -1]])
np.testing.assert_allclose(val2, [[1, 2, 3, 0]])
if benchmark.enabled:
benchmark(EX, [1, 2, 3, 0])
@pytest.mark.mechanism
@pytest.mark.recurrent_transfer_mechanism
@pytest.mark.benchmark(group="RecurrentTransferMechanism")
def test_recurrent_mech_inputs_list_of_floats(self, benchmark, mech_mode):
R = RecurrentTransferMechanism(
name='R',
size=4
)
EX = pytest.helpers.get_mech_execution(R, mech_mode)
val = benchmark(EX, [10.0, 10.0, 10.0, 10.0])
np.testing.assert_allclose(val, [[10.0, 10.0, 10.0, 10.0]])
@pytest.mark.mechanism
@pytest.mark.recurrent_transfer_mechanism
@pytest.mark.benchmark(group="RecurrentTransferMechanism")
def test_recurrent_mech_integrator(self, benchmark, mech_mode):
R = RecurrentTransferMechanism(size=2,
function=Logistic(),
hetero=-2.0,
integrator_mode=True,
integration_rate=0.01,
output_ports = [RESULT])
EX = pytest.helpers.get_mech_execution(R, mech_mode)
val1 = EX([[1.0, 2.0]])
val2 = EX([[1.0, 2.0]])
# execute 10 times
for i in range(10):
val10 = EX([[1.0, 2.0]])
assert np.allclose(val1, [[0.50249998, 0.50499983]])
assert np.allclose(val2, [[0.50497484, 0.50994869]])
assert np.allclose(val10, [[0.52837327, 0.55656439]])
if benchmark.enabled:
benchmark(EX, [[1.0, 2.0]])
@pytest.mark.mechanism
@pytest.mark.recurrent_transfer_mechanism
@pytest.mark.benchmark(group="RecurrentTransferMechanism")
def test_recurrent_mech_lci(self, benchmark, mech_mode):
LCI = pnl.LeakyCompetingIntegrator(rate=0.4)
R = RecurrentTransferMechanism(size=2,
hetero=-2.0,
integrator_mode=True,
integrator_function=LCI,
output_ports = [RESULT])
EX = pytest.helpers.get_mech_execution(R, mech_mode)
val1 = EX([[1.0, 2.0]])
val2 = EX([[1.0, 2.0]])
# execute 10 times
for i in range(10):
val10 = EX([[1.0, 2.0]])
assert np.allclose(val1, [[0.1, 0.2]])
assert np.allclose(val2, [[0.196, 0.392]])
assert np.allclose(val10, [[0.96822561, 1.93645121]])
if benchmark.enabled:
benchmark(EX, [[1.0, 2.0]])
# def test_recurrent_mech_inputs_list_of_fns(self):
# R = RecurrentTransferMechanism(
# name='R',
# size=4,
# integrator_mode=True
# )
# val = R.execute([Linear().execute(), NormalDist().execute(), Exponential().execute(), ExponentialDist().execute()])
# expected = [[np.array([0.]), 0.4001572083672233, np.array([1.]), 0.7872011523172707]]
# assert len(val) == len(expected) == 1
# assert len(val[0]) == len(expected[0])
# for i in range(len(val[0])):
# np.testing.assert_allclose(val[0][i], expected[0][i])
@pytest.mark.mechanism
@pytest.mark.recurrent_transfer_mechanism
@pytest.mark.benchmark(group="RecurrentTransferMechanism")
def test_recurrent_mech_no_inputs(self, benchmark, mech_mode):
R = RecurrentTransferMechanism(
name='R'
)
np.testing.assert_allclose(R.defaults.variable, [[0]])
EX = pytest.helpers.get_mech_execution(R, mech_mode)
val = EX([10])
np.testing.assert_allclose(val, [[10.]])
if benchmark.enabled:
benchmark(EX, [1])
def test_recurrent_mech_inputs_list_of_strings(self):
with pytest.raises(FunctionError) as error_text:
R = RecurrentTransferMechanism(
name='R',
default_variable=[0, 0, 0, 0],
integrator_mode=True
)
R.execute(["one", "two", "three", "four"])
assert "Unrecognized type" in str(error_text.value)
def test_recurrent_mech_var_list_of_strings(self):
with pytest.raises(ParameterError) as error_text:
R = RecurrentTransferMechanism(
name='R',
default_variable=['a', 'b', 'c', 'd'],
integrator_mode=True
)
assert "non-numeric entries" in str(error_text.value)
def test_recurrent_mech_inputs_mismatched_with_default_longer(self):
with pytest.raises(MechanismError) as error_text:
R = RecurrentTransferMechanism(
name='R',
size=4
)
R.execute([1, 2, 3, 4, 5])
assert "does not match required length" in str(error_text.value)
def test_recurrent_mech_inputs_mismatched_with_default_shorter(self):
with pytest.raises(MechanismError) as error_text:
R = RecurrentTransferMechanism(
name='R',
size=6
)
R.execute([1, 2, 3, 4, 5])
assert "does not match required length" in str(error_text.value)
class TestRecurrentTransferMechanismMatrix:
@pytest.mark.parametrize("matrix", MATRIX_KEYWORD_VALUES)
def test_recurrent_mech_matrix_keyword_spec(self, matrix):
if matrix == RANDOM_CONNECTIVITY_MATRIX:
pytest.skip("Random test")
R = RecurrentTransferMechanism(
name='R',
size=4,
matrix=matrix
)
val = R.execute([10, 10, 10, 10])
np.testing.assert_allclose(val, [[10., 10., 10., 10.]])
np.testing.assert_allclose(R.recurrent_projection.matrix.base, get_matrix(matrix, R.size[0], R.size[0]))
@pytest.mark.parametrize("matrix", [np.matrix('1 2; 3 4'), np.array([[1, 2], [3, 4]]), [[1, 2], [3, 4]], '1 2; 3 4'])
def test_recurrent_mech_matrix_other_spec(self, matrix):
R = RecurrentTransferMechanism(
name='R',
size=2,
matrix=matrix
)
val = R.execute([10, 10])
# np.testing.assert_allclose(val, [[10., 10.]])
# assert isinstance(R.matrix.base, np.ndarray)
# np.testing.assert_allclose(R.matrix.base, [[1, 2], [3, 4]])
# np.testing.assert_allclose(R.recurrent_projection.matrix.base, [[1, 2], [3, 4]])
# assert isinstance(R.recurrent_projection.matrix.base, np.ndarray)
def test_recurrent_mech_matrix_auto_spec(self):
R = RecurrentTransferMechanism(
name='R',
size=3,
auto=2
)
assert isinstance(R.matrix.base, np.ndarray)
np.testing.assert_allclose(R.matrix.base, [[2, 1, 1], [1, 2, 1], [1, 1, 2]])
np.testing.assert_allclose(run_twice_in_composition(R, [1, 2, 3], [10, 11, 12]), [17, 19, 21])
def test_recurrent_mech_matrix_hetero_spec(self):
R = RecurrentTransferMechanism(
name='R',
size=3,
hetero=-1
)
# (7/28/17 CW) these numbers assume that execute() leaves its value in the outputPort of the mechanism: if
# the behavior of execute() changes, feel free to change these numbers
val = R.execute([-1, -2, -3])
np.testing.assert_allclose(val, [[-1, -2, -3]])
assert isinstance(R.matrix.base, np.ndarray)
np.testing.assert_allclose(R.matrix.base, [[0, -1, -1], [-1, 0, -1], [-1, -1, 0]])
# Execution 1:
# Recurrent input = [5, 4, 3] | New input = [1, 2, 3] | Total input = [6, 6, 6]
# Output 1 = [6, 6, 6]
# Execution 2:
# Recurrent input =[-12, -12, -12] | New input = [10, 11, 12] | Total input = [-2, -1, 0]
# Output 2 = [-2, -1, 0]
np.testing.assert_allclose(run_twice_in_composition(R, [1, 2, 3], [10, 11, 12]), [-2., -1., 0.])
def test_recurrent_mech_matrix_auto_hetero_spec_size_1(self):
R = RecurrentTransferMechanism(
name='R',
size=1,
auto=-2,
hetero=4.4
)
val = R.execute([10])
np.testing.assert_allclose(val, [[10.]])
assert isinstance(R.matrix.base, np.ndarray)
np.testing.assert_allclose(R.matrix.base, [[-2]])
def test_recurrent_mech_matrix_auto_hetero_spec_size_4(self):
R = RecurrentTransferMechanism(
name='R',
size=4,
auto=2.2,
hetero=-3
)
val = R.execute([10, 10, 10, 10])
np.testing.assert_allclose(val, [[10., 10., 10., 10.]])
np.testing.assert_allclose(R.matrix.base, [[2.2, -3, -3, -3], [-3, 2.2, -3, -3], [-3, -3, 2.2, -3], [-3, -3, -3, 2.2]])
assert isinstance(R.matrix.base, np.ndarray)
def test_recurrent_mech_matrix_auto_hetero_matrix_spec(self):
# when auto, hetero, and matrix are all specified, auto and hetero should take precedence
R = RecurrentTransferMechanism(
name='R',
size=4,
auto=2.2,
hetero=-3,
matrix=[[1, 2, 3, 4]] * 4
)
val = R.execute([10, 10, 10, 10])
np.testing.assert_allclose(val, [[10., 10., 10., 10.]])
np.testing.assert_allclose(R.matrix.base, [[2.2, -3, -3, -3], [-3, 2.2, -3, -3], [-3, -3, 2.2, -3], [-3, -3, -3, 2.2]])
assert isinstance(R.matrix.base, np.ndarray)
def test_recurrent_mech_auto_matrix_spec(self):
# auto should override the diagonal only
R = RecurrentTransferMechanism(
name='R',
size=4,
auto=2.2,
matrix=[[1, 2, 3, 4]] * 4
)
val = R.execute([10, 11, 12, 13])
np.testing.assert_allclose(val, [[10., 11., 12., 13.]])
np.testing.assert_allclose(R.matrix.base, [[2.2, 2, 3, 4], [1, 2.2, 3, 4], [1, 2, 2.2, 4], [1, 2, 3, 2.2]])
def test_recurrent_mech_auto_array_matrix_spec(self):
R = RecurrentTransferMechanism(
name='R',
size=4,
auto=[1.1, 2.2, 3.3, 4.4],
matrix=[[1, 2, 3, 4]] * 4
)
val = R.execute([10, 11, 12, 13])
np.testing.assert_allclose(val, [[10., 11., 12., 13.]])
np.testing.assert_allclose(R.matrix.base, [[1.1, 2, 3, 4], [1, 2.2, 3, 4], [1, 2, 3.3, 4], [1, 2, 3, 4.4]])
def test_recurrent_mech_hetero_float_matrix_spec(self):
# hetero should override off-diagonal only
R = RecurrentTransferMechanism(
name='R',
size=4,
hetero=-2.2,
matrix=[[1, 2, 3, 4]] * 4
)
val = R.execute([1, 2, 3, 4])
np.testing.assert_allclose(val, [[1., 2., 3., 4.]])
np.testing.assert_allclose(
R.matrix.base,
[[1, -2.2, -2.2, -2.2], [-2.2, 2, -2.2, -2.2], [-2.2, -2.2, 3, -2.2], [-2.2, -2.2, -2.2, 4]]
)
def test_recurrent_mech_hetero_matrix_matrix_spec(self):
R = RecurrentTransferMechanism(
name='R',
size=4,
hetero=np.array([[-4, -3, -2, -1]] * 4),
matrix=[[1, 2, 3, 4]] * 4
)
val = R.execute([1, 2, 3, 4])
np.testing.assert_allclose(val, [[1., 2., 3., 4.]])
np.testing.assert_allclose(
R.matrix.base,
[[1, -3, -2, -1], [-4, 2, -2, -1], [-4, -3, 3, -1], [-4, -3, -2, 4]]
)
def test_recurrent_mech_auto_hetero_matrix_spec_v1(self):
# auto and hetero should override matrix
R = RecurrentTransferMechanism(
name='R',
size=4,
auto=[1, 3, 5, 7],
hetero=np.array([[-4, -3, -2, -1]] * 4),
matrix=[[1, 2, 3, 4]] * 4
)
val = R.execute([1, 2, 3, 4])
np.testing.assert_allclose(val, [[1., 2., 3., 4.]])
np.testing.assert_allclose(
R.matrix.base,
[[1, -3, -2, -1], [-4, 3, -2, -1], [-4, -3, 5, -1], [-4, -3, -2, 7]]
)
def test_recurrent_mech_auto_hetero_matrix_spec_v2(self):
R = RecurrentTransferMechanism(
name='R',
size=4,
auto=[3],
hetero=np.array([[-4, -3, -2, -1]] * 4),
matrix=[[1, 2, 3, 4]] * 4
)
val = R.execute([1, 2, 3, 4])
<filename>tests/transformers/test_model_wrapper.py
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 14 11:46:59 2018
@author: <NAME>
"""
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.base import TransformerMixin, BaseEstimator
from aikit.transformers.model_wrapper import ModelWrapper, ColumnsSelector
from aikit.transformers.model_wrapper import (
_concat,
DebugPassThrough,
try_to_find_features_names
)
from aikit.enums import DataTypes
import pytest
def test_ColumnsSelector():
dfX = pd.DataFrame(
{
"cat1": ["A", "B", "A", "D"],
"cat2": ["toto", "tata", "truc", "toto"],
"num1": [0, 1, 2, 3],
"num2": [1.1, 1.5, -2, -3.5],
"num3": [-1, 1, 25, 4],
"text1": ["aa bb", "bb bb cc", "dd aa cc", "ee"],
"text2": ["a z", "b e", "d t", "a b c"],
}
)
dfX2 = pd.DataFrame(
{
"cat1": ["D", "B"],
"cat2": ["toto", "newcat"],
"num1": [5, 6],
"num2": [0.1, -5.2],
"num3": [2, -1],
"text1": ["dd ee", "aa"],
"text2": ["t a c", "z b"],
}
)
selector = ColumnsSelector(columns_to_use=["text1", "text2"])
r1 = dfX.loc[:, ["text1", "text2"]]
r2 = dfX2.loc[:, ["text1", "text2"]]
assert (selector.fit_transform(dfX) == r1).all().all()
assert (selector.transform(dfX2) == r2).all().all()
assert selector.get_feature_names() == ["text1", "text2"]
with pytest.raises(ValueError):
selector.transform(dfX2.loc[:, ["text2", "text1"]])  # Error because the number of columns is not correct
selector = ColumnsSelector(columns_to_use=["text1", "text2", "text3"])
with pytest.raises(ValueError):
selector.fit(dfX)
selector = ColumnsSelector(columns_to_use=["text1", "text2"])
selector.fit(dfX)
dfX3 = dfX2.copy()
del dfX3["text1"]
with pytest.raises(ValueError):
selector.transform(dfX3)
dfX3 = dfX2.copy()
dfX3.columns = ["cat1", "cat2", "num1", "num2", "num3", "textAA", "text2"]
with pytest.raises(ValueError):
selector.transform(dfX3)
selector = ColumnsSelector(columns_to_use=["^text"], regex_match=True)
r1 = dfX.loc[:, ["text1", "text2"]]
r2 = dfX2.loc[:, ["text1", "text2"]]
dfX3 = dfX.loc[:, ["text2", "cat1", "cat2", "num1", "num2", "num3", "text1"]].copy()
assert (selector.fit_transform(dfX) == r1).all().all()
assert (selector.transform(dfX2) == r2).all().all()
assert (selector.transform(dfX3) == r1).all().all()
assert selector.get_feature_names() == ["text1", "text2"]
selector = ColumnsSelector(columns_to_use=["^text"], regex_match=False)
r1 = dfX.loc[:, ["text1", "text2"]]
r2 = dfX2.loc[:, ["text1", "text2"]]
with pytest.raises(ValueError):
assert selector.fit_transform(dfX).shape[1] == 0
selector2 = ColumnsSelector(columns_to_use=[5, 6])
assert (selector2.fit_transform(dfX) == r1).all().all()
assert (selector2.transform(dfX2) == r2).all().all()
selector3 = ColumnsSelector(columns_to_use=[10, 5])
with pytest.raises(ValueError):
selector3.fit(dfX)
selector3 = ColumnsSelector(columns_to_use=[5, 6])
selector3.fit(dfX)
dfX2 = dfX.copy()
del dfX2["text1"]
with pytest.raises(ValueError):
selector3.transform(dfX2)
selector_none = ColumnsSelector(columns_to_use=None)
assert (selector_none.fit_transform(dfX) == dfX).all().all()
antiselector = ColumnsSelector(columns_to_drop=["cat1", "cat2"])
assert (antiselector.fit_transform(dfX) == dfX.loc[:, ["num1", "num2", "num3", "text1", "text2"]]).all().all()
assert antiselector.get_feature_names() == ["num1", "num2", "num3", "text1", "text2"]
antiselector = ColumnsSelector(columns_to_drop=["^cat"], regex_match=True)
assert (antiselector.fit_transform(dfX) == dfX.loc[:, ["num1", "num2", "num3", "text1", "text2"]]).all().all()
assert antiselector.get_feature_names() == ["num1", "num2", "num3", "text1", "text2"]
cols = ["cat1", "cat2", "num1", "num2", "num3", "text1", "text2"]
antiselector2 = ColumnsSelector(columns_to_drop=cols)
assert antiselector2.fit_transform(dfX).shape == (4, 0) # No column
cols = [0, 1, 2, 3, 4, 5, 6]
antiselector3 = ColumnsSelector(columns_to_drop=cols)
assert antiselector3.fit_transform(dfX.values).shape == (4, 0) # No column
selector3 = ColumnsSelector(columns_to_use="num1")
n1 = dfX.loc[:, ["num1"]]
n2 = dfX2.loc[:, ["num1"]]
dfX2 = dfX.copy()
r1 = selector3.fit_transform(dfX)
r2 = selector3.transform(dfX2)
assert isinstance(r1, pd.DataFrame)
assert isinstance(r2, pd.DataFrame)
assert (r1 == n1).all().all()
assert (r2 == n2).all().all()
dfrest = dfX.loc[:, ["num1", "num2", "num3", "text1", "text2"]]
dfrest2 = dfX2.loc[:, ["num1", "num2", "num3", "text1", "text2"]]
selector4 = ColumnsSelector(columns_to_drop=["cat1", "cat2"])
assert (selector4.fit_transform(dfX) == dfrest).all().all()
assert (selector4.fit_transform(dfX2) == dfrest2).all().all()
selector5 = ColumnsSelector(columns_to_drop=[0, 1])
assert (selector5.fit_transform(dfX) == dfrest).all().all()
assert (selector5.fit_transform(dfX2) == dfrest2).all().all()
selector6 = ColumnsSelector(columns_to_use=[0, 1])
xx = np.random.randn(10, 5)
xx2 = np.random.randn(3, 5)
assert np.array_equal(selector6.fit_transform(xx), xx[:, 0:2])
assert np.array_equal(selector6.fit_transform(xx2), xx2[:, 0:2])
selector7 = ColumnsSelector(columns_to_use=["num1", "num2"])
with pytest.raises(ValueError):
selector7.fit(xx)
selector_and_antiselector = ColumnsSelector(columns_to_use=["num1", "num2", "num3"], columns_to_drop=["num3"])
assert (selector_and_antiselector.fit_transform(dfX) == dfX.loc[:, ["num1", "num2"]]).all().all()
assert selector_and_antiselector.get_feature_names() == ["num1", "num2"]
selector_and_antiselector2 = ColumnsSelector(columns_to_use=["num"], columns_to_drop=["3"], regex_match=True)
assert (selector_and_antiselector2.fit_transform(dfX) == dfX.loc[:, ["num1", "num2"]]).all().all()
assert selector_and_antiselector2.get_feature_names() == ["num1", "num2"]
X = np.random.randn(20, 10)
input_features = [("COL_%d" % i) for i in range(10)]
selector = ColumnsSelector(columns_to_use=[0, 1, 5, 9])
Xsubset = selector.fit_transform(X)
assert (Xsubset == X[:, [0, 1, 5, 9]]).all()
assert selector.get_feature_names() == [0, 1, 5, 9]
assert selector.get_feature_names(input_features=input_features) == ["COL_0", "COL_1", "COL_5", "COL_9"]
def test__concat():
assert _concat("text1", "BAG", "toto", sep="__") == "text1__BAG__toto"
assert _concat("text", None, "") == "text"
assert _concat("text", None, "word1") == "text__word1"
def test_try_to_find_features_names():
list_of_words = ["aa bb", "bb bb cc", "dd aa cc", "ee"]
vec = CountVectorizer()
vec.fit_transform(list_of_words)
assert try_to_find_features_names(vec) == ["aa", "bb", "cc", "dd", "ee"]
pipe = Pipeline([("nothing", DebugPassThrough()), ("vec", CountVectorizer())])
pipe.fit_transform(list_of_words)
assert try_to_find_features_names(pipe) == ["aa", "bb", "cc", "dd", "ee"]
union = FeatureUnion(
transformer_list=[("bagword", CountVectorizer()), ("bagchar", CountVectorizer(analyzer="char"))]
)
union.fit_transform(list_of_words)
assert try_to_find_features_names(union) == [
"bagword__aa",
"bagword__bb",
"bagword__cc",
"bagword__dd",
"bagword__ee",
"bagchar__ ",
"bagchar__a",
"bagchar__b",
"bagchar__c",
"bagchar__d",
"bagchar__e",
]
pipe1 = Pipeline([("nothing", DebugPassThrough()), ("vec", CountVectorizer())])
pipe2 = Pipeline([("nothing", DebugPassThrough()), ("vec", CountVectorizer(analyzer="char"))])
union = FeatureUnion(transformer_list=[("bagword", pipe1), ("bagchar", pipe2)])
union.fit_transform(list_of_words)
assert try_to_find_features_names(union) == [
"bagword__aa",
"bagword__bb",
"bagword__cc",
"bagword__dd",
"bagword__ee",
"bagchar__ ",
"bagchar__a",
"bagchar__b",
"bagchar__c",
"bagchar__d",
"bagchar__e",
]
class DummyModelAcceptInputFeature(object):
def get_feature_names(self, input_features=None):
if input_features is None:
return [0, 1, 2, 3]
else:
return input_features
class DummyModelDontInputFeature(object):
def get_feature_names(self):
return [0, 1, 2, 3]
class DummyModelDoesntHaveGetFeatures(object):
pass
m = DummyModelAcceptInputFeature()
assert try_to_find_features_names(m) == [0, 1, 2, 3]
assert try_to_find_features_names(m, input_features=["a", "b", "c", "d"]) == ["a", "b", "c", "d"]
m = DummyModelDontInputFeature()
assert try_to_find_features_names(m) == [0, 1, 2, 3]
assert try_to_find_features_names(m, input_features=["a", "b", "c", "d"]) == [0, 1, 2, 3]
m = DummyModelDoesntHaveGetFeatures()
assert try_to_find_features_names(m) is None
assert try_to_find_features_names(m, input_features=["a", "b", "c", "d"]) is None
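# Editor's note: the assertions above imply a simple fallback contract for
# try_to_find_features_names. The helper below merely restates that implied contract for
# readability; it is a sketch, not aikit's actual implementation.
def _sketch_feature_name_resolution(model, input_features=None):
    if not hasattr(model, "get_feature_names"):
        return None
    try:
        return model.get_feature_names(input_features=input_features)
    except TypeError:
        # get_feature_names() does not accept an input_features argument
        return model.get_feature_names()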
# In[]
# from sklearn.preprocessing import PolynomialFeatures
#
# poly = PolynomialFeatures()
#
# xx = np.random.randn(100,5)
# cols = ["COL_%d" % i for i in range(xx.shape[1])]
# df = pd.DataFrame(xx, columns = cols)
#
# xxres = poly.fit_transform(xx)
#
# poly.get_feature_names()
# poly.get_feature_names(cols)
#
# class WrappedPoly(ModelWrapper):
#
# def __init__(self, degree = 2, columns_to_use = None):
# self.degree = degree
#
# super(WrappedPoly,self).__init__(
# columns_to_use = columns_to_use,
# regex_match = False,
# work_on_one_column_only = False,
# all_columns_at_once = True,
# accepted_input_types = None,
# column_prefix = None,
# desired_output_type = DataTypes.DataFrame,
# must_transform_to_get_features_name = True,
# dont_change_columns = False,
# keep_other_columns = "drop"
# )
#
# def _get_model(self, X , y = None):
# return PolynomialFeatures(degree = self.degree)
#
# poly = WrappedPoly()
# poly.fit_transform(xx)
# poly.get_feature_names()
# poly.get_feature_names(cols)
#
#
# poly.fit_transform(df)
# poly.get_feature_names()
# poly.get_feature_names(cols)
# cols2 = ["A_%d" % i for i in range(xx.shape[1])]
# poly.get_feature_names(cols2)
#
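# Editor's note: a runnable variant of the commented sketch above, assuming the ModelWrapper
# constructor arguments shown there (and used by the Dummy wrappers below) are accurate.
# PolynomialFeatures is imported here only for this sketch.
from sklearn.preprocessing import PolynomialFeatures

class WrappedPoly(ModelWrapper):
    """Wrap sklearn's PolynomialFeatures to get ModelWrapper's column handling and feature names."""

    def __init__(self, degree=2, columns_to_use=None):
        self.degree = degree
        self.columns_to_use = columns_to_use
        super(WrappedPoly, self).__init__(
            columns_to_use=columns_to_use,
            regex_match=False,
            work_on_one_column_only=False,
            all_columns_at_once=True,
            accepted_input_types=None,
            column_prefix=None,
            desired_output_type=DataTypes.DataFrame,
            must_transform_to_get_features_name=True,
            dont_change_columns=False,
            keep_other_columns="drop",
        )

    def _get_model(self, X, y=None):
        return PolynomialFeatures(degree=self.degree)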
class _DummyToWrap(BaseEstimator, TransformerMixin):
def __init__(self, n):
self.n = n
def fit(self, X, y=None):
return self
def transform(self, X):
return np.random.randn(X.shape[0], self.n)
class _DummyToWrapWithFeaturesNames(_DummyToWrap):
def get_feature_names(self):
return ["r%d" % i for i in range(self.n)]
class _DummyToWrapWithInputFeaturesNames(_DummyToWrap):
def get_feature_names(self, input_features=None):
if input_features is None:
return ["r%d" % i for i in range(self.n)]
else:
return ["c_%s_%d" % (str(input_features[i]), i) for i in range(self.n)]
# def _DummyToWrapWithFeaturesNa
class DummyWrapped(ModelWrapper):
def __init__(self, n, columns_to_use=None, column_prefix=None, keep_other_columns="drop"):
self.column_prefix = column_prefix
self.columns_to_use = columns_to_use
self.n = n
super(DummyWrapped, self).__init__(
columns_to_use=columns_to_use,
regex_match=False,
work_on_one_column_only=False,
all_columns_at_once=True,
accepted_input_types=None,
column_prefix=column_prefix,
desired_output_type=DataTypes.DataFrame,
must_transform_to_get_features_name=True,
dont_change_columns=False,
keep_other_columns=keep_other_columns,
)
def _get_model(self, X, y=None):
return _DummyToWrap(n=self.n)
class DummyWrappedWithFeaturesNames(ModelWrapper):
def __init__(self, n, columns_to_use=None, column_prefix=None, keep_other_columns="drop"):
self.columns_to_use = columns_to_use
self.n = n
self.column_prefix = column_prefix
super(DummyWrappedWithFeaturesNames, self).__init__(
columns_to_use=columns_to_use,
regex_match=False,
work_on_one_column_only=False,
all_columns_at_once=True,
accepted_input_types=None,
column_prefix=column_prefix,
desired_output_type=DataTypes.DataFrame,
must_transform_to_get_features_name=True,
dont_change_columns=False,
keep_other_columns=keep_other_columns,
)
def _get_model(self, X, y=None):
return _DummyToWrapWithFeaturesNames(n=self.n)
class DummyWrappedWithInputFeaturesNames(ModelWrapper):
def __init__(self, n, columns_to_use=None, column_prefix=None, keep_other_columns="drop"):
self.columns_to_use = columns_to_use
self.n = n
self.column_prefix = column_prefix
super(DummyWrappedWithInputFeaturesNames, self).__init__(
columns_to_use=columns_to_use,
regex_match=False,
work_on_one_column_only=False,
all_columns_at_once=True,
accepted_input_types=None,
column_prefix=column_prefix,
desired_output_type=DataTypes.DataFrame,
must_transform_to_get_features_name=True,
dont_change_columns=False,
keep_other_columns=keep_other_columns,
)
def _get_model(self, X, y=None):
return _DummyToWrapWithInputFeaturesNames(n=self.n)
def test_dummy_wrapper_features():
xx = np.random.randn(10, 5)
input_features = ["COL_%d" % i for i in range(xx.shape[1])]
df = pd.DataFrame(xx, columns=input_features)
for column_prefix in (None, "RAND"):
for i, klass in enumerate((DummyWrapped, DummyWrappedWithFeaturesNames)):
if i == 0:
if column_prefix is None:
expected = [0, 1]
else:
expected = ["RAND__0", "RAND__1"]
else:
if column_prefix is None:
expected = ["r0", "r1"]
else:
expected = ["RAND__r0", "RAND__r1"]
## On array ##
dummy = klass(n=2, column_prefix=column_prefix)
xxres = dummy.fit_transform(xx)
assert dummy.get_feature_names() == expected
assert list(xxres.columns) == expected
dummy = klass(n=2, columns_to_use=[0, 1], keep_other_columns="delta", column_prefix=column_prefix)
xxres = dummy.fit_transform(xx)
assert dummy.get_feature_names() == [2, 3, 4] + expected
assert dummy.get_feature_names() == list(xxres.columns)
assert dummy.get_feature_names(input_features) == ["COL_2", "COL_3", "COL_4"] + expected
dummy = klass(n=2, columns_to_use=[0, 1], keep_other_columns="keep", column_prefix=column_prefix)
xxres = dummy.fit_transform(xx)
assert dummy.get_feature_names() == [0, 1, 2, 3, 4] + expected
assert dummy.get_feature_names() == list(xxres.columns)
assert dummy.get_feature_names(input_features) == ["COL_0", "COL_1", "COL_2", "COL_3", "COL_4"] + expected
## on df ##
dummy = klass(n=2, column_prefix=column_prefix)
xxres = dummy.fit_transform(df)
assert dummy.get_feature_names() == expected
assert list(xxres.columns) == expected
for columns_to_use in ([0, 1], ["COL_0", "COL_1"]):
dummy = klass(
n=2, columns_to_use=columns_to_use, keep_other_columns="delta", column_prefix=column_prefix
)
xxres = dummy.fit_transform(df)
assert dummy.get_feature_names() == ["COL_2", "COL_3", "COL_4"] + expected
assert dummy.get_feature_names() == list(xxres.columns)
assert dummy.get_feature_names(input_features) == ["COL_2", "COL_3", "COL_4"] + expected
dummy = klass(
n=2, columns_to_use=columns_to_use, keep_other_columns="keep", column_prefix=column_prefix
)
xxres = dummy.fit_transform(df)
assert dummy.get_feature_names() == ["COL_0", "COL_1", "COL_2", "COL_3", "COL_4"] + expected
assert dummy.get_feature_names() == list(xxres.columns)
assert (
dummy.get_feature_names(input_features) == ["COL_0", "COL_1", "COL_2", "COL_3", "COL_4"] + expected
)
def test_dummy_wrapper_features_with_input_features():
xx = np.random.randn(10, 5)
# coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.help_files import helps
helps['batchai'] = """
type: group
short-summary: Manage Batch AI resources.
"""
helps['batchai workspace'] = """
type: group
short-summary: Commands to manage workspaces.
"""
helps['batchai workspace create'] = """
type: command
short-summary: Create a workspace.
examples:
- name: Create a workspace in East US region.
text:
az batchai workspace create -g MyResourceGroup -n MyWorkspace -l eastus
"""
helps['batchai workspace delete'] = """
type: command
short-summary: Delete a workspace.
examples:
- name: Delete a workspace.
text:
az batchai workspace delete -g MyResourceGroup -n MyWorkspace
"""
helps['batchai workspace list'] = """
type: command
short-summary: List workspaces.
examples:
- name: List all workspaces under the current subscription.
text:
az batchai workspace list -o table
- name: List workspaces in the given resource group.
text:
az batchai workspace list -g MyResourceGroup -o table
"""
helps['batchai workspace show'] = """
type: command
short-summary: Show information about a workspace.
examples:
- name: Show information about a workspace.
text:
az batchai workspace show -g MyResourceGroup -n MyWorkspace -o table
"""
helps['batchai cluster'] = """
type: group
short-summary: Commands to manage clusters.
"""
helps['batchai cluster create'] = """
type: command
short-summary: Create a cluster.
examples:
- name: Create a single node GPU cluster with default image and auto-storage account.
text: |
az batchai cluster create -g MyResourceGroup -w MyWorkspace -n MyCluster \\
-s Standard_NC6 -t 1 --use-auto-storage --generate-ssh-keys
- name:
Create a cluster with a setup command which installs unzip on every node; the command output will be
stored on the auto-storage account's Azure File Share.
text: |
az batchai cluster create -g MyResourceGroup -w MyWorkspace -n MyCluster \\
--use-auto-storage \\
-s Standard_NC6 -t 1 -k id_rsa.pub \\
--setup-task 'apt update; apt install unzip -y' \\
--setup-task-output '$AZ_BATCHAI_MOUNT_ROOT/autoafs'
- name: Create a cluster providing all parameters manually.
text: |
az batchai cluster create -g MyResourceGroup -w MyWorkspace -n MyCluster \\
-i UbuntuLTS -s Standard_NC6 --vm-priority lowpriority \\
--min 0 --target 1 --max 10 \\
--storage-account-name MyStorageAccount \\
--nfs-name MyNfsToMount --afs-name MyAzureFileShareToMount \\
--bfs-name MyBlobContainerNameToMount \\
-u AdminUserName -k id_rsa.pub -p <PASSWORD>
- name: Create a cluster using a configuration file.
text: >
az batchai cluster create -g MyResourceGroup -w MyWorkspace -n MyCluster -f cluster.json
"""
helps['batchai cluster resize'] = """
type: command
short-summary: Resize a cluster.
examples:
- name: Resize a cluster to zero size to stop paying for it.
text:
az batchai cluster resize -g MyResourceGroup -w MyWorkspace -n MyCluster -t 0
- name: Resize a cluster to have 10 nodes.
text:
az batchai cluster resize -g MyResourceGroup -w MyWorkspace -n MyCluster -t 10
"""
helps['batchai cluster auto-scale'] = """
type: command
short-summary: Set auto-scale parameters for a cluster.
examples:
- name: Make a cluster auto-scale between 0 and 10 nodes depending on the number of queued and running jobs.
text:
az batchai cluster auto-scale -g MyResourceGroup -w MyWorkspace -n MyCluster --min 0 --max 10
"""
helps['batchai cluster delete'] = """
type: command
short-summary: Delete a cluster.
examples:
- name: Delete a cluster and wait for deletion to be completed.
text:
az batchai cluster delete -g MyResourceGroup -w MyWorkspace -n MyCluster
- name: Send a delete command for a cluster and do not wait for deletion to be completed.
text:
az batchai cluster delete -g MyResourceGroup -w MyWorkspace -n MyCluster --no-wait
- name: Delete cluster without asking for confirmation (for non-interactive scenarios).
text:
az batchai cluster delete -g MyResourceGroup -w MyWorkspace -n MyCluster -y
"""
helps['batchai cluster list'] = """
type: command
short-summary: List clusters.
examples:
- name: List all clusters in a workspace.
text:
az batchai cluster list -g MyResourceGroup -w MyWorkspace -o table
"""
helps['batchai cluster show'] = """
type: command
short-summary: Show information about a cluster.
examples:
- name: Show full information about a cluster.
text:
az batchai cluster show -g MyResourceGroup -w MyWorkspace -n MyCluster
- name: Show cluster's summary.
text:
az batchai cluster show -g MyResourceGroup -w MyWorkspace -n MyCluster -o table
"""
helps['batchai cluster node'] = """
type: group
short-summary: Commands to work with cluster nodes.
"""
helps['batchai cluster node list'] = """
type: command
short-summary: List remote login information for cluster's nodes.
long-summary:
List remote login information for cluster nodes. You can ssh to a particular node using the provided public IP
address and the port number.\n
E.g. ssh <admin user name>@<public ip> -p <node's SSH port number>
examples:
- name: List remote login information for a cluster.
text:
az batchai cluster node list -g MyResourceGroup -w MyWorkspace -c MyCluster -o table
"""
helps['batchai cluster node exec'] = """
type: command
short-summary: Executes a command line on a cluster's node with optional ports forwarding.
examples:
- name: Report a snapshot of the current processes.
text: |
az batchai cluster node exec -g MyResourceGroup -w MyWorkspace -c MyCluster \\
-n tvm-xxx --exec "ps axu"
- name: Report a GPU information for a node.
text: |
az batchai cluster node exec -g MyResourceGroup -w MyWorkspace -c MyCluster \\
-n tvm-xxx --exec "nvidia-smi"
- name: Forward local 9000 to port 9001 on the node.
text: |
az batchai cluster node exec -g MyResourceGroup -w MyWorkspace -c MyCluster \\
-n tvm-xxx -L 9000:localhost:9001
"""
helps['batchai cluster file'] = """
type: group
short-summary: Commands to work with files generated by node setup task.
"""
helps['batchai cluster file list'] = """
type: command
short-summary: List files generated by the cluster's node setup task.
long-summary:
List files generated by the cluster's node setup task under $AZ_BATCHAI_STDOUTERR_DIR path. This functionality is
available only if the node setup task output directory is located on mounted Azure File Share or Azure Blob Container.
examples:
- name: List names and sizes of files and directories inside of $AZ_BATCHAI_STDOUTERR_DIR.
text: |
az batchai cluster file list -g MyResourceGroup -w MyWorkspace -c MyCluster -o table
- name: List names, sizes and download URLs for files and directories inside of $AZ_BATCHAI_STDOUTERR_DIR.
text: |
az batchai cluster file list -g MyResourceGroup -w MyWorkspace -c MyCluster
- name: List names, sizes and download URLs for files and directories inside of $AZ_BATCHAI_STDOUTERR_DIR/folder/subfolder.
text: |
az batchai cluster file list -g MyResourceGroup -w MyWorkspace -c MyCluster \\
-p folder/subfolder
- name: List names, sizes and download URLs for files and directories inside of $AZ_BATCHAI_STDOUTERR_DIR, making
the download URLs valid for one hour.
text: |
az batchai cluster file list -g MyResourceGroup -w MyWorkspace -c MyCluster \\
--expiry 60
"""
helps['batchai experiment'] = """
type: group
short-summary: Commands to manage experiments.
"""
helps['batchai experiment create'] = """
type: command
short-summary: Create an experiment.
examples:
- name: Create an experiment.
text:
az batchai experiment create -g MyResourceGroup -w MyWorkspace -n MyExperiment
"""
helps['batchai experiment delete'] = """
type: command
short-summary: Delete an experiment.
examples:
- name: Delete an experiment. All running jobs will be terminated.
text:
az batchai experiment delete -g MyResourceGroup -w MyWorkspace -n MyExperiment
- name: Delete an experiment without asking for confirmation (for non-interactive scenarios).
text:
az batchai experiment delete -g MyResourceGroup -w MyWorkspace -n MyExperiment -y
- name: Request an experiment deletion without waiting for job to be deleted.
text:
az batchai experiment delete -g MyResourceGroup -w MyWorkspace -n MyExperiment --no-wait
"""
helps['batchai experiment list'] = """
type: command
short-summary: List experiments.
examples:
- name: List experiments.
text:
az batchai experiment list -g MyResourceGroup -w MyWorkspace -o table
"""
helps['batchai experiment show'] = """
type: command
short-summary: Show information about an experiment.
examples:
- name: Show information about an experiment.
text:
az batchai experiment show -g MyResourceGroup -w MyWorkspace -n MyExperiment -o table
"""
helps['batchai job'] = """
type: group
short-summary: Commands to manage jobs.
"""
helps['batchai job create'] = """
type: command
short-summary: Create a job.
examples:
- name:
Create a job to run on a cluster in the same resource group.
text: |
az batchai job create -g MyResourceGroup -w MyWorkspace -e MyExperiment -n MyJob \\
-r MyCluster -f job.json
- name:
Create a job to run on a cluster in a different workspace.
text: |
az batchai job create
int
:param EndTime: End time (Unix timestamp in seconds); 0 means the current time
:type EndTime: int
:param Context: Search context, used as the query cursor
:type Context: str
:param Size: Maximum number of history entries returned per call; defaults to 10
:type Size: int
:param EventId: Event identifier. It can be used to query a specific event; if not specified, all events are queried.
:type EventId: str
"""
self.ProductId = None
self.DeviceName = None
self.Type = None
self.StartTime = None
self.EndTime = None
self.Context = None
self.Size = None
self.EventId = None
def _deserialize(self, params):
self.ProductId = params.get("ProductId")
self.DeviceName = params.get("DeviceName")
self.Type = params.get("Type")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.Context = params.get("Context")
self.Size = params.get("Size")
self.EventId = params.get("EventId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fields are useless." % ",".join(memeber_set))
class DescribeDeviceEventHistoryResponse(AbstractModel):
"""DescribeDeviceEventHistory返回参数结构体
"""
def __init__(self):
r"""
:param Context: Search context, used as the query cursor
Note: this field may return null, indicating that no valid value could be obtained.
:type Context: str
:param Total: Number of search results
Note: this field may return null, indicating that no valid value could be obtained.
:type Total: int
:param Listover: Whether the search has reached the end of the results
Note: this field may return null, indicating that no valid value could be obtained.
:type Listover: bool
:param EventHistory: Result set of the search
Note: this field may return null, indicating that no valid value could be obtained.
:type EventHistory: list of EventHistoryItem
:param RequestId: The unique request ID, which is returned for every request. The RequestId is required when troubleshooting an issue.
:type RequestId: str
"""
self.Context = None
self.Total = None
self.Listover = None
self.EventHistory = None
self.RequestId = None
def _deserialize(self, params):
self.Context = params.get("Context")
self.Total = params.get("Total")
self.Listover = params.get("Listover")
if params.get("EventHistory") is not None:
self.EventHistory = []
for item in params.get("EventHistory"):
obj = EventHistoryItem()
obj._deserialize(item)
self.EventHistory.append(obj)
self.RequestId = params.get("RequestId")
class DescribeDeviceRequest(AbstractModel):
"""DescribeDevice请求参数结构体
"""
def __init__(self):
r"""
:param ProductId: Product ID
:type ProductId: str
:param DeviceName: Device name
:type DeviceName: str
"""
self.ProductId = None
self.DeviceName = None
def _deserialize(self, params):
self.ProductId = params.get("ProductId")
self.DeviceName = params.get("DeviceName")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fields are useless." % ",".join(memeber_set))
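# Editor's note: illustrative usage only, not part of the SDK. It shows the pattern shared by
# the request models in this module: _deserialize copies the known fields from a params dict
# and warns about any unknown keys. The product and device values below are made up.
def _example_describe_device_request():
    req = DescribeDeviceRequest()
    req._deserialize({"ProductId": "YOUR_PRODUCT_ID", "DeviceName": "dev001"})
    return req.ProductId, req.DeviceName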
class DescribeDeviceResponse(AbstractModel):
"""DescribeDevice返回参数结构体
"""
def __init__(self):
r"""
:param DeviceName: Device name
:type DeviceName: str
:param Online: Whether the device is online: 0 - offline, 1 - online, 2 - failed to get status, 3 - not activated
:type Online: int
:param LoginTime: Last online time of the device
:type LoginTime: int
:param DevicePsk: Device secret key
:type DevicePsk: str
:param EnableState: Device enablement state
:type EnableState: int
:param ExpireTime: Device expiration time
:type ExpireTime: int
:param LogLevel: SDK log level of the device: 0 - off, 1 - error, 2 - warning, 3 - info, 4 - debug
Note: this field may return null, indicating that no valid value could be obtained.
:type LogLevel: int
:param RequestId: The unique request ID, which is returned for every request. The RequestId is required when troubleshooting an issue.
:type RequestId: str
"""
self.DeviceName = None
self.Online = None
self.LoginTime = None
self.DevicePsk = None
self.EnableState = None
self.ExpireTime = None
self.LogLevel = None
self.RequestId = None
def _deserialize(self, params):
self.DeviceName = params.get("DeviceName")
self.Online = params.get("Online")
self.LoginTime = params.get("LoginTime")
self.DevicePsk = params.get("DevicePsk")
self.EnableState = params.get("EnableState")
self.ExpireTime = params.get("ExpireTime")
self.LogLevel = params.get("LogLevel")
self.RequestId = params.get("RequestId")
class DescribeDeviceStatusLogRequest(AbstractModel):
"""DescribeDeviceStatusLog请求参数结构体
"""
def __init__(self):
r"""
:param MinTime: Start time, in milliseconds
:type MinTime: int
:param MaxTime: End time, in milliseconds
:type MaxTime: int
:param ProductId: Product ID
:type ProductId: str
:param DeviceName: Device name
:type DeviceName: str
:param Limit: Number of entries to return
:type Limit: int
:param Context: Search context
:type Context: str
"""
self.MinTime = None
self.MaxTime = None
self.ProductId = None
self.DeviceName = None
self.Limit = None
self.Context = None
def _deserialize(self, params):
self.MinTime = params.get("MinTime")
self.MaxTime = params.get("MaxTime")
self.ProductId = params.get("ProductId")
self.DeviceName = params.get("DeviceName")
self.Limit = params.get("Limit")
self.Context = params.get("Context")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fields are useless." % ",".join(memeber_set))
class DescribeDeviceStatusLogResponse(AbstractModel):
"""DescribeDeviceStatusLog返回参数结构体
"""
def __init__(self):
r"""
:param Listover: Whether all data has been returned. true means all data has been returned; false means more data remains, and you can pass Context back in to continue the query.
Note: this field may return null, indicating that no valid value could be obtained.
:type Listover: bool
:param Context: Search context. When Listover is false, this context can be used to continue reading subsequent data.
Note: this field may return null, indicating that no valid value could be obtained.
:type Context: str
:param Results: Array of log data results, returning the corresponding time points and values.
Note: this field may return null, indicating that no valid value could be obtained.
:type Results: list of DeviceStatusLogItem
:param TotalCount: Total number of log data results
Note: this field may return null, indicating that no valid value could be obtained.
:type TotalCount: int
:param RequestId: The unique request ID, which is returned for every request. The RequestId is required when troubleshooting an issue.
:type RequestId: str
"""
self.Listover = None
self.Context = None
self.Results = None
self.TotalCount = None
self.RequestId = None
def _deserialize(self, params):
self.Listover = params.get("Listover")
self.Context = params.get("Context")
if params.get("Results") is not None:
self.Results = []
for item in params.get("Results"):
obj = DeviceStatusLogItem()
obj._deserialize(item)
self.Results.append(obj)
self.TotalCount = params.get("TotalCount")
self.RequestId = params.get("RequestId")
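# Editor's note: illustrative pagination sketch, not part of the SDK. As the docstrings above
# describe, callers keep passing the returned Context back in until Listover is True. `client`
# is assumed to be an already-configured IoT client exposing a DescribeDeviceStatusLog(request)
# method; that method name is an assumption based on the model names in this module.
def _example_page_device_status_logs(client, request):
    results = []
    while True:
        resp = client.DescribeDeviceStatusLog(request)
        results.extend(resp.Results or [])
        if resp.Listover or not resp.Context:
            break
        request.Context = resp.Context
    return results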
class DescribeDevicesRequest(AbstractModel):
"""DescribeDevices请求参数结构体
"""
def __init__(self):
r"""
:param ProductId: ID of the product whose device list is to be queried
:type ProductId: str
:param Offset: Offset, starting from 0
:type Offset: int
:param Limit: Page size, maximum 100
:type Limit: int
:param DeviceName: Device name to filter by
:type DeviceName: str
"""
self.ProductId = None
self.Offset = None
self.Limit = None
self.DeviceName = None
def _deserialize(self, params):
self.ProductId = params.get("ProductId")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
self.DeviceName = params.get("DeviceName")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fields are useless." % ",".join(memeber_set))
class DescribeDevicesResponse(AbstractModel):
"""DescribeDevices返回参数结构体
"""
def __init__(self):
r"""
:param TotalCount: Total number of devices
:type TotalCount: int
:param Devices: List of device details
:type Devices: list of DeviceInfo
:param RequestId: The unique request ID, which is returned for every request. The RequestId is required when troubleshooting an issue.
:type RequestId: str
"""
self.TotalCount = None
self.Devices = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("Devices") is not None:
self.Devices = []
for item in params.get("Devices"):
obj = DeviceInfo()
obj._deserialize(item)
self.Devices.append(obj)
self.RequestId = params.get("RequestId")
class DescribeFirmwareRequest(AbstractModel):
"""DescribeFirmware请求参数结构体
"""
def __init__(self):
r"""
:param ProductID: Product ID
:type ProductID: str
:param FirmwareVersion: Firmware version number
:type FirmwareVersion: str
"""
self.ProductID = None
self.FirmwareVersion = None
def _deserialize(self, params):
self.ProductID = params.get("ProductID")
self.FirmwareVersion = params.get("FirmwareVersion")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fields are useless." % ",".join(memeber_set))
class DescribeFirmwareResponse(AbstractModel):
"""DescribeFirmware返回参数结构体
"""
def __init__(self):
r"""
:param Version: Firmware version number
:type Version: str
:param ProductId: Product ID
:type ProductId: str
:param Name: Firmware name
Note: this field may return null, indicating that no valid value could be obtained.
:type Name: str
:param Description: Firmware description
Note: this field may return null, indicating that no valid value could be obtained.
:type Description: str
:param Md5sum: MD5 value of the firmware
Note: this field may return null, indicating that no valid value could be obtained.
:type Md5sum: str
:param Createtime: Timestamp, in seconds, at which the firmware was uploaded
Note: this field may return null, indicating that no valid value could be obtained.
:type Createtime: int
:param ProductName: Product name
:type ProductName: str
:param RequestId: The unique request ID, which is returned for every request. The RequestId is required when troubleshooting an issue.
:type RequestId: str
"""
self.Version = None
self.ProductId = None
self.Name = None
self.Description = None
self.Md5sum = None
self.Createtime = None
self.ProductName = None
self.RequestId = None
def _deserialize(self, params):
self.Version = params.get("Version")
self.ProductId = params.get("ProductId")
self.Name = params.get("Name")
self.Description = params.get("Description")
self.Md5sum = params.get("Md5sum")
self.Createtime = params.get("Createtime")
self.ProductName = params.get("ProductName")
self.RequestId = params.get("RequestId")
class DescribeFirmwareTaskDevicesRequest(AbstractModel):
"""DescribeFirmwareTaskDevices请求参数结构体
"""
def __init__(self):
r"""
:param ProductID: Product ID
:type ProductID: str
:param FirmwareVersion: Firmware version
:type FirmwareVersion: str
:param Filters: Filter conditions
:type Filters: list of SearchKeyword
:param Offset: Query offset; defaults to 0
:type Offset: int
:param Limit: Number of results to query; defaults to 50
:type Limit: int
"""
self.ProductID = None
self.FirmwareVersion = None
self.Filters = None
self.Offset = None
self.Limit = None
def _deserialize(self, params):
self.ProductID = params.get("ProductID")
self.FirmwareVersion = params.get("FirmwareVersion")
if params.get("Filters") is not None:
self.Filters = []
for item in params.get("Filters"):
obj = SearchKeyword()
obj._deserialize(item)
self.Filters.append(obj)
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fields are useless." % ",".join(memeber_set))
class DescribeFirmwareTaskDevicesResponse(AbstractModel):
"""DescribeFirmwareTaskDevices返回参数结构体
"""
def __init__(self):
r"""
:param Total: Total number of devices in the firmware update task
Note: this field may return null, indicating that no valid value could be obtained.
:type Total: int
:param Devices: List of devices in the firmware update task
:type Devices: list of DeviceUpdateStatus
:param RequestId: The unique request ID, which is returned for every request. The RequestId is required when troubleshooting an issue.
:type RequestId: str
"""
self.Total = None
self.Devices = None
self.RequestId = None
def _deserialize(self, params):
self.Total = params.get("Total")
if params.get("Devices") is not None:
self.Devices = []
for item in params.get("Devices"):
obj = DeviceUpdateStatus()
obj._deserialize(item)
self.Devices.append(obj)
self.RequestId = params.get("RequestId")
class DescribeFirmwareTaskDistributionRequest(AbstractModel):
"""DescribeFirmwareTaskDistribution请求参数结构体
"""
def __init__(self):
r"""
:param ProductID: Product ID
:type ProductID: str
:param FirmwareVersion: Firmware version number
:type FirmwareVersion: str
:param TaskId: Firmware update task ID
:type TaskId: int
"""
self.ProductID = None
self.FirmwareVersion = None
self.TaskId = None
def _deserialize(self, params):
self.ProductID = params.get("ProductID")
self.FirmwareVersion = params.get("FirmwareVersion")
self.TaskId = params.get("TaskId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fields are useless." % ",".join(memeber_set))
class DescribeFirmwareTaskDistributionResponse(AbstractModel):
"""DescribeFirmwareTaskDistribution返回参数结构体
"""
def __init__(self):
r"""
:param StatusInfos: Distribution of the firmware update task's statuses
:type StatusInfos: list of StatusStatistic
:param RequestId: The unique request ID, which is returned for every request. The RequestId is required when troubleshooting an issue.
:type RequestId: str
"""
self.StatusInfos = None
self.RequestId = None
def _deserialize(self, params):
if params.get("StatusInfos") is not None:
self.StatusInfos = []
for item in params.get("StatusInfos"):
obj = StatusStatistic()
obj._deserialize(item)
self.StatusInfos.append(obj)
self.RequestId = params.get("RequestId")
class DescribeFirmwareTaskRequest(AbstractModel):
"""DescribeFirmwareTask请求参数结构体
"""
def __init__(self):
r"""
:param ProductID: Product ID
:type ProductID: str
:param FirmwareVersion: Firmware version number
:type FirmwareVersion: str
:param TaskId: Firmware task ID
:type TaskId: int
"""
self.ProductID = None
self.FirmwareVersion = None
self.TaskId = None
def _deserialize(self, params):
self.ProductID = params.get("ProductID")
self.FirmwareVersion = params.get("FirmwareVersion")
self.TaskId = params.get("TaskId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fields are useless." % ",".join(memeber_set))
class DescribeFirmwareTaskResponse(AbstractModel):
"""DescribeFirmwareTask返回参数结构体
"""
def __init__(self):
r"""
:param TaskId: Firmware task ID
Note: this field may return null, indicating that no valid value could be obtained.
:type TaskId: int
:param Status: Firmware task status
Note: this field may return null, indicating that no valid value could be obtained.
:type Status: int
:param CreateTime: Creation time of the firmware task, in seconds
Note: this field may return null, indicating that no valid value could be obtained.