self.is_mcmc):
# #x.append(xblob[name][skip:stop][loc])
# y.append(yblob[loc])
# elif samples is not None:
# y.append(yblob[keep == 1])
# elif percentile:
# lo, hi = np.percentile(yblob[keep == 1], (q1, q2))
# y.append((lo, hi))
# else:
# dat = yblob[keep == 1]
# lo, hi = dat.min(), dat.max()
# y.append((lo, hi))
##
# Do the actual plotting
##
# Limit number of realizations
if samples is not None:
pass
#M = min(min(self.chain.shape[0], max_samples), len(y.T))
#
#if samples == 'all':
# pass
# # Unmasked elements only
# #mask1d = np.sum(self.mask, axis=1)
# #np.argwhere(mask1d == 0).squeeze()
#
# #for i, element in enumerate(y.T):
# # #if type(element) == float:
# # # continue
# # #elif len(element) != len(xarr):
# # # print('hello', i, element.shape, xarr.shape)
# # # continue
# #
# # #ok = np.isfinite(element)
# # ax.plot(xarr, element, **kwargs)
#else:
# # Choose randomly
# if type(samples) == int:
# elements = np.random.randint(0, M, size=samples)
# # Or take from list
# else:
# elements = samples
#
# for element in range(M):
# if element not in elements:
# continue
#
# ax.plot(xarr, y.T[element], **kwargs)
#elif use_best and self.is_mcmc:
# pass
# # Don't need to transpose in this case
# #ax.plot(xarr, y, **kwargs)
#else:
#
# #if not take_log:
# # # Where y is zero, set to small number?
# # zeros = np.argwhere(y == 0)
# # for element in zeros:
# # y[element[0],element[1]] = 1e-15
#
# if fill:
# ax.fill_between(xarr, y.T[0], y.T[1], **kwargs)
# else:
# ax.plot(xarr, y.T[0], **kwargs)
#
# if 'label' in kwargs:
# del kwargs['label']
#
# ax.plot(xarr, y.T[1], **kwargs)
ax.set_ylabel(self.labeler.label(name))
pl.draw()
if return_data:
return ax, xarr, yblob
else:
return ax
def CovarianceMatrix(self, pars, ivar=None):
"""
Compute covariance matrix for input parameters.
Parameters
----------
pars : list
List of parameter names to include in covariance estimate.
Returns
-------
Returns vector of mean, and the covariance matrix itself.
"""
data = self.ExtractData(pars, ivar=ivar)
blob_vec = []
for i in range(len(pars)):
blob_vec.append(data[pars[i]])
mu = np.ma.mean(blob_vec, axis=1)
cov = np.ma.cov(blob_vec)
return mu, cov
def PlotCovarianceMatrix(self, pars, ivar=None, fig=1, ax=None,\
cmap='RdBu_r'):
mu, cov = self.CovarianceMatrix(pars, ivar=ivar)
if ax is None:
fig = pl.figure(fig)
ax = fig.add_subplot(111)
cax = ax.imshow(cov, interpolation='none', cmap=cmap)
cb = pl.colorbar(cax)
return ax, cb
def AssembleParametersList(self, N=None, ids=None, include_bkw=False,
**update_kwargs):
"""
Return dictionaries of parameters corresponding to elements of the
chain. Really just a convenience thing -- converting 1-D arrays
(i.e., links of the chain) into dictionaries -- so that the parameters
can be passed into ares.simulations objects.
.. note :: Masked chain elements are excluded.
Parameters
----------
N : int
Maximum number of models to return, starting from the beginning of
the chain. If None, return all available.
include_bkw : bool
Include base_kwargs? If so, each element of the returned list can
be supplied to an ares.simulations instance to recreate that model
exactly.
ids : int or list
If supplied, only the dictionaries of parameters associated with
these links of the chain will be returned.
update_kwargs : dict
New kwargs that you want added to each set of parameters. Will
override pre-existing keys.
Returns
-------
List of dictionaries. Maximum length: `N`.
"""
ct = 0
all_kwargs = []
for i, element in enumerate(self.chain):
if sum(self.mask[i]):
continue
if ids is not None:
if type(ids) in [int, np.int64]:
if (i != ids):
continue
else:
if (i not in ids):
continue
elif N is not None:
if i >= N:
break
if include_bkw:
if ct == 0:
# Only print first time...could be thousands of iterations
print(("WARNING: Any un-pickleable kwargs will not " +\
"have been saved in {!s}.binfo.pkl!").format(\
self.prefix))
kwargs = self.base_kwargs.copy()
else:
kwargs = {}
for j, parameter in enumerate(self.parameters):
if type(self.chain) == np.ma.core.MaskedArray:
if self.is_log[j]:
kwargs[parameter] = 10**self.chain.data[i,j]
else:
kwargs[parameter] = self.chain.data[i,j]
else:
if self.is_log[j]:
kwargs[parameter] = 10**self.chain[i,j]
else:
kwargs[parameter] = self.chain[i,j]
kwargs.update(update_kwargs)
all_kwargs.append(kwargs.copy())
ct += 1
return all_kwargs
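# Usage sketch (hedged): the analysis-object name `anl` below is hypothetical,
# but the call pattern follows the docstring above -- with include_bkw=True
# each returned dictionary can be fed back into an ares.simulations object.
#
#     kwargs_list = anl.AssembleParametersList(N=5, include_bkw=True)
#     for kw in kwargs_list:
#         sim = ares.simulations.Global21cm(**kw)
#         sim.run()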
def CorrelationMatrix(self, pars, ivar=None, fig=1, ax=None):
""" Plot correlation matrix. """
mu, cov = self.CovarianceMatrix(pars, ivar=ivar)
corr = correlation_matrix(cov)
if ax is None:
fig = pl.figure(fig); ax = fig.add_subplot(111)
cax = ax.imshow(corr.T, interpolation='none', cmap='RdBu_r',
vmin=-1, vmax=1, origin='lower')
cb = pl.colorbar(cax)
return ax
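# Note: `correlation_matrix` is imported from elsewhere in the package. A
# minimal sketch of the standard normalization it is assumed to apply
# (corr_ij = cov_ij / sqrt(cov_ii * cov_jj)):
#
#     def correlation_matrix(cov):
#         sigma = np.sqrt(np.diag(cov))
#         return cov / np.outer(sigma, sigma)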
def get_blob(self, name, ivar=None):
"""
Extract an array of values for a given quantity.
.. note :: If ivar is not supplied, this is equivalent to just reading
all data from disk.
Parameters
----------
name : str
Name of quantity
ivar : list, tuple, array
Independent variables a given blob may depend on.
"""
i, j, nd, dims = self.blob_info(name)
if (i is None) and (j is None):
f = h5py.File('{!s}.hdf5'.format(self.prefix), 'r')
arr = np.array(f[('blobs')][name])
f.close()
return arr
blob = self.get_blob_from_disk(name)
if nd == 0:
return blob
elif nd == 1:
if ivar is None:
return blob
else:
# Kludgey...
biv = np.array(self.blob_ivars[i]).squeeze()
k = np.argmin(np.abs(biv - ivar))
if not np.allclose(biv[k], ivar):
print("WARNING: Looking for `{}` at ivar={}, closest found is {}.".format(name, ivar, biv[k]))
return blob[:,k]
elif nd == 2:
if ivar is None:
return blob
assert len(ivar) == 2, "Must supply 2-D coordinate for blob!"
k1 = np.argmin(np.abs(self.blob_ivars[i][0] - ivar[0]))
if not np.allclose(self.blob_ivars[i][0][k1], ivar[0]):
print("WARNING: Looking for `{}` at ivar={}, closest found is {}.".format(name,
ivar[0], self.blob_ivars[i][0][k1]))
if ivar[1] is None:
return blob[:,k1,:]
else:
k2 = np.argmin(np.abs(self.blob_ivars[i][1] - ivar[1]))
if self.blob_ivars[i][1][k2] != ivar[1]:
print("WARNING: Looking for `{}` at ivar={}, closest found is {}.".format(name,
ivar[1], self.blob_ivars[i][1][k2]))
return blob[:,k1,k2]
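# Usage sketch (hypothetical blob name and ivar): for a 1-D blob defined on,
# e.g., a redshift grid, the nearest grid point to the requested ivar is
# used, with a warning printed if the match is not exact.
#
#     vals = anl.get_blob('igm_Tk', ivar=8.)   # values closest to z = 8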
def max_likelihood_parameters(self, method='mode', min_or_max='max',
skip=0, stop=None, limit_to_dist=False, nu=0.68):
"""
Return parameter values at maximum likelihood point.
Parameters
----------
method : str
median or mode
"""
if method == 'median':
N = len(self.logL[skip:stop])
psorted = np.sort(self.logL[skip:stop])
logL_med = psorted[int(N / 2.)]
iML = np.argmin(np.abs(self.logL[skip:stop] - logL_med))
else:
if min_or_max == 'max':
iML = np.argmax(self.logL[skip:stop])
else:
iML = np.argmin(self.logL[skip:stop])
# Require that the best-fit model be in the bulk of the distribution?
if limit_to_dist:
iML_all = np.argsort(self.logL[skip:stop])[-1::-1]
ranges = {}
for par in self.parameters:
mu, (hi, lo) = self.get_1d_error(par, peak=method, skip=skip,
stop=stop, nu=nu)
ranges[par] = (mu - lo, mu + hi)
for h, _iML in enumerate(iML_all):
all_ok = True
for i, par in enumerate(self.parameters):
pval = self.chain[skip:stop][_iML,i]
if not ranges[par][0] <= pval <= ranges[par][1]:
all_ok = False
break
if all_ok:
break
if h != 0:
print("WARNING: Using {}th highest-likelihood point.".format(h))
self._max_like_pars = {}
for i, par in enumerate(self.parameters):
if self.is_log[i]:
self._max_like_pars[par] = 10**self.chain[skip:stop][iML,i]
else:
self._max_like_pars[par] = self.chain[skip:stop][iML,i]
return self._max_like_pars
def ExpensiveBlob(self, func, ivar, name, skip=0, clobber=False):
"""
Generate a new blob from parameters only, i.e., we need to re-run
some ARES calculation, which is wrapped by `func`.
No restart option yet. Should add that.
"""
kwargs = self.AssembleParametersList(include_bkw=True)
print("Generating new field={} for all {} samples...".format(name,
len(kwargs)))
nd = len(ivar)
shape = [len(kwargs)]
for k in range(nd):
shape.append(len(ivar[k][1]))
fn = '{0!s}.blob_{1}d.{2!s}.pkl'.format(self.prefix, nd, name)
if os.path.exists(fn) and (not clobber):
print(('{!s} exists! Set clobber=True or remove by ' +\
'hand.').format(fn))
return
pb = ProgressBar(len(kwargs), name=name)
pb.start()
all_results = -99999 * np.ones(shape)
for k, kw in enumerate(kwargs):
if k < skip:
continue
if k % size != rank:
continue
result = func(ivar, **kw)
all_results[k] = result
pb.update(k)
pb.finish()
if size > 1:
tmp = np.zeros_like(all_results)
nothing = MPI.COMM_WORLD.Allreduce(all_results, tmp)
all_results = tmp
if rank > 0:
return
write_pickle_file(all_results, fn, open_mode='w', ndumps=1,\
safe_mode=False, verbose=False)
def DeriveBlob(self, func=None, fields=None, expr=None, varmap=None,
save=True, ivar=None, name=None, clobber=False):
"""
Derive new blob from pre-existing ones.
Parameters
----------
Either supply the first two arguments:
func : function
A function of two variables: ``data`` (a dictionary containing the
data) and ``ivars``, which contains the independent variables for
each field in ``data``.
fields : list, tuple
List of quantities required by ``func``.
OR the second two:
expr : str
For example, 'x - y'
varmap : dict
Relates variables in `expr` to blobs. For example,
varmap = {'x': 'nu_D', 'y': 'nu_C'}
The remaining parameters are:
save : bool
Save to disk? If not, just returns array.
name : str
If save==True, this is a
Machete+7",
352408: "Magic Demon Great Machete+8",
352409: "Magic Demon Great Machete+9",
352410: "Mag. Demon Great Machete+10",
352500: "Ench. Demon Great Machete",
352501: "Ench. Demon Great Machete+1",
352502: "Ench. Demon Great Machete+2",
352503: "Ench. Demon Great Machete+3",
352504: "Ench. Demon Great Machete+4",
352505: "Ench. Demon Great Machete+5",
352600: "Divine Demon Great Machete",
352601: "Div. Demon Great Machete+1",
352602: "Div. Demon Great Machete+2",
352603: "Div. Demon Great Machete+3",
352604: "Div. Demon Great Machete+4",
352605: "Div. Demon Great Machete+5",
352606: "Div. Demon Great Machete+6",
352607: "Div. Demon Great Machete+7",
352608: "Div. Demon Great Machete+8",
352609: "Div. Demon Great Machete+9",
352610: "Div. Demon Great Machete+10",
352700: "Occult Demon Great Machete",
352701: "Occ. Demon Great Machete+1",
352702: "Occ. Demon Great Machete+2",
352703: "Occ. Demon Great Machete+3",
352704: "Occ. Demon Great Machete+4",
352705: "Occ. Demon Great Machete+5",
352800: "Fire Demon Great Machete",
352801: "Fire Demon Great Machete+1",
352802: "Fire Demon Great Machete+2",
352803: "Fire Demon Great Machete+3",
352804: "Fire Demon Great Machete+4",
352805: "Fire Demon Great Machete+5",
352806: "Fire Demon Great Machete+6",
352807: "Fire Demon Great Machete+7",
352808: "Fire Demon Great Machete+8",
352809: "Fire Demon Great Machete+9",
352810: "Fire Demon Great Machete+10",
352900: "Chaos Demon Great Machete",
352901: "Chaos Demon Great Machete+1",
352902: "Chaos Demon Great Machete+2",
352903: "Chaos Demon Great Machete+3",
352904: "Chaos Demon Great Machete+4",
352905: "Chaos Demon Great Machete+5",
354000: "Dragon Greatsword",
354001: "Dragon Greatsword+1",
354002: "Dragon Greatsword+2",
354003: "Dragon Greatsword+3",
354004: "Dragon Greatsword+4",
354005: "Dragon Greatsword+5",
355000: "Black Knight Greatsword",
355001: "Black Knight Greatsword+1",
355002: "Black Knight Greatsword+2",
355003: "Black Knight Greatsword+3",
355004: "Black Knight Greatsword+4",
355005: "Black Knight Greatsword+5",
400000: "Scimitar",
400001: "Scimitar+1",
400002: "Scimitar+2",
400003: "Scimitar+3",
400004: "Scimitar+4",
400005: "Scimitar+5",
400006: "Scimitar+6",
400007: "Scimitar+7",
400008: "Scimitar+8",
400009: "Scimitar+9",
400010: "Scimitar+10",
400011: "Scimitar+11",
400012: "Scimitar+12",
400013: "Scimitar+13",
400014: "Scimitar+14",
400015: "Scimitar+15",
400100: "Crystal Scimitar",
400101: "Crystal Scimitar+1",
400102: "Crystal Scimitar+2",
400103: "Crystal Scimitar+3",
400104: "Crystal Scimitar+4",
400105: "Crystal Scimitar+5",
400200: "Lightning Scimitar",
400201: "Lightning Scimitar+1",
400202: "Lightning Scimitar+2",
400203: "Lightning Scimitar+3",
400204: "Lightning Scimitar+4",
400205: "Lightning Scimitar+5",
400300: "Raw Scimitar",
400301: "Raw Scimitar+1",
400302: "Raw Scimitar+2",
400303: "Raw Scimitar+3",
400304: "Raw Scimitar+4",
400305: "Raw Scimitar+5",
400400: "Magic Scimitar",
400401: "Magic Scimitar+1",
400402: "Magic Scimitar+2",
400403: "Magic Scimitar+3",
400404: "Magic Scimitar+4",
400405: "Magic Scimitar+5",
400406: "Magic Scimitar+6",
400407: "Magic Scimitar+7",
400408: "Magic Scimitar+8",
400409: "Magic Scimitar+9",
400410: "Magic Scimitar+10",
400500: "Enchanted Scimitar",
400501: "Enchanted Scimitar+1",
400502: "Enchanted Scimitar+2",
400503: "Enchanted Scimitar+3",
400504: "Enchanted Scimitar+4",
400505: "Enchanted Scimitar+5",
400600: "Divine Scimitar",
400601: "Divine Scimitar+1",
400602: "Divine Scimitar+2",
400603: "Divine Scimitar+3",
400604: "Divine Scimitar+4",
400605: "Divine Scimitar+5",
400606: "Divine Scimitar+6",
400607: "Divine Scimitar+7",
400608: "Divine Scimitar+8",
400609: "Divine Scimitar+9",
400610: "Divine Scimitar+10",
400700: "Occult Scimitar",
400701: "Occult Scimitar+1",
400702: "Occult Scimitar+2",
400703: "Occult Scimitar+3",
400704: "Occult Scimitar+4",
400705: "Occult Scimitar+5",
400800: "Fire Scimitar",
400801: "Fire Scimitar+1",
400802: "Fire Scimitar+2",
400803: "Fire Scimitar+3",
400804: "Fire Scimitar+4",
400805: "Fire Scimitar+5",
400806: "Fire Scimitar+6",
400807: "Fire Scimitar+7",
400808: "Fire Scimitar+8",
400809: "Fire Scimitar+9",
400810: "Fire Scimitar+10",
400900: "Chaos Scimitar",
400901: "Chaos Scimitar+1",
400902: "Chaos Scimitar+2",
400903: "Chaos Scimitar+3",
400904: "Chaos Scimitar+4",
400905: "Chaos Scimitar+5",
401000: "Falchion",
401001: "Falchion+1",
401002: "Falchion+2",
401003: "Falchion+3",
401004: "Falchion+4",
401005: "Falchion+5",
401006: "Falchion+6",
401007: "Falchion+7",
401008: "Falchion+8",
401009: "Falchion+9",
401010: "Falchion+10",
401011: "Falchion+11",
401012: "Falchion+12",
401013: "Falchion+13",
401014: "Falchion+14",
401015: "Falchion+15",
401100: "Crystal Falchion",
401101: "Crystal Falchion+1",
401102: "Crystal Falchion+2",
401103: "Crystal Falchion+3",
401104: "Crystal Falchion+4",
401105: "Crystal Falchion+5",
401200: "Lightning Falchion",
401201: "Lightning Falchion+1",
401202: "Lightning Falchion+2",
401203: "Lightning Falchion+3",
401204: "Lightning Falchion+4",
401205: "Lightning Falchion+5",
401300: "Raw Falchion",
401301: "Raw Falchion+1",
401302: "Raw Falchion+2",
401303: "Raw Falchion+3",
401304: "Raw Falchion+4",
401305: "Raw Falchion+5",
401400: "Magic Falchion",
401401: "Magic Falchion+1",
401402: "Magic Falchion+2",
401403: "Magic Falchion+3",
401404: "Magic Falchion+4",
401405: "Magic Falchion+5",
401406: "Magic Falchion+6",
401407: "Magic Falchion+7",
401408: "Magic Falchion+8",
401409: "Magic Falchion+9",
401410: "Magic Falchion+10",
401500: "Enchanted Falchion",
401501: "Enchanted Falchion+1",
401502: "Enchanted Falchion+2",
401503: "Enchanted Falchion+3",
401504: "Enchanted Falchion+4",
401505: "Enchanted Falchion+5",
401600: "Divine Falchion",
401601: "Divine Falchion+1",
401602: "Divine Falchion+2",
401603: "Divine Falchion+3",
401604: "Divine Falchion+4",
401605: "Divine Falchion+5",
401606: "Divine Falchion+6",
401607: "Divine Falchion+7",
401608: "Divine Falchion+8",
401609: "Divine Falchion+9",
401610: "Divine Falchion+10",
401700: "Occult Falchion",
401701: "Occult Falchion+1",
401702: "Occult Falchion+2",
401703: "Occult Falchion+3",
401704: "Occult Falchion+4",
401705: "Occult Falchion+5",
401800: "Fire Falchion",
401801: "Fire Falchion+1",
401802: "Fire Falchion+2",
401803: "Fire Falchion+3",
401804: "Fire Falchion+4",
401805: "Fire Falchion+5",
401806: "Fire Falchion+6",
401807: "Fire Falchion+7",
401808: "Fire Falchion+8",
401809: "Fire Falchion+9",
401810: "Fire Falchion+10",
401900: "Chaos Falchion",
401901: "Chaos Falchion+1",
401902: "Chaos Falchion+2",
401903: "Chaos Falchion+3",
401904: "Chaos Falchion+4",
401905: "Chaos Falchion+5",
402000: "Shotel",
402001: "Shotel+1",
402002: "Shotel+2",
402003: "Shotel+3",
402004: "Shotel+4",
402005: "Shotel+5",
402006: "Shotel+6",
402007: "Shotel+7",
402008: "Shotel+8",
402009: "Shotel+9",
402010: "Shotel+10",
402011: "Shotel+11",
402012: "Shotel+12",
402013: "Shotel+13",
402014: "Shotel+14",
402015: "Shotel+15",
402100: "Crystal Shotel",
402101: "Crystal Shotel+1",
402102: "Crystal Shotel+2",
402103: "Crystal Shotel+3",
402104: "Crystal Shotel+4",
402105: "Crystal Shotel+5",
402200: "Lightning Shotel",
402201: "Lightning Shotel+1",
402202: "Lightning Shotel+2",
402203: "Lightning Shotel+3",
402204: "Lightning Shotel+4",
402205: "Lightning Shotel+5",
402300: "Raw Shotel",
402301: "Raw Shotel+1",
402302: "Raw Shotel+2",
402303: "Raw Shotel+3",
402304: "Raw Shotel+4",
402305: "Raw Shotel+5",
402400: "Magic Shotel",
402401: "Magic Shotel+1",
402402: "Magic Shotel+2",
402403: "Magic Shotel+3",
402404: "Magic Shotel+4",
402405: "Magic Shotel+5",
402406: "Magic Shotel+6",
402407: "Magic Shotel+7",
402408: "Magic Shotel+8",
402409: "Magic Shotel+9",
402410: "Magic Shotel+10",
402500: "Enchanted Shotel",
402501: "Enchanted Shotel+1",
402502: "Enchanted Shotel+2",
402503: "Enchanted Shotel+3",
402504: "Enchanted Shotel+4",
402505: "Enchanted Shotel+5",
402600: "Divine Shotel",
402601: "Divine Shotel+1",
402602: "Divine Shotel+2",
402603: "Divine Shotel+3",
402604: "Divine Shotel+4",
402605: "Divine Shotel+5",
402606: "Divine Shotel+6",
402607: "Divine Shotel+7",
402608: "Divine Shotel+8",
402609: "Divine Shotel+9",
402610: "Divine Shotel+10",
402700: "Occult Shotel",
402701: "Occult Shotel+1",
402702: "Occult Shotel+2",
402703: "Occult Shotel+3",
402704: "Occult Shotel+4",
402705: "Occult Shotel+5",
402800: "Fire Shotel",
402801: "Fire Shotel+1",
402802: "Fire Shotel+2",
402803: "Fire Shotel+3",
402804: "Fire Shotel+4",
402805: "Fire Shotel+5",
402806: "Fire Shotel+6",
402807: "Fire Shotel+7",
402808: "Fire Shotel+8",
402809: "Fire Shotel+9",
402810: "Fire Shotel+10",
402900: "Chaos Shotel",
402901: "Chaos Shotel+1",
402902: "Chaos Shotel+2",
402903: "Chaos Shotel+3",
402904: "Chaos
# src/basic/video_detect_smooth_dlib.py
import cv2
import dlib
import time
import math
import numpy as np
import matplotlib.pyplot as plt
import sys
# http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
predictorPath = r"../../dep/shape_predictor_68_face_landmarks.dat"
predictorIdx = [[1, 3, 31], [13, 15, 35]]
#videoPath = r"E:\Undergraduate\10_大四秋\软件工程 董渊\软件工程课大作业\数据\20181109_近距离_头部不固定\视频数据\PIC_0427.MP4"
videoPath = r"../../data/video/PIC_0401.MP4"
file = open(r'output_detect.txt', 'w')
startTime = 12 # Start the analysis from startTime
cv2.destroyAllWindows()
plt.close('all')
def rect_to_bb(rect):
""" Transform a rectangle into a bounding box
Args:
rect: an instance of dlib.rectangle
Returns:
[x, y, w, h]: coordinates of the upper-left corner
and the width and height of the box
"""
x = rect.left()
y = rect.top()
w = rect.right() - x
h = rect.bottom() - y
return [x, y, w, h]
def shape_to_np(shape, dtype="int"):
""" Transform the detection results into points
Args:
shape: an instance of dlib.full_object_detection
Returns:
coords: an array of point coordinates
columns - x; y
"""
coords = np.zeros((68, 2), dtype=dtype)
for i in range(0, 68):
coords[i] = (shape.part(i).x, shape.part(i).y)
return coords
def np_to_bb(coords, ratio=5, dtype="int"):
""" Chooose ROI based on points and ratio
Args:
coords: an array of point coordinates
columns - x; y
ratio: the ratio of the length of the bounding box in each direction
to the distance between ROI and the bounding box
dtype: optional variable, type of the coordinates
Returns:
coordinates of the upper-left and bottom-right corner
"""
x = [xi for (xi,yi) in coords]
y = [yi for (xi,yi) in coords]
minx, maxx = min(x), max(x)
miny, maxy = min(y), max(y)
p, q = ratio - 1, ratio
roi = [minx * p / q + maxx / q, miny * p / q + maxy / q,
maxx * p / q + minx / q, maxy * p / q + miny / q]
return [int(i) for i in roi]
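# Worked example (illustration only): with the default ratio=5 the ROI keeps
# the central (1 - 2/ratio) = 60% of the landmark bounding box in each
# direction, e.g.
#
#     np_to_bb([(0, 0), (100, 50)], ratio=5)   # -> [20, 10, 80, 40]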
def resize(image, width=1200):
""" Resize the image with width
Args:
image: an instance of numpy.ndarray, the image
width: the width of the resized image
Returns:
resized: the resized image
size: size of the resized image
"""
r = width * 1.0 / image.shape[1]
size = (width, int(image.shape[0] * r))
resized = cv2.resize(image, size, interpolation=cv2.INTER_AREA)
return resized, size
def coordTrans(imShape, oriSize, rect):
"""Transform the coordinates into the original image
Args:
imShape: shape of the detected image
oriSize: size of the original image
rect: an instance of dlib.rectangle, the face region
Returns:
the rect in the original image
"""
left = int(round(rect.left() / oriSize[0] * imShape[1]))
right = int(round(rect.right() / oriSize[0] * imShape[1]))
top = int(round(rect.top() / oriSize[1] * imShape[0]))
bottom = int(round(rect.bottom() / oriSize[1] * imShape[0]))
return dlib.rectangle(left, top, right, bottom)
class Detector:
""" Detect and calculate ppg signal
roiRatio: a positive number, the roi gets bigger as it increases
smoothRatio: a real number between 0 and 1,
the landmarks get stabler as it increases
"""
detectSize = 500
clipSize = 540
roiRatio = 5
rectSmoothRatio = 0.98
rectDistThres = 4
markSmoothRatio = 0.95
markDistThres1 = 0.02
markDistThres2 = 0.025
def __init__(self, detectorPath = None, predictorPath = None, predictorIdx = None):
""" Initialize the instance of Detector
detector: dlib.fhog_object_detector
predictor: dlib.shape_predictor
rect: dlib.rectangle, face region in the last frame
landmarks: numpy.ndarray, coordinates of face landmarks in the last frame
columns - x; y
Args:
detectorPath: path of the face detector
predictorPath: path of the shape predictor
"""
self.detector = dlib.get_frontal_face_detector()
self.predictor = dlib.shape_predictor(predictorPath)
self.idx = predictorIdx
self.rect = None
self.landmarks = None
def __call__(self, image):
""" Detect the face region and returns the ROI value
Face detection is the slowest part.
Args:
image: an instance of numpy.ndarray, the image
Return:
val: an array of ROI value in each color channel
"""
val = [0, 0, 0]
# Resize the image to limit the calculation
resized, detectionSize = resize(image, self.detectSize)
# Perform face detection on a grayscale image
gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
# No need for upsample, because its effect is the same as resize
rects = self.detector(gray, upsample_num_times = 0)
num = len(rects) # there should be one face
if num == 0:
print("No face in the frame!")
return val
if num >= 2:
print("More than one face!")
return val
rect = rects[0]
# If not the first image, perform face region smoothing
if (self.rect != None):
dist = self.distForRects(self.rect, rect)
print("%.3f"%(dist))
if (dist < self.rectDistThres):
smoothRatio = self.rectSmoothRatio * \
math.sqrt(1 - dist / self.rectDistThres)
rect = self.smoothRects(self.rect, rect, smoothRatio)
print("%.3f"%(smoothRatio))
# Perform landmark prediction on the face region
face = coordTrans(image.shape, detectionSize, rect)
shape = self.predictor(image, face)
landmarks = shape_to_np(shape)
# If not the first image, perform landmark smoothing
if (self.rect != None):
dist = self.distForMarks(self.rect, rect)
print("%.3f"%(dist))
if (dist < self.markDistThres2):
tmp = dist - self.markDistThres1
smoothRatio = self.markSmoothRatio + 0.5 * (np.sign(tmp) + 1) \
* (math.exp(-1e3 * tmp) - self.markSmoothRatio)
if dist > self.markDistThres1:
smoothRatio = math.exp(1e3 * (self.markDistThres1 - dist))
landmarks = self.smoothMarks(self.landmarks,
landmarks, smoothRatio)
print("%.3f"%(smoothRatio))
print("")
# ROI value
rois = [np_to_bb(landmarks[idx], self.roiRatio) for idx in self.idx]
vals = [np.mean(np.mean(image[roi[1]:roi[3], roi[0]:roi[2]], 0), 0) for roi in rois]
val = np.mean(vals, 0)
# Show detection results
if '-s' in sys.argv:
# Draw sample rectangles
for roi in rois:
cv2.rectangle(image, (roi[0], roi[1]),
(roi[2], roi[3]), (0, 0, 255), 2)
# Draw feature points
for (i, (x, y)) in enumerate(landmarks):
cv2.putText(image, "{}".format(i), (x, y),
cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 0, 0), 1)
face = dlib.rectangle(max(face.left(), 0), max(face.top(), 0),
min(face.right(), image.shape[1]),
min(face.bottom(), image.shape[0]))
image = resize(image[face.top():face.bottom(),
face.left():face.right()], self.detectSize)[0]
cv2.imshow("Face", image)
self.rect = rect
self.landmarks = landmarks
return val
def distForRects(self, rect1, rect2):
"""Calculate the distance between two rectangles for rectangle smoothing
Arg:
rect1, rect2: dlib.rectangle
Return:
distance between rectangles
"""
distx = (rect1.left() - rect2.left()) + (rect1.right() - rect2.right())
disty = (rect1.top() - rect2.top()) + (rect1.bottom() - rect2.bottom())
return pow(pow(distx / (rect1.right() - rect1.left()), 2) +
pow(disty / (rect1.bottom() - rect1.top()), 2), 1/2)
def smoothRects(self, rect1, rect2, smoothRatio):
left = int(round(smoothRatio * rect1.left() +
(1 - smoothRatio) * rect2.left()))
right = int(round(smoothRatio * rect1.right() +
(1 - smoothRatio) * rect2.right()))
top = int(round(smoothRatio * rect1.top() +
(1 - smoothRatio) * rect2.top()))
bottom = int(round(smoothRatio * rect1.bottom() +
(1 - smoothRatio) * rect2.bottom()))
return dlib.rectangle(left, top, right, bottom)
def distForMarks(self, rect1, rect2):
""" Calculate the distance between two rectangles for landmark smoothing
Arg:
rect1, rect2: dlib.rectangle
Return:
distance between rectangles
"""
distx = abs(rect1.left() - rect2.left()) \
+ abs(rect1.right() - rect2.right())
disty = abs(rect1.top() - rect2.top()) \
+ abs(rect1.bottom() - rect2.bottom())
return abs(distx / (rect1.right() - rect1.left())) \
+ abs(disty / (rect1.bottom() - rect1.top()))
def smoothMarks(self, landmarks1, landmarks2, smoothRatio):
landmarks = smoothRatio * landmarks1 \
+ (1 - smoothRatio) * landmarks2
landmarks = np.array([[round(pair[0]), round(pair[1])]
for pair in landmarks])
push_points = np.zeros((n_slices, n_columns, n_rows), dtype=complex)
push_points_1d = np.zeros(n_rows, dtype=complex)
# walk through the data column-by-column
for col in np.arange(n_columns):
# walk through the data spectrum-by-spectrum
for spec in np.arange(n_slices):
# first, initialize: push_points_1d is NaN for sampled points (this
# basically inverts which entries are NaN relative to
# stagger_sampling_mask)
push_points_1d = np.where(np.isnan(stagger_sampling_mask[spec]),
0, np.nan).astype(complex)
# now, for this particular {column,spectrum},
# and FOR EACH POINT IN PUSH_POINTS, walk through the other
# spectra and find a suitable data point
for i in np.arange(n_rows):
if ~np.isnan(push_points_1d[i]):
# now, walk through the spectra again: for e.g. a missing
# point in spectrum=3, look at spectra #4,2,5,1,...,etc.,
# until a point is found
for spec2 in np.arange(2 * n_slices):
# add +1, -1, +2, -2, +3, -3... to the spectrum index
dspec = spec + (spec2 // 2 + 1) * (-1)**spec2
# make sure our new spectrum index isn't out of bounds
if 0 <= dspec < n_slices:
# if the point has been sampled in this spectrum,
# use it, and stop looking
if stagger_sampling_mask[dspec][i] == 1:
push_points_1d[i] = data[dspec, col, i]
break
# if no sampled point was found in any spectrum, report an error and stop
else:
print('ERROR: no suitable data point found!')
break
push_points[spec, col, :] = push_points_1d
return np.asarray(push_points)
def p1_proj(data_t, mask, offbool):
"""
Applies the P1 projection to a 1D array 'data_t', according to 'mask'.
Note: since P1 needs data to be in the phased frequency domain, this
function FFTs and phases the data, applies the mask, and then unphases and
IFFTs the data before returning it.
"""
# FFT input data
data_f = np.fft.fftshift(np.fft.fft(data_t))
# get the phase correction wave
phasecorr = get_phasecorr(len(data_f), offbool)
# phase correct input data and throw out the imaginary part
data_f_ph = (data_f * phasecorr).real
# apply p1 mask to data_f_ph
data_f_ph *= (mask != 0)
data_f_ph[mask == 1] *= (data_f_ph[mask == 1] > 0)
data_f_ph[mask == -1] *= (data_f_ph[mask == -1] < 0)
# put data back into time domain by unphasing and IFFTing
# the data becomes complex again, of course
p1_data_t = np.fft.ifft(np.fft.ifftshift(data_f_ph / phasecorr))
return p1_data_t
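# Mask convention assumed by p1_proj, as read from the code above: 0 zeroes
# the point, +1 keeps only a positive phased real part, -1 keeps only a
# negative phased real part. A sketch of a mask allowing positive peaks in
# the upper half of the spectrum only (n_points is hypothetical):
#
#     mask = np.zeros(n_points)
#     mask[n_points // 2:] = 1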
def p2_proj_star(data_t, measured_points, push_points, push_param):
"""
Applies the P2* projection to a 1D array 'data_t'.
Overwrites some of the points in 'data_t' with original measured values,
which are stored in 'measured_points'. 'measured_points' is NaN for points
that weren't sampled, and equal to the measured values for points that
were sampled.
"""
measured_points = np.array(measured_points)
push_points = np.array(push_points)
# copy data to output array, force to be complex
p2_data_t = np.asarray(data_t).astype(complex)
# measured points "m", and un-measured points "p"
m_points_list = ~np.isnan(measured_points)
p_points_list = ~np.isnan(push_points)
# reset measured values. For un-measured values, push towards the measured
# values from the other spectra.
p2_data_t[m_points_list] = measured_points[m_points_list]
p2_data_t[p_points_list] = (1.0 - push_param) * p2_data_t[p_points_list] \
+ push_param * push_points[p_points_list]
return p2_data_t
def difference_map_star(data_t, measured_points, mask, offbool, push_points,
push_param):
"""
Applies one step of the difference map algorithm to a 1D array 'data_t'.
Uses the functions p1_proj and p2_proj_star internally.
D = 1 + P1 (2 P2* - 1) - P2*
"""
# calculate (2 P2* - 1)[data_t] first, for clarity's sake
data_temp = 2.0 * p2_proj_star(data_t, measured_points, push_points,
push_param) - data_t
d_data_t = data_t + p1_proj(data_temp, mask, offbool) \
- p2_proj_star(data_t, measured_points, push_points, push_param)
return d_data_t
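# Minimal driver sketch (hedged): the package's real driver appears to be
# run_codiffmap_1d(), which is not shown in this excerpt. One common
# convention is to iterate the difference map a fixed number of times and
# read the result off with the P2* projection at the end.
#
#     def run_diffmap_sketch(data_t, measured_points, mask, offbool,
#                            push_points, push_param, n_iter=200):
#         for _ in range(n_iter):
#             data_t = difference_map_star(data_t, measured_points, mask,
#                                          offbool, push_points, push_param)
#         return p2_proj_star(data_t, measured_points, push_points, push_param)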
# ---- sampling
def sampling_mask(Ndense, Nsparse, offbool, lastpoint_bool):
"""
Makes a "row mask" array, for a given number of dense points and
a given number of sparse points. Ndense is number of dense points
in t >= 0! The output wave will be length (2*Ndense).
This version of sampling_mask ensures we have sampled the ``last'',
greatest-|t1| point.
if offbool == True, then the central point (at t = dw/2) is reflected
to the t < 0 side of the vector. If offbool == False, then the central
point is at t = 0 and isn't duplicated.
NOTE: the rounding convention in Igor is "away from zero." In numpy/python3
it's "towards even." I'm implementing the Igor version here, but I might
change it to the python version later. It will change the row choices in
some cases, though!
TODO: enforce 1 <= Nsparse <= Ndense properly
"""
# initialize positive side as nans, not zeroes
row_mask_pos = np.full(Ndense, np.nan)
# row_mask_pos = np.zeros(Ndense)
row_spacing = (Ndense - lastpoint_bool) / (Nsparse - lastpoint_bool)
# round away from zero for n.5
row_indices = \
(np.trunc((row_spacing * np.arange(Nsparse)) + 0.5)).astype(int)
# round towards even integers for n.5
# row_indices = np.round((row_spacing*np.arange(Nsparse))
# set half-wave to 1 at indicated places
row_mask_pos[row_indices] = 1
# make the length 2*Ndense output array, according to whether offbool is on
# Note: I wonder if it would make more sense, for the offbool = 0 case, to
# set the first point = 1 instead of nan. That way you'd have the
# same np.nansum(row_mask_out) in both cases. In that case, the first
# point is always a zero-pad anyway, so we can probably say we
# "measured" it?
if offbool:
row_mask_out = np.concatenate((row_mask_pos[::-1], row_mask_pos))
else:
row_mask_out = np.concatenate(([np.nan], row_mask_pos[:0:-1],
row_mask_pos))
# row_mask_out = np.concatenate(([1], row_mask_pos[:0:-1],
# row_mask_pos))
return row_mask_out
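# Usage sketch: for Ndense=128 dense points (t >= 0) and Nsparse=30 sampled
# points, the returned mask has length 2*Ndense = 256, with 1 at sampled t1
# positions and NaN elsewhere; with offbool=True each sampled point is
# mirrored onto the t < 0 side.
#
#     mask = sampling_mask(128, 30, offbool=True, lastpoint_bool=1)
#     n_sampled = int(np.nansum(mask))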
def stagger_sample(N3d, Ndense, Nsparse, offbool):
"""
This function takes the size of a 3d set of data (multiple 2d data sets),
and an Nt1 value, and makes a sampling pattern that is staggered across the
2d data sets in order to fill out every t1 value, where there are N1 bins
along t1.
Visualize with e.g. the following code:
sampling_mask_2=stagger_sample(10,128,30,1)
fig, ax = plt.subplots(figsize=(10,5))
for i in np.arange(10):
ax.bar(np.arange(len(sampling_mask_2[i])), sampling_mask_2[i],
1.0, 9-i, align='center')
ax.set_xlim(0,256)
ax.set_ylim(0,10)
"""
# get a quasi-even 1d sampling pattern
sampling_mask_1 = sampling_mask(Ndense, Nsparse, offbool, 1)
sampling_mask_2 = sampling_mask_1.copy()
# start off the sampling mask with this pattern
stagger_sampling_mask = []
# from here on down, Ndense refers to the full size of the sampling mask,
# so e.g. 128 -> 256 Ndense*=2
# now begin "stepping" the pattern further and further out, always keeping
# the first point.
for i in np.arange(N3d):
if offbool:
sampling_mask_2[Ndense - 1:Ndense + 1] = 1
else:
sampling_mask_2[Ndense] = 1
stagger_sampling_mask.append(list(sampling_mask_2))
sampling_mask_1 = sampling_mask_2.copy()
sampling_mask_2[:] = np.full(2 * Ndense, np.nan)
# in each case below, keep the middle one point (or middle two points)
# unchanged; should always be 1
if(offbool == 1):
# left side: shift all to left, but then make sure the 0th points
# wraps to the middle
sampling_mask_2[0:Ndense - 1] = sampling_mask_1[1:Ndense]
sampling_mask_2[Ndense - 2] = sampling_mask_1[0]
# right side: shift all to right, but then make sure the Nth point
# wraps to the middle
sampling_mask_2[Ndense + 1:2 * Ndense] = \
sampling_mask_1[Ndense:2 * Ndense - 1]
sampling_mask_2[Ndense + 1] = sampling_mask_1[2 * Ndense - 1]
else:
# if offbool=0, we have an empty "NaN" at the 0 index -- so IGNORE
# the 0 point! left side: shift all to left, but then make sure the
# 0th points wraps to the middle
sampling_mask_2[1:Ndense] = sampling_mask_1[2:Ndense + 1]
sampling_mask_2[Ndense - 1] = sampling_mask_1[1]
# right side: shift all to right, but then make sure the Nth point
# wraps to the middle
sampling_mask_2[Ndense + 1:2 * Ndense] = \
sampling_mask_1[Ndense:2 * Ndense - 1]
sampling_mask_2[Ndense + 1] = sampling_mask_1[2 * Ndense - 1]
return stagger_sampling_mask
def sparsify_staggered(data, Nsparse, N3D, offbool):
"""
"Undersamples" a dense data set using the staggered pattern.
**
*** THE INPUT DATA SET MUST INCLUDE THE DATA YOU WANT FROM *ALL* 2D SLICES
*ALONG THE 3RD DIM. ***
**
This function figures out the sampling
pattern for the given Nsparse and offbool (and len(data)), and sets all
points that aren't in that sampling set to 0.
IMPORTANT: Returns a tuple of arrays (sparse_data, measured_points).
These are the same except the former has 0s and the latter has
nans. They both are needed for run_codiffmap_1d()!
Notes: data
import os
import unittest
import json
import trebek
import entities
import fakeredis
import time
import datetime
# Reference this SO post on getting distances between strings:
# http://stackoverflow.com/a/1471603/98562
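# The tests below exercise fuzzy answer matching (see the SO link above). A
# minimal sketch of that style of check using only the standard library --
# the real trebek.Trebek.is_correct_answer may use a different distance
# metric and threshold:
#
#     import difflib
#     def roughly_equal(expected, given, threshold=0.6):
#         ratio = difflib.SequenceMatcher(None, expected.lower(),
#                                         given.lower()).ratio()
#         return ratio >= threshold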
def get_clue_json():
with open('test-json-output.json') as json_data:
clue = json.load(json_data)
return clue
def fake_fetch_random_clue():
return entities.Question(**get_clue_json())
def fake_get_year_month():
now = datetime.datetime.now()
year, month = divmod(now.month + 1, 12)
if month == 0:
month = 12
year = year -1
next_month = datetime.datetime(now.year + year, month, 1)
return "{0}-{1}".format(next_month.year, str(next_month.month).zfill(2))
_fetch_count = 0
_invalid_clue = None
def fetch_invalid_clue():
global _fetch_count, _invalid_clue
clue = get_clue_json()
if _fetch_count == 0:
clue = _invalid_clue
_fetch_count += 1
return entities.Question(**clue)
class TestTrebek(unittest.TestCase):
def setUp(self):
d = self.get_setup_json()
self.room_message = entities.HipChatRoomMessage(**d)
self.trebek_bot = self.create_bot_with_dictionary(d)
def tearDown(self):
self.trebek_bot.redis.flushall()
def get_setup_json(self):
with open('test-room-message.json') as data:
d = json.load(data)
return d
def create_bot_with_dictionary(self, room_dictionary):
bot = trebek.Trebek(entities.HipChatRoomMessage(**room_dictionary))
bot.redis = fakeredis.FakeStrictRedis()
bot.fetch_random_clue = fake_fetch_random_clue
return bot
def create_user_scores(self, bot = None):
if bot != None:
r = bot.redis
else:
r = self.trebek_bot.redis
bot = self.trebek_bot
hipchat = trebek.Trebek.hipchat_user_key
r.set(hipchat.format(1), 'Aaron')
r.set(hipchat.format(2), 'Allen')
r.set(hipchat.format(3), 'Cordarrell')
r.set(hipchat.format(4), 'Melvin')
r.set(hipchat.format(5), 'Mark')
r.set(hipchat.format(6), 'Richard')
r.set(hipchat.format(7), '<NAME>')
r.set(hipchat.format(8), 'Arian')
r.set(hipchat.format(9), 'Zach')
r.set(hipchat.format(10), '<NAME>')
r.set(hipchat.format(11), 'Alex')
r.set(hipchat.format(12), 'Michael')
r.set(hipchat.format(13), 'Reggie')
r.set(hipchat.format(14), 'Legacy Score')
user = bot.user_score_prefix + ":{0}"
r.set(user.format(1), 100)
r.set(user.format(2), 20)
r.set(user.format(3), 70)
r.set(user.format(4), 50)
r.set(user.format(5), 30)
r.set(user.format(6), 200)
r.set(user.format(7), 500)
r.set(user.format(8), 5430)
r.set(user.format(9), 412)
r.set(user.format(10), 123)
r.set(user.format(11), 225)
r.set(user.format(12), 94)
r.set(user.format(13), 87)
# Regression test old score keys will still appear in lifetime loserboard
r.set("user_score:{0}".format(14), 5)
bot.get_year_month = fake_get_year_month
user = bot.user_score_prefix + ":{0}"
r.set(user.format(1), 100)
r.set(user.format(2), 20)
r.set(user.format(3), 70)
r.set(user.format(4), 50)
r.set(user.format(5), 30)
r.set(user.format(6), 200)
r.set(user.format(7), 500)
r.set(user.format(8), 5430)
r.set(user.format(9), 412)
r.set(user.format(10), 123)
r.set(user.format(11), 225)
r.set(user.format(12), 94)
r.set(user.format(13), 87)
def test_when_value_not_included_default_to_200(self):
test_clue = self.trebek_bot.fetch_random_clue()
self.assertEqual(test_clue.value, 200)
def test_when_answer_includes_html_answer_is_sanitized(self):
# example answer: <i>Let\\'s Make a Deal</i>
self.trebek_bot.fetch_random_clue = fake_fetch_random_clue
test_clue = self.trebek_bot.fetch_random_clue()
self.assertEqual(test_clue.answer, "Let's Make a Deal")
def test_when_response_doesNot_begin_with_question_return_none(self):
response = "some test response"
assert self.trebek_bot.response_is_a_question(response) == None
def test_when_response_is_question_return_true(self):
response = "what is some test response"
assert self.trebek_bot.response_is_a_question(response)
def test_fuzzy_matching_of_answer(self):
test_clue = fake_fetch_random_clue()
self.assertFalse(self.trebek_bot.is_correct_answer("polygamist", "polyamourus"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is Let's Make a Deal"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is let's make a deal"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is Lets Make a Deal"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is Let's Make Deal"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is Let's Make a Dela"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is Let's Mae a Deal"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is Let's Make a Deal"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is elt's Make a Deal"))
self.assertTrue(self.trebek_bot.is_correct_answer("a ukulele", "a ukelele"))
self.assertTrue(self.trebek_bot.is_correct_answer("Scrabble", "Scrablle"))
self.assertTrue(self.trebek_bot.is_correct_answer("(Aristotle) Onassis", "Onassis"))
self.assertTrue(self.trebek_bot.is_correct_answer("(William) Blake", "blake"))
self.assertTrue(self.trebek_bot.is_correct_answer("wings (or feathers)", "feathers"))
self.assertTrue(self.trebek_bot.is_correct_answer("A.D. (Anno Domini)", "AD"))
self.assertTrue(self.trebek_bot.is_correct_answer("(Little Orphan) Annie", "annie"))
self.assertTrue(self.trebek_bot.is_correct_answer("a turtle (or a tortoise)", "turtle"))
self.assertTrue(self.trebek_bot.is_correct_answer("a turtle (or a tortoise)", "tortoise"))
# self.assertTrue(self.trebek_bot.is_correct_answer("ben affleck and matt damon", "<NAME> & <NAME>"))
def test_given_json_dictionary_hipchat_object_is_parsed(self):
with open ('test-room-message.json') as data:
d = json.load(data)
t = entities.HipChatRoomMessage(**d)
self.assertEqual(t.item.message.message, "jeopardy")
self.assertEqual(t.item.message.user_from.name, "<NAME>")
def test_message_object_trims_leading_slash_command(self):
p = {}
p['from'] = { 'id':None, 'links': None, 'mention_name':None, 'name': None, 'version': None}
p['message'] = '/trebek jeopardy me'
msg = entities.HipChatMessage(p)
self.assertEqual(msg.message, "jeopardy me")
def test_when_get_response_message_is_called_user_name_is_saved(self):
self.trebek_bot.get_response_message()
key = trebek.Trebek.hipchat_user_key.format('582174')
self.assertTrue(self.trebek_bot.redis.exists(key))
user_name = self.trebek_bot.redis.get(trebek.Trebek.hipchat_user_key.format('582174')).decode()
self.assertEqual("<NAME>", user_name)
def test_number_is_formatted_as_currency(self):
currency = self.trebek_bot.format_currency("100")
self.assertEqual("$100", currency)
currency = self.trebek_bot.format_currency("1000")
self.assertEqual("$1,000", currency)
currency = self.trebek_bot.format_currency("1000000000")
self.assertEqual("$1,000,000,000", currency)
currency = self.trebek_bot.format_currency("-100")
self.assertEqual("<span style='color: red;'>-$100</span>", currency)
currency = self.trebek_bot.format_currency("-1000000000")
self.assertEqual("<span style='color: red;'>-$1,000,000,000</span>", currency)
def test_user_requests_score_value_returned(self):
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek score"
bot = self.create_bot_with_dictionary(d)
key = "{0}:{1}".format(bot.user_score_prefix,
bot.room_message.item.message.user_from.id)
bot.redis.set(key, 500)
response = bot.get_response_message()
self.assertEqual("$500", response)
def test_user_leaderboard_value_returned(self):
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek leaderboard"
bot = self.create_bot_with_dictionary(d)
self.create_user_scores(bot)
response = bot.get_response_message()
year, month = [int(x) for x in bot.get_year_month().split('-')]
dt = datetime.datetime(year, month, 1)
expected = "<p>Leaderboard for {0} {1}:</p>".format(dt.strftime("%B"), dt.year)
expected += "<ol><li>Arian: $5,430</li>"
expected += "<li><NAME>: $500</li>"
expected += "<li>Zach: $412</li>"
expected += "<li>Alex: $225</li>"
expected += "<li>Richard: $200</li></ol>"
self.assertEqual(expected, response)
def test_user_loserboard_value_returned(self):
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek show me the loserboard"
bot = self.create_bot_with_dictionary(d)
self.create_user_scores(bot)
response = bot.get_response_message()
year, month = [int(x) for x in bot.get_year_month().split('-')]
dt = datetime.datetime(year, month, 1)
expected = "<p>Loserboard for {0} {1}:</p>".format(dt.strftime("%B"), dt.year)
expected += "<ol><li>Allen: $20</li>"
expected += "<li>Mark: $30</li>"
expected += "<li>Melvin: $50</li>"
expected += "<li>Cordarrell: $70</li>"
expected += "<li>Reggie: $87</li></ol>"
self.assertEqual(expected, response)
def test_jeopardy_round_can_start_from_nothing(self):
response = self.trebek_bot.get_response_message()
expected = "The category is <b>CLASSIC GAME SHOW TAGLINES</b> for $200: "
expected += "<b>\"CAVEAT EMPTOR. LET THE BUYER BEWARE\"</b> (Air Date: 18-Oct-2001)"
self.assertEqual(expected, response)
def test_user_cannot_answer_same_question_twice(self):
# Arrange
clue = self.trebek_bot.get_jeopardy_clue()
d = self.get_setup_json()
user_answer_key = trebek.Trebek.user_answer_key.format(
self.trebek_bot.room_id, clue.id, d['item']['message']['from']['id'])
self.trebek_bot.redis.set(user_answer_key, 'true')
self.trebek_bot.get_question()
d['item']['message']['message'] = '/trebek this is an answer'
bot = self.create_bot_with_dictionary(d)
bot.redis = self.trebek_bot.redis
# Act
response = bot.get_response_message()
# Assert
self.assertEqual("You have already answered <NAME>. Let someone else respond.", response)
def test_given_incorrect_answer_user_score_decreased(self):
# Arrange
d = self.get_setup_json()
d['item']['message']['message'] = '/trebek some test answer'
bot = self.create_bot_with_dictionary(d)
bot.redis = fakeredis.FakeStrictRedis()
bot.get_question()
response = bot.get_response_message()
user_score_key = "{0}:{1}".format(bot.user_score_prefix,
self.trebek_bot.room_message.item.message.user_from.id)
# Act
score = bot.redis.get(user_score_key)
bot.redis.flushdb()
# Assert
score_string = "<span style='color: red;'>-$200</span>"
self.assertEqual(score_string, bot.format_currency(score))
self.assertEqual("That is incorrect, <NAME>. Your score is now {0}".format(score_string), response)
def test_given_correct_answer_user_score_increased(self):
# Arrange
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek what is Let's Make a deal"
bot = self.create_bot_with_dictionary(d)
bot.redis = fakeredis.FakeStrictRedis()
bot.get_question()
response = bot.get_response_message()
user_score_key = "{0}:{1}".format(bot.user_score_prefix,
self.trebek_bot.room_message.item.message.user_from.id)
# Act
score = bot.redis.get(user_score_key)
bot.redis.flushdb()
# Assert
self.assertEqual("$200", bot.format_currency(score))
self.assertEqual("That is correct, <NAME>. Your score is now $200 (Expected Answer: Let's Make a Deal)", response)
def test_given_correct_answer_nonQuestion_form_user_score_decreased(self):
# Arrange
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek Let's Make a deal"
bot = self.create_bot_with_dictionary(d)
bot.redis = fakeredis.FakeStrictRedis()
bot.get_question()
response = bot.get_response_message()
user_score_key = "{0}:{1}".format(bot.user_score_prefix,
self.trebek_bot.room_message.item.message.user_from.id)
# Act
score = bot.redis.get(user_score_key)
bot.redis.flushdb()
# Assert
score_string = "<span style='color: red;'>-$200</span>"
self.assertEqual(score_string, bot.format_currency(score))
self.assertEqual("That is correct <NAME>, however responses should be in the form of a question. Your score is now {0}".format(score_string), response)
def test_given_incorrect_answer_time_is_up_response(self):
# Arrange
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek foobar"
bot = self.create_bot_with_dictionary(d)
bot.redis = fakeredis.FakeStrictRedis()
bot.get_question()
clue = bot.get_active_clue()
clue.expiration = time.time() - (bot.seconds_to_expire + 1)
key = bot.clue_key.format(bot.room_id)
bot.redis.set(key, json.dumps(clue, cls = entities.QuestionEncoder))
response = bot.get_response_message()
user_score_key = "{0}:{1}".format(bot.user_score_prefix,
self.trebek_bot.room_message.item.message.user_from.id)
# Act
score = bot.redis.get(user_score_key)
bot.redis.flushdb()
# Assert
self.assertFalse(score)
self.assertEqual(response, "Time is up! The correct answer was: <b>Let's Make a Deal</b>")
def test_given_correct_answer_time_is_up_response(self):
# Arrange
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek what is Let's Make a deal"
bot = self.create_bot_with_dictionary(d)
bot.redis = fakeredis.FakeStrictRedis()
bot.get_question()
clue = bot.get_active_clue()
clue.expiration = time.time() - (bot.seconds_to_expire + 1)
key = bot.clue_key.format(bot.room_id)
bot.redis.set(key, json.dumps(clue, cls = entities.QuestionEncoder))
response = bot.get_response_message()
user_score_key = "{0}:{1}".format(bot.user_score_prefix,
self.trebek_bot.room_message.item.message.user_from.id)
# Act
score = bot.redis.get(user_score_key)
bot.redis.flushdb()
# Assert
self.assertFalse(score)
self.assertEqual(response, "That is correct James A, however time is up. (Expected Answer: Let's Make a Deal)")
def test_when_asked_for_answer_bot_responds_with_answer(self):
d = self.get_setup_json()
bot = self.create_bot_with_dictionary(d)
bot.get_question()
d['item']['message']['message'] = "/trebek answer"
bot = self.create_bot_with_dictionary(d)
response = bot.get_response_message()
self.assertEqual("The answer was: Let's Make a Deal", response)
def test_when_no_question_exists_answer_returns_no_active_clue(self):
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek answer"
bot = self.create_bot_with_dictionary(d)
bot.redis.flushdb()
response = bot.get_response_message()
self.assertEqual("No active clue. Type '/trebek jeopardy' to start a round", response)
def test_when_answer_contains_HTML_word_is_filtered(self):
# e.g.: ANSWER: the <i>Stegosaurus</i>
c = {'id':1, 'title': 'foo', 'created_at': 'bar', 'updated_at': 'foobar', 'clues_count':1}
q = entities.Question(1, answer= "the <i>Stegosaurus</i>", category = c)
self.assertEqual("the Stegosaurus", q.answer)
# e.g.: ANSWER: <i>the Seagull</i>
q = entities.Question(1, answer= "<i>the Seagull</i>", category = c)
self.assertEqual("the Seagull", q.answer)
q = entities.Question(1, answer= "Theodore Roosevelt", category = c)
self.assertEqual("Theodore Roosevelt", q.answer)
def test_when_fetched_clue_is_invalid_get_new_clue(self):
global _invalid_clue, _fetch_count
_fetch_count = 0
clue = get_clue_json()
clue['invalid_count'] = 1
_invalid_clue = clue
self.trebek_bot.fetch_random_clue = fetch_invalid_clue
clue = self.trebek_bot.get_jeopardy_clue()
self.assertEqual(clue.invalid_count, None)
def test_when_fetched_clue_is_missing_question_get_new_clue(self):
global _fetch_count, _invalid_clue
_fetch_count = 0
clue = get_clue_json()
clue['question'] = ""
_invalid_clue = clue
self.trebek_bot.fetch_random_clue = fetch_invalid_clue
clue = self.trebek_bot.get_jeopardy_clue()
self.assertNotEqual(clue.question.strip(), "")
def test_when_fetched_clue_contains_visual_clue_request_new_clue(self):
global _fetch_count, _invalid_clue
_fetch_count = 0
clue = get_clue_json()
clue['question'] = "the picture seen here, contains some test data"
_invalid_clue = clue
self.trebek_bot.fetch_random_clue = fetch_invalid_clue
clue = self.trebek_bot.get_jeopardy_clue()
self.assertFalse("seen here" in clue.question)
def test_when_fetched_clue_contains_audio_clue_request_new_clue(self):
global _fetch_count, _invalid_clue
_fetch_count = 0
clue = get_clue_json()
clue['question'] = "the audio heard here, contains some test data"
_invalid_clue = clue
self.trebek_bot.fetch_random_clue = fetch_invalid_clue
clue = self.trebek_bot.get_jeopardy_clue()
self.assertFalse("heard here" in clue.question)
def test_when_new_month_arrives_score_resets_to_zero(self):
self.trebek_bot.update_score(200)
self.trebek_bot.get_year_month =
# combinatorist/mac_apt
'''
Copyright 2011 <NAME>, <NAME>
Using New BSD License:
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
#
# This code has since been edited to improve HFS parsing, add lzvn/lzfse support
# and is now a part of the mac_apt framework
#
import os
import mmap
import sys
import struct
import tempfile
import zlib
import pytsk3
import logging
from plugins.helpers.common import CommonFunctions
from plugins.helpers.btree import AttributesTree, CatalogTree, ExtentsOverflowTree
from plugins.helpers.structs import *
log = logging.getLogger('MAIN.HELPERS.HFS_ALT')
lzfse_capable = False
try:
import liblzfse
lzfse_capable = True
except ImportError:
print("liblzfse not found. Won't decompress lzfse/lzvn streams")
def write_file(filename,data):
f = open(filename, "wb")
f.write(data)
f.close()
def lzvn_decompress(compressed_stream, compressed_size, uncompressed_size): #TODO: Move to a class!
'''Adds Prefix and Postfix bytes as required by decompressor,
then decompresses and returns uncompressed bytes buffer
'''
header = b'bvxn' + struct.pack('<I', uncompressed_size) + struct.pack('<I', compressed_size)
footer = b'bvx$'
return liblzfse.decompress(header + compressed_stream + footer)
class HFSFile(object):
def __init__(self, volume, hfsplusfork, fileID, deleted=False):
self.volume = volume
self.blockSize = volume.blockSize
self.fileID = fileID
self.totalBlocks = hfsplusfork.totalBlocks
self.logicalSize = hfsplusfork.logicalSize
self.extents = []
self.deleted = deleted
b = 0
for extent in hfsplusfork.HFSPlusExtentDescriptor:
self.extents.append(extent)
b += extent.blockCount
while b != hfsplusfork.totalBlocks:
#log.debug("extents overflow {}".format(b))
k,v = volume.getExtentsOverflowForFile(fileID, b)
if not v:
log.debug("extents overflow missing, startblock={}".format(b))
break
for extent in v:
self.extents.append(extent)
b += extent.blockCount
def copyOutFile(self, outputfile, truncate=True):
f = open(outputfile, "wb")
for i in range(self.totalBlocks):
f.write(self.readBlock(i))
if truncate:
f.truncate(self.logicalSize)
f.close()
'''def readAllBuffer(self, truncate=True):
r = b""
for i in range(self.totalBlocks):
r += self.readBlock(i)
if truncate:
r = r[:self.logicalSize]
return r
'''
def readAllBuffer(self, truncate=True, output_file=None):
'''Write to output_file if valid, else return a buffer of data.
Warning: If file size > 200 MiB, b'' is returned, file data is only written to output_file.
'''
r = b""
bs = self.volume.blockSize
blocks_max = 52428800 // bs # 50MB
for extent in self.extents:
if extent.blockCount == 0: continue
            #if not self.deleted and self.fileID != kHFSAllocationFileID and not self.volume.isBlockInUse(lba):
            #    log.debug("FAIL, block 0x{:x} not marked as used".format(n))
if extent.blockCount > blocks_max:
counter = blocks_max
remaining_blocks = extent.blockCount
while remaining_blocks > 0:
num_blocks_to_read = min(blocks_max, remaining_blocks)
data = self.volume.read(extent.startBlock * bs, num_blocks_to_read * bs)
if output_file:
output_file.write(data)
elif self.logicalSize < 209715200: # 200MiB
r += data
remaining_blocks -= num_blocks_to_read
else:
data = self.volume.read(extent.startBlock * bs, bs * extent.blockCount)
if output_file:
output_file.write(data)
elif self.logicalSize < 209715200: # 200MiB
r += data
if truncate:
if output_file:
output_file.truncate(self.logicalSize)
elif self.logicalSize < 209715200: # 200MiB
r = r[:self.logicalSize]
return r
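    # Usage sketch (hypothetical handle and path): forks smaller than 200 MiB
    # can be taken as an in-memory buffer, larger forks must be streamed out:
    #
    #   data = hfs_file.readAllBuffer()               # returns bytes
    #   with open('/tmp/fork.bin', 'wb') as out:      # large fork
    #       hfs_file.readAllBuffer(output_file=out)   # writes out, returns b''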
def processBlock(self, block, lba):
return block
def readBlock(self, n):
bs = self.volume.blockSize
if n*bs > self.logicalSize:
raise ValueError("BLOCK OUT OF BOUNDS")
bc = 0
for extent in self.extents:
bc += extent.blockCount
if n < bc:
lba = extent.startBlock+(n-(bc-extent.blockCount))
if not self.deleted and self.fileID != kHFSAllocationFileID and not self.volume.isBlockInUse(lba):
raise ValueError("FAIL, block %x not marked as used" % n)
return self.processBlock(self.volume.read(lba*bs, bs), lba)
return b""
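    # Worked example of the extent walk above (made-up extents): with extents
    # [(startBlock=100, blockCount=5), (startBlock=300, blockCount=3)], block
    # n=6 falls in the second extent (running count bc reaches 8 > 6), so its
    # LBA is 300 + (6 - (8 - 3)) = 301.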
class HFSCompressedResourceFork(HFSFile):
def __init__(self, volume, hfsplusfork, fileID, compression_type, uncompressed_size):
super(HFSCompressedResourceFork,self).__init__(volume, hfsplusfork, fileID)
block0 = self.readBlock(0)
self.compression_type = compression_type
self.uncompressed_size = uncompressed_size
if compression_type in [8, 12]: # 8 is lzvn, 12 is lzfse
#only tested for 8
self.header = HFSPlusCmpfLZVNRsrcHead.parse(block0)
#print(self.header)
else:
self.header = HFSPlusCmpfRsrcHead.parse(block0)
#print(self.header)
self.blocks = HFSPlusCmpfRsrcBlockHead.parse(block0[self.header.headerSize:])
log.debug("HFSCompressedResourceFork numBlocks:{}".format(self.blocks.numBlocks))
#HAX, readblock not implemented
def readAllBuffer(self, truncate=True, output_file=None):
'''Warning: If output size > 200 MiB, b'' is returned, file data is only written to output_file.'''
if self.compression_type in [7, 8, 11, 12] and not lzfse_capable:
raise ValueError('LZFSE/LZVN compression detected, no decompressor available!')
if self.logicalSize >= 209715200:
temp_file = tempfile.SpooledTemporaryFile(209715200)
super(HFSCompressedResourceFork, self).readAllBuffer(True, temp_file)
temp_file.seek(0)
buff = mmap.mmap(temp_file.fileno(), 0) # memory mapped file to access as buffer
else:
buff = super(HFSCompressedResourceFork, self).readAllBuffer()
r = b""
if self.compression_type in [7, 11]: # lzvn or lzfse # Does it ever go here????
raise ValueError("Did not expect type " + str(self.compression_type) + " in resource fork")
try:
# The following is only for lzvn, not encountered lzfse yet!
data_start = self.header.headerSize
compressed_stream = buff[data_start:self.header.totalSize]
decompressed = lzvn_decompress(compressed_stream, self.header.totalSize - self.header.headerSize, self.uncompressed_size)
if output_file: output_file.write(decompressed)
elif self.uncompressed_size < 209715200: r += decompressed
except liblzfse.error as ex:
raise ValueError("Exception from lzfse_lzvn decompressor")
elif self.compression_type in [8, 12]: # lzvn or lzfse in 64k chunks
try:
# The following is only for lzvn, not encountered lzfse yet!
full_uncomp = self.uncompressed_size
chunk_uncomp = 65536
i = 0
src_offset = self.header.headerSize
for offset in self.header.chunkOffsets:
compressed_size = offset - src_offset
data = buff[src_offset:offset] #input_file.read(compressed_size)
src_offset = offset
if full_uncomp <= 65536:
chunk_uncomp = full_uncomp
else:
chunk_uncomp = 65536
if len(self.header.chunkOffsets) == i + 1: # last chunk
chunk_uncomp = full_uncomp - (65536 * i)
if chunk_uncomp < compressed_size and data[0] == 0x06:
decompressed = data[1:]
else:
decompressed = lzvn_decompress(data, compressed_size, chunk_uncomp)
if output_file: output_file.write(decompressed)
elif self.uncompressed_size < 209715200: r += decompressed
i += 1
except liblzfse.error as ex:
raise ValueError("Exception from lzfse_lzvn decompressor")
else:
base = self.header.headerSize + 4
for b in self.blocks.HFSPlusCmpfRsrcBlockArray:
decompressed = zlib.decompress(buff[base+b.offset:base+b.offset+b.size])
if output_file: output_file.write(decompressed)
elif self.uncompressed_size < 209715200: r += decompressed
if self.logicalSize >= 209715200:
            buff.close()
temp_file.close()
return r
class HFSVolume(object):
def __init__(self, pytsk_image, offset=0):
self.img = pytsk_image
self.offset = offset
try:
data = self.read(0, 0x1000)
self.header = HFSPlusVolumeHeader.parse(data[0x400:0x800])
assert self.header.signature == 0x4858 or self.header.signature == 0x482B
except AssertionError:
raise ValueError("Not an HFS+ image")
#self.is_hfsx = self.header.signature == 0x4858
self.blockSize = self.header.blockSize
self.allocationFile = HFSFile(self, self.header.allocationFile, kHFSAllocationFileID)
self.allocationBitmap = self.allocationFile.readAllBuffer()
self.extentsFile = HFSFile(self, self.header.extentsFile, kHFSExtentsFileID)
self.extentsTree = ExtentsOverflowTree(self.extentsFile)
self.catalogFile = HFSFile(self, self.header.catalogFile, kHFSCatalogFileID)
self.xattrFile = HFSFile(self, self.header.attributesFile, kHFSAttributesFileID)
self.catalogTree = CatalogTree(self.catalogFile)
self.xattrTree = AttributesTree(self.xattrFile)
self.hasJournal = self.header.attributes & (1 << kHFSVolumeJournaledBit)
def read(self, offset, size):
return self.img.read(self.offset + offset, size)
def volumeID(self):
return struct.pack(">LL", self.header.finderInfo[6], self.header.finderInfo[7])
def isBlockInUse(self, block):
thisByte = self.allocationBitmap[block // 8]
return (thisByte & (1 << (7 - (block % 8)))) != 0
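    # Worked example: block 10 lives in allocation-bitmap byte 10 // 8 = 1 and
    # is tested with mask 1 << (7 - (10 % 8)) = 0b00100000, i.e. blocks are
    # stored most-significant-bit first within each byte.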
def unallocatedBlocks(self):
for i in range(self.header.totalBlocks):
if not self.isBlockInUse(i):
yield i, self.read(i*self.blockSize, self.blockSize)
def getExtentsOverflowForFile(self, fileID, startBlock, forkType=kForkTypeData):
return self.extentsTree.searchExtents(fileID, forkType, startBlock)
def getXattr(self, fileID, name):
return self.xattrTree.searchXattr(fileID, name)
def getFileByPath(self, path):
return self.catalogTree.getRecordFromPath(path)
def getFinderDateAdded(self, path):
k,v = self.catalogTree.getRecordFromPath(path)
if k and v.recordType == kHFSPlusFileRecord:
return v.data.ExtendedFileInfo.finderDateAdded
elif k and v.recordType == kHFSPlusFolderRecord:
return v.data.ExtendedFolderInfo.finderDateAdded
return 0
def listFolderContents(self, path):
k,v = self.catalogTree.getRecordFromPath(path)
if not k or v.recordType != kHFSPlusFolderRecord:
return
for k,v in self.catalogTree.getFolderContents(v.data.folderID):
if v.recordType == kHFSPlusFolderRecord:
print(v.data.folderID, getString(k) + "/")
elif v.recordType == kHFSPlusFileRecord:
print(v.data.fileID, getString(k))
def listFinderData(self, path):
'''Returns finder data'''
finder_data = {}
k,v = self.catalogTree.getRecordFromPath(path)
date_added = 0
if k and v.recordType == kHFSPlusFileRecord:
date_added = v.data.ExtendedFileInfo.finderDateAdded
if v.data.FileInfo.fileType: finder_data['fileType'] = v.data.FileInfo.fileType
if v.data.FileInfo.fileCreator: finder_data['fileCreator'] = v.data.FileInfo.fileCreator
if v.data.FileInfo.finderFlags: finder_data['finderFlags'] = v.data.FileInfo.finderFlags
if v.data.ExtendedFileInfo.extendedFinderFlags: finder_data['extendedFinderFlags'] = v.data.ExtendedFileInfo.extendedFinderFlags
elif k and v.recordType == kHFSPlusFolderRecord:
date_added = v.data.ExtendedFolderInfo.finderDateAdded
if v.data.FolderInfo.finderFlags: finder_data['FinderFlags'] = v.data.FolderInfo.finderFlags
if v.data.ExtendedFolderInfo.extendedFinderFlags: finder_data['extendedFinderFlags'] = v.data.ExtendedFolderInfo.extendedFinderFlags
if date_added: finder_data['DateAdded'] = date_added
return finder_data
def getCnidForPath(self, path):
k,v = self.catalogTree.getRecordFromPath(path)
if not v:
raise ValueError("Path not found")
# Repository: IMULMUL/etl-parser
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-SEC
GUID : 16c6501a-ff2d-46ea-868d-8f96cb0cb52d
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("16c6501a-ff2d-46ea-868d-8f96cb0cb52d"), event_id=1, version=0)
class Microsoft_Windows_SEC_1_0(Etw):
pattern = Struct(
"SequenceNumber" / Int64ul,
"ProcessId" / Int32ul,
"ProcessTime" / Int64sl,
"ThreadId" / Int32ul,
"UserSid" / Sid,
"SessionId" / Int32ul,
"CreatorProcessId" / Int32ul,
"CreatorProcessTime" / Int64sl,
"CreatorProcessName" / WString,
"ProcessName" / WString,
"CommandLine" / WString,
"PartialCRC1" / Int32ul,
"PartialCRC2" / Int32ul,
"PartialCRC3" / Int32ul,
"MotW" / Int8ul,
"IntegrityLevel" / Int32ul,
"TokenElevationType" / Int32ul,
"Elevated" / Int8ul,
"Impersonation" / Int8ul,
"SubjectLogonId" / Int64ul,
"ProcessStartKey" / Int64ul,
"CreatorProcessStartKey" / Int64ul,
"CommandLineTruncated" / Int8ul,
"CommandLineSize" / Int32ul,
"MitigationPolicy" / Int64ul,
"ProtectionLevel" / Int8ul,
"EnterprisePolicy" / Int32ul
)
@declare(guid=guid("16c6501a-ff2d-46ea-868d-8f96cb0cb52d"), event_id=2, version=0)
class Microsoft_Windows_SEC_2_0(Etw):
pattern = Struct(
"DriverUnloadTime" / Int64sl
)
@declare(guid=guid("16c6501a-ff2d-46ea-868d-8f96cb0cb52d"), event_id=3, version=0)
class Microsoft_Windows_SEC_3_0(Etw):
pattern = Struct(
"DriverLoadTime" / Int64sl
)
@declare(guid=guid("16c6501a-ff2d-46ea-868d-8f96cb0cb52d"), event_id=4, version=0)
class Microsoft_Windows_SEC_4_0(Etw):
pattern = Struct(
"SequenceNumber" / Int64ul,
"ProcessId" / Int32ul,
"ProcessTime" / Int64sl,
"ThreadId" / Int32ul,
"UserSid" / Sid,
"SessionId" / Int32ul,
"FileName" / WString,
"FileAttributes" / Int32ul,
"Dispositon" / Int32ul,
"ProcessStartKey" / Int64ul
)
@declare(guid=guid("16c6501a-ff2d-46ea-868d-8f96cb0cb52d"), event_id=5, version=0)
class Microsoft_Windows_SEC_5_0(Etw):
pattern = Struct(
"SequenceNumber" / Int64ul,
"ProcessId" / Int32ul,
"ProcessTime" / Int64sl,
"ThreadId" / Int32ul,
"UserSid" / Sid,
"SessionId" / Int32ul,
"FileName" / WString,
"NewFileName" / WString,
"FileAttributes" / Int32ul,
"ProcessStartKey" / Int64ul
)
@declare(guid=guid("16c6501a-ff2d-46ea-868d-8f96cb0cb52d"), event_id=6, version=0)
class Microsoft_Windows_SEC_6_0(Etw):
pattern = Struct(
"SequenceNumber" / Int64ul,
"ProcessId" / Int32ul,
"ProcessTime" / Int64sl,
"ThreadId" / Int32ul,
"UserSid" / Sid,
"SessionId" / Int32ul,
"FileName" / WString,
"FileAttributes" / Int32ul,
"ProcessStartKey" / Int64ul
)
@declare(guid=guid("16c6501a-ff2d-46ea-868d-8f96cb0cb52d"), event_id=7, version=0)
class Microsoft_Windows_SEC_7_0(Etw):
pattern = Struct(
"SequenceNumber" / Int64ul,
"ProcessId" / Int32ul,
"ProcessTime" / Int64sl,
"ThreadId" / Int32ul,
"UserSid" / Sid,
"SessionId" / Int32ul,
"FileName" / WString,
"FileAttributes" / Int32ul,
"ProcessStartKey" / Int64ul
)
@declare(guid=guid("16c6501a-ff2d-46ea-868d-8f96cb0cb52d"), event_id=8, version=0)
class Microsoft_Windows_SEC_8_0(Etw):
pattern = Struct(
"SequenceNumber" / Int64ul,
"ProcessId" / Int32ul,
"ProcessTime" / Int64sl,
"ThreadId" / Int32ul,
"UserSid" / Sid,
"SessionId" / Int32ul,
"Key" / WString,
"ProcessStartKey" / Int64ul
)
@declare(guid=guid("16c6501a-ff2d-46ea-868d-8f96cb0cb52d"), event_id=9, version=0)
class Microsoft_Windows_SEC_9_0(Etw):
pattern = Struct(
"SequenceNumber" / Int64ul,
"ProcessId" / Int32ul,
"ProcessTime" / Int64sl,
"ThreadId" / Int32ul,
"UserSid" / Sid,
"SessionId" / Int32ul,
"Key" / WString,
"ProcessStartKey" / Int64ul
)
@declare(guid=guid("16c6501a-ff2d-46ea-868d-8f96cb0cb52d"), event_id=10, version=0)
class Microsoft_Windows_SEC_10_0(Etw):
pattern = Struct(
"SequenceNumber" / Int64ul,
"ProcessId" / Int32ul,
"ProcessTime" / Int64sl,
"ThreadId" / Int32ul,
"UserSid" / Sid,
"SessionId" / Int32ul,
"Key" / WString,
"NewKey" / WString,
"ProcessStartKey" / Int64ul
)
@declare(guid=guid("16c6501a-ff2d-46ea-868d-8f96cb0cb52d"), event_id=11, version=0)
class Microsoft_Windows_SEC_11_0(Etw):
pattern = Struct(
"SequenceNumber" / Int64ul,
"ProcessId" / Int32ul,
"ProcessTime" / Int64sl,
"ThreadId" / Int32ul,
"UserSid" / Sid,
"SessionId" / Int32ul,
"Key" / WString,
"Hive" / WString,
"RestoreFlags" / Int32ul,
"ProcessStartKey" / Int64ul
)
@declare(guid=guid("16c6501a-ff2d-46ea-868d-8f96cb0cb52d"), event_id=12, version=0)
class Microsoft_Windows_SEC_12_0(Etw):
pattern = Struct(
"SequenceNumber" / Int64ul,
"ProcessId" / Int32ul,
"ProcessTime" / Int64sl,
"ThreadId" / Int32ul,
"UserSid" / Sid,
"SessionId" / Int32ul,
"Key" / WString,
"Hive" / WString,
"NewHive" / WString,
"ProcessStartKey" / Int64ul
)
@declare(guid=guid("16c6501a-ff2d-46ea-868d-8f96cb0cb52d"), event_id=13, version=0)
class Microsoft_Windows_SEC_13_0(Etw):
pattern = Struct(
"SequenceNumber" / Int64ul,
"ProcessId" / Int32ul,
"ProcessTime" / Int64sl,
"ThreadId" / Int32ul,
"UserSid" / Sid,
"SessionId" / Int32ul,
"Key" / WString,
"Value" / WString,
"OldValueDataType" / Int32ul,
"OldValueDataSize" / Int32ul,
"OldValueCopiedSize" / Int32ul,
"OldValueData" / Bytes(lambda this: this.OldValueCopiedSize),
"NewValueDataType" / Int32ul,
"NewValueDataSize" / Int32ul,
"NewValueCopiedSize" / Int32ul,
"NewValueData" / Bytes(lambda this: this.NewValueCopiedSize),
"ProcessStartKey" / Int64ul
)
@declare(guid=guid("16c6501a-ff2d-46ea-868d-8f96cb0cb52d"), event_id=16, version=0)
class Microsoft_Windows_SEC_16_0(Etw):
pattern = Struct(
"SequenceNumber" / Int64ul,
"ProcessId" / Int32ul,
"ProcessTime" / Int64sl,
"ThreadId" / Int32ul,
"UserSid" / Sid,
"SessionId" / Int32ul,
"Key" / WString,
"Value" / WString,
"DataType" / Int32ul,
"ValueDataSize" / Int32ul,
"ValueCopiedSize" / Int32ul,
"ValueData" / Bytes(lambda this: this.ValueCopiedSize),
"ProcessStartKey" / Int64ul
)
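# Note on the variable-length fields above: construct evaluates the lambda in
# Bytes(lambda this: this.ValueCopiedSize) against the fields already parsed in
# the same Struct, so ValueData consumes exactly ValueCopiedSize bytes. Minimal
# sketch of the same pattern (not an SEC event, names are illustrative):
#
#   demo = Struct("n" / Int32ul, "blob" / Bytes(lambda this: this.n))
#   demo.parse(b"\x03\x00\x00\x00abc").blob   # -> b'abc'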
@declare(guid=guid("16c6501a-ff2d-46ea-868d-8f96cb0cb52d"), event_id=17, version=0)
class Microsoft_Windows_SEC_17_0(Etw):
pattern = Struct(
"SequenceNumber" / Int64ul,
"ProcessId" / Int32ul,
"ProcessTime" / Int64sl,
"ThreadId" / Int32ul,
"UserSid" / Sid,
"SessionId" / Int32ul,
"ImageName" / WString,
"DocumentName" / WString,
"ProcessStartKey" / Int64ul
)
@declare(guid=guid("16c6501a-ff2d-46ea-868d-8f96cb0cb52d"), event_id=18, version=0)
class Microsoft_Windows_SEC_18_0(Etw):
pattern = Struct(
"SequenceNumber" / Int64ul,
"ProcessId" / Int32ul,
"ProcessTime" / Int64sl,
"ThreadId" / Int32ul,
"UserSid" / Sid,
"SessionId" / Int32ul,
"TargetProcessId" / Int32ul,
"TargetProcessTime" / Int64sl,
"TargetProcessName" / WString,
"TargetThreadId" / Int32ul,
"TargetThreadStartAddress" / Int64ul,
"StartAddressVadQueryResult" / Int32ul,
"StartAddressVadAllocationBase" / Int64ul,
"StartAddressVadAllocationProtect" / Int32ul,
"StartAddressVadRegionType" / Int32ul,
"StartAddressVadRegionSize" / Int64ul,
"StartAddressVadProtect" / Int32ul,
"SourceProcessStartKey" / Int64ul,
"TargetProcessStartKey" / Int64ul,
"MappedModuleName" / WString
)
@declare(guid=guid("16c6501a-ff2d-46ea-868d-8f96cb0cb52d"), event_id=19, version=0)
class Microsoft_Windows_SEC_19_0(Etw):
pattern = Struct(
"SequenceNumber" / Int64ul,
"ProcessId" / Int32ul,
"ProcessTime" / Int64sl,
"ThreadId" / Int32ul,
"UserSid" / Sid,
"SessionId" / Int32ul,
"TargetProcessId" / Int32ul,
"TargetProcessTime" / Int64sl,
"TargetProcess" / WString,
"Access" / Int32ul,
"SourceProcessStartKey" / Int64ul,
"TargetProcessStartKey" / Int64ul
)
@declare(guid=guid("16c6501a-ff2d-46ea-868d-8f96cb0cb52d"), event_id=20, version=0)
class Microsoft_Windows_SEC_20_0(Etw):
pattern = Struct(
"SequenceNumber" / Int64ul,
"ProcessId" / Int32ul,
"ProcessTime" / Int64sl,
"ThreadId" / Int32ul,
"UserSid" / Sid,
"SessionId" / Int32ul,
"Desktop" / WString,
"Access" / Int32ul,
"Duplicate" / Int8ul,
"Kernel" / Int8ul,
"ProcessStartKey" / Int64ul
)
@declare(guid=guid("16c6501a-ff2d-46ea-868d-8f96cb0cb52d"), event_id=21, version=0)
class Microsoft_Windows_SEC_21_0(Etw):
pattern = Struct(
"SequenceNumber" / Int64ul,
"ProcessId" / Int32ul,
"ProcessTime" / Int64sl,
"ThreadId" / Int32ul,
"UserSid" / Sid,
"SessionId" / Int32ul,
"VolumeName" / WString,
"VolWriteOffset" / Int64ul,
"VolWriteSize" / Int64ul,
"SystemVolume" / Int8ul,
"ProcessStartKey" / Int64ul
)
@declare(guid=guid("16c6501a-ff2d-46ea-868d-8f96cb0cb52d"), event_id=22, version=0)
class Microsoft_Windows_SEC_22_0(Etw):
pattern = Struct(
"SequenceNumber" / Int64ul,
"ProcessId" / Int32ul,
"ProcessTime" / Int64sl,
"ThreadId" / Int32ul,
"UserSid" / Sid,
"SessionId" / Int32ul,
"ProcessName" / WString,
"CommandLine" / WString,
"ProcessStartKey" / Int64ul,
"CommandLineTruncated" / Int8ul,
"CommandLineSize" / Int32ul
)
@declare(guid=guid("16c6501a-ff2d-46ea-868d-8f96cb0cb52d"), event_id=23, version=0)
class Microsoft_Windows_SEC_23_0(Etw):
pattern = Struct(
"SequenceNumber" / Int64ul,
"ProcessId" / Int32ul,
"ProcessTime" / Int64sl,
"ThreadId" / Int32ul,
"UserSid" / Sid,
"SessionId" / Int32ul,
"ImageName" / WString,
"MotW" / Int8ul,
"PartialCRC1" / Int32ul,
"PartialCRC2" / Int32ul,
"PartialCRC3" / Int32ul,
"SystemModeImage" / Int8ul,
"LoadImageAddress" / Int64ul,
"ProcessStartKey" / Int64ul,
"LoadImageSize" / Int64ul
)
@declare(guid=guid("16c6501a-ff2d-46ea-868d-8f96cb0cb52d"), event_id=24, version=0)
class Microsoft_Windows_SEC_24_0(Etw):
pattern = Struct(
"SequenceNumber" / Int64ul,
"ProcessId" / Int32ul,
"ProcessTime" / Int64sl,
"ThreadId" / Int32ul,
"UserSid" / Sid,
"SessionId" / Int32ul,
"ImageName" / WString,
"MotW" / Int8ul,
"PartialCRC1" / Int32ul,
"PartialCRC2" / Int32ul,
"PartialCRC3" / Int32ul,
"ImageSignatureLevel" / Int32ul,
"ImageSignatureType" / Int32ul,
"CurrentCodeIntegrityOptions" / Int32ul,
"OriginalCodeIntegrityOptions" / Int32ul,
"ProcessStartKey" / Int64ul,
"ImageBase" / Int64ul,
"ImageSize" / Int64ul
)
@declare(guid=guid("16c6501a-ff2d-46ea-868d-8f96cb0cb52d"), event_id=25, version=0)
class Microsoft_Windows_SEC_25_0(Etw):
pattern = Struct(
"SequenceNumber" / Int64ul,
"AffectedProcessId" / Int32ul,
"AffectedProcessTime" / Int64sl,
"CurrentTokenPointer" / Int64ul,
"CurrentTokenPrivPresent" / Int64ul,
"CurrentTokenPrivEnabled" / Int64ul,
"CurrentTokenPrivEnabledByDefault" / Int64ul,
"CurrentTokenIntegrityLevel" / Int32ul,
"CurrentTokenUserSid" / Sid,
"PreviousTokenPointer" / Int64ul,
"PreviousTokenPrivPresent" / Int64ul,
"PreviousTokenPrivEnabled" / Int64ul,
"PreviousTokenPrivEnabledByDefault" / Int64ul,
"PreviousTokenIntegrityLevel" / Int32ul,
"PreviousTokenUserSid" / Sid,
"OriginalTokenPointer" / Int64ul,
"OriginalTokenPrivPresent" / Int64ul,
"OriginalTokenPrivEnabled" / Int64ul,
"OriginalTokenPrivEnabledByDefault" / Int64ul,
"OriginalTokenIntegrityLevel" / Int32ul,
"OriginalTokenUserSid" / Sid,
"SystemTokenPointer" / Int64ul,
"InlineCheck" / Int8ul,
"AffectedProcessStartKey" / Int64ul,
"PrimaryTokenFrozen" / Int8ul,
"ParentTokenIntegrityLevel" / Int32ul
)
@declare(guid=guid("16c6501a-ff2d-46ea-868d-8f96cb0cb52d"), event_id=27, version=0)
class Microsoft_Windows_SEC_27_0(Etw):
pattern = Struct(
"SequenceNumber" / Int64ul,
"AffectedProcessId" / Int32ul,
"AffectedProcessStartKey" / Int64ul,
"AffectedProcessTime" / Int64sl,
"InlineCheck" / Int8ul,
"CurrentDaclPointer" / Int64ul,
"CurrentDaclValidAceList" / Int8ul,
"CurrentDaclAceCount" / Int32ul,
"CurrentDaclSids" / WString,
"CurrentDaclAccessMaskBlobSize" / Int32ul,
"CurrentDaclAccessMasks" / Bytes(lambda this: this.CurrentDaclAccessMaskBlobSize),
"PreviousDaclPointer" / Int64ul,
"PreviousDaclValidAceList" / Int8ul,
"PreviousDaclAceCount" / Int32ul,
"PreviousDaclSids" / WString,
"PreviousDaclAccessMaskBlobSize" / Int32ul,
"PreviousDaclAccessMasks" / Bytes(lambda this: this.PreviousDaclAccessMaskBlobSize),
"OriginalDaclPointer" / Int64ul,
"OriginalDaclValidAceList" / Int8ul,
"OriginalDaclAceCount" / Int32ul,
"OriginalDaclSids" / WString,
"OriginalDaclAccessMaskBlobSize" / Int32ul,
"OriginalDaclAccessMasks" / Bytes(lambda this: this.OriginalDaclAccessMaskBlobSize)
)
@declare(guid=guid("16c6501a-ff2d-46ea-868d-8f96cb0cb52d"), event_id=28, version=0)
class Microsoft_Windows_SEC_28_0(Etw):
pattern = Struct(
"SequenceNumber" / Int64ul,
"ProcessId" / Int32ul,
"ProcessStartKey" / Int64ul,
"Flags" / Int32ul,
"ThreadId" / Int32ul,
"CallerAddress" / Int64ul,
"StartAddress" / Int64ul,
"BackTraceSize" / Int32ul,
"BackTrace" / Bytes(lambda this: this.BackTraceSize),
"TargetCodeSize" / Int32ul,
"TargetCode" / Bytes(lambda this: this.TargetCodeSize),
"CallerCodeSize" / Int32ul,
"CallerCode" / Bytes(lambda this: this.CallerCodeSize)
)
@declare(guid=guid("16c6501a-ff2d-46ea-868d-8f96cb0cb52d"), event_id=29, version=0)
class Microsoft_Windows_SEC_29_0(Etw):
pattern = Struct(
"SequenceNumber" / Int64ul,
"CurrentValue" / Int64ul,
"OriginalValue" / Int64ul,
"IsSynchronous" / Int8ul
)
@declare(guid=guid("16c6501a-ff2d-46ea-868d-8f96cb0cb52d"), event_id=30, version=0)
class Microsoft_Windows_SEC_30_0(Etw):
pattern = Struct(
"SequenceNumber" / Int64ul,
"CurrentValue" / Int64ul,
"PreviousValue" / Int64ul,
"OriginalValue" / Int64ul,
"IsSynchronous" / Int8ul
)
@declare(guid=guid("16c6501a-ff2d-46ea-868d-8f96cb0cb52d"), event_id=31, version=0)
class Microsoft_Windows_SEC_31_0(Etw):
pattern = Struct(
"SequenceNumber" / Int64ul,
"SuspiciousEntryIndex" / Int32ul,
"TableSize" / Int32ul,
"Table" / Bytes(lambda this: this.TableSize),
"CodeSize" / Int32ul,
"Code" / Bytes(lambda this: this.CodeSize)
)
@declare(guid=guid("16c6501a-ff2d-46ea-868d-8f96cb0cb52d"), event_id=32, version=0)
class Microsoft_Windows_SEC_32_0(Etw):
pattern = Struct(
"SequenceNumber" / Int64ul,
"SuspiciousPointerIndex" / Int32ul,
"TableSize" / Int32ul,
"Table" / Bytes(lambda this: this.TableSize),
"CodeSize" / Int32ul,
"Code" / Bytes(lambda this: this.CodeSize)
)
@declare(guid=guid("16c6501a-ff2d-46ea-868d-8f96cb0cb52d"), event_id=33, version=0)
class Microsoft_Windows_SEC_33_0(Etw):
pattern = Struct(
"SequenceNumber" / Int64ul,
"SuspiciousPointerIndex" / Int32ul,
"TableSize" / Int32ul,
"Table" / Bytes(lambda this: this.TableSize),
import os
import random
import sys
from dotenv import load_dotenv
import characters
import items
import names
import random_lists
import shared_functions
import traits
import wizard
from bot import bot as bot
from shared_functions import party as party
from shared_functions import npcs as npcs
from shared_functions import world as world
# below: laziness
load_dotenv()
TOKEN = os.getenv("TOKEN")
# Party & NPC Management
next_backstory = None
next_name = None
next_short_name = None
@bot.command(name='countitems')
async def count_items(ctx):
await ctx.send("There are " + str(len(items.item_dict)) + " items currently in the item pool.")
@bot.command(name='countbackstories')
async def count_backstories(ctx):
num_backstories = len(random_lists.Backstories)
await ctx.send("There are " + str(num_backstories) + " backstories currently in the backstory pool.")
@bot.command(name='nextname')
async def next_name_function(ctx, name):
global next_short_name
global next_name
next_short_name = name.split(" ")[0]
if shared_functions.find_character(next_short_name) is not None:
await ctx.send("A character already exists with the name " + next_short_name + ".")
next_short_name = None
return
next_name = name
@bot.command(name='nextbackstory')
async def next_backstory_function(ctx, backstory):
global next_backstory
next_backstory = backstory
@bot.command(name='additem', aliases=["item"])
async def add_item(ctx, name, item):
character = shared_functions.find_character(name)
if not character:
await ctx.send("Character does not exist!")
return
for i in range(0, len(character["Inventory"])):
if character["Inventory"][i] == "Empty slot" or character["Inventory"][i] == "Empty Slot":
character["Inventory"][i] = item
break
else:
await ctx.send(name + "'s inventory is full!")
return
await ctx.send(embed=characters.print_character(name))
@bot.command(name='removeitem', aliases=["take", "drop"])
async def remove_item(ctx, name, item):
character = shared_functions.find_character(name)
if not character:
await ctx.send("Character does not exist!")
return
length = len(item)
for i in range(0, len(character["Inventory"])):
print(character["Inventory"][i][0:length])
if character["Inventory"][i][0:length] == item:
character["Inventory"][i] = "Empty slot"
break
else:
await ctx.send("Item not found.")
return
await ctx.send(embed=characters.print_character(name))
@bot.command(name='pay', aliases=["givemoney", "givegold"])
async def pay(ctx, name, gold):
await increase(ctx, name, "Gold", gold)
@bot.command(name='increase', aliases=["increasestat", "boost", "booststat"])
async def increase(ctx, name, stat, number):
try:
number = int(number)
except ValueError:
await ctx.send("Stat must be increased by a number.")
return
character = shared_functions.find_character(name)
if not character:
await ctx.send("Character " + name + " does not exist")
return
if stat not in character:
await ctx.send("Stat " + stat + " does not exist")
return
try:
# prevent some jackass from crashing bot by trying to increase "Backstory"
int(character[stat])
except ValueError:
await ctx.send("Are you trying to increase a non-numerical stat...?")
return
character[stat] += number
await ctx.send(embed=characters.print_character(name))
@bot.command(name='decrease', aliases=["lowerstat", "decreasestat", "lower"])
async def decrease(ctx, name, stat, number):
await increase(ctx, name, stat, -int(number))
@bot.command(name='damage', aliases=["hurt"])
async def damage(ctx, name, number):
await decrease(ctx, name, "Health", number)
character = shared_functions.find_character(name)
if character and character["Health"] <= 0:
await kill_char(ctx, name)
@bot.command(name='heal', aliases=["restore"])
async def heal(ctx, name, number=None):
if number is not None:
await increase(ctx, name, "Health", number)
else:
character = shared_functions.find_character(name)
if not character:
await ctx.send("Character " + name + " does not exist, dummy")
return
# hardcoded max health right now; will eventually need to change to a character["Max Health"] attribute if i
# implement things like Blessing of Bloat
characters.change_character_data(name, "Health", 2 * character["Strongness"] + 1)
await ctx.send(embed=characters.print_character(name))
@bot.command(name='check', aliases=["statcheck"])
async def check(ctx, name, stat, required_number, global_modifier=0):
try:
required_number = int(required_number)
except ValueError:
target = shared_functions.find_character(required_number)
if not target:
await ctx.send("There is no character named " + required_number)
return
if stat not in target:
await ctx.send(required_number + " has no stat " + stat)
return
required_number = target[stat]
# get required_number from target stat.
character = shared_functions.find_character(name)
if not character:
await ctx.send("There is no character named " + name)
return
try:
global_modifier = int(global_modifier)
except ValueError:
await ctx.send("Modifier is not a number...")
return
if stat not in character:
await ctx.send(name + " has no stat " + stat)
return
global_modifier += world["modifier"]
roll = random.randint(1, 20)
print(roll)
passed = False
if roll == 20:
passed = True
elif roll == 1:
pass
else:
if (character[stat] - required_number) + roll + global_modifier >= 11:
passed = True
if passed:
await ctx.send(stat + " check passed!")
else:
await ctx.send(stat + " check failed!")
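# Worked example of the check above (made-up stats): a character with
# Strongness 6 rolled against required_number 5 with no modifiers passes when
# (6 - 5) + roll >= 11, i.e. on any roll of 10 or more, with a natural 20
# always passing and a natural 1 always failing.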
@bot.command(name='combat', aliases=["fight", "attack"])
async def combat(ctx, name, weapon_damage, target, global_modifier=0, stat="Strongness"):
damage_target = False
try:
defense = int(target)
except ValueError:
target_character = shared_functions.find_character(target)
if not target_character:
await ctx.send("Could not find target.")
return
        if stat in target_character:
            defense = target_character[stat]
        else:
            await ctx.send("Stat does not exist.")
            return
        damage_target = True
try:
global_modifier = int(global_modifier)
weapon_damage = int(weapon_damage)
except ValueError:
await ctx.send("One of the numerical parameters was not a number.")
return
character = shared_functions.find_character(name)
global_modifier += world["modifier"]
if not character:
await ctx.send("No character named " + name)
return
if stat not in character:
await ctx.send("Invalid stat")
return
roll = random.randint(1, 20)
print(roll)
miss = False
crit = 1
if roll == 20:
crit = 2
if roll == 1:
miss = True
else:
damage_done = (character[stat] - defense + roll + global_modifier - 10) * (weapon_damage * crit)
if damage_done < 0:
damage_done = 0
if miss:
await ctx.send("Missed!")
else:
if damage_target and damage_done > 0:
await damage(ctx, target, damage_done)
await ctx.send("Did " + str(damage_done) + " damage!")
if crit > 1:
await ctx.send("A critical hit!")
@bot.command(name='killchar', aliases=["kill", "nuke"])
async def kill_char(ctx, name):
# TODO: Have a character drop their entire inventory upon being killed, activating any explosives.
# It would be pretty comical to randomly trigger %use (prompting for a target if necessary).
# TODO: File away deceased characters in an additional dictionary for use with Necromancer.
character = shared_functions.find_character(name)
if not character:
await ctx.send("Could not find party member or NPC named " + name)
return
if name in npcs.keys():
relevant_dict = npcs
else:
relevant_dict = party
# later: add to necromancy dictionary
response = "**" + relevant_dict[name]["Name"] + " has been slain.**"
for item in relevant_dict[name]["Inventory"]:
if item != "Empty slot":
response += "\nThe following item dropped: " + items.item_dict[item].print_teaser()
relevant_dict.pop(name, False)
shared_functions.backup_characters()
await ctx.send(response)
@bot.command(name='party')
async def print_party(ctx, name=None):
if not name:
for character_name in party.keys():
response = characters.print_character(character_name)
await ctx.send(embed=response)
else:
response = characters.print_character(name)
await ctx.send(embed=response)
@bot.command(name='npc')
async def npc(ctx, name=None):
if not name:
length = str(len(npcs.keys()))
await ctx.send("There are currently " + length + " NPCs in the pool.")
return
if name == "all":
for character in npcs:
await ctx.send(embed=characters.print_character(character))
else:
await ctx.send(embed=characters.print_character(name))
@bot.command(name='randnpc')
async def randnpc(ctx):
if len(npcs.keys()) == 0:
await ctx.send("There are no NPCs!")
return
npc = random.choice(list(npcs.keys()))
await ctx.send(embed=characters.print_character(npc))
@bot.command(name='recruit', aliases=["hire", "addparty"])
async def recruit(ctx, name):
npc = npcs[name]
npcs.pop(name)
party[name] = npc
shared_functions.backup_characters()
await ctx.send(name + " added to party!")
@bot.command(name='fire', aliases=['retire', 'kick', 'ditch'])
async def leave(ctx, name):
try:
npc = party[name]
except KeyError:
await ctx.send("No party member named " + name)
return
party.pop(name)
npcs[name] = npc
await ctx.send(name + " removed from party!")
shared_functions.backup_characters()
@bot.command(name='wipeparty')
async def wipe_party(ctx):
global party
party = {}
shared_functions.backup_party()
await ctx.send("Successfully killed entire party.")
@bot.command(name='retireparty', aliases=["giveup", "win"])
async def retire_party(ctx):
for name in list(party.keys()):
await leave(ctx, name)
await advance(ctx, 1)
await ctx.send("Entire party has been retired.")
@bot.command(name='inventorysize')
async def inventory_size(ctx, name, size):
character = shared_functions.find_character(name)
if not character:
await ctx.send("Character does not exist!")
return
try:
int(size)
except ValueError:
await ctx.send("That is not a number you moron!")
return
    length = len(character["Inventory"])
    if length > int(size):
        character["Inventory"] = character["Inventory"][0:int(size)]
    elif length < int(size):
        for i in range(length, int(size)):
            character["Inventory"].append("Empty slot")
else:
await ctx.send("Character already has inventory of size " + size + ".")
return
await ctx.send(embed=characters.print_character(name))
@bot.command(name='restart')
async def restart(ctx):
sys.exit()
@bot.command(name='go', aliases=['advance', 'nextworld'])
async def advance(ctx, reset=False):
# future support: track actual world map position, take a direction as argument
world["number"] += 1
world["modifier"] = 1 - int(((world["number"] + 1) / 2))
world["stat cap"] = world["number"] + 4
world["boss stat cap"] = world["number"] + 6
if reset:
world["number"] = 1
world["modifier"] = 1
world["stat cap"] = 5
world["boss stat cap"] = 7
shared_functions.backup_world(world)
await(ctx.send("World has been set to " + str(world["number"]) + " providing a boost of " + str(
world["modifier"]) + " to all rolls."))
@bot.command(name='randchar')
async def random_char(ctx, boss=False):
if boss:
stat_cap = world["boss stat cap"]
else:
stat_cap = world["stat cap"]
if world["number"] <= 0:
await ctx.send("Invalid world.")
return
global next_backstory
global next_short_name
global next_name
if next_backstory:
backstory = next_backstory
next_backstory = None
else:
backstory = random.choice(random_lists.Backstories)
if next_short_name:
first_name = next_short_name
next_short_name = None
else:
first_name = random.choice(names.Names)
while first_name in npcs.keys():
first_name = random.choice(names.Names)
if next_name:
full_name = next_name
next_name = None
else:
middle_name = None
if random.randint(1, 2) == 2:
middle_name = random.choice(names.Names)
last_name = random.choice(names.Names)
if middle_name:
full_name = first_name + " " + middle_name + " " + last_name
else:
full_name = first_name + " " + last_name
# cgnet/feature/statistics.py
# Authors: <NAME>, <NAME>
# Contributors: <NAME>
import copy
import numpy as np
import torch
import scipy.spatial
import warnings
from .geometry import Geometry
g = Geometry(method='numpy')
KBOLTZMANN = 1.38064852e-23
AVOGADRO = 6.022140857e23
JPERKCAL = 4184
class GeometryStatistics():
"""Calculation of statistics for geometric features; namely
distances, angles, and dihedral cosines and sines.
Parameters
----------
data : torch.Tensor or np.array
Coordinate data of dimension [n_frames, n_beads, n_dimensions]
custom_feature_tuples : list of tuples (default=[])
List of 2-, 3-, and 4-element tuples containing arbitrary distance,
angle, and dihedral features to be calculated.
backbone_inds : 'all', list or np.ndarray, or None (default=None)
Which bead indices correspond to consecutive beads along the backbone
get_all_distances : Boolean (default=False)
Whether to calculate all pairwise distances
get_backbone_angles : Boolean (default=False)
Whether to calculate angles among adjacent beads along the backbone
get_backbone_dihedrals : Boolean (default=False)
Whether to calculate dihedral cosines and sines among adjacent beads
along the backbone
temperature : float or None (default=300.0)
Temperature of system. Use None for dimensionless calculations.
    get_redundant_distance_mapping : Boolean (default=False)
        If True, creates a redundant_distance_mapping attribute
bond_pairs : list of tuples (default=[])
List of 2-element tuples containing bonded pairs
adjacent_backbone_bonds : Boolean, (default=True)
Whether adjacent beads along the backbone should be considered
as bonds
Attributes
----------
beta : float
1/(Boltzmann constant)/(temperature) if temperature is not None in
units of kcal per mole; otherwise 1.0
descriptions : dictionary
List of indices (value) for each feature type (key)
redundant_distance_mapping
Redundant square distance matrix
feature_tuples : list of tuples
List of tuples for non-redundant feature descriptions in order
Example
-------
    stats = GeometryStatistics(data, backbone_inds='all', get_all_distances=True)
    prior_stats_dict = stats.get_prior_statistics()
"""
def __init__(self, data, custom_feature_tuples=None, backbone_inds=None,
get_all_distances=False, get_backbone_angles=False,
get_backbone_dihedrals=False, temperature=300.0,
get_redundant_distance_mapping=False, bond_pairs=None,
adjacent_backbone_bonds=True):
if torch.is_tensor(data):
self.data = data.detach().numpy()
else:
self.data = data
if custom_feature_tuples is None:
self.custom_feature_tuples = []
else:
self.custom_feature_tuples = custom_feature_tuples
self.n_frames = self.data.shape[0]
self.n_beads = self.data.shape[1]
assert self.data.shape[2] == 3 # dimensions
self.temperature = temperature
if self.temperature is not None:
self.beta = JPERKCAL/KBOLTZMANN/AVOGADRO/self.temperature
else:
self.beta = 1.0
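        # e.g. at the default 300 K: beta = 4184 / (1.38064852e-23 *
        # 6.022140857e23 * 300) ~= 1.68 (kcal/mol)^-1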
if custom_feature_tuples is None:
if backbone_inds is None:
raise RuntimeError("You must specify either custom_feature_tuples ' \
'or backbone_inds='all'")
if type(backbone_inds) is str:
if backbone_inds == 'all':
if get_all_distances + get_backbone_angles + get_backbone_dihedrals == 0:
raise RuntimeError('Without custom feature tuples, you must specify ' \
'any of get_all_distances, get_backbone_angles, or ' \
'get_backbone_dihedrals.')
self._process_backbone(backbone_inds)
self._process_custom_feature_tuples()
if bond_pairs is None:
bond_pairs = []
if get_redundant_distance_mapping and not get_all_distances:
raise ValueError(
"Redundant distance mapping can only be returned "
"if get_all_distances is True."
)
self.get_redundant_distance_mapping = get_redundant_distance_mapping
if not get_all_distances:
if np.any([bond_ind not in self.custom_feature_tuples
for bond_ind in bond_pairs]):
raise ValueError(
"All bond_pairs must be also in custom_feature_tuples "
"if get_all_distances is False."
)
if np.any([len(bond_ind) != 2 for bond_ind in bond_pairs]):
raise RuntimeError(
"All bonds must be of length 2."
)
self._bond_pairs = bond_pairs
self.adjacent_backbone_bonds = adjacent_backbone_bonds
self.order = []
self.distances = []
self.angles = []
self.dihedral_cosines = []
self.dihedral_sines = []
self.descriptions = {
'Distances': [],
'Angles': [],
'Dihedral_cosines': [],
'Dihedral_sines': []
}
self._stats_dict = {}
# # # # # # #
# Distances #
# # # # # # #
if get_all_distances:
(self._pair_order,
self._adj_backbone_pairs) = g.get_distance_indices(self.n_beads,
self.backbone_inds,
self._backbone_map)
if len(self._custom_distance_pairs) > 0:
warnings.warn(
"All distances are already being calculated, so custom distances are meaningless."
)
self._custom_distance_pairs = []
self._distance_pairs = self._pair_order
if self.adjacent_backbone_bonds:
if np.any([bond_ind in self._adj_backbone_pairs
for bond_ind in self._bond_pairs]):
warnings.warn(
"Some bond indices were already on the backbone."
)
# We weed out already existing backbone pairs from the
# bond pairs provided by the user. We will append them
# to all of our bond pairs below.
self._bond_pairs = [bond_ind for bond_ind
in self._bond_pairs if bond_ind
not in self._adj_backbone_pairs]
# This attribute starts our list of "master" bond pairs
# when we've done some automatic calculations on the distances
# because we specified get_all_distances.
# Note also that we force the user to put backbone bonds in
# their custom_feature_tuples list if get_all_distances is
# False, which is why we're still inside the case where
# get_all_distances is True.
self.bond_pairs = copy.deepcopy(self._adj_backbone_pairs)
else:
self._distance_pairs = []
# If we haven't specified get_all_distances, our "master"
# bond list starts out empty
self.bond_pairs = []
self._distance_pairs.extend(self._custom_distance_pairs)
# Extend our master list of bond pairs by the user-defined, possibly
# filtered bond pairs
self.bond_pairs.extend(self._bond_pairs)
if len(self._distance_pairs) > 0:
self._get_distances()
# # # # # #
# Angles #
# # # # # #
if get_backbone_angles:
self._angle_trips = [(self.backbone_inds[i], self.backbone_inds[i+1],
self.backbone_inds[i+2])
for i in range(len(self.backbone_inds) - 2)]
if np.any([cust_angle in self._angle_trips
for cust_angle in self._custom_angle_trips]):
warnings.warn(
"Some custom angles were on the backbone and will not be re-calculated."
)
self._custom_angle_trips = [cust_angle for cust_angle
in self._custom_angle_trips
if cust_angle not in self._angle_trips]
else:
self._angle_trips = []
self._angle_trips.extend(self._custom_angle_trips)
if len(self._angle_trips) > 0:
self._get_angles()
# # # # # # #
# Dihedrals #
# # # # # # #
if get_backbone_dihedrals:
self._dihedral_quads = [(self.backbone_inds[i], self.backbone_inds[i+1],
self.backbone_inds[i+2], self.backbone_inds[i+3])
for i in range(len(self.backbone_inds) - 3)]
if np.any([cust_dih in self._dihedral_quads
for cust_dih in self._custom_dihedral_quads]):
warnings.warn(
"Some custom dihedrals were on the backbone and will not be re-calculated."
)
self._custom_dihedral_quads = [cust_dih for cust_dih
in self._custom_dihedral_quads
if cust_dih not in self._dihedral_quads]
else:
self._dihedral_quads = []
self._dihedral_quads.extend(self._custom_dihedral_quads)
if len(self._dihedral_quads) > 0:
self._get_dihedrals()
self.feature_tuples = []
self.master_description_tuples = []
self._master_stat_array = [[] for _ in range(3)]
for feature_type in self.order:
if feature_type not in ['Dihedral_cosines', 'Dihedral_sines']:
self.feature_tuples.extend(self.descriptions[feature_type])
self.master_description_tuples.extend(
self.descriptions[feature_type])
self._master_stat_array[0].extend(
self._stats_dict[feature_type]['mean'])
self._master_stat_array[1].extend(
self._stats_dict[feature_type]['std'])
self._master_stat_array[2].extend(
self._stats_dict[feature_type]['k'])
else:
self.master_description_tuples.extend(
[self._get_key(desc, feature_type)
for desc in self.descriptions[feature_type]])
self._master_stat_array[0].extend(
self._stats_dict[feature_type]['mean'])
self._master_stat_array[1].extend(
self._stats_dict[feature_type]['std'])
self._master_stat_array[2].extend(
self._stats_dict[feature_type]['k'])
if feature_type == 'Dihedral_cosines':
# because they have the same indices as dihedral sines,
# do only cosines
self.feature_tuples.extend(self.descriptions[feature_type])
self._master_stat_array = np.array(self._master_stat_array)
def _process_custom_feature_tuples(self):
"""Helper function to sort custom features into distances, angles,
and dihedrals.
"""
if len(self.custom_feature_tuples) > 0:
if (np.min([len(feat) for feat in self.custom_feature_tuples]) < 2 or
np.max([len(feat) for feat in self.custom_feature_tuples]) > 4):
raise ValueError(
"Custom features must be tuples of length 2, 3, or 4."
)
if np.max([np.max(bead)
for bead in self.custom_feature_tuples]) > self.n_beads - 1:
raise ValueError(
"Bead index in at least one feature is out of range."
)
_temp_dict = dict(
zip(self.custom_feature_tuples,
np.arange(len(self.custom_feature_tuples))))
if len(_temp_dict) < len(self.custom_feature_tuples):
self.custom_feature_tuples = list(_temp_dict.keys())
warnings.warn(
"Some custom feature tuples are repeated and have been removed."
)
self._custom_distance_pairs = [
feat for feat in self.custom_feature_tuples if len(feat) == 2]
self._custom_angle_trips = [
feat for feat in self.custom_feature_tuples if len(feat) == 3]
self._custom_dihedral_quads = [
feat for feat in self.custom_feature_tuples if len(feat) == 4]
else:
self._custom_distance_pairs = []
self._custom_angle_trips = []
self._custom_dihedral_quads = []
def _get_backbone_map(self):
"""Helper function that maps bead indices to indices along the backbone
only.
Returns
-------
backbone_map : dict
Dictionary with bead indices as keys and, as values, backbone
indices for beads along the backbone or np.nan otherwise.
"""
backbone_map = {mol_ind: bb_ind for bb_ind, mol_ind
in enumerate(self.backbone_inds)}
pad_map = {mol_ind: np.nan for mol_ind
in range(self.n_beads) if mol_ind not in self.backbone_inds}
return {**backbone_map, **pad_map}
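    # Example (hypothetical 4-bead molecule): backbone_inds = [0, 2, 3] gives
    # {0: 0, 2: 1, 3: 2, 1: nan}, i.e. off-backbone bead 1 is padded with
    # np.nan while backbone beads get consecutive backbone indices.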
def _process_backbone(self, backbone_inds):
"""Helper function to obtain attributes needed for backbone atoms.
"""
if type(backbone_inds) is str:
if backbone_inds == 'all':
self.backbone_inds = np.arange(self.n_beads)
self._backbone_map = {ind: ind for ind in range(self.n_beads)}
else:
raise RuntimeError(
"backbone_inds must be list or np.ndarray of indices, 'all', or None"
)
elif type(backbone_inds) in [list, np.ndarray]:
if len(np.unique(backbone_inds)) != len(backbone_inds):
raise ValueError(
'Backbone is not allowed to have repeat entries')
self.backbone_inds = np.array(backbone_inds)
if not np.all(np.sort(self.backbone_inds) == self.backbone_inds):
warnings.warn(
"Your backbone indices aren't sorted. Make sure your backbone indices are in consecutive order."
)
self._backbone_map = self._get_backbone_map()
elif backbone_inds is None:
if len(self.custom_feature_tuples) == 0:
raise RuntimeError(
"Must have either backbone or custom features. Did you forget "
"to specify backbone_inds='all'?")
self.backbone_inds = np.array([])
self._backbone_map = None
else:
raise RuntimeError(
"backbone_inds must be list or np.ndarray of indices, 'all', or None"
)
self.n_backbone_beads = len(self.backbone_inds)
def _get_distances(self):
"""Obtains all pairwise distances for the two-bead indices provided.
"""
self.distances = g.get_distances(
self._distance_pairs, self.data, norm=True)
self.descriptions['Distances'].extend(self._distance_pairs)
self._get_stats(self.distances, 'Distances')
self.order += ['Distances']
if self.get_redundant_distance_mapping:
self.redundant_distance_mapping | |
# alpha/analysis/aligner.py
array([[0, 2, 4, 6],
[2, 0, 2, 4],
[4, 2, 0, 2],
[6, 4, 2, 0],
[8, 6, 4, 2]])
>>> sa.getOpFromLocation(4, 3)
<ChangeOps.Insertion: 0>
>>> sa.getOpFromLocation(2, 2)
<ChangeOps.NoChange: 3>
>>> sa.getOpFromLocation(0, 2)
<ChangeOps.Deletion: 1>
>>> sa.distanceMatrix[0][0] = 1
>>> sa.distanceMatrix
array([[1, 2, 4, 6],
[2, 0, 2, 4],
[4, 2, 0, 2],
[6, 4, 2, 0],
[8, 6, 4, 2]])
>>> sa.getOpFromLocation(1, 1)
<ChangeOps.Substitution: 2>
>>> sa.getOpFromLocation(0, 0)
Traceback (most recent call last):
ValueError: No movement possible from the origin
'''
possibleMoves = self.getPossibleMovesFromLocation(i, j)
if possibleMoves[0] is None:
if possibleMoves[1] is None:
raise ValueError('No movement possible from the origin')
return ChangeOps.Deletion
elif possibleMoves[1] is None:
return ChangeOps.Insertion
currentCost = self.distanceMatrix[i][j]
minIndex, minNewCost = min(enumerate(possibleMoves), key=operator.itemgetter(1))
if currentCost == minNewCost:
return ChangeOps.NoChange
else:
return ChangeOps(minIndex)
def insertCost(self, tup):
'''
Cost of inserting an extra hashed item.
For now, it's just the size of the keys of the NoteHashWithReference
>>> target = stream.Stream()
>>> source = stream.Stream()
>>> note1 = note.Note('C4')
>>> note2 = note.Note('D4')
>>> note3 = note.Note('C4')
>>> note4 = note.Note('E4')
>>> target.append([note1, note2, note3, note4])
>>> source.append([note1, note2, note3])
This is a StreamAligner with default hasher settings
>>> sa0 = alpha.analysis.aligner.StreamAligner(target, source)
>>> sa0.align()
>>> tup0 = sa0.hashedTargetStream[0]
>>> sa0.insertCost(tup0)
2
This is a StreamAligner with a modified hasher that doesn't hash pitch at all
>>> sa1 = alpha.analysis.aligner.StreamAligner(target, source)
>>> sa1.hasher.hashPitch = False
>>> sa1.align()
>>> tup1 = sa1.hashedTargetStream[0]
>>> sa1.insertCost(tup1)
1
This is a StreamAligner with a modified hasher that hashes 3 additional properties
>>> sa2 = alpha.analysis.aligner.StreamAligner(target, source)
>>> sa2.hasher.hashOctave = True
>>> sa2.hasher.hashIntervalFromLastNote = True
>>> sa2.hasher.hashIsAccidental = True
>>> sa2.align()
>>> tup2 = sa2.hashedTargetStream[0]
>>> sa2.insertCost(tup2)
5
'''
keyDictSize = len(tup.hashItemsKeys)
return keyDictSize
def deleteCost(self, tup):
'''
Cost of deleting an extra hashed item.
For now, it's just the size of the keys of the NoteHashWithReference
>>> target = stream.Stream()
>>> source = stream.Stream()
>>> note1 = note.Note('C4')
>>> note2 = note.Note('D4')
>>> note3 = note.Note('C4')
>>> note4 = note.Note('E4')
>>> target.append([note1, note2, note3, note4])
>>> source.append([note1, note2, note3])
This is a StreamAligner with default hasher settings
>>> sa0 = alpha.analysis.aligner.StreamAligner(target, source)
>>> sa0.align()
>>> tup0 = sa0.hashedSourceStream[0]
>>> sa0.deleteCost(tup0)
2
This is a StreamAligner with a modified hasher that doesn't hash pitch at all
>>> sa1 = alpha.analysis.aligner.StreamAligner(target, source)
>>> sa1.hasher.hashPitch = False
>>> sa1.align()
>>> tup1 = sa1.hashedSourceStream[0]
>>> sa1.deleteCost(tup1)
1
This is a StreamAligner with a modified hasher that hashes 3 additional properties
>>> sa2 = alpha.analysis.aligner.StreamAligner(target, source)
>>> sa2.hasher.hashOctave = True
>>> sa2.hasher.hashIntervalFromLastNote = True
>>> sa2.hasher.hashIsAccidental = True
>>> sa2.align()
>>> tup2 = sa2.hashedSourceStream[0]
>>> sa2.deleteCost(tup2)
5
'''
keyDictSize = len(tup.hashItemsKeys)
return keyDictSize
def substitutionCost(self, targetTup, sourceTup):
'''
Finds the cost of substituting the targetTup with the sourceTup.
For now it's just an interpolation of how many things they have in common
Example: equality testing, both streams made from same note
targetA will not have the same reference as sourceA
but their hashes will be equal, which makes for their hashed objects to be
able to be equal.
>>> note1 = note.Note('C4')
>>> targetA = stream.Stream()
>>> sourceA = stream.Stream()
>>> targetA.append(note1)
>>> sourceA.append(note1)
>>> targetA == sourceA
False
>>> saA = alpha.analysis.aligner.StreamAligner(targetA, sourceA)
>>> saA.align()
>>> hashedItem1A = saA.hashedTargetStream[0]
>>> hashedItem2A = saA.hashedSourceStream[0]
>>> print(hashedItem1A)
NoteHashWithReference(Pitch=60, Duration=1.0)
>>> print(hashedItem2A)
NoteHashWithReference(Pitch=60, Duration=1.0)
>>> saA.tupleEqualityWithoutReference(hashedItem1A, hashedItem2A)
True
>>> saA.substitutionCost(hashedItem1A, hashedItem2A)
0
>>> note2 = note.Note('D4')
>>> targetB = stream.Stream()
>>> sourceB = stream.Stream()
>>> targetB.append(note1)
>>> sourceB.append(note2)
>>> saB = alpha.analysis.aligner.StreamAligner(targetB, sourceB)
>>> saB.align()
>>> hashedItem1B = saB.hashedTargetStream[0]
>>> hashedItem2B = saB.hashedSourceStream[0]
hashed items only differ in 1 spot
>>> print(hashedItem1B)
NoteHashWithReference(Pitch=60, Duration=1.0)
>>> print(hashedItem2B)
NoteHashWithReference(Pitch=62, Duration=1.0)
>>> saB.substitutionCost(hashedItem1B, hashedItem2B)
1
>>> note3 = note.Note('E4')
>>> note4 = note.Note('E#4')
>>> note4.duration = duration.Duration('half')
>>> targetC = stream.Stream()
>>> sourceC = stream.Stream()
>>> targetC.append(note3)
>>> sourceC.append(note4)
>>> saC = alpha.analysis.aligner.StreamAligner(targetC, sourceC)
>>> saC.align()
>>> hashedItem1C = saC.hashedTargetStream[0]
>>> hashedItem2C = saC.hashedSourceStream[0]
hashed items should differ in 2 spots
>>> print(hashedItem1C)
NoteHashWithReference(Pitch=64, Duration=1.0)
>>> print(hashedItem2C)
NoteHashWithReference(Pitch=65, Duration=2.0)
>>> saC.substitutionCost(hashedItem1C, hashedItem2C)
2
'''
if self.tupleEqualityWithoutReference(targetTup, sourceTup):
return 0
totalPossibleDifferences = len(targetTup.hashItemsKeys)
numSimilaritiesInTuple = self.calculateNumSimilarities(targetTup, sourceTup)
totalPossibleDifferences -= numSimilaritiesInTuple
return totalPossibleDifferences
def calculateNumSimilarities(self, targetTup, sourceTup):
'''
Returns the number of attributes that two tuples have that are the same
>>> target = stream.Stream()
>>> source = stream.Stream()
>>> note1 = note.Note('D1')
>>> target.append([note1])
>>> source.append([note1])
>>> sa = alpha.analysis.aligner.StreamAligner(target, source)
>>> from collections import namedtuple
>>> NoteHash = namedtuple('NoteHash', ['Pitch', 'Duration'])
>>> nh1 = NoteHash(60, 4)
>>> nhwr1 = alpha.analysis.hasher.NoteHashWithReference(nh1)
>>> nhwr1.reference = note.Note('C4')
>>> nhwr1
NoteHashWithReference(Pitch=60, Duration=4)
>>> nh2 = NoteHash(60, 4)
>>> nhwr2 = alpha.analysis.hasher.NoteHashWithReference(nh2)
>>> nhwr2.reference = note.Note('C4')
>>> nhwr2
NoteHashWithReference(Pitch=60, Duration=4)
>>> sa.calculateNumSimilarities(nhwr1, nhwr2)
2
>>> nh3 = NoteHash(61, 4)
>>> nhwr3 = alpha.analysis.hasher.NoteHashWithReference(nh3)
>>> nhwr3.reference = note.Note('C#4')
>>> nhwr3
NoteHashWithReference(Pitch=61, Duration=4)
>>> sa.calculateNumSimilarities(nhwr1, nhwr3)
1
>>> nh4 = NoteHash(59, 1)
>>> nhwr4 = alpha.analysis.hasher.NoteHashWithReference(nh4)
>>> nhwr4.reference = note.Note('B3')
>>> nhwr4
NoteHashWithReference(Pitch=59, Duration=1)
>>> sa.calculateNumSimilarities(nhwr2, nhwr4)
0
'''
count = 0
for val in targetTup.hashItemsKeys:
if getattr(targetTup, val) == getattr(sourceTup, val):
count += 1
return count
def tupleEqualityWithoutReference(self, tup1, tup2):
'''
Returns whether two hashed items have the same attributes,
even though their references are different?
>>> target = stream.Stream()
>>> source = stream.Stream()
>>> note1 = note.Note('D1')
>>> target.append([note1])
>>> source.append([note1])
>>> sa = alpha.analysis.aligner.StreamAligner(target, source)
>>> from collections import namedtuple
>>> NoteHash = namedtuple('NoteHash', ['Pitch', 'Duration'])
>>> nh1 = NoteHash(60, 4)
>>> nhwr1 = alpha.analysis.hasher.NoteHashWithReference(nh1)
>>> nhwr1.reference = note.Note('C4')
>>> nhwr1
NoteHashWithReference(Pitch=60, Duration=4)
>>> nh2 = NoteHash(60, 4)
>>> nhwr2 = alpha.analysis.hasher.NoteHashWithReference(nh2)
>>> nhwr2.reference = note.Note('B#3')
>>> nhwr2
NoteHashWithReference(Pitch=60, Duration=4)
>>> sa.tupleEqualityWithoutReference(nhwr1, nhwr2)
True
        This is a very different hash
>>> nh3 = NoteHash(61, 4)
>>> nhwr3 = alpha.analysis.hasher.NoteHashWithReference(nh3)
>>> nhwr3.reference = note.Note('C#4')
>>> nhwr3
NoteHashWithReference(Pitch=61, Duration=4)
>>> sa.tupleEqualityWithoutReference(nhwr1, nhwr3)
False
'''
for val in tup1.hashItemsKeys:
if getattr(tup1, val) != getattr(tup2, val):
return False
return True
def calculateChangesList(self):
'''
Traverses through self.distanceMatrix from bottom right corner to top left looking at
bestOp at every move to determine which change was most likely at any point. Compiles
the list of changes in self.changes. Also calculates some metrics like self.similarityScore
and self.changesCount.
>>> note1 = note.Note('C#4')
>>> note2 = note.Note('C4')
test 1: one insertion, one no change. Target stream has one more note than
source stream, so source stream needs an insertion to match target stream.
should be 0.5 similarity between the two
>>> targetA = stream.Stream()
>>> sourceA = stream.Stream()
>>> targetA.append([note1, note2])
>>> sourceA.append(note1)
>>> saA = alpha.analysis.aligner.StreamAligner(targetA, sourceA)
>>> saA.makeHashedStreams()
>>> saA.setupDistanceMatrix()
>>> saA.populateDistanceMatrix()
>>> saA.calculateChangesList()
>>> saA.changesCount[alpha.analysis.aligner.ChangeOps.Insertion]
1
>>> saA.changesCount[alpha.analysis.aligner.ChangeOps.NoChange]
1
>>> saA.similarityScore
0.5
test 2: one deletion, one no change. Target stream has one fewer note than
source stream, so source stream needs a deletion to match target stream.
should be 0.5 similarity between the two
>>> targetB = stream.Stream()
>>> sourceB = stream.Stream()
>>> targetB.append(note1)
>>> sourceB.append([note1, note2])
>>> saB = alpha.analysis.aligner.StreamAligner(targetB, sourceB)
>>> saB.makeHashedStreams()
>>> saB.setupDistanceMatrix()
>>> saB.populateDistanceMatrix()
>>> saB.calculateChangesList()
>>> saB.changesCount[alpha.analysis.aligner.ChangeOps.Deletion]
1
>>> saB.changesCount[alpha.analysis.aligner.ChangeOps.NoChange]
1
>>> saB.similarityScore
0.5
test 3: no changes
>>> targetC = stream.Stream()
>>> sourceC = stream.Stream()
>>> targetC.append([note1, note2])
>>> sourceC.append([note1, note2])
>>> saC = alpha.analysis.aligner.StreamAligner(targetC, sourceC)
>>> saC.makeHashedStreams()
>>> saC.setupDistanceMatrix()
>>> saC.populateDistanceMatrix()
>>> saC.calculateChangesList()
>>> saC.changesCount[alpha.analysis.aligner.ChangeOps.NoChange]
2
>>> saC.similarityScore
1.0
test 4: 1 no change, 1 substitution
>>> targetD = stream.Stream()
>>> sourceD = stream.Stream()
>>> note3 = note.Note('C4')
>>> note3.quarterLength = 2 # same pitch and offset as note2
>>> targetD.append([note1, note2])
>>> sourceD.append([note1, note3])
>>> saD = alpha.analysis.aligner.StreamAligner(targetD, sourceD)
>>> saD.makeHashedStreams()
>>> saD.setupDistanceMatrix()
>>> saD.populateDistanceMatrix()
>>> saD.calculateChangesList()
>>> saD.changesCount[alpha.analysis.aligner.ChangeOps.Substitution]
1
>>> saD.changesCount[alpha.analysis.aligner.ChangeOps.NoChange]
1
>>> saD.similarityScore
0.5
'''
i = self.n
j = self.m
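        # Start at the bottom-right corner of the distance matrix (row n,
        # column m) and walk back toward (0, 0), recording the best operation
        # at each step.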
while i != 0 or j != 0:
# check if possible moves are indexable
bestOp = self.getOpFromLocation(i, j)
targetStreamReference = self.hashedTargetStream[i - 1].reference
sourceStreamReference = self.hashedSourceStream[j - 1].reference
            opTuple =
# toboggan/flow.py
#
# This file is part of Toboggan, https://github.com/TheoryInPractice/Toboggan/,
# and is Copyright (C) North Carolina State University, 2017. It is licensed
# under the three-clause BSD license; see LICENSE.
#
# python libs
import math
from collections import defaultdict
import itertools
import numpy as np
from scipy.optimize import linprog
import copy
# local imports
from toboggan.graphs import convert_to_top_sorting, compute_cuts,\
compute_edge_cuts
from toboggan.partition import algorithm_u
class Instance:
"""
Information about an input instance to flow decomposition.
Maintains a topological ordering of the graph, the flow, and bounds on the
feasible path weights.
"""
def __init__(self, graph, k=None):
"""Create an instance from a graph and guess for the solution size."""
# information about the graph and its ordering
self.graph = graph
self.ordering = convert_to_top_sorting(graph)
self.cuts = compute_cuts(self.graph, self.ordering)
self.edge_cuts = compute_edge_cuts(self.graph, self.ordering)
self.flow = sum(w for _, w in self.graph.neighborhood(
self.graph.source()))
# get a lower bound on the number of paths needed
# We pass the input k to this function so it can inform the user if
# the input k can be easily identified as too small.
self.k = self._optimal_size_lower_bound(k)
# our initial guesses for weights will come from the flow values
self.weights = sorted(set([w for _, _, w in self.graph.edges()]))
# compute bounds on the largest weight
self.max_weight_bounds = self._compute_max_weight_bounds()
# compute bounds on the individual weights
self.weight_bounds = self._compute_weight_bounds()
def info(self):
"""A string representation of this object."""
print("n = {}, m = {}, k = {}.".format(len(self.graph),
self.graph.num_edges(), self.k))
print("Weights:", self.weights)
print("Max-weight bounds:", self.max_weight_bounds)
print("Weight bounds:", list(map(tuple, self.weight_bounds)))
print("")
print("Cut-representation:")
print(self.cuts)
def _compute_max_weight_bounds(self):
# Get lower bound for highest weight
min_max_weight = 1
# This checks each topological cut
for topol_cut in self.edge_cuts:
cut_size = len(topol_cut)
for _, w in topol_cut:
# use pigeonhole principle to lowerbound max weight
min_max_weight = max(min_max_weight, w // (self.k-cut_size+1))
# Compute heaviest possible path in graph
# by iterating over each node's out-neighborhood
maxpath = {v: 0 for v in self.graph}
maxpath[self.graph.source()] = self.flow
for v in self.ordering:
out = self.graph.neighborhood(v)
for u, w in out:
maxpath[u] = max(maxpath[u], min(w, maxpath[v]))
return (min_max_weight, maxpath[self.graph.sink()])
def _compute_weight_bounds(self):
supseq = []
summed = 0
# supseq is a list of "super-increasing" values taken from edge weights
# starting from smallest weight in the graph. These values are upper
# bounds on the different path weights.
for w in self.weights:
if w > self.max_weight_bounds[1]:
break
if w > summed:
supseq.append(w)
summed += w
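        # Illustrative example (not from the original source): with distinct
        # edge weights [1, 2, 3, 10] and a large max-weight bound, the loop
        # keeps 1 (1 > 0), keeps 2 (2 > 1), skips 3 (3 <= 1 + 2), and keeps
        # 10 (10 > 3), so supseq == [1, 2, 10].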
# pad the rest of supseq with the max_weight_bound
while len(supseq) < self.k: # Sentinel elements
supseq.append(self.max_weight_bounds[1])
bounds = [(1, w) for w in supseq[:self.k]]
bounds[-1] = self.max_weight_bounds
# Next, compute lowerbounds for the path weights.
uppersum = [u for _, u in bounds]
for i in reversed(range(self.k-1)):
uppersum[i] += uppersum[i+1]
# Refine lower bounds by using upper bounds:
# the weight of path i must be at least F_i / i
# where F_i is an upper bound on how much flow all paths > i
# take up.
for i in range(1, self.k-1):
lower = max(bounds[i][0], (self.flow-uppersum[i+1]) // (i+1))
bounds[i] = (lower, bounds[i][1])
return np.array(bounds)
def _compute_multiset_bound(self, list1, list2):
"""
        Treat the two lists as multisets and return a lower bound on the
        solution size implied by this pair of cut-sets: weights common to
        both lists count once, and the remaining distinct weights contribute
        min(size1, size2) + ceil(max(size1, size2) / 3).
        Note: input lists should contain int or float type.
"""
# convert to dicts with contents as keys, multiplicities as vals
size1 = len(list1)
size2 = len(list2)
dict1 = defaultdict(int)
for item in list1:
dict1[item] += 1
dict2 = defaultdict(int)
for item in list2:
dict2[item] += 1
num_repeated = 0
for key, val in dict1.items():
num_repeated += min(val, dict2[key])
size1 -= num_repeated
size2 -= num_repeated
return num_repeated + math.ceil(max(size1, size2)/3) + \
min(size1, size2)
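    # Worked example for _compute_multiset_bound (illustrative, not part of
    # the original module): list1 = [2, 3, 5], list2 = [2, 7]. The weight 2
    # is shared (num_repeated = 1), leaving sizes 2 and 1, so the returned
    # bound is 1 + ceil(2/3) + 1 = 3.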
def _optimal_size_lower_bound(self, k):
"""
Get a lower bound on the optimal solution size.
We look over all s-t edge cuts consistent with the topological ordering
and pick the largest. Then we look over all pairs of cut-sets that are
large enough to further improve this lower-bound and check whether the
number of distinct edge-weights requires a larger lower-bound than
merely the largest cut-set size.
"""
edge_cut_sizes = [len(C) for C in self.edge_cuts]
max_edge_cut = max(edge_cut_sizes)
lower_bound = max_edge_cut
self.max_edge_cut_size = max_edge_cut
# Now check all pairs of cutsets "large enough" for better bound
sorted_cut_sizes = sorted([(cut_size, which_cut) for which_cut,
cut_size in enumerate(edge_cut_sizes)],
reverse=True)
cutsets_of_best_bound = []
# Starting with largest, iterate over cutsets
for idx1 in range(len(sorted_cut_sizes)):
current_size1, which_cut1 = sorted_cut_sizes[idx1]
# once one set is too small, all following will be, so break out
if math.ceil(current_size1/3) + current_size1 <= lower_bound:
break
for idx2 in range(idx1+1, len(sorted_cut_sizes)):
current_size2, which_cut2 = sorted_cut_sizes[idx2]
# if cutsize2 too small, the rest will be: break inner for loop
temp_bound = min(current_size1, current_size2) + math.ceil(
max(current_size1, current_size2)/3)
if temp_bound <= lower_bound:
break
# Now compute actual bound for this pair of cutsets;
# Get weights for each cutset as a multiset,
# compute size of (larger) difference
weights1 = set([w for _, w in self.edge_cuts[which_cut1]])
weights2 = set([w for _, w in self.edge_cuts[which_cut2]])
bound = self._compute_multiset_bound(weights1, weights2)
# Check if we need to update bound
if bound > lower_bound:
lower_bound = bound
cutsets_of_best_bound = [which_cut1, which_cut2]
if len(cutsets_of_best_bound) > 0:
which_cut1, which_cut2 = cutsets_of_best_bound
# let the user know their guess was bad if it was
self.best_cut_lower_bound = lower_bound
print("# Preprocessing")
print("#\tGraph has an edge cut of size {}.\n"
"#\tInvestigating cutsets yields bound {}.\n"
"#\tUser supplied k value of {}.\n"
"#\tContinuing using k = {}"
"".format(max_edge_cut, lower_bound, k, lower_bound))
if k is not None and lower_bound > k:
return lower_bound
elif k is None:
return lower_bound
else:
return k
def try_larger_k(self):
"""
Increase the value of k by 1.
We need to do this in a method in order to update internal data
structures about the weights.
"""
self.k = self.k + 1
# compute bounds on the largest weight
self.max_weight_bounds = self._compute_max_weight_bounds()
# compute bounds on the individual weights
self.weight_bounds = self._compute_weight_bounds()
def has_bad_bounds(self):
"""Check whether weight bounds disallow all solutions."""
# upper weight bounds miss each other
if self.max_weight_bounds[0] > self.max_weight_bounds[1]:
return True
# lower and upper bounds of each weight position miss each other
for lower, upper in self.weight_bounds:
if lower > upper:
return True
# otherwise all good
return False
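# Illustrative usage sketch (not part of the original module; assumes `graph`
# is a toboggan.graphs graph with a source, a sink, and flow-weighted edges):
#
#     instance = Instance(graph, k=4)
#     instance.info()
#     while instance.has_bad_bounds():
#         instance.try_larger_k()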
class Constr:
"""
Class representing linear constraints imposed on path
weights as collected by the DP routine.
"""
eps = np.finfo(float).eps
ORDER_MATRIX = {} # Pre-computed matrices with ones on the diagonal and
# -1 on the upper off-diagonal.
ZERO_VECS = {} # Pre-computed zero vectors
INFEASIBLE = 0
REDUNDANT = 1
VALID = 2
SOLVED = 3
POW2 = None
def __init__(self, instance=None, constraint=None):
if constraint is not None:
self.instance = constraint.instance
self.known_values = copy.copy(constraint.known_values)
self.hashvalue = copy.copy(constraint.hashvalue)
self.rank = copy.copy(constraint.rank)
self.utri = constraint.utri.copy()
self.pivot_lookup = copy.copy(constraint.pivot_lookup)
else:
self.instance = instance
self.known_values = [None] * self.instance.k
# Make sure the necessary constants exist
if self.instance.k not in Constr.ORDER_MATRIX:
t = self.instance.k
Constr.ORDER_MATRIX[t] = np.eye(t-1, t, dtype=int) - \
np.eye(t-1, t, k=1, dtype=int)
Constr.ZERO_VECS[t-1] = np.zeros(t-1, dtype=int)
Constr.POW2 = 2**np.arange(64, dtype=np.uint64)
row = np.array([1] * self.instance.k + [self.instance.flow])
# In our application instance.k and instance.flow should always be
# the same, but we want to keep things clean.
self.hashvalue = hash(row.data.tobytes()) ^\
hash(self.instance.k) ^\
hash(self.instance.flow)
self.rank = 1
self.utri = np.zeros((self.instance.k, self.instance.k+1))
self.utri[0] = row
# pivot_lookup[j] gives the row_index of pivot in column j
# This is to avoid having to permute utri to be in RREF
self.pivot_lookup = [-1 for j in range(len(row))]
self.pivot_lookup[0] = 0
def __repr__(self):
return str(self.utri)
def _copy_with_new_row(self, row, reduced_row, pivot_idx):
res = Constr(constraint=self)
# update hashvalue by new row
res.hashvalue ^= hash(row.data.tobytes())
res.rank = self.rank + 1
# Ensure res.utri (with row added) is in RREF form.
# Make sure pivot is a 1
pivot_value = reduced_row[pivot_idx]
if pivot_value != 1:
reduced_row = reduced_row/pivot_value
# use new pivot to eliminate in other rows
for idx in range(self.rank):
val = res.utri[idx, pivot_idx]
if val != 0:
                res.utri[idx, :] = res.utri[idx, :]
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""
Executes state tests on multiple clients, checking for EVM trace equivalence
"""
import json, sys, re, os, subprocess, io, itertools, traceback, time, collections
from contextlib import redirect_stderr, redirect_stdout
import ethereum.transactions as transactions
from ethereum.utils import decode_hex, parse_int_or_hex, sha3, to_string, \
remove_0x_head, encode_hex, big_endian_to_int
from evmlab import genesis as gen
from evmlab import vm as VMUtils
from evmlab import opcodes
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
cfg ={}
local_cfg = {}
def parse_config():
"""Parses 'statetests.ini'-file, which
may contain user-specific configuration
"""
import configparser, getpass
config = configparser.ConfigParser()
config.read('statetests.ini')
uname = getpass.getuser()
if uname not in config.sections():
uname = "DEFAULT"
cfg['RANDOM_TESTS'] = config[uname]['random_tests']
cfg['DO_CLIENTS'] = config[uname]['clients'].split(",")
cfg['FORK_CONFIG'] = config[uname]['fork_config']
cfg['TESTS_PATH'] = config[uname]['tests_path']
global local_cfg
local_cfg = collections.defaultdict(lambda: None, config[uname])
print(local_cfg["geth.binary"])
print(local_cfg["test"])
    # Make it possible to run in parallel sessions
cfg['PRESTATE_TMP_FILE'] ="%s-%d" % (config[uname]['prestate_tmp_file'] , os.getpid())
cfg['SINGLE_TEST_TMP_FILE'] ="%s-%d" % (config[uname]['single_test_tmp_file'], os.getpid())
cfg['LOGS_PATH'] = config[uname]['logs_path']
logger.info("Config")
logger.info("\tActive clients:")
for c in cfg['DO_CLIENTS']:
logger.info("\t* {} : {} docker:{}".format(c, getBaseCmd(c)[0],getBaseCmd(c)[1]) )
logger.info("\tTest generator:")
logger.info("\t* {} : {} docker:{}".format('testeth', getBaseCmd('testeth')[0],getBaseCmd('testeth')[1]) )
logger.info("\tFork config: %s", cfg['FORK_CONFIG'])
logger.info("\tPrestate tempfile: %s", cfg['PRESTATE_TMP_FILE'])
logger.info("\tSingle test tempfile: %s",cfg['SINGLE_TEST_TMP_FILE'])
logger.info("\tLog path: %s", cfg['LOGS_PATH'])
def getBaseCmd(bin_or_docker):
""" Gets the configured 'base_command' for an image or binary.
    Returns the path or image name and a boolean indicating whether it is a
    docker image (needed to know if any mounts are required), as a
    tuple: (name, isDocker)
"""
binary = local_cfg["{}.binary".format(bin_or_docker) ]
if binary:
return (binary, False)
image = local_cfg["{}.docker_name".format(bin_or_docker)]
if image:
return (image, True)
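# For example (with a hypothetical config): if 'geth.binary = evm' is set,
# getBaseCmd('geth') returns ('evm', False); if only 'cpp.docker_name' is set,
# getBaseCmd('cpp') returns the docker image name and True.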
parse_config()
# used to check for unknown opcode names in traces
OPCODES = {}
op_keys = opcodes.opcodes.keys()
for op_key in op_keys:
if op_key in opcodes.opcodesMetropolis and cfg['FORK_CONFIG'] != 'Byzantium':
continue
name = opcodes.opcodes[op_key][0]
# allow opcode lookups by either name or number assignment
OPCODES[name] = op_key
OPCODES[op_key] = name
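# For example, assuming the standard EVM opcode table in evmlab.opcodes,
# OPCODES['ADD'] == 0x01 and OPCODES[0x01] == 'ADD'.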
def iterate_tests(path = '/GeneralStateTests/', ignore = []):
logging.info (cfg['TESTS_PATH'] + path)
for subdir, dirs, files in sorted(os.walk(cfg['TESTS_PATH'] + path)):
for f in files:
if f.endswith('json'):
for ignore_name in ignore:
if f.find(ignore_name) != -1:
continue
yield os.path.join(subdir, f)
def convertGeneralTest(test_file, fork_name):
# same default as evmlab/genesis.py
metroBlock = 2000
if fork_name == 'Byzantium':
metroBlock = 0
with open(test_file) as json_data:
general_test = json.load(json_data)
for test_name in general_test:
# should only be one test_name per file
prestate = {
'env' : general_test[test_name]['env'],
'pre' : general_test[test_name]['pre'],
'config' : { # for pyeth run_statetest.py
'metropolisBlock' : 2000, # same default as evmlab/genesis.py
'eip158Block' : 2000,
'eip150Block' : 2000,
'eip155Block' : 2000,
'homesteadBlock' : 2000,
}
}
if cfg['FORK_CONFIG'] == 'Byzantium':
prestate['config'] = {
'metropolisBlock' : 0,
'eip158Block' : 0,
'eip150Block' : 0,
'eip155Block' : 0,
'homesteadBlock' : 0,
}
if cfg['FORK_CONFIG'] == 'Homestead':
prestate['config']['homesteadBlock'] = 0
#print("prestate:", prestate)
general_tx = general_test[test_name]['transaction']
transactions = []
for test_i in general_test[test_name]['post'][fork_name]:
test_tx = general_tx.copy()
d_i = test_i['indexes']['data']
g_i = test_i['indexes']['gas']
v_i = test_i['indexes']['value']
test_tx['data'] = general_tx['data'][d_i]
test_tx['gasLimit'] = general_tx['gasLimit'][g_i]
test_tx['value'] = general_tx['value'][v_i]
test_dgv = (d_i, g_i, v_i)
transactions.append((test_tx, test_dgv))
return prestate, transactions
def selectSingleFromGeneral(single_i, general_testfile, fork_name):
# a fork/network in a general state test has an array of test cases
# each element of the array specifies (d,g,v) indexes in the transaction
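    # Illustrative shape of the 'post' section being indexed here (values are
    # made up; real tests follow the ethereum/tests GeneralStateTests format):
    #
    #   "post": {
    #       "Byzantium": [
    #           {"indexes": {"data": 0, "gas": 1, "value": 0},
    #            "hash": "0x...", "logs": "0x..."}
    #       ]
    #   }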
with open(general_testfile) as json_data:
general_test = json.load(json_data)
#logger.info("general_test: %s", general_test)
for test_name in general_test:
# should only be one test_name per file
single_test = general_test
single_tx = single_test[test_name]['transaction']
general_tx = single_test[test_name]['transaction']
selected_case = general_test[test_name]['post'][fork_name][single_i]
single_tx['data'] = [ general_tx['data'][selected_case['indexes']['data']] ]
single_tx['gasLimit'] = [ general_tx['gasLimit'][selected_case['indexes']['gas']] ]
single_tx['value'] = [ general_tx['value'][selected_case['indexes']['value']] ]
selected_case['indexes']['data'] = 0
selected_case['indexes']['gas'] = 0
selected_case['indexes']['value'] = 0
single_test[test_name]['post'] = {}
single_test[test_name]['post'][fork_name] = []
single_test[test_name]['post'][fork_name].append(selected_case)
return single_test
def getIntrinsicGas(test_tx):
tx = transactions.Transaction(
nonce=parse_int_or_hex(test_tx['nonce'] or b"0"),
gasprice=parse_int_or_hex(test_tx['gasPrice'] or b"0"),
startgas=parse_int_or_hex(test_tx['gasLimit'] or b"0"),
to=decode_hex(remove_0x_head(test_tx['to'])),
value=parse_int_or_hex(test_tx['value'] or b"0"),
data=decode_hex(remove_0x_head(test_tx['data'])))
return tx.intrinsic_gas_used
def getTxSender(test_tx):
tx = transactions.Transaction(
nonce=parse_int_or_hex(test_tx['nonce'] or b"0"),
gasprice=parse_int_or_hex(test_tx['gasPrice'] or b"0"),
startgas=parse_int_or_hex(test_tx['gasLimit'] or b"0"),
to=decode_hex(remove_0x_head(test_tx['to'])),
value=parse_int_or_hex(test_tx['value'] or b"0"),
data=decode_hex(remove_0x_head(test_tx['data'])))
if 'secretKey' in test_tx:
tx.sign(decode_hex(remove_0x_head(test_tx['secretKey'])))
return encode_hex(tx.sender)
def canon(str):
if str in [None, "0x", ""]:
return ""
if str[:2] == "0x":
return str
return "0x" + str
def toText(op):
return VMUtils.toText(op)
def dumpJson(obj, dir = None, prefix = None):
import tempfile
fd, temp_path = tempfile.mkstemp(prefix = 'randomtest_', suffix=".json", dir = dir)
with open(temp_path, 'w') as f :
json.dump(obj,f)
logger.info("Saved file to %s" % temp_path)
os.close(fd)
return temp_path
def createRandomStateTest():
(name, isDocker) = getBaseCmd("testeth")
if isDocker:
cmd = ['docker', "run", "--rm",name]
else:
cmd = [name]
cmd.extend(["-t","GeneralStateTests","--","--createRandomTest"])
outp = "".join(VMUtils.finishProc(VMUtils.startProc(cmd)))
#Validate that it's json
try:
test = json.loads(outp)
test['randomStatetest']['_info'] = {'sourceHash': "0000000000000000000000000000000000000000000000000000000000001337", "comment":"x"}
return test
except:
print("Exception generating test")
print('-'*60)
traceback.print_exc(file=sys.stdout)
print('-'*60)
return None
def generateTests():
import getpass, time
uname = getpass.getuser()
host_id = "%s-%s-%d" % (uname, time.strftime("%a_%H_%M_%S"), os.getpid())
here = os.path.dirname(os.path.realpath(__file__))
cfg['TESTS_PATH'] = "%s/generatedTests/" % here
# cpp needs the tests to be placed according to certain rules...
testfile_dir = "%s/generatedTests/GeneralStateTests/stRandom" % here
filler_dir = "%s/generatedTests/src/GeneralStateTestsFiller/stRandom" % here
os.makedirs( testfile_dir , exist_ok = True)
os.makedirs( filler_dir, exist_ok = True)
import pathlib
counter = 0
while True:
test_json = createRandomStateTest()
if test_json == None:
time.sleep(2)
continue
identifier = "%s-%d" %(host_id, counter)
test_fullpath = "%s/randomStatetest%s.json" % (testfile_dir, identifier)
filler_fullpath = "%s/randomStatetest%sFiller.json" % (filler_dir, identifier)
test_json['randomStatetest%s' % identifier] =test_json.pop('randomStatetest', None)
with open(test_fullpath, "w+") as f:
json.dump(test_json, f)
pathlib.Path(filler_fullpath).touch()
yield test_fullpath
counter = counter +1
def startJs(single_test_tmp_file):
logger.info("running state test in EthereumJS.")
testfile_path = os.path.abspath(single_test_tmp_file)
mount_testfile = testfile_path+":/ethereum/"+single_test_tmp_file
js_docker_cmd = ["docker", "run", "--rm", "-t", "-v", mount_testfile, getBaseCmd('js')[0], "-s", "--customStateTest", single_test_tmp_file]
js_docker_cmd.extend(['--jsontrace'])
js_docker_cmd.extend(['--fork', cfg['FORK_CONFIG']])
logger.info("js_cmd: %s " % " ".join(js_docker_cmd))
return {'proc':VMUtils.startProc(js_docker_cmd), 'cmd': " ".join(js_docker_cmd), 'output' : 'stdout'}
def startParity(test_file):
testfile_path = os.path.abspath(test_file)
mount_testfile = testfile_path + ":" + "/mounted_testfile"
(name, isDocker) = getBaseCmd("parity")
if isDocker:
cmd = ["docker", "run", "--rm", "-t", "-v", mount_testfile, name, "state-test", "/mounted_testfile", "--json"]
else:
cmd = [name,"state-test", testfile_path, "--json"]
return {'proc':VMUtils.startProc(cmd ), 'cmd': " ".join(cmd), 'output' : 'stdout'}
def startCpp(test_subfolder, test_name, test_dgv):
[d,g,v] = test_dgv
(name, isDocker) = getBaseCmd("cpp")
if isDocker:
cpp_mount_tests = cfg['TESTS_PATH'] + ":" + "/mounted_tests"
cmd = ["docker", "run", "--rm", "-t", "-v", cpp_mount_tests, name
,'-t',"GeneralStateTests/%s" % test_subfolder
,'--'
,'--singletest', test_name
,'--jsontrace',"'{ \"disableStorage\":true, \"disableMemory\":true }'"
,'--singlenet',cfg['FORK_CONFIG']
,'-d',str(d),'-g',str(g), '-v', str(v)
,'--testpath', '"/mounted_tests"']
else:
cmd = [name
,'-t',"GeneralStateTests/%s" % test_subfolder
,'--'
,'--singletest', test_name
,'--jsontrace',"'{ \"disableStorage\":true, \"disableMemory\":true }'"
,'--singlenet',cfg['FORK_CONFIG']
,'-d',str(d),'-g',str(g), '-v', str(v)
,'--testpath', cfg['TESTS_PATH']]
if cfg['FORK_CONFIG'] == 'Homestead' or cfg['FORK_CONFIG'] == 'Frontier':
cmd.extend(['--all']) # cpp requires this for some reason
return {'proc':VMUtils.startProc(cmd ), 'cmd': " ".join(cmd), 'output' : 'stdout'}
def startGeth(test_file):
testfile_path = os.path.abspath(test_file)
mount_testfile = testfile_path + ":" + "/mounted_testfile"
(name, isDocker) = getBaseCmd("geth")
if isDocker:
cmd = ["docker", "run", "--rm", "-t", "-v", mount_testfile, name, "--json", "--nomemory", "statetest", "/mounted_testfile"]
return {'proc':VMUtils.startProc(cmd ), 'cmd': " ".join(cmd), 'output' : 'stdout'}
else:
cmd = [name,"--json", "--nomemory", "statetest", testfile_path]
return {'proc':VMUtils.startProc(cmd ), 'cmd': " ".join(cmd), 'output' : 'stderr'}
def startPython(test_file, test_tx):
tx_encoded = json.dumps(test_tx)
tx_double_encoded = json.dumps(tx_encoded) # double encode to escape chars for command line
# command if not using a docker container
# pyeth_process = subprocess.Popen(["python", "run_statetest.py", test_file, tx_double_encoded], shell=False, stdout=subprocess.PIPE, close_fds=True)
# command to run docker container
# docker run --volume=/absolute/path/prestate.json:/mounted_prestate cdetrio/pyethereum run_statetest.py mounted_prestate "{\"data\": \"\", \"gasLimit\": \"0x0a00000000\", \"gasPrice\": \"0x01\", \"nonce\": \"0x00\", \"secretKey\": \"0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8\", \"to\": \"0x0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6\", \"value\": \"0x00\"}"
prestate_path = os.path.abspath(test_file)
mount_flag = prestate_path + ":" + "/mounted_prestate"
cmd = ["docker", "run", "--rm", "-t", "-v", mount_flag, cfg['PYETH_DOCKER_NAME'], "run_statetest.py", "/mounted_prestate", tx_double_encoded]
return {'proc':VMUtils.startProc(cmd), 'cmd': " ".join(cmd), 'output' : 'stdout'}
TEST_WHITELIST = []
SKIP_LIST = [
#'modexp_*', # regex example
'POP_Bounds',
'POP_BoundsOOG',
'MLOAD_Bounds',
'Call1024PreCalls', # Call1024PreCalls does produce a trace difference, worth fixing that trace
'createInitFailStackSizeLargerThan1024',
'createJS_ExampleContract',
'CALL_Bounds',
'mload32bitBound_Msize ',
'mload32bitBound_return2',
'Call1MB1024Calldepth ',
'shallowStackOK',
'stackOverflowM1PUSH', # slow
'static_Call1MB1024Calldepth', # slow
'static_Call1024BalanceTooLow',
'static_Call1024BalanceTooLow2',
'static_Call1024OOG',
'static_Call1024PreCalls',
'static_Call1024PreCalls2', # slow
'static_Call1024PreCalls3', #slow
'static_Call50000',
'static_Call50000bytesContract50_1',
'static_Call50000bytesContract50_2',
'static_Call50000bytesContract50_3',
'static_CallToNameRegistratorAddressTooBigLeft',
'static_Call50000_identity2',
'static_Call50000_identity',
'static_Call50000_ecrec',
'static_Call50000_rip160',
'static_Call50000_sha256',
'static_Return50000_2',
'static_callChangeRevert',
'static_log3_MaxTopic',
'static_log4_Caller',
'static_RawCallGas',
'static_RawCallGasValueTransfer',
'static_RawCallGasValueTransferAsk',
'static_RawCallGasValueTransferMemory',
'static_RawCallGasValueTransferMemoryAsk',
'static_refund_CallA_notEnoughGasInCall',
'static_LoopCallsThenRevert',
'HighGasLimit', # geth doesn't run
'zeroSigTransacrionCreate', # geth fails this one
'zeroSigTransacrionCreatePrice0', # geth fails
'zeroSigTransaction', # geth fails
'zeroSigTransaction0Price', # geth fails
'zeroSigTransactionInvChainID',
'zeroSigTransactionInvNonce',
'zeroSigTransactionInvNonce2',
'zeroSigTransactionOOG',
'zeroSigTransactionOrigin',
'zeroSigTransactionToZero',
'zeroSigTransactionToZero2',
'OverflowGasRequire2',
'TransactionDataCosts652',
'stackLimitPush31_1023',
'stackLimitPush31_1024',
'stackLimitPush31_1025', # test runner crashes
'stackLimitPush32_1023',
'stackLimitPush32_1024',
    'stackLimitPush32_1025', # big trace, consensus failure
'stackLimitGas_1023',
'stackLimitGas_1024', # consensus bug
'stackLimitGas_1025'
]
regex_skip = [skip.replace('*', '') for skip in SKIP_LIST if
= self
self._super_call = True
try:
res = getattr(PyFloatProperty, funcname)(*args2, **kwargs)
finally:
del self._super_call
return res
def _RegisterMethods(self):
cls = self.__class__
if not hasattr(cls,'_pyswig_methods_registered'):
cls._pyswig_methods_registered = True
ls = [ab for ab in cls.__dict__.iteritems()]
for a, b in ls:
if not a.startswith('_'):
setattr(cls, '%s_t_'%a, b)
def _SetSelf(*args, **kwargs):
"""_SetSelf(self, PyObject self)"""
return _propgrid.PyFloatProperty__SetSelf(*args, **kwargs)
_propgrid.PyFloatProperty_swigregister(PyFloatProperty)
class PyEditorDialogAdapter(PGEditorDialogAdapter):
"""Proxy of C++ PyEditorDialogAdapter class"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args, **kwargs):
"""__init__(self) -> PyEditorDialogAdapter"""
_propgrid.PyEditorDialogAdapter_swiginit(self,_propgrid.new_PyEditorDialogAdapter(*args, **kwargs))
self._SetSelf(self); self._RegisterMethods()
def CallSuperMethod(self, *args, **kwargs):
funcname = args[0]
args2 = list(args)
args2[0] = self
self._super_call = True
try:
res = getattr(PyEditorDialogAdapter, funcname)(*args2, **kwargs)
finally:
del self._super_call
return res
def _RegisterMethods(self):
cls = self.__class__
if not hasattr(cls,'_pyswig_methods_registered'):
cls._pyswig_methods_registered = True
ls = [ab for ab in cls.__dict__.iteritems()]
for a, b in ls:
if not a.startswith('_'):
setattr(cls, '%s_t_'%a, b)
def _SetSelf(*args, **kwargs):
"""_SetSelf(self, PyObject self)"""
return _propgrid.PyEditorDialogAdapter__SetSelf(*args, **kwargs)
_propgrid.PyEditorDialogAdapter_swigregister(PyEditorDialogAdapter)
class PyEnumProperty(EnumProperty):
"""Proxy of C++ PyEnumProperty class"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args, **kwargs):
"""
__init__(self, String label=(*wxPGProperty::sm_wxPG_LABEL), String name=(*wxPGProperty::sm_wxPG_LABEL),
wxArrayString labels=wxArrayString(),
wxArrayInt values=wxArrayInt(),
int value=0) -> PyEnumProperty
"""
_propgrid.PyEnumProperty_swiginit(self,_propgrid.new_PyEnumProperty(*args, **kwargs))
self._SetSelf(self); self._RegisterMethods()
def CallSuperMethod(self, *args, **kwargs):
funcname = args[0]
args2 = list(args)
args2[0] = self
self._super_call = True
try:
res = getattr(PyEnumProperty, funcname)(*args2, **kwargs)
finally:
del self._super_call
return res
def _RegisterMethods(self):
cls = self.__class__
if not hasattr(cls,'_pyswig_methods_registered'):
cls._pyswig_methods_registered = True
ls = [ab for ab in cls.__dict__.iteritems()]
for a, b in ls:
if not a.startswith('_'):
setattr(cls, '%s_t_'%a, b)
def _SetSelf(*args, **kwargs):
"""_SetSelf(self, PyObject self)"""
return _propgrid.PyEnumProperty__SetSelf(*args, **kwargs)
_propgrid.PyEnumProperty_swigregister(PyEnumProperty)
class PyArrayStringProperty(ArrayStringProperty):
"""Proxy of C++ PyArrayStringProperty class"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args, **kwargs):
"""
__init__(self, String label=(*wxPGProperty::sm_wxPG_LABEL), String name=(*wxPGProperty::sm_wxPG_LABEL),
wxArrayString value=wxArrayString()) -> PyArrayStringProperty
"""
_propgrid.PyArrayStringProperty_swiginit(self,_propgrid.new_PyArrayStringProperty(*args, **kwargs))
self._SetSelf(self); self._RegisterMethods()
def CallSuperMethod(self, *args, **kwargs):
funcname = args[0]
args2 = list(args)
args2[0] = self
self._super_call = True
try:
res = getattr(PyArrayStringProperty, funcname)(*args2, **kwargs)
finally:
del self._super_call
return res
def _RegisterMethods(self):
cls = self.__class__
if not hasattr(cls,'_pyswig_methods_registered'):
cls._pyswig_methods_registered = True
ls = [ab for ab in cls.__dict__.iteritems()]
for a, b in ls:
if not a.startswith('_'):
setattr(cls, '%s_t_'%a, b)
def _SetSelf(*args, **kwargs):
"""_SetSelf(self, PyObject self)"""
return _propgrid.PyArrayStringProperty__SetSelf(*args, **kwargs)
_propgrid.PyArrayStringProperty_swigregister(PyArrayStringProperty)
class PyComboBoxEditor(PGComboBoxEditor):
"""Proxy of C++ PyComboBoxEditor class"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args, **kwargs):
"""__init__(self) -> PyComboBoxEditor"""
_propgrid.PyComboBoxEditor_swiginit(self,_propgrid.new_PyComboBoxEditor(*args, **kwargs))
self._SetSelf(self); self._RegisterMethods()
def CallSuperMethod(self, *args, **kwargs):
funcname = args[0]
args2 = list(args)
args2[0] = self
self._super_call = True
try:
res = getattr(PyComboBoxEditor, funcname)(*args2, **kwargs)
finally:
del self._super_call
return res
def _RegisterMethods(self):
cls = self.__class__
if not hasattr(cls,'_pyswig_methods_registered'):
cls._pyswig_methods_registered = True
ls = [ab for ab in cls.__dict__.iteritems()]
for a, b in ls:
if not a.startswith('_'):
setattr(cls, '%s_t_'%a, b)
def _SetSelf(*args, **kwargs):
"""_SetSelf(self, PyObject self)"""
return _propgrid.PyComboBoxEditor__SetSelf(*args, **kwargs)
_propgrid.PyComboBoxEditor_swigregister(PyComboBoxEditor)
class PyFileDialogAdapter(PGFileDialogAdapter):
"""Proxy of C++ PyFileDialogAdapter class"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args, **kwargs):
"""__init__(self) -> PyFileDialogAdapter"""
_propgrid.PyFileDialogAdapter_swiginit(self,_propgrid.new_PyFileDialogAdapter(*args, **kwargs))
self._SetSelf(self); self._RegisterMethods()
def CallSuperMethod(self, *args, **kwargs):
funcname = args[0]
args2 = list(args)
args2[0] = self
self._super_call = True
try:
res = getattr(PyFileDialogAdapter, funcname)(*args2, **kwargs)
finally:
del self._super_call
return res
def _RegisterMethods(self):
cls = self.__class__
if not hasattr(cls,'_pyswig_methods_registered'):
cls._pyswig_methods_registered = True
ls = [ab for ab in cls.__dict__.iteritems()]
for a, b in ls:
if not a.startswith('_'):
setattr(cls, '%s_t_'%a, b)
def _SetSelf(*args, **kwargs):
"""_SetSelf(self, PyObject self)"""
return _propgrid.PyFileDialogAdapter__SetSelf(*args, **kwargs)
_propgrid.PyFileDialogAdapter_swigregister(PyFileDialogAdapter)
class PyStringProperty(StringProperty):
"""Proxy of C++ PyStringProperty class"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args, **kwargs):
"""
__init__(self, String label=(*wxPGProperty::sm_wxPG_LABEL), String name=(*wxPGProperty::sm_wxPG_LABEL),
String value=wxEmptyString) -> PyStringProperty
"""
_propgrid.PyStringProperty_swiginit(self,_propgrid.new_PyStringProperty(*args, **kwargs))
self._SetSelf(self); self._RegisterMethods()
def CallSuperMethod(self, *args, **kwargs):
funcname = args[0]
args2 = list(args)
args2[0] = self
self._super_call = True
try:
res = getattr(PyStringProperty, funcname)(*args2, **kwargs)
finally:
del self._super_call
return res
def _RegisterMethods(self):
cls = self.__class__
if not hasattr(cls,'_pyswig_methods_registered'):
cls._pyswig_methods_registered = True
ls = [ab for ab in cls.__dict__.iteritems()]
for a, b in ls:
if not a.startswith('_'):
setattr(cls, '%s_t_'%a, b)
def _SetSelf(*args, **kwargs):
"""_SetSelf(self, PyObject self)"""
return _propgrid.PyStringProperty__SetSelf(*args, **kwargs)
_propgrid.PyStringProperty_swigregister(PyStringProperty)
class PyLongStringDialogAdapter(PGLongStringDialogAdapter):
"""Proxy of C++ PyLongStringDialogAdapter class"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args, **kwargs):
"""__init__(self) -> PyLongStringDialogAdapter"""
_propgrid.PyLongStringDialogAdapter_swiginit(self,_propgrid.new_PyLongStringDialogAdapter(*args, **kwargs))
self._SetSelf(self); self._RegisterMethods()
def CallSuperMethod(self, *args, **kwargs):
funcname = args[0]
args2 = list(args)
args2[0] = self
self._super_call = True
try:
res = getattr(PyLongStringDialogAdapter, funcname)(*args2, **kwargs)
finally:
del self._super_call
return res
def _RegisterMethods(self):
cls = self.__class__
if not hasattr(cls,'_pyswig_methods_registered'):
cls._pyswig_methods_registered = True
ls = [ab for ab in cls.__dict__.iteritems()]
for a, b in ls:
if not a.startswith('_'):
setattr(cls, '%s_t_'%a, b)
def _SetSelf(*args, **kwargs):
"""_SetSelf(self, PyObject self)"""
return _propgrid.PyLongStringDialogAdapter__SetSelf(*args, **kwargs)
_propgrid.PyLongStringDialogAdapter_swigregister(PyLongStringDialogAdapter)
class PyEditEnumProperty(EditEnumProperty):
"""Proxy of C++ PyEditEnumProperty class"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
__init__(self, String label, String name, wxChar labels, long values,
String value) -> PyEditEnumProperty
__init__(self, String label=(*wxPGProperty::sm_wxPG_LABEL), String name=(*wxPGProperty::sm_wxPG_LABEL),
wxArrayString labels=wxArrayString(),
wxArrayInt values=wxArrayInt(),
String value=wxEmptyString) -> PyEditEnumProperty
__init__(self, String label, String name, PGChoices choices, String value=wxEmptyString) -> PyEditEnumProperty
__init__(self, String label, String name, wxChar labels, long values,
PGChoices choicesCache, String value) -> PyEditEnumProperty
"""
_propgrid.PyEditEnumProperty_swiginit(self,_propgrid.new_PyEditEnumProperty(*args))
self._SetSelf(self); self._RegisterMethods()
def CallSuperMethod(self, *args, **kwargs):
funcname = args[0]
args2 = list(args)
args2[0] = self
self._super_call = True
try:
res = getattr(PyEditEnumProperty, funcname)(*args2, **kwargs)
finally:
del self._super_call
return res
def _RegisterMethods(self):
cls = self.__class__
if not hasattr(cls,'_pyswig_methods_registered'):
cls._pyswig_methods_registered = True
ls = [ab for ab in cls.__dict__.iteritems()]
for a, b in ls:
if not a.startswith('_'):
setattr(cls, '%s_t_'%a, b)
def _SetSelf(*args, **kwargs):
"""_SetSelf(self, PyObject self)"""
return _propgrid.PyEditEnumProperty__SetSelf(*args, **kwargs)
_propgrid.PyEditEnumProperty_swigregister(PyEditEnumProperty)
class PyTextCtrlEditor(PGTextCtrlEditor):
"""Proxy of C++ PyTextCtrlEditor class"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args, **kwargs):
"""__init__(self) -> PyTextCtrlEditor"""
_propgrid.PyTextCtrlEditor_swiginit(self,_propgrid.new_PyTextCtrlEditor(*args, **kwargs))
self._SetSelf(self); self._RegisterMethods()
def CallSuperMethod(self, *args, **kwargs):
funcname = args[0]
args2 = list(args)
args2[0] = self
self._super_call = True
try:
res = getattr(PyTextCtrlEditor, funcname)(*args2, **kwargs)
finally:
del self._super_call
return res
def _RegisterMethods(self):
cls = self.__class__
if not hasattr(cls,'_pyswig_methods_registered'):
cls._pyswig_methods_registered = True
ls = [ab for ab in cls.__dict__.iteritems()]
for a, b in ls:
if not a.startswith('_'):
setattr(cls, '%s_t_'%a, b)
def _SetSelf(*args, **kwargs):
"""_SetSelf(self, PyObject self)"""
return _propgrid.PyTextCtrlEditor__SetSelf(*args, **kwargs)
_propgrid.PyTextCtrlEditor_swigregister(PyTextCtrlEditor)
class PySystemColourProperty(SystemColourProperty):
"""Proxy of C++ PySystemColourProperty class"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
__init__(self, String label=(*wxPGProperty::sm_wxPG_LABEL), String name=(*wxPGProperty::sm_wxPG_LABEL),
ColourPropertyValue value=wxColourPropertyValue()) -> PySystemColourProperty
__init__(self, String label, String name, wxChar labels, long values,
PGChoices choicesCache, ColourPropertyValue value) -> PySystemColourProperty
__init__(self, String label, String name, wxChar labels, long values,
PGChoices choicesCache, Colour value) -> PySystemColourProperty
"""
_propgrid.PySystemColourProperty_swiginit(self,_propgrid.new_PySystemColourProperty(*args))
self._SetSelf(self); self._RegisterMethods()
def CallSuperMethod(self, *args, **kwargs):
funcname = args[0]
args2 = list(args)
args2[0] = self
self._super_call = True
try:
res = getattr(PySystemColourProperty, funcname)(*args2, **kwargs)
finally:
del self._super_call
return res
def _RegisterMethods(self):
cls = self.__class__
if not hasattr(cls,'_pyswig_methods_registered'):
cls._pyswig_methods_registered = True
ls = [ab for ab in cls.__dict__.iteritems()]
for a, b in ls:
if not a.startswith('_'):
setattr(cls, '%s_t_'%a, b)
def _SetSelf(*args, **kwargs):
"""_SetSelf(self, PyObject self)"""
return _propgrid.PySystemColourProperty__SetSelf(*args, **kwargs)
_propgrid.PySystemColourProperty_swigregister(PySystemColourProperty)
class PyFlagsProperty(FlagsProperty):
"""Proxy of C++ PyFlagsProperty class"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args, **kwargs):
"""
__init__(self, String label=(*wxPGProperty::sm_wxPG_LABEL), String name=(*wxPGProperty::sm_wxPG_LABEL),
wxArrayString labels=wxArrayString(),
wxArrayInt values=wxArrayInt(),
int value=0) -> PyFlagsProperty
"""
_propgrid.PyFlagsProperty_swiginit(self,_propgrid.new_PyFlagsProperty(*args, **kwargs))
self._SetSelf(self); self._RegisterMethods()
def CallSuperMethod(self, *args, **kwargs):
funcname = args[0]
args2 = list(args)
args2[0] = self
self._super_call = True
try:
res = getattr(PyFlagsProperty, funcname)(*args2, **kwargs)
finally:
del self._super_call
return res
def _RegisterMethods(self):
cls = self.__class__
if not hasattr(cls,'_pyswig_methods_registered'):
cls._pyswig_methods_registered = True
ls = [ab for ab in cls.__dict__.iteritems()]
for a, b in ls:
if not a.startswith('_'):
setattr(cls, '%s_t_'%a, b)
def _SetSelf(*args, **kwargs):
"""_SetSelf(self, PyObject self)"""
return _propgrid.PyFlagsProperty__SetSelf(*args, **kwargs)
_propgrid.PyFlagsProperty_swigregister(PyFlagsProperty)
class PyFontProperty(FontProperty):
"""Proxy of C++ PyFontProperty class"""
    thisown
True
def _parse_atom_attributes(token):
"""
Parse bracketed tokens.
Parameters
----------
token: str
Token in the form of a json dictionary.
Returns
-------
dict
"""
if not token.strip().startswith('{'):
raise ValueError('The token should start with a curly bracket.')
try:
attributes = json.loads(token)
except JSONDecodeError as error:
raise ValueError('The following value is not a valid atom attribute token: "{}".'
.format(token)) from error
modifications = {}
for key, value in attributes.items():
try:
if '|' in value:
modifications[key] = Choice(value.split('|'))
except TypeError:
pass
attributes.update(modifications)
return attributes
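# Illustrative example (not part of the original source): the token
# '{"atomname": "BB", "resname": "ALA|GLY"}' parses to a dict where
# 'atomname' stays the string 'BB' and 'resname' becomes
# Choice(['ALA', 'GLY']) because its value contains a '|'.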
def _get_atoms(tokens, natoms):
atoms = []
while tokens and _some_atoms_left(tokens, atoms, natoms):
token = tokens.popleft()
if token.startswith('{'):
msg = 'Found atom attributes without an atom reference.'
raise IOError(msg)
if tokens:
next_token = tokens[0]
else:
next_token = ''
if next_token.startswith('{'):
atoms.append([token, _parse_atom_attributes(next_token)])
tokens.popleft()
else:
atoms.append([token, {}])
return atoms
def _treat_block_interaction_atoms(atoms, context, section):
atom_names = list(context.nodes)
all_references = []
for atom in atoms:
reference = atom[0]
if reference.isdigit():
# The indices in the file are 1-based
reference = int(reference) - 1
try:
reference = atom_names[reference]
except IndexError:
msg = ('There are {} atoms defined in the block "{}". '
'Interaction in section "{}" cannot refer to '
'atom index {}.')
raise IOError(msg.format(len(context), context.name,
section, reference + 1))
atom[0] = reference
else:
if reference not in context:
msg = ('There is no atom "{}" defined in the block "{}". '
'Section "{}" cannot refer to it.')
raise IOError(msg.format(reference, context.name, section))
if reference[0] in '+-<>':
msg = ('Atom names in blocks cannot be prefixed with + or -. '
'The name "{}", used in section "{}" of the block "{}" '
'is not valid in a block.')
raise IOError(msg.format(reference, section, context.name))
all_references.append(reference)
return all_references
def _split_node_key(key):
"""
Split a node key into a prefix and a base and validate the key validity.
"""
if not key:
raise IOError('A node key cannot be empty.')
# If the atom name is prefixed, we can get the order.
prefix_end = 0 # Make sure prefix_end is defined even if key is empty
for prefix_end, char in enumerate(key):
if char not in '+-><*':
break
else: # no break
# This branch could also be taken if 'key' was empty. However, we
# tested already that it was not the case.
msg = ('The atom key "{}" is not valid. There must be a name '
               'following the prefix.')
raise IOError(msg.format(key))
prefix = key[:prefix_end]
if len(set(prefix)) > 1:
msg = ('Atom name prefix cannot mix characters. Atom name "{}" '
'is not a valid key.')
raise IOError(msg.format(key))
base = key[prefix_end:]
return prefix, base
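# Illustrative examples (not part of the original source):
# _split_node_key('++BB') returns ('++', 'BB'), while _split_node_key('+')
# raises IOError because no name follows the prefix.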
def _get_order_and_prefix_from_attributes(attributes):
prefix_from_attributes = ''
order_from_attributes = None
Sequence = collections.abc.Sequence # pylint: disable=invalid-name
if attributes.get('order') is not None:
order = attributes['order']
order_from_attributes = order
if isinstance(order, numbers.Integral) and not isinstance(order, bool):
# Boolean as abstract subclasses of number.Integral as they can be
# considered as 0 and 1. Yet, they yield unexpected results and
# should not be accepted as valid values for 'order'.
if order > 0:
prefix_char = '+'
else:
prefix_char = '-'
prefix_from_attributes = prefix_char * int(abs(order))
elif (isinstance(order, Sequence) # We can do the following operations
and len(set(order)) == 1 # It is homogeneous
and order[0] in '><*'): # The character is an expected one
prefix_from_attributes = order
else:
raise IOError('The order given in attribute ("{}") is not valid. '
                          'It must be an integer or a homogeneous series '
'of ">", "<", or "*".'
.format(order))
return prefix_from_attributes, order_from_attributes
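# Illustrative examples (not part of the original source): {'order': -2}
# yields ('--', -2), and {'order': '>>'} yields ('>>', '>>'); a boolean or a
# mixed string such as '><' raises IOError.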
def _get_order_and_prefix_from_prefix(prefix):
"""
Convert a prefix into a numerical value.
"""
prefix_from_prefix = None
order_from_prefix = 0
if not prefix:
return prefix_from_prefix, order_from_prefix
# It is already validated.
prefix_from_prefix = prefix
if prefix[0] == '+':
order_from_prefix = len(prefix)
elif prefix[0] == '-':
order_from_prefix = -len(prefix)
else:
order_from_prefix = prefix
return prefix_from_prefix, order_from_prefix
def _treat_atom_prefix(reference, attributes):
"""
Connect graph keys, order, and atom names.
In a link, the graph keys, the order attribute of the atoms, and the atom
names are interconnected. In most cases, the graph key is the atom name
prefixed in a way that represent the order attribute. It is possible to
define the order and the atom name from the graph key, or to set the graph
key to represent the order, depending on what is explicitly specified in
the file.
In a link node, the order can be an integer, a series of '>' (*e.g.* '>',
'>>', '>>>', ...), a series of '<', or a series of '*'. The series of '>',
'<', and '*' translate directly from the key prefix to the order attribute,
and *vice versa*. Numerical values of the order attribute, however, are
converted into series of '+' or '-' for the key prefix; there, the number
of '+' or '-' in the prefix corresponds to the value of the order
attribute.
The order can be specified either by the key prefix, or by the attribute.
If it is specified in the two places, then they have to match each other or
a :exc:`IOError` is raised.
If the atom name is explicitly specified, then it is not modified. If it is
not specified, then it is set from the node key. The base of the node key
(*i.e.* what follows the prefix) is not modified, but a prefix can be added
if there is none. The base of the node key and the atom name *can* differ.
    The atom name is what will be used for the graph isomorphism. The base of
the key cannot be empty (*i.e.* '+++' or '*' are not valid keys); if it is,
then an :exc:`IOError` is raised.
Parameters
----------
reference: str
A node key for a link, as written in the file.
attributes: dict
        The node attributes read from the file.
Returns
-------
prefixed_reference: str
The node key with the appropriate prefix.
attributes: dict
        A shallow copy of the node attribute dictionary with the 'order' and the
'atomname' attributes set as appropriate.
Raises
------
IOError
The node key, or an attribute value is invalid.
Examples
--------
>>> _treat_atom_prefix('+BB', {})
('+BB', {'order': 1, 'atomname': 'BB'})
>>> _treat_atom_prefix('BB', {'order': 1})
('+BB', {'order': 1, 'atomname': 'BB'})
>>> _treat_atom_prefix('--XX', {'atomname': 'BB'})
    ('--XX', {'atomname': 'BB', 'order': -2})
>>> _treat_atom_prefix('>>BB', {})
('>>BB', {'order': '>>', 'atomname': 'BB'})
"""
prefix, base = _split_node_key(reference)
# Is the order specified in the attributes?
(prefix_from_attributes,
order_from_attributes) = _get_order_and_prefix_from_attributes(attributes)
# Is there a specified prefix?
(prefix_from_prefix,
order_from_prefix) = _get_order_and_prefix_from_prefix(prefix)
# If the order is defined twice, is it consistent?
if (order_from_attributes is not None
and prefix_from_prefix is not None
and order_from_attributes != order_from_prefix):
msg = ('The sequence order for atom "{}" of a '
'link is not consistent between the name prefix '
'(order={}) and the atom attributes (order={}).')
raise IOError(msg.format(reference, order_from_prefix, order_from_attributes))
return_attributes = copy.copy(attributes)
if order_from_attributes is None:
return_attributes['order'] = order_from_prefix
if 'atomname' not in return_attributes:
return_attributes['atomname'] = base
if prefix_from_prefix is None:
prefixed = prefix_from_attributes + base
else:
prefixed = reference
return prefixed, return_attributes
def _treat_link_interaction_atoms(atoms, context, section):
all_references = []
for reference, attributes in atoms:
intermediate = context._apply_to_all_nodes.copy()
intermediate.update(attributes)
attributes = intermediate
prefixed_reference, attributes = _treat_atom_prefix(reference, attributes)
all_references.append(prefixed_reference)
if prefixed_reference in context:
context_atom = context.nodes[prefixed_reference]
for key, value in attributes.items():
if key in context_atom and value != context_atom[key]:
msg = ('Attribute {} of atom {} conflicts in a link '
'between its definition in section "{}" '
'(value is "{}") and its previous definition '
'(value was "{}").')
raise IOError(msg.format(key, reference, section,
value, context_atom[key]))
context_atom.update(attributes)
else:
context.add_node(prefixed_reference, **attributes)
return all_references
def _parse_interaction_parameters(tokens):
parameters = []
for token in tokens:
if _is_param_effector(token):
effector_name, effector_param_str = token.split('(', 1)
effector_param_str = effector_param_str[:-1] # Remove the closing parenthesis
try:
effector_class = PARAMETER_EFFECTORS[effector_name]
except KeyError:
raise IOError('{} is not a known parameter effector.'
.format(effector_name))
if '|' in effector_param_str:
effector_param_str, effector_format = effector_param_str.split('|')
else:
effector_format = None
effector_param = [elem.strip() for elem in effector_param_str.split(',')]
parameter = effector_class(effector_param, format_spec=effector_format)
else:
parameter = token
parameters.append(parameter)
return parameters
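# Illustrative example (hypothetical effector name): a token such as
# 'sqrt(A, B|8.3f)' splits into the effector name 'sqrt', the parameters
# ['A', 'B'], and the format spec '8.3f', which are passed to the matching
# class from PARAMETER_EFFECTORS.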
def _is_param_effector(token):
return (
'(' in token
        and not
'buffer', 'offset', 'size'])
def glBindBufferRange(target, index, buffer, offset, size):
pass
@params(api='gl', prms=['v'])
def glNormal3iv(v):
pass
@params(api='gl', prms=['program', 'location', 'params'])
def glGetUniformdv(program, location, params):
pass
@params(api='gl', prms=['target', 's', 't', 'r', 'q'])
def glMultiTexCoord4s(target, s, t, r, q):
pass
@params(api='gl', prms=['v'])
def glTexCoord1iv(v):
pass
@params(api='gl', prms=['v'])
def glColor3uiv(v):
pass
@params(api='gl', prms=['base'])
def glListBase(base):
pass
@params(api='gl', prms=['sync', 'flags', 'timeout'])
def glClientWaitSync(sync, flags, timeout):
pass
@params(api='gl', prms=['texture', 'internalformat', 'buffer'])
def glTextureBuffer(texture, internalformat, buffer):
pass
@params(api='gl', prms=['index', 'v'])
def glVertexAttrib4Nsv(index, v):
pass
@params(api='gl', prms=['x', 'y', 'width', 'height', 'format', 'type', 'bufSize', 'data'])
def glReadnPixels(x, y, width, height, format, type, bufSize, data):
pass
@params(api='gl', prms=['srcName', 'srcTarget', 'srcLevel', 'srcX', 'srcY', 'srcZ', 'dstName', 'dstTarget', 'dstLevel', 'dstX', 'dstY', 'dstZ', 'srcWidth', 'srcHeight', 'srcDepth'])
def glCopyImageSubData(srcName, srcTarget, srcLevel, srcX, srcY, srcZ, dstName, dstTarget, dstLevel, dstX, dstY, dstZ, srcWidth, srcHeight, srcDepth):
pass
@params(api='gl', prms=['shadertype', 'location', 'params'])
def glGetUniformSubroutineuiv(shadertype, location, params):
pass
@params(api='gl', prms=['bindingindex', 'buffer', 'offset', 'stride'])
def glBindVertexBuffer(bindingindex, buffer, offset, stride):
pass
@params(api='gl', prms=['source', 'type', 'id', 'severity', 'length', 'buf'])
def glDebugMessageInsert(source, type, id, severity, length, buf):
pass
@params(api='gl', prms=['sampler'])
def glIsSampler(sampler):
pass
@params(api='gl', prms=['target', 'level', 'xoffset', 'x', 'y', 'width'])
def glCopyTexSubImage1D(target, level, xoffset, x, y, width):
pass
@params(api='gl', prms=['s'])
def glTexCoord1i(s):
pass
@params(api='gl', prms=['target'])
def glCheckFramebufferStatus(target):
pass
@params(api='gl', prms=['s'])
def glTexCoord1d(s):
pass
@params(api='gl', prms=['s'])
def glTexCoord1f(s):
pass
@params(api='gl', prms=['unit', 'texture', 'level', 'layered', 'layer', 'access', 'format'])
def glBindImageTexture(unit, texture, level, layered, layer, access, format):
pass
@params(api='gl', prms=['program', 'count', 'constvaryings', 'bufferMode'])
def glTransformFeedbackVaryings(program, count, constvaryings, bufferMode):
pass
@params(api='gl', prms=['mode', 'start', 'end', 'count', 'type', 'indices'])
def glDrawRangeElements(mode, start, end, count, type, indices):
pass
@params(api='gl', prms=['target', 'index', 'buffer'])
def glBindBufferBase(target, index, buffer):
pass
@params(api='gl', prms=['v'])
def glColor3bv(v):
pass
@params(api='gl', prms=['n', 'samplers'])
def glCreateSamplers(n, samplers):
pass
@params(api='gl', prms=['mode', 'first', 'count', 'drawcount'])
def glMultiDrawArrays(mode, first, count, drawcount):
pass
@params(api='gl', prms=['type', 'coords'])
def glTexCoordP4ui(type, coords):
pass
@params(api='gl', prms=['index', 'x', 'y', 'z'])
def glVertexAttribI3ui(index, x, y, z):
pass
@params(api='gl', prms=['attribindex', 'size', 'type', 'relativeoffset'])
def glVertexAttribIFormat(attribindex, size, type, relativeoffset):
pass
@params(api='gl', prms=['n', 'framebuffers'])
def glCreateFramebuffers(n, framebuffers):
pass
@params(api='gl', prms=['red', 'green', 'blue', 'alpha'])
def glClearAccum(red, green, blue, alpha):
pass
@params(api='gl', prms=['target', 'id'])
def glBeginQuery(target, id):
pass
@params(api='gl', prms=['target', 'u1', 'u2', 'ustride', 'uorder', 'v1', 'v2', 'vstride', 'vorder', 'points'])
def glMap2d(target, u1, u2, ustride, uorder, v1, v2, vstride, vorder, points):
pass
@params(api='gl', prms=['target', 'u1', 'u2', 'ustride', 'uorder', 'v1', 'v2', 'vstride', 'vorder', 'points'])
def glMap2f(target, u1, u2, ustride, uorder, v1, v2, vstride, vorder, points):
pass
@params(api='gl', prms=['location', 'count', 'transpose', 'value'])
def glUniformMatrix2x4fv(location, count, transpose, value):
pass
@params(api='gl', prms=['index', 'n', 'f'])
def glDepthRangeIndexed(index, n, f):
pass
@params(api='gl', prms=[])
def glGetError():
pass
@params(api='gl', prms=['target', 'pname', 'params'])
def glGetTexEnviv(target, pname, params):
pass
@params(api='gl', prms=['u'])
def glEvalCoord1d(u):
pass
@params(api='gl', prms=['target', 'level', 'pname', 'params'])
def glGetTexLevelParameterfv(target, level, pname, params):
pass
@params(api='gl', prms=['u'])
def glEvalCoord1f(u):
pass
@params(api='gl', prms=['map', 'mapsize', 'values'])
def glPixelMapfv(map, mapsize, values):
pass
@params(api='gl', prms=['map', 'values'])
def glGetPixelMapusv(map, values):
pass
@params(api='gl', prms=['op', 'value'])
def glAccum(op, value):
pass
@params(api='gl', prms=['v'])
def glRasterPos3sv(v):
pass
@params(api='gl', prms=['program', 'location', 'v0', 'v1'])
def glProgramUniform2ui(program, location, v0, v1):
pass
@params(api='gl', prms=['program', 'location', 'v0', 'v1', 'v2', 'v3'])
def glProgramUniform4ui(program, location, v0, v1, v2, v3):
pass
@params(api='gl', prms=['buffer', 'size', 'data', 'usage'])
def glNamedBufferData(buffer, size, data, usage):
pass
@params(api='gl', prms=['buffer', 'internalformat', 'offset', 'size', 'format', 'type', 'data'])
def glClearNamedBufferSubData(buffer, internalformat, offset, size, format, type, data):
pass
@params(api='gl', prms=['v1', 'v2'])
def glRectsv(v1, v2):
pass
@params(api='gl', prms=['coord', 'pname', 'params'])
def glGetTexGeniv(coord, pname, params):
pass
@params(api='gl', prms=['pname', 'param'])
def glPixelStorei(pname, param):
pass
@params(api='gl', prms=['texture', 'level', 'xoffset', 'yoffset', 'zoffset', 'width', 'height', 'depth', 'bufSize', 'pixels'])
def glGetCompressedTextureSubImage(texture, level, xoffset, yoffset, zoffset, width, height, depth, bufSize, pixels):
pass
@params(api='gl', prms=['flag'])
def glDepthMask(flag):
pass
@params(api='gl', prms=['pname', 'values'])
def glPatchParameterfv(pname, values):
pass
@params(api='gl', prms=['texture', 'levels', 'internalformat', 'width', 'height'])
def glTextureStorage2D(texture, levels, internalformat, width, height):
pass
@params(api='gl', prms=['target', 'internalformat', 'buffer', 'offset', 'size'])
def glTexBufferRange(target, internalformat, buffer, offset, size):
pass
@params(api='gl', prms=['v'])
def glRasterPos4fv(v):
pass
@params(api='gl', prms=['u'])
def glEvalCoord1dv(u):
pass
@params(api='gl', prms=[])
def glPopClientAttrib():
pass
@params(api='gl', prms=['vaobj', 'first', 'count', 'buffers', 'offsets', 'strides'])
def glVertexArrayVertexBuffers(vaobj, first, count, buffers, offsets, strides):
pass
@params(api='gl', prms=['program', 'pname', 'value'])
def glProgramParameteri(program, pname, value):
pass
@params(api='gl', prms=['target', 'query', 'v'])
def glGetMapfv(target, query, v):
pass
@params(api='gl', prms=['v'])
def glRasterPos2fv(v):
pass
@params(api='gl', prms=['barriers'])
def glMemoryBarrierByRegion(barriers):
pass
@params(api='gl', prms=['v'])
def glVertex2sv(v):
pass
@params(api='gl', prms=['v'])
def glWindowPos2sv(v):
pass
@params(api='gl', prms=['type'])
def glCreateShader(type):
pass
@params(api='gl', prms=['n', 'renderbuffers'])
def glGenRenderbuffers(n, renderbuffers):
pass
@params(api='gl', prms=['target', 'level', 'xoffset', 'yoffset', 'x', 'y', 'width', 'height'])
def glCopyTexSubImage2D(target, level, xoffset, yoffset, x, y, width, height):
pass
@params(api='gl', prms=['sfactorRGB', 'dfactorRGB', 'sfactorAlpha', 'dfactorAlpha'])
def glBlendFuncSeparate(sfactorRGB, dfactorRGB, sfactorAlpha, dfactorAlpha):
pass
@params(api='gl', prms=['v'])
def glTexCoord2fv(v):
pass
@params(api='gl', prms=['v'])
def glTexCoord4fv(v):
pass
@params(api='gl', prms=['size'])
def glPointSize(size):
pass
@params(api='gl', prms=['unit', 'texture'])
def glBindTextureUnit(unit, texture):
pass
@params(api='gl', prms=['pipeline', 'bufSize', 'length', 'infoLog'])
def glGetProgramPipelineInfoLog(pipeline, bufSize, length, infoLog):
pass
@params(api='gl', prms=['index', 'v'])
def glVertexAttrib4Nuiv(index, v):
pass
@params(api='gl', prms=['sync', 'flags', 'timeout'])
def glWaitSync(sync, flags, timeout):
pass
@params(api='gl', prms=['buf', 'modeRGB', 'modeAlpha'])
def glBlendEquationSeparatei(buf, modeRGB, modeAlpha):
pass
@params(api='gl', prms=['location', 'x', 'y', 'z'])
def glUniform3d(location, x, y, z):
pass
@params(api='gl', prms=['location', 'v0', 'v1', 'v2'])
def glUniform3f(location, v0, v1, v2):
pass
@params(api='gl', prms=['program', 'location', 'count', 'value'])
def glProgramUniform3uiv(program, location, count, value):
pass
@params(api='gl', prms=['program', 'name'])
def glGetFragDataIndex(program, name):
pass
@params(api='gl', prms=['v'])
def glColor3sv(v):
pass
@params(api='gl', prms=['v'])
def glVertex4sv(v):
pass
@params(api='gl', prms=['id', 'target'])
def glQueryCounter(id, target):
pass
@params(api='gl', prms=['n', 'framebuffers'])
def glDeleteFramebuffers(n, framebuffers):
pass
@params(api='gl', prms=['mode', 'first', 'count'])
def glDrawArrays(mode, first, count):
pass
@params(api='gl', prms=['s', 't', 'r', 'q'])
def glTexCoord4f(s, t, r, q):
pass
@params(api='gl', prms=['mask'])
def glClear(mask):
pass
@params(api='gl', prms=['target', 'n', 'ids'])
def glCreateQueries(target, n, ids):
pass
@params(api='gl', prms=['sampler', 'pname', 'params'])
def glGetSamplerParameterfv(sampler, pname, params):
pass
@params(api='gl', prms=['x', 'y', 'z'])
def glTranslatef(x, y, z):
pass
@params(api='gl', prms=['index', 'x', 'y', 'z', 'w'])
def glVertexAttrib4Nub(index, x, y, z, w):
pass
@params(api='gl', prms=['x', 'y', 'z'])
def glTranslated(x, y, z):
pass
@params(api='gl', prms=['sampler', 'pname', 'param'])
def glSamplerParameterIiv(sampler, pname, param):
pass
@params(api='gl', prms=['mode', 'type', 'indirect'])
def glDrawElementsIndirect(mode, type, indirect):
pass
@params(api='gl', prms=['v'])
def glSecondaryColor3bv(v):
pass
@params(api='gl', prms=['s', 't', 'r', 'q'])
def glTexCoord4s(s, t, r, q):
pass
@params(api='gl', prms=['id', 'pname', 'params'])
def glGetQueryObjecti64v(id, pname, params):
pass
@params(api='gl', prms=['program', 'uniformCount', 'constuniformNames', 'uniformIndices'])
def glGetUniformIndices(program, uniformCount, constuniformNames, uniformIndices):
pass
@params(api='gl', prms=['v'])
def glVertex3iv(v):
pass
@params(api='gl', prms=['coord', 'pname', 'params'])
def glTexGenfv(coord, pname, params):
pass
@params(api='gl', prms=['first', 'count', 'buffers', 'offsets', 'strides'])
def glBindVertexBuffers(first, count, buffers, offsets, strides):
pass
@params(api='gl', prms=['face', 'pname', 'param'])
def glMateriali(face, pname, param):
pass
@params(api='gl', prms=['array'])
def glIsVertexArray(array):
pass
@params(api='gl', prms=['index'])
def glDisableVertexAttribArray(index):
pass
@params(api='gl', prms=['program', 'storageBlockIndex', 'storageBlockBinding'])
def glShaderStorageBlockBinding(program, storageBlockIndex, storageBlockBinding):
pass
@params(api='gl', prms=['face', 'pname', 'param'])
def glMaterialf(face, pname, param):
pass
@params(api='gl', prms=['texture', 'levels', 'internalformat', 'width'])
def glTextureStorage1D(texture, levels, internalformat, width):
pass
@params(api='gl', prms=['program', 'programInterface', 'pname', 'params'])
def glGetProgramInterfaceiv(program, programInterface, pname, params):
pass
@params(api='gl', prms=['buffer', 'access'])
def glMapNamedBuffer(buffer, access):
pass
@params(api='gl', prms=['program', 'location', 'bufSize', 'params'])
def glGetnUniformdv(program, location, bufSize, params):
pass
@params(api='gl', prms=['target', 'first', 'count', 'buffers'])
def glBindBuffersBase(target, first, count, buffers):
pass
@params(api='gl', prms=['index', 'pname', 'params'])
def glGetVertexAttribIiv(index, pname, params):
pass
@params(api='gl', prms=['type', 'value'])
def glVertexP4uiv(type, value):
pass
@params(api='gl', prms=['index', 'v'])
def glVertexAttribL4dv(index, v):
pass
@params(api='gl', prms=['pname', 'value'])
def glPatchParameteri(pname, value):
pass
@params(api='gl', prms=['target', 'u1', 'u2', 'stride', 'order', 'points'])
def glMap1d(target, u1, u2, stride, order, points):
pass
@params(api='gl', prms=['target', 'u1', 'u2', 'stride', 'order', 'points'])
def glMap1f(target, u1, u2, stride, order, points):
pass
@params(api='gl', prms=['framebuffer', 'attachment', 'pname', 'params'])
def glGetNamedFramebufferAttachmentParameteriv(framebuffer, attachment, pname, params):
pass
@params(api='gl', prms=['target', 'format', 'type', 'rowBufSize', 'row', 'columnBufSize', 'column', 'span'])
def glGetnSeparableFilter(target, format, type, rowBufSize, row, columnBufSize, column, span):
pass
@params(api='gl', prms=['index', 'v'])
def glVertexAttrib4sv(index, v):
pass
@params(api='gl', prms=['program', 'location', 'count', 'value'])
def glProgramUniform1dv(program, location, count, value):
pass
@params(api='gl', prms=['light', 'pname', 'param'])
def glLighti(light, pname, param):
pass
@params(api='gl', prms=['target', 'level', 'internalformat', 'width', 'border', 'format', 'type', 'pixels'])
def glTexImage1D(target, level, internalformat, width, border, format, type, pixels):
pass
@params(api='gl', prms=['light', 'pname', 'param'])
def glLightf(light, pname, param):
pass
@params(api='gl', prms=['value', 'invert'])
def glSampleCoverage(value, invert):
pass
@params(api='gl', prms=['v'])
def glSecondaryColor3usv(v):
pass
@params(api='gl', prms=['xfb', 'pname', 'index', 'param'])
def glGetTransformFeedbacki_v(xfb, pname, index, param):
pass
@params(api='gl', prms=['location', 'v0', 'v1'])
def glUniform2i(location, v0, v1):
pass
@params(api='gl', prms=['un', 'u1', 'u2', 'vn', 'v1', 'v2'])
def glMapGrid2f(un, u1, u2, vn, v1, v2):
pass
@params(api='gl', prms=['index', 'x'])
def glVertexAttribL1d(index, x):
pass
@params(api='gl', prms=['target', 'attachment', 'texture', 'level', 'layer'])
def glFramebufferTextureLayer(target, attachment, texture, level, layer):
pass
@params(api='gl', prms=['program', 'location', 'count', 'value'])
def glProgramUniform2fv(program, location, count, value):
pass
@params(api='gl', prms=['program', 'location', 'count', 'transpose', 'value'])
def glProgramUniformMatrix2x4dv(program, location, count, transpose, value):
pass
@params(api='gl', prms=['target', 'pname', 'param'])
def glTexEnvf(target, pname, param):
pass
@params(api='gl', prms=['target', 'index', 'data'])
def glGetInteger64i_v(target, index, data):
pass
@params(api='gl', prms=['target', 'pname', 'param'])
def glTexEnvi(target, pname, param):
pass
@params(api='gl', prms=['srcX0', 'srcY0', 'srcX1', 'srcY1', 'dstX0', 'dstY0', 'dstX1', 'dstY1', 'mask', 'filter'])
def glBlitFramebuffer(srcX0, srcY0, srcX1, srcY1, dstX0, dstY0, dstX1, dstY1, mask, filter):
pass
@params(api='gl', prms=['target', 'index'])
def glIsEnabledi(target, index):
pass
@params(api='gl', prms=['s', 't'])
def glTexCoord2i(s, t):
pass
@params(api='gl', prms=['index', 'type', 'normalized', 'value'])
def glVertexAttribP2ui(index, type, normalized, value):
pass
@params(api='gl', prms=['target', 'query', 'v'])
def glGetMapiv(target, query, v):
pass
@params(api='gl', prms=['ptr', 'length', 'label'])
def glObjectPtrLabel(ptr, length, label):
pass
@params(api='gl', prms=['count', 'bufSize', 'sources', 'types', 'ids', 'severities', 'lengths', 'messageLog'])
def glGetDebugMessageLog(count, bufSize, sources, types, ids, severities, lengths, messageLog):
pass
@params(api='gl', prms=['program', 'color', 'name'])
def glBindFragDataLocation(program, color, name):
pass
@params(api='gl', prms=['v'])
def glSecondaryColor3ubv(v):
pass
@params(api='gl', prms=['pname', 'param'])
def glLightModelf(pname, param):
pass
@params(api='gl', prms=['mode', 'type', 'indirect', 'drawcount', 'stride'])
def glMultiDrawElementsIndirect(mode, type, indirect, drawcount, stride):
pass
@params(api='gl', prms=['n', 'type', 'lists'])
def glCallLists(n, type, lists):
pass
@params(api='gl', prms=['left', 'right', 'bottom', 'top', 'zNear', 'zFar'])
def glFrustum(left, right, bottom, top, zNear, zFar):
pass
@params(api='gl', prms=['s', 't', 'r'])
def glTexCoord3i(s, t, r):
pass
@params(api='gl', prms=['index', 'v'])
def glVertexAttribI3uiv(index, v):
pass
@params(api='gl', prms=['source', 'id', 'length', 'message'])
def glPushDebugGroup(source, id, length, message):
pass
@params(api='gl', prms=['texture', 'type', 'coords'])
def glMultiTexCoordP1uiv(texture, type, coords):
pass
@params(api='gl', prms=['target', 'level', 'xoffset', 'yoffset', 'zoffset', 'x', 'y', 'width', 'height'])
def glCopyTexSubImage3D(target, level, xoffset, yoffset, zoffset, x, y, width, height):
pass
@params(api='gl', prms=['program', 'uniformBlockIndex', 'bufSize', 'length', 'uniformBlockName'])
def glGetActiveUniformBlockName(program, uniformBlockIndex, bufSize, length, uniformBlockName):
pass
@params(api='gl', prms=['s', 't', 'r'])
def glTexCoord3s(s, t, r):
pass
@params(api='gl', prms=['n', 'textures', 'residences'])
def glAreTexturesResident(n, textures, residences):
pass
@params(api='gl', prms=['program', 'location', 'v0', 'v1'])
def glProgramUniform2d(program, location, v0, v1):
pass
@params(api='gl', prms=['program', 'location', 'v0', 'v1'])
def glProgramUniform2f(program, location, v0, v1):
pass
@params(api='gl', prms=['v'])
def glRasterPos4sv(v):
pass
@params(api='gl', prms=['red', 'green', 'blue', 'alpha'])
def glColor4s(red, green, blue, alpha):
pass
@params(api='gl', prms=['array'])
def glBindVertexArray(array):
pass
@params(api='gl', prms=['red', 'green', 'blue', 'alpha'])
def glColor4b(red, green, blue, alpha):
pass
@params(api='gl', prms=['red', 'green', 'blue', 'alpha'])
def glColor4f(red, green, blue, alpha):
pass
@params(api='gl', prms=['red', 'green', 'blue', 'alpha'])
def glColor4d(red, green, blue, alpha):
pass
@params(api='gl', prms=['red', 'green', 'blue', 'alpha'])
def glColor4i(red, green, blue, alpha):
pass
@params(api='gl', prms=['buffer', 'offset', 'size', 'data'])
def glNamedBufferSubData(buffer, offset, size, data):
pass
@params(api='gl', prms=['v'])
def glVertex2dv(v):
pass
@params(api='gl', prms=['target', 'framebuffer'])
def glBindFramebuffer(target, framebuffer):
pass
@params(api='gl', prms=['v1', 'v2'])
def glRectfv(v1, v2):
pass
@params(api='gl', prms=['location', 'count', 'transpose', 'value'])
def glUniformMatrix2x4dv(location, count, transpose, value):
pass
@params(api='gl', prms=['program', 'programInterface', 'name'])
def glGetProgramResourceLocationIndex(program, programInterface, name):
pass
@params(api='gl', prms=['x', 'y', 'width', 'height'])
def glViewport(x, y, width, height):
pass
@params(api='gl', prms=['renderbuffer'])
def glIsRenderbuffer(renderbuffer):
pass
@params(api='gl', prms=['target', 'samples', 'internalformat', 'width', 'height', 'fixedsamplelocations'])
def glTexStorage2DMultisample(target, samples, internalformat, width, height, fixedsamplelocations):
pass
@params(api='gl', prms=['program', 'shadertype', 'index', 'pname', 'values'])
def glGetActiveSubroutineUniformiv(program, shadertype, index, pname, values):
pass
@params(api='gl', prms=['target', 'internalformat', 'buffer'])
def glTexBuffer(target, internalformat, buffer):
pass
@params(api='gl', prms=['i'])
def glArrayElement(i):
pass
@params(api='gl', prms=['program'])
def glValidateProgram(program):
pass
@params(api='gl', prms=['pipeline', 'program'])
def glActiveShaderProgram(pipeline, program):
pass
@params(api='gl', prms=['texture', 'type', 'coords'])
def glMultiTexCoordP2uiv(texture, type, coords):
pass
@params(api='gl', prms=['x1', 'y1', 'x2', 'y2'])
def glRecti(x1, y1, x2, y2):
pass
@params(api='gl', prms=['x1', 'y1', 'x2', 'y2'])
def glRectf(x1, y1, x2, y2):
pass
@params(api='gl', prms=['x1', 'y1', 'x2', 'y2'])
def glRectd(x1, y1, x2, y2):
pass
@params(api='gl', prms=['target', 'texture'])
def glBindTexture(target, texture):
pass
@params(api='gl', prms=['x1', 'y1', 'x2', 'y2'])
def glRects(x1, y1, x2, y2):
pass
@params(api='gl', prms=['program', 'shader'])
def glDetachShader(program, shader):
pass
@params(api='gl', prms=['texture', 'level', 'xoffset', 'yoffset', 'width', 'height', 'format', 'type', 'pixels'])
def glTextureSubImage2D(texture, level, xoffset, yoffset, width, height, format, type, pixels):
    pass
#encoding: utf-8
"""
analysis.py -- Parallelized data collection and figure creation
BaseAnalysis provides general functionality for running many 'embarrassingly
parallel' long-running calls in an IPython.kernel ipcluster in order to collect
data (e.g., running model simulations for various parameter sets and comparing
results across parameters).
Subclasses provide all relevant data collection and graphics code. Figures
can be created as Matplotlib figures.
Written by <NAME>, 03/30/2008.
Updated to use IPython.kernel, 01/26/2010.
Copyright (c) 2008-2009 Columbia University. All rights reserved.
Copyright (c) 2010-2011 Johns Hopkins University. All rights reserved.
This software is provided AS IS under the terms of the Open Source MIT License.
See http://www.opensource.org/licenses/mit-license.php.
"""
# Library imports
from os import path as _path, makedirs
from matplotlib.figure import Figure
from numpy import ndarray as _ndarray
# IPython imports
from IPython.kernel import client as IPclient
from IPython.kernel.multiengineclient import FullBlockingMultiEngineClient as IPmec
from IPython.kernel.taskclient import BlockingTaskClient as IPtc
# Traits imports
from enthought.traits.api import HasTraits, String, Directory, Either, \
Trait, Tuple, Instance, false, true
# Package imports
from . import ANA_DIR
from ..tools.bash import CPrint
from ..tools.path import unique_path
# Constants
PICKLE_FILE_NAME = 'analysis.pickle'
class BaseAnalysis(HasTraits):
"""
Base functionality for data analysis and plotting of model results
Data collection is initiated by calling an instance of a BaseAnalysis
subclass with arguments that get passed to the subclass's collect_data()
method. Afterwards, the associated figure can be created and displayed
by calling the view() method.
Subclasses should override:
collect_data -- do everything necessary to create the arrays of data that
you wish to be plotted; these arrays should be stored in the *results*
dictionary attribute
create_plots -- all the graphics code for appropriately plotting the data
collected in *results*; for Matplotlib code, *figure* must be set to a
figure handle or a dict of figure handles keyed by filename stubs. In
use, this should not be called directly (use view() and save_plots()).
label -- override the trait default to be more descriptive
Constructor keyword arguments:
desc -- short phrase describing the analysis being performed
datadir -- specify path where all the data should be saved
figure_size -- tuple (width, height) of figure in pixels (600, 800)
log -- whether to log output messages to analysis.log in the datadir (True)
Public methods for data collection:
get_multiengine_client -- gets an ipcontroller multi-engine client and
verifies the presence of valid ipengines
get_task_client -- like above, but returns a task client for queueing
a sequence of tasks to be farmed out to the ipcluster
Public methods:
reset -- cleans this analysis object so that it may be called again
view -- brings up the figure(s) created by create_plots()
save_plots -- saves the figure as an image file in *datadir*
save_data -- pickles the *results* dictionary in *datadir*
"""
# Data location
label = String(__name__.split('.')[-1])
desc = String
datadir = Directory
autosave = true
# A matplotlib figure handle or dict of handles
figure = Trait(None, dict, Instance(Figure))
figure_size = Tuple((600, 800))
# IPython.kernel clients
mec = Trait(None, Instance(IPmec))
tc = Trait(None, Instance(IPtc))
# Console and log output
log = true
logfd = Instance(file)
out = Instance(CPrint)
# Dictionary for storing collected data; a "finished" flag
results = Trait(dict)
finished = false
def __init__(self, **traits):
HasTraits.__init__(self, **traits)
try:
if not _path.exists(self.datadir):
makedirs(self.datadir)
except OSError:
self.out('Reverting to base directory:\n%s'%ANA_DIR, error=True)
self.datadir = ANA_DIR
finally:
self.datadir = _path.abspath(self.datadir)
self.out('%s initialized:\n%s'%(self.__class__.__name__, str(self)))
def __call__(self, *args, **kw):
"""Execute data collection; this is a wrapper for collect_data
"""
if self.finished:
self.out('Already completed. Create new instance or reset.')
return
self.out('Running data collection...')
try:
self.collect_data(*args, **kw)
except Exception, e:
import pdb, os
self.out('Unhandled exception:\n%s: %s'%(e.__class__.__name__,
e.message), error=True)
pdb.post_mortem(os.sys.exc_info()[2])
else:
self.finished = True
if len(self.results):
self._save_call_args(*args, **kw)
self.out('Finished collecting data:\n%s'%'\n'.join(['%s: %s'%
(k, self.results[k]) for k in self.results
if type(self.results[k]) is _ndarray]))
if self.autosave:
self.save_data()
else:
self.out('Warning: No results found! Analysis incomplete?')
finally:
if self.logfd and not self.logfd.closed:
self.logfd.close()
return
def __str__(self):
"""Column-formatted output of information about this analysis object
"""
col_w = 16
s = ['Subclass:'.ljust(col_w) + self.__class__.__name__]
if self.desc:
s += ['Description:'.ljust(col_w) + self.desc]
s += ['Directory:'.ljust(col_w) + self.datadir]
if self.mec is not None:
s += ['Engines:'.ljust(col_w) + str(self.mec.get_ids())]
else:
s += ['Engines:'.ljust(col_w) + 'N/A']
s += ['Autosave:'.ljust(col_w) + str(self.autosave)]
s += ['Log output:'.ljust(col_w) + str(self.log)]
s += ['Completed:'.ljust(col_w) + str(self.finished)]
if self.results:
s += ['Results:'.ljust(col_w) + '%d items:'%len(self.results)]
res_list = str(self.results.keys())[1:-1]
if len(res_list) < 60:
s += [' '*col_w + res_list]
else:
res_split = res_list[:60].split(',')
res_split[-1] = ' etc.'
res_list = ','.join(res_split)
s += [' '*col_w + res_list]
else:
s += ['Results:'.ljust(col_w) + 'None']
return '\n'.join(s)
# Subclass override methods
def collect_data(self, *args, **kw):
"""Subclass override; set the results dictionary
"""
raise NotImplementedError
def create_plots(self, *args, **kw):
"""Subclass override; create figure object
"""
raise NotImplementedError
# Public methods
def get_multiengine_client(self):
"""Gets a multi-engine client for an ipcontroller
Returns None if a valid connection could not be established.
"""
if self.mec is not None:
return self.mec
# Create and return new multi-engine client
mec = None
try:
mec = IPclient.MultiEngineClient()
except Exception, e:
self.out('Could not connect to ipcontroller:\n%s: %s'%
(e.__class__.__name__, e.message), error=True)
else:
engines = mec.get_ids()
N = len(engines)
if N:
self.out('Connected to %d ipengine instances:\n%s'%(N,
str(engines)))
else:
self.out('No ipengines connected to controller', error=True)
finally:
self.mec = mec
return mec
def get_task_client(self):
"""Gets a task client for an ipcontroller
Returns None if a valid connection could not be established.
"""
if self.tc is not None:
return self.tc
# Create and return new task client
tc = None
try:
tc = IPclient.TaskClient()
except Exception, e:
self.out('Could not connect to ipcontroller:\n%s: %s'%
(e.__class__.__name__, e.message), error=True)
finally:
self.tc = tc
return tc
def reset(self):
"""Reset analysis state so that it can be called again
"""
self.finished = False
self.results = {}
self.datadir = self._datadir_default()
self.log = False # close old log file
self.log = True # open new log file
return True
def execute(self, func, *args, **kw):
"""Wrapper for executing long-running function calls in serial
"""
if not callable(func):
self.out("Function argument to execute() must be callable",
error=True)
return
# Log and execute the call
self.out('Running %s():\n%s\nArgs: %s\nKeywords: %s'%
(func.__name__, str(func), args, kw))
func(*args, **kw)
return
def view(self):
"""Bring up the figure for this analysis
"""
if self._no_data():
return
self.create_plots()
success = False
if isinstance(self.figure, dict) or isinstance(self.figure, Figure):
self.out('Bringing up MPL figure(s)...')
from pylab import isinteractive, ion, show
if not isinteractive():
ion()
show()
success = True
else:
self.out('No valid figure object found!', error=True)
return success
def save_data(self):
"""Saves the results data for a completed analysis
"""
if self._no_data():
return
filename = _path.join(self.datadir, PICKLE_FILE_NAME)
try:
fd = open(filename, 'w')
except IOError:
self.out('Could not open save file!', error=True)
else:
import cPickle
try:
cPickle.dump(dict(self.results), fd)
except cPickle.PicklingError, e:
self.out('PicklingError: %s'%str(e), error=True)
except TypeError, e:
self.out('TypeError: %s'%str(e), error=True)
else:
                self.out('Analysis data saved to file:\n%s'%filename)
finally:
fd.close()
return
@classmethod
def load_data(cls, pickle_path='.'):
"""Gets a new analysis object containing saved results data
"""
if not pickle_path.endswith(PICKLE_FILE_NAME.split('.')[-1]):
pickle_path = _path.join(pickle_path, PICKLE_FILE_NAME)
pickle_path = _path.abspath(pickle_path)
if not _path.exists(pickle_path):
raise IOError, 'Analysis data not found: %s'%pickle_path
# Open the file
fd = file(pickle_path, 'r')
# Unpickle the results data
import cPickle, sys
results = cPickle.load(fd)
datadir = _path.split(pickle_path)[0]
fd.close()
# Create and return the new analysis object
new_analysis = cls(results=results, datadir=datadir, finished=True,
log=False)
CPrint(prefix=cls.__name__)('Results loaded from path:\n%s'%
pickle_path)
return new_analysis
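    # Hypothetical round trip (class name and arguments are illustrative):
    #   ana = MyAnalysis(desc='run 1'); ana(arg1, arg2)    # collect + autosave
    #   restored = MyAnalysis.load_data(ana.datadir)       # reloads *results*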
def save_plots(self, stem='figure', fmt='pdf'):
"""Saves the current results plots as image file(s)
Optional keyword arguments:
stem -- base filename for image file (default 'figure')
fmt -- specifies image format for saving, either 'pdf' or 'png'
"""
if self._no_data():
return
# Validate format specification
if fmt not in ('pdf', 'png'):
self.out("Image format must be either 'pdf' or 'png'", error=True)
return
# Inline function for creating unique image filenames
get_filename = \
lambda stem: unique_path(_path.join(self.datadir, stem),
fmt="%s_%02d", ext=fmt)
filename_list = []
        # Create and save the figure(s)
#!/usr/bin/python
"""Use a trunnel input file to generate examples of that file for
fuzzing.
Here's the strategy:
First, sort all the types topologically so that we consider
every type before any type that depends on it.
Then, for we iterate over each type to make examples of it. We do
a recursive descent on the syntax tree, yielding a sequence of
(entry, constraint) tuples. The "entry" item is a list whose
members are bytestrings or NamedInt objects. The "constraint" item
is an instance of Constraint that describes which NamedInt entries
must have certain values.
As we handle each (entry,constraint) tuple, we replace each
NamedInt value in the entry with its constrained value, then merge
the parts of the entry together. If we haven't seen it before for
this type, we save it to disk.
To avoid combinatorial explosions, we limit the fan-out for each
step, and choose different combinatoric strategies depending
on the number of items to be considered at once.
"""
import trunnel.CodeGen
import trunnel.Grammar
import os
import hashlib
import random
class Constraints(object):
"""A Constraints object represents a set of constraints on named integer
values. It may also represent a 'failed constraint', which is
impossible to satisfy.
"""
def __init__(self):
pass
def isFailed(self):
"""Return true iff this constraint is unsatisfiable."""
return False
def add(self, k, v):
"""Return a new constraint made by adding the constraint "k=v" to this
constrint.
"""
        raise NotImplementedError()
def merge(self, other):
"""Return a (maybe) new constraint made by adding all the constraints
in 'other' to this constraint."""
        raise NotImplementedError()
def apply(self, item):
"""Given an object that might be a NamedInt or a byte sequence, return
a byte sequence obtained by applying this constraint to
that item.
"""
if isinstance(item, NamedInt):
return item.apply(self)
return item
def getConstraint(self, name):
"""Return the (integer) value that the integer field 'name'
must have, or None if there is no such constraint.
"""
return None
class NoConstraints(Constraints):
"""Represents the absence of any constraints. Use the NIL singleton
instead of creating more of this object.
"""
def __init__(self):
Constraints.__init__(self)
def add(self, k, v):
# Nothing plus something is something
some = SomeConstraints({k: v})
return some
def merge(self, other):
# Nothing plus anything is that thing
return other
NIL = NoConstraints()
class FailedConstraint(Constraints):
"""Represents an unsatisfiable constraint, probably created by setting
the same integer to two incompatible values."""
def __init__(self):
Constraints.__init__(self)
def isFailed(self):
return True
def add(self, k, v):
# Failed can't become any more failed
return self
def merge(self, other):
# Failed can't become any more failed
return self
def apply(self, item):
# You should never call apply on a failed constraint.
assert False
FAILED = FailedConstraint()
class SomeConstraints(Constraints):
"""Represents a set of one or more constraints in a key-value dictionary.
"""
def __init__(self, d): # Owns reference to d!
Constraints.__init__(self)
self._d = d
def add(self, k, v):
try:
oldval = self._d[k]
except KeyError:
# We had no previous value for this, so we can just add it
# to our dict.
newd = self._d.copy()
newd[k] = v
return SomeConstraints(newd)
if oldval == v:
# No change, so no need to allocate a new object.
return self
else:
# Incompatible change; we can't satisfy it.
return FAILED
def merge(self, other):
if not isinstance(other, SomeConstraints):
# 'other' is either NIL or FAILED, which have simple merge rules.
return other.merge(self)
if len(other._d) < len(self._d):
# This function runs in O(len(self._d)), so let's run it
# on the shorter item.
return other.merge(self)
newd = self._d.copy()
newd.update(other._d)
for k, v in self._d.items(): # XXX Here's the inefficient O(n).
if newd[k] != v:
return FAILED
return SomeConstraints(newd)
def getConstraint(self, name):
return self._d.get(name)
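# Behaviour sketch for the classes above (values are hypothetical):
#   NIL.add('x', 1)                       -> SomeConstraints({'x': 1})
#   NIL.add('x', 1).add('x', 1)           -> unchanged (same object returned)
#   NIL.add('x', 1).add('x', 2)           -> FAILED (unsatisfiable)
#   NIL.add('x', 1).merge(NIL.add('y', 2)).getConstraint('y') -> 2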
def constrain(k, v):
if k is None:
return NIL
else:
return SomeConstraints({k: v})
class NamedInt(object):
"""Represents an integer object with a name whose value (maybe)
depends on some other part of the structure.
"""
def __init__(self, name, width, val=None):
self._name = name
self._width = width
self._val = val
def withVal(self, val):
assert self._val is None
return NamedInt(self._name, self._width, val)
def __len__(self):
return self._width
def apply(self, constraints):
val = constraints.getConstraint(self._name)
if val is None:
val = self._val
if val is None:
# We expected to have some constraint on this value, but we
# didn't. How about 3? 3 is a nice number.
val = 3
        # encode val big-endian (most significant byte first) in width bytes.
return encodeInt(val, self._width)
def encodeInt(val, width):
return b"".join(chr((val >> (width-i)*8) & 0xff)
for i in range(1, width+1))
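# Worked example (assuming Python 2, where chr() yields a byte string):
#   encodeInt(0x0102, 2) -> b'\x01\x02', i.e. most significant byte first.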
def findLength(lst):
"""Given a list of bytestrings and NamedInts, return the total
length of all items in the list.
"""
return sum(len(item) for item in lst)
def combineExamples(grp, n, maximum=256):
"""Given a sequence of examples, yield up to 'maxiumum' values built
by concatenating n items from the sequence (chosen with
replacement).
If possible, do an exhaustive combination of values. Otherwise,
take items randomly.
"""
if len(grp) ** n > maximum:
# we have to sample.
for i in range(maximum):
result = []
for j in range(n):
result.append(random.choice(grp))
yield b"".join(result)
return
else:
for e in combineExhaustively(grp, n):
yield e
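# Illustrative behaviour (hypothetical inputs): with grp = [b'a', b'b'] and
# n = 2, the 2**2 = 4 combinations fit under the default maximum of 256, so
# the exhaustive branch yields b'aa', b'ab', b'ba', b'bb'.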
def combineExhaustively(grp, n):
"""Yield all bytestrings made by concatenating n members of grp
(with replacement)."""
if n == 0:
yield b""
elif n == 1:
for e in grp:
yield e
else:
for e in grp:
for rest in combineExhaustively(grp, n-1):
yield e + rest
def crossProduct(lol):
"""Given a list of lists of (entry, constraint) pairs,
yield the cross-product of those lists.
"""
if len(lol) == 0:
return
elif len(lol) == 1:
for item, constraint in lol[0]:
yield item, constraint
else:
for item, constraint in lol[0]:
for irest, crest in crossProduct(lol[1:]):
c2 = constraint.merge(crest)
if not c2.isFailed():
yield item + irest, c2
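# Sketch of the intended behaviour (hypothetical values): with
#   lol = [[([b'a'], NIL)], [([b'x'], NIL), ([b'y'], NIL)]]
# crossProduct(lol) yields ([b'a', b'x'], NIL) and ([b'a', b'y'], NIL);
# any combination whose merged constraints come out FAILED is dropped.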
def explore(lol):
"""As cross-product, but for cases where we face a much more
combinatorically intense list of lists. For this case,
we consider the inputs position by position. For each position,
we let it vary over all its values, while choosing the simplest
value for the other positions that allows it to meet its constraints.
For example, if the lists had members (a), (x,y,z), (1,2,3), and no
constraints, we'd yield: ax1, ax1, ay1, az1, ax1, ax2, ax3.
"""
if len(lol) == 0:
return
elif len(lol) == 1:
for item, constraint in lol[0]:
yield item, constraint
else:
for idx in range(len(lol)):
for item, constraint in exploreAt(lol, idx):
yield item, constraint
def findComplying(lol, c):
"""Find a single value from among crossproduct(lol) complying with c.
Return that value and its combined constraints."""
if len(lol) == 0:
return [], c
for i, c2 in lol[0]:
cboth = c.merge(c2)
if cboth.isFailed():
continue
rest, call = findComplying(lol[1:], cboth)
if call.isFailed():
continue
        return i + rest, call
return [], FAILED
def exploreAt(lol, idx):
"""Helper for explore."""
before = lol[:idx]
at = lol[idx]
after = lol[idx+1:]
for item, constraint in at:
pre, c = findComplying(before, constraint)
post, c2 = findComplying(after, c)
yield pre + item + post, c2
def take_n(iterator, n):
"""Takes an iterator and yields up to the first n items
from that iterator."""
so_far = 0
for item in iterator:
so_far += 1
if so_far > n:
return
yield item
class CorpusGenerator(trunnel.CodeGen.ASTVisitor):
# target_dir -- where to write items
# sort_order -- topologically sorted list of structure names
# structExamples -- map from structure name to possible
# values that we generated for that structure
# _expandConst -- helper function that knows how to map constant
# names to integers.
# _maxFanout -- used to limit the branching factor when running
# combinatorically intense generators.
# _maxExamples -- maximum number of distinct examples to generate
# for each structure
# _maxCombinatorics -- when building long sequences, we try a cross-product
# approach when it would generate fewer than this many entries.
# Otherwise, we try an alternative approach; see explore().
def __init__(self, target_dir):
trunnel.CodeGen.ASTVisitor.__init__(self)
self.target_dir = target_dir
self.structExamples = {}
self._maxFanout = 128
self._maxCombinatorics = 1024
self._maxExamples = 1024
self._constrainedIntFieldNames = None
self._strictFail = False # DOCDOC
def setChecker(self, ch):
self.sort_order = ch.sortedStructs
self._expandConst = ch.expandConstant
def expandConst(self, v):
| |
from script."
STR_DEBUGGEE_UNKNOWN = "Failed to find script."
STR_MULTIPLE_DEBUGGEES = "WARNING: There is more than one debuggee '%s'."
MSG_ERROR_HOST_TEXT = """The debugger was not able to set the host to '%s'.
The following error was returned:
%s"""
STR_SOURCE_NOT_FOUND = "Failed to get source from debuggee."
STR_SCRIPTS_CONNECTING = "Connecting to '%s'..."
STR_SCRIPTS_NO_SCRIPTS = "No scripts to debug on '%s'"
STR_SCRIPTS_TO_DEBUG = """Scripts to debug on '%s':
pid name
--------------------------"""
STR_STACK_TRACE = """Stack trace for thread %d:
Frame File Name Line Function
------------------------------------------------------------------------------"""
STR_SOURCE_LINES = """Source lines for thread %d from file '%s':
"""
STR_ACTIVE_THREADS = """List of active threads known to the debugger:
No Tid Name State
-----------------------------------------------"""
STR_BREAKPOINTS_LIST = """List of breakpoints:
Id State Line Filename-Scope-Condition-Encoding
------------------------------------------------------------------------------"""
STR_BREAKPOINTS_TEMPLATE = """ %2d %-8s %5d %s
%s
%s
%s"""
STR_ENCRYPTION_SUPPORT_ERROR = "Encryption is not supported since the python-crypto package was not found. Either install the python-crypto package or allow unencrypted connections."
STR_PASSWORD_NOT_SET = 'Password is not set.'
STR_PASSWORD_SET = 'Password is set to: "%s"'
STR_PASSWORD_BAD = 'The password should begin with a letter and continue with any combination of digits, letters or underscores (\'_\'). Only English characters are accepted for letters.'
STR_ENCRYPT_MODE = 'Force encryption mode: %s'
STR_REMOTE_MODE = 'Allow remote machines mode: %s'
STR_ENCODING_MODE = 'Encoding is set to: %s'
STR_ENCODING_MODE_SET = 'Encoding was set to: %s'
STR_ENCODING_BAD = 'The specified encoding was not recognized by the debugger.'
STR_ENVIRONMENT = 'The current environment mapping is:'
STR_ENVIRONMENT_EMPTY = 'The current environment mapping is not set.'
STR_SYNCHRONICITY_BAD = "Can not process command when thread is running unless synchronicity mode is turned on. Type 'help synchro' at the command prompt for more information."
STR_SYNCHRONICITY_MODE = 'The synchronicity mode is set to: %s'
STR_TRAP_MODE = 'Trap unhandled exceptions mode is set to: %s'
STR_TRAP_MODE_SET = "Trap unhandled exceptions mode was set to: %s."
STR_FORK_MODE = "Fork mode is set to: %s, %s."
STR_FORK_MODE_SET = "Fork mode was set to: %s, %s."
STR_LOCAL_NAMESPACE_WARNING = 'Debugger modifications to the original bindings of the local namespace of this frame will be committed before the execution of the next statement of the frame. Any code using these variables executed before that point will see the original values.'
STR_WARNING = 'Warning: %s'
STR_MAX_NAMESPACE_WARNING_TITLE = 'Namespace Warning'
STR_MAX_NAMESPACE_WARNING_TYPE = '*** WARNING ***'
STR_MAX_NAMESPACE_WARNING_MSG = 'Number of items exceeds capacity of namespace browser.'
STR_MAX_EVALUATE_LENGTH_WARNING = 'Output length exceeds maximum capacity.'
FORK_CHILD = 'child'
FORK_PARENT = 'parent'
FORK_MANUAL = 'manual'
FORK_AUTO = 'auto'
ENCRYPTION_ENABLED = 'encrypted'
ENCRYPTION_DISABLED = 'plain-text'
STATE_ENABLED = 'enabled'
STATE_DISABLED = 'disabled'
BREAKPOINTS_FILE_EXT = '.bpl'
PYTHON_FILE_EXTENSION = '.py'
PYTHONW_FILE_EXTENSION = '.pyw'
PYTHONW_SO_EXTENSION = '.so'
PYTHON_EXT_LIST = ['.py', '.pyw', '.pyc', '.pyd', '.pyo', '.so']
MODULE_SCOPE = '?'
MODULE_SCOPE2 = '<module>'
BLENDER_SOURCE_NOT_AVAILABLE = as_unicode('Blender script source code is not available.')
SOURCE_NOT_AVAILABLE = as_unicode('Source code is not available.')
SCOPE_SEP = '.'
BP_FILENAME_SEP = ':'
BP_EVAL_SEP = ','
DEBUGGER_FILENAME = 'rpdb2.py'
THREADING_FILENAME = 'threading.py'
STR_STATE_BROKEN = 'waiting at break point'
STATE_BROKEN = 'broken'
STATE_RUNNING = 'running'
STATE_ANALYZE = 'analyze'
STATE_DETACHED = 'detached'
STATE_DETACHING = 'detaching'
STATE_SPAWNING = 'spawning'
STATE_ATTACHING = 'attaching'
DEFAULT_NUMBER_OF_LINES = 20
DICT_KEY_TID = 'tid'
DICT_KEY_STACK = 'stack'
DICT_KEY_CODE_LIST = 'code_list'
DICT_KEY_CURRENT_TID = 'current tid'
DICT_KEY_BROKEN = 'broken'
DICT_KEY_BREAKPOINTS = 'breakpoints'
DICT_KEY_LINES = 'lines'
DICT_KEY_FILENAME = 'filename'
DICT_KEY_FIRST_LINENO = 'first_lineno'
DICT_KEY_FRAME_LINENO = 'frame_lineno'
DICT_KEY_EVENT = 'event'
DICT_KEY_EXPR = 'expr'
DICT_KEY_NAME = 'name'
DICT_KEY_REPR = 'repr'
DICT_KEY_IS_VALID = 'fvalid'
DICT_KEY_TYPE = 'type'
DICT_KEY_SUBNODES = 'subnodes'
DICT_KEY_N_SUBNODES = 'n_subnodes'
DICT_KEY_ERROR = 'error'
RPDB_EXEC_INFO = as_unicode('rpdb_exception_info')
MODE_ON = 'ON'
MODE_OFF = 'OFF'
ENCODING_UTF8_PREFIX_1 = '\xef\xbb\xbf'
ENCODING_SOURCE = '# -*- coding: %s -*-\n'
ENCODING_AUTO = as_unicode('auto')
ENCODING_RAW = as_unicode('raw')
ENCODING_RAW_I = as_unicode('__raw')
MAX_EVALUATE_LENGTH = 256 * 1024
MAX_NAMESPACE_ITEMS = 1024
MAX_SORTABLE_LENGTH = 256 * 1024
REPR_ID_LENGTH = 4096
MAX_NAMESPACE_WARNING = {
DICT_KEY_EXPR: STR_MAX_NAMESPACE_WARNING_TITLE,
DICT_KEY_NAME: STR_MAX_NAMESPACE_WARNING_TITLE,
DICT_KEY_REPR: STR_MAX_NAMESPACE_WARNING_MSG,
DICT_KEY_IS_VALID: False,
DICT_KEY_TYPE: STR_MAX_NAMESPACE_WARNING_TYPE,
DICT_KEY_N_SUBNODES: 0
}
MAX_EVENT_LIST_LENGTH = 1000
EVENT_EXCLUDE = 'exclude'
EVENT_INCLUDE = 'include'
INDEX_TABLE_SIZE = 100
DISPACHER_METHOD = 'dispatcher_method'
CONFLICTING_MODULES = ['psyco', 'pdb', 'bdb', 'doctest']
XML_DATA = """<?xml version='1.0'?>
<methodCall>
<methodName>dispatcher_method</methodName>
<params>
<param>
<value><string>%s</string></value>
</param>
</params>
</methodCall>""" % RPDB_COMPATIBILITY_VERSION
N_WORK_QUEUE_THREADS = 8
DEFAULT_PATH_SUFFIX_LENGTH = 55
ELLIPSIS_UNICODE = as_unicode('...')
ELLIPSIS_BYTES = as_bytes('...')
ERROR_NO_ATTRIBUTE = 'Error: No attribute.'
g_server_lock = threading.RLock()
g_server = None
g_debugger = None
g_fScreen = False
g_fDefaultStd = True
#
# In debug mode errors and tracebacks are printed to stdout
#
g_fDebug = False
#
# Lock for the traceback module to prevent it from interleaving
# output from different threads.
#
g_traceback_lock = threading.RLock()
g_source_provider_aux = None
g_lines_cache = {}
g_initial_cwd = []
g_error_mapping = {
socket.error: STR_COMMUNICATION_FAILURE,
CConnectionException: STR_LOST_CONNECTION,
FirewallBlock: STR_FIREWALL_BLOCK,
BadVersion: STR_BAD_VERSION,
UnexpectedData: STR_UNEXPECTED_DATA,
SpawnUnsupported: STR_SPAWN_UNSUPPORTED,
UnknownServer: STR_DEBUGGEE_UNKNOWN,
UnsetPassword: STR_PASSWORD_MUST_BE_SET,
EncryptionNotSupported: STR_DEBUGGEE_NO_ENCRYPTION,
EncryptionExpected: STR_ENCRYPTION_EXPECTED,
DecryptionFailure: STR_DECRYPTION_FAILURE,
AuthenticationBadData: STR_ACCESS_DENIED,
AuthenticationFailure: STR_ACCESS_DENIED,
BadMBCSPath: STR_BAD_MBCS_PATH,
AlreadyAttached: STR_ALREADY_ATTACHED,
NotAttached: STR_NOT_ATTACHED,
DebuggerNotBroken: STR_DEBUGGEE_NOT_BROKEN,
NoThreads: STR_NO_THREADS,
NoExceptionFound: STR_EXCEPTION_NOT_FOUND,
}
#
# These globals are related to handling the os.fork() os._exit() and exec
# pattern.
#
g_forkpid = None
g_forktid = None
g_fignorefork = False
g_exectid = None
g_execpid = None
g_fos_exit = False
#
# To hold a reference to __main__ to prevent its release if an unhandled
# exception is raised.
#
g_module_main = None
g_found_conflicting_modules = []
g_fignore_atexit = False
g_ignore_broken_pipe = 0
#
# Unicode version of path names that do not encode well witn the windows
# 'mbcs' encoding. This dict is used to work with such path names on
# windows.
#
g_found_unicode_files = {}
g_frames_path = {}
g_signal_handlers = {}
g_signals_pending = []
#g_profile = None
g_fFirewallTest = True
if is_py3k():
g_safe_base64_to = bytes.maketrans(as_bytes('/+='), as_bytes('_-#'))
g_safe_base64_from = bytes.maketrans(as_bytes('_-#'), as_bytes('/+='))
else:
g_safe_base64_to = string.maketrans(as_bytes('/+='), as_bytes('_-#'))
g_safe_base64_from = string.maketrans(as_bytes('_-#'), as_bytes('/+='))
g_alertable_waiters = {}
g_builtins_module = sys.modules.get('__builtin__', sys.modules.get('builtins'))
#
# ---------------------------- General Utils ------------------------------
#
def job_wrapper(event, foo, *args, **kwargs):
try:
#print_debug('Thread %d doing job %s' % (thread.get_ident(), foo.__name__))
foo(*args, **kwargs)
finally:
event.set()
def send_job(tid, timeout, foo, *args, **kwargs):
#
# Attempt to send job to thread tid.
# Will throw KeyError if thread tid is not available for jobs.
#
(lock, jobs) = g_alertable_waiters[tid]
event = threading.Event()
f = lambda: job_wrapper(event, foo, *args, **kwargs)
jobs.append(f)
try:
lock.acquire()
lock_notify_all(lock)
finally:
lock.release()
safe_wait(event, timeout)
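# Illustrative pairing (names are hypothetical): a worker thread that is
# blocked in alertable_wait(lock) can be handed work from another thread via
#   send_job(worker_tid, 5.0, some_callable, arg1, kw=value)
# which appends the wrapped job, notifies the lock so the waiter wakes up and
# runs it in the worker thread, then waits up to 5 seconds for completion.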
def alertable_wait(lock, timeout = None):
jobs = []
tid = thread.get_ident()
g_alertable_waiters[tid] = (lock, jobs)
try:
safe_wait(lock, timeout)
while len(jobs) != 0:
job = jobs.pop(0)
try:
job()
except:
pass
if len(jobs) == 0:
time.sleep(0.1)
finally:
del g_alertable_waiters[tid]
def safe_wait(lock, timeout = None):
#
# workaround windows bug where signal handlers might raise exceptions
# even if they return normally.
#
while True:
try:
t0 = time.time()
return lock.wait(timeout)
except:
if timeout == None:
continue
timeout -= (time.time() - t0)
if timeout <= 0:
return
#
# The following code is related to the ability of the debugger
# to work both on Python 2.5 and 3.0.
#
def lock_notify_all(lock):
try:
if is_py3k():
return lock.notify_all()
except AttributeError:
pass
return lock.notifyAll()
def event_is_set(event):
try:
if is_py3k():
return event.is_set()
except AttributeError:
pass
return event.isSet()
def thread_set_daemon(thread, fdaemon):
try:
if is_py3k():
return thread.set_daemon(fdaemon)
except AttributeError:
pass
return thread.setDaemon(fdaemon)
def thread_is_alive(thread):
try:
if is_py3k():
return thread.is_alive()
except AttributeError:
pass
return thread.isAlive()
def thread_set_name(thread, name):
try:
if is_py3k():
return thread.set_name(name)
except AttributeError:
pass
return thread.setName(name)
def thread_get_name(thread):
try:
if is_py3k():
return thread.get_name()
except AttributeError:
pass
return thread.getName()
def current_thread():
try:
if is_py3k():
return threading.current_thread()
except AttributeError:
pass
return threading.currentThread()
class _stub_type:
pass
def _rpdb2_bytes(s, e):
return s.encode(e)
if not hasattr(g_builtins_module, 'unicode'):
unicode = _stub_type
if not hasattr(g_builtins_module, 'long'):
long = _stub_type
if not hasattr(g_builtins_module, 'str8'):
str8 = _stub_type
if not hasattr(g_builtins_module, 'bytearray'):
bytearray = _stub_type
if not hasattr(g_builtins_module, 'bytes'):
bytes = _stub_type
#
# Pickle on Python 2.5 should know how to handle byte strings
# that arrive from Python 3.0 over sockets.
#
g_builtins_module.bytes = _rpdb2_bytes
if is_py3k():
class sets:
Set = _stub_type
BaseSet = _stub_type
ImmutableSet = _stub_type
if sys.version_info[:2] <= (2, 3):
set = sets.Set
def _raw_input(s):
if is_py3k():
return input(s)
i = raw_input(s)
i = as_unicode(i, detect_encoding(sys.stdin), fstrict = True)
return i
def _print(s, f = sys.stdout, feol = True):
s = as_unicode(s)
encoding = detect_encoding(f)
s = as_bytes(s, encoding, fstrict = False)
s = as_string(s, encoding)
if feol:
f.write(s + '\n')
else:
f.write(s)
def detect_encoding(file):
try:
encoding = file.encoding
if encoding == None:
return detect_locale()
except:
return detect_locale()
try:
codecs.lookup(encoding)
return encoding
except:
pass
if encoding.lower().startswith('utf_8'):
return 'utf-8'
return 'ascii'
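# Example of the fallback chain (the stream is hypothetical): for a redirected
# stdout whose .encoding attribute is None, detect_encoding(sys.stdout)
# returns detect_locale(), e.g. 'utf-8' on a UTF-8 locale, degrading to
# 'ascii' when no usable locale encoding is available.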
def detect_locale():
encoding = locale.getdefaultlocale()[1]
if encoding == None:
return 'ascii'
try:
codecs.lookup(encoding)
return encoding
except:
pass
if encoding.lower().startswith('utf_8'):
return 'utf-8'
return 'ascii'
def class_name(c):
s = safe_str(c)
if "'" in s:
s = s.split("'")[1]
assert(s.startswith(__name__ + '.'))
return s
def clip_filename(path, n = DEFAULT_PATH_SUFFIX_LENGTH):
suffix = calc_suffix(path, n)
if not suffix.startswith('...'):
return suffix
index = suffix.find(os.sep)
if index == -1:
return suffix
clip = '...' + suffix[index:]
return clip
def safe_str(x):
try:
return str(x)
except:
return 'N/A'
def safe_repr(x):
try:
return repr(x)
except:
return 'N/A'
def parse_type(t):
rt = safe_repr(t)
if not "'" in rt:
return rt
st = rt.split("'")[1]
return st
def repr_list(pattern, l, length, encoding, is_valid):
length = max(0, length - len(pattern) + 2)
s = ''
index = | |
kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index")
return self.remove_result_endpoint.call_with_http_info(**kwargs)
def result(self, **kwargs):
"""result # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.result(async_req=True)
>>> result = thread.get()
Keyword Args:
username (str): [optional] if omitted the server will use the default value of "GAST"
password (str): [optional] if omitted the server will use the default value of "<PASSWORD>"
name (str): [optional]
area (str): [optional] if omitted the server will use the default value of "free"
compress (str): [optional] if omitted the server will use the default value of "false"
language (str): [optional] if omitted the server will use the default value of "de"
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index")
return self.result_endpoint.call_with_http_info(**kwargs)
def resultfile(self, **kwargs):
"""resultfile # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.resultfile(async_req=True)
>>> result = thread.get()
Keyword Args:
username (str): [optional] if omitted the server will use the default value of "GAST"
password (str): [optional] if omitted the server will use the default value of "<PASSWORD>"
name (str): [optional]
area (str): [optional] if omitted the server will use the default value of "free"
compress (str): [optional] if omitted the server will use the default value of "false"
format (str): [optional] if omitted the server will use the default value of "csv"
language (str): [optional] if omitted the server will use the default value of "de"
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index")
return self.resultfile_endpoint.call_with_http_info(**kwargs)
def results(self, **kwargs):
"""results # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.results(async_req=True)
>>> result = thread.get()
Keyword Args:
username (str): [optional] if omitted the server will use the default value of "GAST"
password (str): [optional] if omitted the server will use the default value of "<PASSWORD>"
selection (str): [optional]
area (str): [optional] if omitted the server will use the default value of "free"
pagelength (str): [optional] if omitted the server will use the default value of "100"
language (str): [optional] if omitted the server will use the default value of "de"
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index")
return self.results_endpoint.call_with_http_info(**kwargs)
def statistic(self, **kwargs):
"""statistic # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.statistic(async_req=True)
>>> result = thread.get()
Keyword Args:
username (str): [optional] if omitted the server will use the default value of "GAST"
password (str): [optional] if omitted the server will use the default value of "<PASSWORD>"
name (str): [optional]
area (str): [optional] if omitted the server will use the default value of "free"
language (str): [optional] if omitted the server will use the default value of "de"
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index")
return self.statistic_endpoint.call_with_http_info(**kwargs)
def statistics(self, **kwargs):
"""statistics # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.statistics(async_req=True)
>>> result = thread.get()
Keyword Args:
username (str): [optional] if omitted the server will use the default value of "GAST"
password (str): [optional] if omitted the server will use the default value of "GAST"
selection (str): [optional]
searchcriterion (str): [optional] if omitted the server will use the default value of "Code"
sortcriterion (str): [optional] if omitted the server will use the default value of "Name"
pagelength (str): [optional] if omitted the server will use the default value of "100"
language (str): [optional] if omitted the server will use the default value of "de"
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. | |
import os
import math
import requests
import json
import datetime
import hashlib
from pygdpr.models.dpa import DPA
from bs4 import BeautifulSoup
from pygdpr.services.filename_from_path_service import filename_from_path_service
from pygdpr.services.pdf_to_text_service import PDFToTextService
from pygdpr.specifications import pdf_file_extension_specification
from pygdpr.specifications.should_retain_document_specification import ShouldRetainDocumentSpecification
from pygdpr.models.common.pagination import Pagination
from pygdpr.policies.gdpr_policy import GDPRPolicy
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from pygdpr.policies.webdriver_exec_policy import WebdriverExecPolicy
class Estonia(DPA):
def __init__(self, path=os.curdir):
country_code='EE'
super().__init__(country_code, path)
    def get_source(self, page_url=None, driver=None, to_print=True):
assert (page_url is not None)
results_response = None
try:
results_response = requests.request('GET', page_url)
results_response.raise_for_status()
except requests.exceptions.HTTPError as error:
if to_print:
print(error)
pass
return results_response
def get_docs(self, existing_docs=[], overwrite=False, to_print=True):
added_docs = []
# call all the get_docs_X() functions
added_docs += self.get_docs_Prescriptions(existing_docs=[], overwrite=False, to_print=True)
added_docs += self.get_docs_Instructions(existing_docs=[], overwrite=False, to_print=True)
added_docs += self.get_docs_AnnualReports(existing_docs=[], overwrite=False, to_print=True)
return added_docs
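    # Hypothetical usage sketch: Estonia(path='./out').get_docs() walks each
    # category page, saves every retained decision under
    # <path>/estonia/<Category>/<md5-of-title>/ as <language_code>.pdf plus an
    # extracted .txt and a metadata.json, and returns the list of md5 hashes.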
def get_docs_Prescriptions(self, existing_docs=[], overwrite=False, to_print=True):
existed_docs = []
dict_hashcode = {}
source = {
"host": "https://www.aki.ee",
"start_path": "/et/inspektsioon-kontaktid/menetlusotsused/ettekirjutused"
}
host = source['host']
start_path = source['start_path']
page_url = host + start_path
page_source = self.get_source(page_url=page_url)
        if page_source is None:
            print('page does not exist')
            return existed_docs
results_soup = BeautifulSoup(page_source.text, 'html.parser')
assert results_soup
region_inner = results_soup.find('div', class_='region-sidebar-first-inner')
menu = region_inner.find('ul', class_='menu')
for li in menu.find_all('li'):
result_href = li.find('a').get('href')
result_link = host + result_href
result_text = li.find('a').get_text()
result_year = result_text.split()[-1]
if result_year < '2018':
continue
print(result_year)
print(result_text)
result_source = self.get_source(page_url=result_link)
if result_source is None:
continue
pages_soup = BeautifulSoup(result_source.text, 'html.parser')
assert pages_soup
region_content_inner = pages_soup.find('div', class_='region-content-inner')
block_system = region_content_inner.find('div', class_='block-system')
field_item_even = block_system.find('div', class_='field-item even')
for p in field_item_even.find_all('p'):
document = p.find('a')
if document is None:
continue
document_href = document.get('href')
document_url = host + document_href
document_title = document.get_text()
# get the date from title or href
date_split = document_title.split('nr')[0]
date_str = date_split.split()[-1]
if date_str.isalpha():
date_split = document_href.split('nr')[0]
date_str_list = date_split.split('/')[-1].split('_')
if len(date_str_list) < 2:
continue
else:
date_str = date_str_list[-2]
tmp = datetime.datetime.strptime(date_str, '%d.%m.%Y')
date = datetime.date(tmp.year, tmp.month, tmp.day)
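                # Worked example with a hypothetical title: for
                # 'Ettekirjutus 15.05.2019 nr 2.1.-6/19/12' the split on 'nr'
                # gives '15.05.2019' as date_str, which parses to
                # datetime.date(2019, 5, 15); when the token before 'nr' is
                # alphabetic, the date is taken from the href instead.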
if ShouldRetainDocumentSpecification().is_satisfied_by(date) is False:
continue
document_hash = hashlib.md5(document_title.encode()).hexdigest()
if document_hash in existing_docs and overwrite == False:
if to_print:
print('\tSkipping existing document:\t', document_hash)
continue
if document_hash in dict_hashcode and dict_hashcode[document_hash] == date:
    if to_print:
        print('\tSkipping duplicate document:\t', document_hash)
    continue
print('date: ', date)
print('\tdocument_url: ', document_url)
print('\tdocument_title: ', document_title)
try:
    document_response = requests.request('GET', document_url)
    document_response.raise_for_status()
except requests.exceptions.HTTPError as error:
    if to_print:
        print(error)
    # treat a failed download like a missing response
    document_response = None
dpa_folder = self.path
# document_folder = dpa_folder + '/' + 'Prescriptions' + '/' + document_hash
document_folder = dpa_folder + '/estonia' + '/' + 'Prescriptions' + '/' + document_hash
try:
os.makedirs(document_folder)
except FileExistsError:
pass
if document_response is None:
continue
document_content = document_response.content
with open(document_folder + '/' + self.language_code + '.pdf', 'wb') as f:
f.write(document_content)
with open(document_folder + '/' + self.language_code + '.txt', 'w') as f:
document_text = PDFToTextService().text_from_pdf_path(
document_folder + '/' + self.language_code + '.pdf')
f.write(document_text)
with open(document_folder + '/' + 'metadata.json', 'w') as f:
metadata = {
'title': {
self.language_code: document_title
},
'md5': document_hash,
'releaseDate': date.strftime('%d/%m/%Y'),
'url': document_url
}
json.dump(metadata, f, indent=4, sort_keys=True, ensure_ascii=False)
existed_docs.append(document_hash)
dict_hashcode[document_hash] = date
return existed_docs
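# Instructions ("juhised"): the listing page mixes two table layouts -- a year-grouped table
# in the page body ("type 1") and an attached-files table ("type 2") -- so both are parsed
# below with the same download and metadata logic.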
def get_docs_Instructions(self, existing_docs=[], overwrite=False, to_print=True):
existed_docs = []
source = {
"host": "https://www.aki.ee",
"start_path": "/et/koik-juhised-loetelus"
}
host = source['host']
start_path = source['start_path']
page_url = host + start_path
page_source = self.get_source(page_url=page_url)
if page_source is None:
    print('page does not exist:', page_url)
    return existed_docs
results_soup = BeautifulSoup(page_source.text, 'html.parser')
assert results_soup
block_main = results_soup.find('div', class_='block-system-main')
content_clearfix = block_main.find('div', class_='content clearfix')
# type 1 files
field_name_body = content_clearfix.find('div', class_='field-name-body')
field_item = field_name_body.find('div', class_='field-item even')
tbody = field_item.find('tbody')
for tr in tbody.find_all('tr'):
year_list = []
for td in tr.find_all('td'):
if td.get_text() == 'Teiste asutustega koostöös loodud juhendid':
break
# find the year of document
if td.find('a') is None:
candidate_year = td.get_text()
if candidate_year.isdigit():
year_list.append(candidate_year)
continue
document_href = td.find('a').get('href')
document_title = td.find('a').get_text()
if len(year_list) == 0:
continue
year = year_list[-1]
if year < '2018':
continue
print('document_title:', document_title)
if document_href.startswith('https'):
document_url = document_href
else:
document_url = host + document_href
print('\tdocument_url:', document_url)
print('\tyear: ',year)
document_hash = hashlib.md5(document_title.encode()).hexdigest()
if document_hash in existing_docs and overwrite == False:
if to_print:
print('\tSkipping existing document:\t', document_hash)
continue
dpa_folder = self.path
# document_folder = dpa_folder + '/' + 'Instructions' + '/' + document_hash
document_folder = dpa_folder + '/estonia' + '/' + 'Instructions' + '/' + document_hash
try:
os.makedirs(document_folder)
except FileExistsError:
pass
try:
    document_response = requests.request('GET', document_url)
    document_response.raise_for_status()
except requests.exceptions.HTTPError as error:
    if to_print:
        print(error)
    # treat a failed download like a missing response
    document_response = None
if document_response is None:
continue
document_content = document_response.content
with open(document_folder + '/' + self.language_code + '.pdf', 'wb') as f:
f.write(document_content)
with open(document_folder + '/' + self.language_code + '.txt', 'w') as f:
document_text = PDFToTextService().text_from_pdf_path(
document_folder + '/' + self.language_code + '.pdf')
f.write(document_text)
with open(document_folder + '/' + 'metadata.json', 'w') as f:
metadata = {
'title': {
self.language_code: document_title
},
'md5': document_hash,
'releaseDate': year,
'url': document_url
}
json.dump(metadata, f, indent=4, sort_keys=True, ensure_ascii=False)
existed_docs.append(document_hash)
# type 2 files
field_name_field = content_clearfix.find('div', class_='field-name-field-files')
field_item = field_name_field.find('div', class_='field-item even')
tbody = field_item.find('tbody')
for tr in tbody.find_all('tr', class_='odd'):
year_str = tr.find('td', class_='extended-file-field-table-date').get_text()
document_section = tr.find('td', class_='extended-file-field-table-filename')
document_url = document_section.find('a').get('href')
document_title = document_section.find('a').get_text()
year = year_str.split()[0].split('.')[-1]
document_hash = hashlib.md5(document_title.encode()).hexdigest()
if year < '2018':
continue
print('document_url:', document_url)
print('document_title:', document_title)
print(year)
if document_hash in existing_docs and overwrite == False:
if to_print:
print('\tSkipping existing document:\t', document_hash)
continue
dpa_folder = self.path
#document_folder = dpa_folder + '/' + 'Instructions' + '/' + document_hash
document_folder = dpa_folder + '/estonia' + '/' + 'Instructions' + '/' + document_hash
try:
os.makedirs(document_folder)
except FileExistsError:
pass
try:
    document_response = requests.request('GET', document_url)
    document_response.raise_for_status()
except requests.exceptions.HTTPError as error:
    if to_print:
        print(error)
    # treat a failed download like a missing response
    document_response = None
if document_response is None:
continue
document_content = document_response.content
with open(document_folder + '/' + self.language_code + '.pdf', 'wb') as f:
f.write(document_content)
with open(document_folder + '/' + self.language_code + '.txt', 'w') as f:
document_text = PDFToTextService().text_from_pdf_path(
document_folder + '/' + self.language_code + '.pdf')
f.write(document_text)
with open(document_folder + '/' + 'metadata.json', 'w') as f:
metadata = {
'title': {
self.language_code: document_title
},
'md5': document_hash,
'releaseDate': year,
'url': document_url
}
json.dump(metadata, f, indent=4, sort_keys=True, ensure_ascii=False)
existed_docs.append(document_hash)
return existed_docs
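# Annual Reports: the newest report is linked directly from aastaraamat.aki.ee; older reports
# are only reachable through a JavaScript menu, so a headless Chrome session (via
# WebdriverExecPolicy) is used below to resolve those links.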
def get_docs_AnnualReports(self, existing_docs=[], overwrite=False, to_print=True):
existed_docs = []
page_url = 'https://aastaraamat.aki.ee/'
page_source = self.get_source(page_url=page_url)
if page_source is None:
    print('page does not exist:', page_url)
    return existed_docs
results_soup = BeautifulSoup(page_source.text, 'html.parser')
assert results_soup
# get the newest annual report
region_content = results_soup.find('div', class_='region region-content')
block_core = region_content.find('div', class_='block-core')
document_title = block_core.get_text().strip()
print('document_title: ', document_title)
document_hash = hashlib.md5(document_title.encode()).hexdigest()
year = document_title.split()[-1].strip()
block_views = region_content.find('div', class_='block-views')
view_content = block_views.find('div', class_='view-content')
for div in view_content.find_all('div', class_='views-row'):
document = div.find('div', class_='views-field-title')
title = document.get_text()
document_href = document.find('a').get('href')
if title != 'Aastaraamatu PDF':
continue
document_url = page_url + document_href
document_source = self.get_source(page_url=document_url)
if document_source is None:
    print('page does not exist:', document_url)
    continue
document_soup = BeautifulSoup(document_source.text, 'html.parser')
assert document_soup
node_content = document_soup.find('div', class_='node__content')
text_formatted = node_content.find('div', class_='text-formatted')
article_href = text_formatted.find('a').get('href')
article_url = page_url + article_href
print('\turl: ', article_url)
if document_hash in existing_docs and overwrite == False:
if to_print:
print('\tSkipping existing document:\t', document_hash)
continue
dpa_folder = self.path
# document_folder = dpa_folder + '/' + 'Annual Reports' + '/' + document_hash
document_folder = dpa_folder + '/estonia' + '/' + 'Annual Reports' + '/' + document_hash
try:
os.makedirs(document_folder)
except FileExistsError:
pass
try:
    document_response = requests.request('GET', article_url)
    document_response.raise_for_status()
except requests.exceptions.HTTPError as error:
    if to_print:
        print(error)
    # treat a failed download like a missing response
    document_response = None
if document_response is None:
continue
document_content = document_response.content
with open(document_folder + '/' + self.language_code + '.pdf', 'wb') as f:
f.write(document_content)
with open(document_folder + '/' + self.language_code + '.txt', 'w') as f:
document_text = PDFToTextService().text_from_pdf_path(
document_folder + '/' + self.language_code + '.pdf')
f.write(document_text)
with open(document_folder + '/' + 'metadata.json', 'w') as f:
metadata = {
'title': {
self.language_code: document_title
},
'md5': document_hash,
'releaseDate': year,
'url': document_url
}
json.dump(metadata, f, indent=4, sort_keys=True, ensure_ascii=False)
existed_docs.append(document_hash)
# older annual reports
exec_path = WebdriverExecPolicy().get_system_path()
options = webdriver.ChromeOptions()
options.add_argument('headless')
driver_doc = webdriver.Chrome(options=options, executable_path=exec_path)
driver_doc.get(page_url)
for i in range(1, 3):
document = driver_doc.find_element(By.XPATH, '//*[@id="block-aastaraamat-main-menu"]/ul/li[3]/ul/li['+str(i)+']/a')
document_href = document.get_attribute("href")
page_source = self.get_source(page_url=document_href)
if page_source is None:
    print('page does not exist:', document_href)
    continue
document_soup = BeautifulSoup(page_source.text, 'html.parser')
assert document_soup
region_content = document_soup.find('div', class_='region region-content')
block_core = region_content.find('div', class_='block-core')
document_title = block_core.get_text().strip()
year_str = document_title.split()[-1].strip()
# 2019 annual report
if year_str == '2019':
year = year_str
print('document_title: ', document_title)
print('\turl: ', document_href)
document_hash = hashlib.md5(document_title.encode()).hexdigest()
block_views = region_content.find('div', class_='block-views')
view_content = block_views.find('div', class_='view-content')
for div in view_content.find_all('div', class_='views-row'):
document = div.find('div', class_='views-field-title')
title = document.get_text()
document_href = document.find('a').get('href')
if title != 'Aastaraamatu PDF':
    continue
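# A minimal usage sketch for this class (the output directory below is hypothetical):
#
#     dpa = Estonia(path='./output')
#     new_hashes = dpa.get_docs(existing_docs=[], overwrite=False, to_print=True)
#     print(len(new_hashes), 'documents downloaded')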
# coding: utf-8
# flake8: noqa
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 4
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import apis into sdk package
from isi_sdk_8_0_1.api.antivirus_api import AntivirusApi
from isi_sdk_8_0_1.api.audit_api import AuditApi
from isi_sdk_8_0_1.api.auth_api import AuthApi
from isi_sdk_8_0_1.api.auth_groups_api import AuthGroupsApi
from isi_sdk_8_0_1.api.auth_providers_api import AuthProvidersApi
from isi_sdk_8_0_1.api.auth_roles_api import AuthRolesApi
from isi_sdk_8_0_1.api.auth_users_api import AuthUsersApi
from isi_sdk_8_0_1.api.certificate_api import CertificateApi
from isi_sdk_8_0_1.api.cloud_api import CloudApi
from isi_sdk_8_0_1.api.cluster_api import ClusterApi
from isi_sdk_8_0_1.api.cluster_nodes_api import ClusterNodesApi
from isi_sdk_8_0_1.api.debug_api import DebugApi
from isi_sdk_8_0_1.api.dedupe_api import DedupeApi
from isi_sdk_8_0_1.api.event_api import EventApi
from isi_sdk_8_0_1.api.file_filter_api import FileFilterApi
from isi_sdk_8_0_1.api.filepool_api import FilepoolApi
from isi_sdk_8_0_1.api.filesystem_api import FilesystemApi
from isi_sdk_8_0_1.api.fsa_api import FsaApi
from isi_sdk_8_0_1.api.fsa_results_api import FsaResultsApi
from isi_sdk_8_0_1.api.hardening_api import HardeningApi
from isi_sdk_8_0_1.api.hardware_api import HardwareApi
from isi_sdk_8_0_1.api.healthcheck_api import HealthcheckApi
from isi_sdk_8_0_1.api.id_resolution_api import IdResolutionApi
from isi_sdk_8_0_1.api.job_api import JobApi
from isi_sdk_8_0_1.api.license_api import LicenseApi
from isi_sdk_8_0_1.api.local_api import LocalApi
from isi_sdk_8_0_1.api.namespace_api import NamespaceApi
from isi_sdk_8_0_1.api.network_api import NetworkApi
from isi_sdk_8_0_1.api.network_groupnets_api import NetworkGroupnetsApi
from isi_sdk_8_0_1.api.network_groupnets_subnets_api import NetworkGroupnetsSubnetsApi
from isi_sdk_8_0_1.api.protocols_api import ProtocolsApi
from isi_sdk_8_0_1.api.protocols_hdfs_api import ProtocolsHdfsApi
from isi_sdk_8_0_1.api.quota_api import QuotaApi
from isi_sdk_8_0_1.api.quota_quotas_api import QuotaQuotasApi
from isi_sdk_8_0_1.api.quota_reports_api import QuotaReportsApi
from isi_sdk_8_0_1.api.remotesupport_api import RemotesupportApi
from isi_sdk_8_0_1.api.snapshot_api import SnapshotApi
from isi_sdk_8_0_1.api.snapshot_changelists_api import SnapshotChangelistsApi
from isi_sdk_8_0_1.api.snapshot_snapshots_api import SnapshotSnapshotsApi
from isi_sdk_8_0_1.api.statistics_api import StatisticsApi
from isi_sdk_8_0_1.api.storagepool_api import StoragepoolApi
from isi_sdk_8_0_1.api.sync_api import SyncApi
from isi_sdk_8_0_1.api.sync_policies_api import SyncPoliciesApi
from isi_sdk_8_0_1.api.sync_reports_api import SyncReportsApi
from isi_sdk_8_0_1.api.sync_target_api import SyncTargetApi
from isi_sdk_8_0_1.api.upgrade_api import UpgradeApi
from isi_sdk_8_0_1.api.upgrade_cluster_api import UpgradeClusterApi
from isi_sdk_8_0_1.api.worm_api import WormApi
from isi_sdk_8_0_1.api.zones_api import ZonesApi
from isi_sdk_8_0_1.api.zones_summary_api import ZonesSummaryApi
# import ApiClient
from isi_sdk_8_0_1.api_client import ApiClient
from isi_sdk_8_0_1.configuration import Configuration
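# A minimal usage sketch (assuming the standard swagger-codegen client pattern; the host
# address and credentials below are placeholders):
#
#     import isi_sdk_8_0_1
#     configuration = isi_sdk_8_0_1.Configuration()
#     configuration.host = 'https://cluster-address:8080'
#     configuration.username = 'root'
#     configuration.password = 'secret'
#     configuration.verify_ssl = False
#     api_client = isi_sdk_8_0_1.ApiClient(configuration)
#     cluster_api = isi_sdk_8_0_1.ClusterApi(api_client)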
# import models into sdk package
from isi_sdk_8_0_1.models.access_point_create_params import AccessPointCreateParams
from isi_sdk_8_0_1.models.acl_object import AclObject
from isi_sdk_8_0_1.models.ads_provider_controllers import AdsProviderControllers
from isi_sdk_8_0_1.models.ads_provider_controllers_controller import AdsProviderControllersController
from isi_sdk_8_0_1.models.ads_provider_domains import AdsProviderDomains
from isi_sdk_8_0_1.models.ads_provider_domains_domain import AdsProviderDomainsDomain
from isi_sdk_8_0_1.models.ads_provider_search import AdsProviderSearch
from isi_sdk_8_0_1.models.ads_provider_search_object import AdsProviderSearchObject
from isi_sdk_8_0_1.models.antivirus_policies import AntivirusPolicies
from isi_sdk_8_0_1.models.antivirus_policy import AntivirusPolicy
from isi_sdk_8_0_1.models.antivirus_quarantine import AntivirusQuarantine
from isi_sdk_8_0_1.models.antivirus_quarantine_path_params import AntivirusQuarantinePathParams
from isi_sdk_8_0_1.models.antivirus_scan_item import AntivirusScanItem
from isi_sdk_8_0_1.models.antivirus_server import AntivirusServer
from isi_sdk_8_0_1.models.antivirus_servers import AntivirusServers
from isi_sdk_8_0_1.models.antivirus_settings import AntivirusSettings
from isi_sdk_8_0_1.models.antivirus_settings_settings import AntivirusSettingsSettings
from isi_sdk_8_0_1.models.audit_progress import AuditProgress
from isi_sdk_8_0_1.models.audit_progress_progress import AuditProgressProgress
from isi_sdk_8_0_1.models.audit_settings import AuditSettings
from isi_sdk_8_0_1.models.audit_settings_settings import AuditSettingsSettings
from isi_sdk_8_0_1.models.audit_topic import AuditTopic
from isi_sdk_8_0_1.models.audit_topic_create_params import AuditTopicCreateParams
from isi_sdk_8_0_1.models.audit_topics import AuditTopics
from isi_sdk_8_0_1.models.auth_access import AuthAccess
from isi_sdk_8_0_1.models.auth_access_access_item import AuthAccessAccessItem
from isi_sdk_8_0_1.models.auth_access_access_item_file import AuthAccessAccessItemFile
from isi_sdk_8_0_1.models.auth_access_access_item_file_file_permissions import AuthAccessAccessItemFileFilePermissions
from isi_sdk_8_0_1.models.auth_access_access_item_file_group import AuthAccessAccessItemFileGroup
from isi_sdk_8_0_1.models.auth_access_access_item_share import AuthAccessAccessItemShare
from isi_sdk_8_0_1.models.auth_access_access_item_share_effective_user import AuthAccessAccessItemShareEffectiveUser
from isi_sdk_8_0_1.models.auth_access_access_item_share_share_permissions import AuthAccessAccessItemShareSharePermissions
from isi_sdk_8_0_1.models.auth_access_access_item_share_share_permissions_share_relevant_ace import AuthAccessAccessItemShareSharePermissionsShareRelevantAce
from isi_sdk_8_0_1.models.auth_cache_item import AuthCacheItem
from isi_sdk_8_0_1.models.auth_group import AuthGroup
from isi_sdk_8_0_1.models.auth_group_extended import AuthGroupExtended
from isi_sdk_8_0_1.models.auth_group_object_history_item import AuthGroupObjectHistoryItem
from isi_sdk_8_0_1.models.auth_groups import AuthGroups
from isi_sdk_8_0_1.models.auth_id import AuthId
from isi_sdk_8_0_1.models.auth_id_ntoken import AuthIdNtoken
from isi_sdk_8_0_1.models.auth_id_ntoken_privilege_item import AuthIdNtokenPrivilegeItem
from isi_sdk_8_0_1.models.auth_ldap_templates import AuthLdapTemplates
from isi_sdk_8_0_1.models.auth_ldap_templates_extended import AuthLdapTemplatesExtended
from isi_sdk_8_0_1.models.auth_ldap_templates_ldap_field_template_item import AuthLdapTemplatesLdapFieldTemplateItem
from isi_sdk_8_0_1.models.auth_log_level import AuthLogLevel
from isi_sdk_8_0_1.models.auth_log_level_extended import AuthLogLevelExtended
from isi_sdk_8_0_1.models.auth_log_level_level import AuthLogLevelLevel
from isi_sdk_8_0_1.models.auth_netgroup import AuthNetgroup
from isi_sdk_8_0_1.models.auth_netgroups import AuthNetgroups
from isi_sdk_8_0_1.models.auth_privilege import AuthPrivilege
from isi_sdk_8_0_1.models.auth_privileges import AuthPrivileges
from isi_sdk_8_0_1.models.auth_role import AuthRole
from isi_sdk_8_0_1.models.auth_roles import AuthRoles
from isi_sdk_8_0_1.models.auth_shells import AuthShells
from isi_sdk_8_0_1.models.auth_user import AuthUser
from isi_sdk_8_0_1.models.auth_users import AuthUsers
from isi_sdk_8_0_1.models.auth_wellknowns import AuthWellknowns
from isi_sdk_8_0_1.models.certificate_server import CertificateServer
from isi_sdk_8_0_1.models.certificate_server_certificate import CertificateServerCertificate
from isi_sdk_8_0_1.models.certificate_server_certificate_fingerprint import CertificateServerCertificateFingerprint
from isi_sdk_8_0_1.models.certificate_server_id_params import CertificateServerIdParams
from isi_sdk_8_0_1.models.certificate_server_item import CertificateServerItem
from isi_sdk_8_0_1.models.changelist_lins import ChangelistLins
from isi_sdk_8_0_1.models.changelist_lins_ctime import ChangelistLinsCtime
from isi_sdk_8_0_1.models.changelist_lins_extended import ChangelistLinsExtended
from isi_sdk_8_0_1.models.cloud_access import CloudAccess
from isi_sdk_8_0_1.models.cloud_access_cluster import CloudAccessCluster
from isi_sdk_8_0_1.models.cloud_access_item import CloudAccessItem
from isi_sdk_8_0_1.models.cloud_account import CloudAccount
from isi_sdk_8_0_1.models.cloud_account_create_params import CloudAccountCreateParams
from isi_sdk_8_0_1.models.cloud_accounts import CloudAccounts
from isi_sdk_8_0_1.models.cloud_job import CloudJob
from isi_sdk_8_0_1.models.cloud_job_create_params import CloudJobCreateParams
from isi_sdk_8_0_1.models.cloud_job_extended import CloudJobExtended
from isi_sdk_8_0_1.models.cloud_job_files import CloudJobFiles
from isi_sdk_8_0_1.models.cloud_job_files_name import CloudJobFilesName
from isi_sdk_8_0_1.models.cloud_job_job_engine_job import CloudJobJobEngineJob
from isi_sdk_8_0_1.models.cloud_jobs import CloudJobs
from isi_sdk_8_0_1.models.cloud_jobs_files import CloudJobsFiles
from isi_sdk_8_0_1.models.cloud_pool import CloudPool
from isi_sdk_8_0_1.models.cloud_pools import CloudPools
from isi_sdk_8_0_1.models.cloud_proxies import CloudProxies
from isi_sdk_8_0_1.models.cloud_proxy import CloudProxy
from isi_sdk_8_0_1.models.cloud_settings import CloudSettings
from isi_sdk_8_0_1.models.cloud_settings_settings import CloudSettingsSettings
from isi_sdk_8_0_1.models.cloud_settings_settings_cloud_policy_defaults import CloudSettingsSettingsCloudPolicyDefaults
from isi_sdk_8_0_1.models.cloud_settings_settings_cloud_policy_defaults_cache import CloudSettingsSettingsCloudPolicyDefaultsCache
from isi_sdk_8_0_1.models.cloud_settings_settings_sleep_timeout_cloud_garbage_collection import CloudSettingsSettingsSleepTimeoutCloudGarbageCollection
from isi_sdk_8_0_1.models.cluster_add_node_item import ClusterAddNodeItem
from isi_sdk_8_0_1.models.cluster_archive_item import ClusterArchiveItem
from isi_sdk_8_0_1.models.cluster_assess_item import ClusterAssessItem
from isi_sdk_8_0_1.models.cluster_config import ClusterConfig
from isi_sdk_8_0_1.models.cluster_config_device import ClusterConfigDevice
from isi_sdk_8_0_1.models.cluster_config_onefs_version import ClusterConfigOnefsVersion
from isi_sdk_8_0_1.models.cluster_config_timezone import ClusterConfigTimezone
from isi_sdk_8_0_1.models.cluster_email import ClusterEmail
from isi_sdk_8_0_1.models.cluster_email_extended import ClusterEmailExtended
from isi_sdk_8_0_1.models.cluster_email_settings import ClusterEmailSettings
from isi_sdk_8_0_1.models.cluster_firmware_progress import ClusterFirmwareProgress
from isi_sdk_8_0_1.models.cluster_firmware_status import ClusterFirmwareStatus
from isi_sdk_8_0_1.models.cluster_firmware_status_node import ClusterFirmwareStatusNode
from isi_sdk_8_0_1.models.cluster_firmware_status_node_device import ClusterFirmwareStatusNodeDevice
from isi_sdk_8_0_1.models.cluster_firmware_status_node_package_item import ClusterFirmwareStatusNodePackageItem
from isi_sdk_8_0_1.models.cluster_firmware_upgrade_item import ClusterFirmwareUpgradeItem
from isi_sdk_8_0_1.models.cluster_identity import ClusterIdentity
from isi_sdk_8_0_1.models.cluster_identity_extended import ClusterIdentityExtended
from isi_sdk_8_0_1.models.cluster_identity_logon import ClusterIdentityLogon
from isi_sdk_8_0_1.models.cluster_identity_logon_extended import ClusterIdentityLogonExtended
from isi_sdk_8_0_1.models.cluster_node import ClusterNode
from isi_sdk_8_0_1.models.cluster_node_extended import ClusterNodeExtended
from isi_sdk_8_0_1.models.cluster_node_extended_extended import ClusterNodeExtendedExtended
from isi_sdk_8_0_1.models.cluster_node_hardware import ClusterNodeHardware
from isi_sdk_8_0_1.models.cluster_node_partitions import ClusterNodePartitions
from isi_sdk_8_0_1.models.cluster_node_sensors import ClusterNodeSensors
from isi_sdk_8_0_1.models.cluster_node_state import ClusterNodeState
from isi_sdk_8_0_1.models.cluster_node_state_extended import ClusterNodeStateExtended
from isi_sdk_8_0_1.models.cluster_node_state_extended_extended import ClusterNodeStateExtendedExtended
from isi_sdk_8_0_1.models.cluster_node_state_servicelight import ClusterNodeStateServicelight
from isi_sdk_8_0_1.models.cluster_node_state_smartfail import ClusterNodeStateSmartfail
from isi_sdk_8_0_1.models.cluster_node_status import ClusterNodeStatus
from isi_sdk_8_0_1.models.cluster_nodes import ClusterNodes
from isi_sdk_8_0_1.models.cluster_nodes_available import ClusterNodesAvailable
from isi_sdk_8_0_1.models.cluster_nodes_available_node import ClusterNodesAvailableNode
from isi_sdk_8_0_1.models.cluster_nodes_error import ClusterNodesError
from isi_sdk_8_0_1.models.cluster_nodes_extended import ClusterNodesExtended
from isi_sdk_8_0_1.models.cluster_nodes_extended_extended import ClusterNodesExtendedExtended
from isi_sdk_8_0_1.models.cluster_nodes_extended_extended_extended import ClusterNodesExtendedExtendedExtended
from isi_sdk_8_0_1.models.cluster_nodes_onefs_version import ClusterNodesOnefsVersion
from isi_sdk_8_0_1.models.cluster_owner import ClusterOwner
from isi_sdk_8_0_1.models.cluster_patch_patch import ClusterPatchPatch
from isi_sdk_8_0_1.models.cluster_patch_patches import ClusterPatchPatches
from isi_sdk_8_0_1.models.cluster_patch_patches_patch import ClusterPatchPatchesPatch
from isi_sdk_8_0_1.models.cluster_patch_patches_patch_file import ClusterPatchPatchesPatchFile
from isi_sdk_8_0_1.models.cluster_patch_patches_patch_service import ClusterPatchPatchesPatchService
from isi_sdk_8_0_1.models.cluster_retry_last_action_item import ClusterRetryLastActionItem
from isi_sdk_8_0_1.models.cluster_statfs import ClusterStatfs
from isi_sdk_8_0_1.models.cluster_time import ClusterTime
from isi_sdk_8_0_1.models.cluster_time_error import ClusterTimeError
from isi_sdk_8_0_1.models.cluster_time_extended import ClusterTimeExtended
from isi_sdk_8_0_1.models.cluster_time_extended_extended import ClusterTimeExtendedExtended
from isi_sdk_8_0_1.models.cluster_time_node import ClusterTimeNode
from isi_sdk_8_0_1.models.cluster_timezone import ClusterTimezone
from isi_sdk_8_0_1.models.cluster_timezone_extended import ClusterTimezoneExtended
from isi_sdk_8_0_1.models.cluster_timezone_settings import ClusterTimezoneSettings
from isi_sdk_8_0_1.models.cluster_timezone_settings_extended import ClusterTimezoneSettingsExtended
from isi_sdk_8_0_1.models.cluster_upgrade import ClusterUpgrade
from isi_sdk_8_0_1.models.cluster_upgrade_item import ClusterUpgradeItem
from isi_sdk_8_0_1.models.cluster_version import ClusterVersion
from isi_sdk_8_0_1.models.cluster_version_node import ClusterVersionNode
from isi_sdk_8_0_1.models.compatibilities_class_active import CompatibilitiesClassActive
from isi_sdk_8_0_1.models.compatibilities_class_active_active_item import CompatibilitiesClassActiveActiveItem
from isi_sdk_8_0_1.models.compatibilities_class_active_item import CompatibilitiesClassActiveItem
from isi_sdk_8_0_1.models.compatibilities_class_available import CompatibilitiesClassAvailable
from isi_sdk_8_0_1.models.compatibilities_class_available_available_item import CompatibilitiesClassAvailableAvailableItem
from isi_sdk_8_0_1.models.compatibilities_ssd_active import CompatibilitiesSsdActive
from isi_sdk_8_0_1.models.compatibilities_ssd_active_active_item import CompatibilitiesSsdActiveActiveItem
from isi_sdk_8_0_1.models.compatibilities_ssd_active_id_params import CompatibilitiesSsdActiveIdParams
from isi_sdk_8_0_1.models.compatibilities_ssd_active_item import CompatibilitiesSsdActiveItem
from isi_sdk_8_0_1.models.compatibilities_ssd_available import CompatibilitiesSsdAvailable
from isi_sdk_8_0_1.models.compatibilities_ssd_available_available_item import CompatibilitiesSsdAvailableAvailableItem
from isi_sdk_8_0_1.models.copy_errors import CopyErrors
from isi_sdk_8_0_1.models.copy_errors_copy_errors import CopyErrorsCopyErrors
from isi_sdk_8_0_1.models.create_antivirus_scan_item_response import CreateAntivirusScanItemResponse
from isi_sdk_8_0_1.models.create_auth_refresh_item_response import CreateAuthRefreshItemResponse
from isi_sdk_8_0_1.models.create_cloud_account_response import CreateCloudAccountResponse
from isi_sdk_8_0_1.models.create_cloud_job_response import CreateCloudJobResponse
from isi_sdk_8_0_1.models.create_cloud_pool_response import CreateCloudPoolResponse
from isi_sdk_8_0_1.models.create_cloud_proxy_response import CreateCloudProxyResponse
from isi_sdk_8_0_1.models.create_compatibilities_class_active_item_response import CreateCompatibilitiesClassActiveItemResponse
from isi_sdk_8_0_1.models.create_compatibilities_class_active_item_response_merge import CreateCompatibilitiesClassActiveItemResponseMerge
from isi_sdk_8_0_1.models.create_compatibilities_class_active_item_response_split import CreateCompatibilitiesClassActiveItemResponseSplit
from isi_sdk_8_0_1.models.create_filepool_policy_response import CreateFilepoolPolicyResponse
from isi_sdk_8_0_1.models.create_hardening_apply_item_response import CreateHardeningApplyItemResponse
from isi_sdk_8_0_1.models.create_hardening_resolve_item_response import CreateHardeningResolveItemResponse
from isi_sdk_8_0_1.models.create_hardening_revert_item_response import CreateHardeningRevertItemResponse
from isi_sdk_8_0_1.models.create_job_job_response import CreateJobJobResponse
from isi_sdk_8_0_1.models.create_nfs_alias_response import CreateNfsAliasResponse
from isi_sdk_8_0_1.models.create_nfs_nlm_sessions_check_item_response import CreateNfsNlmSessionsCheckItemResponse
from isi_sdk_8_0_1.models.create_quota_report_response import CreateQuotaReportResponse
from isi_sdk_8_0_1.models.create_response import CreateResponse
from isi_sdk_8_0_1.models.create_snapshot_alias_response import CreateSnapshotAliasResponse
from isi_sdk_8_0_1.models.create_snapshot_changelist_response import CreateSnapshotChangelistResponse
from isi_sdk_8_0_1.models.create_snapshot_lock_response import CreateSnapshotLockResponse
from isi_sdk_8_0_1.models.create_snapshot_repstate_response import CreateSnapshotRepstateResponse
from isi_sdk_8_0_1.models.create_snapshot_schedule_response import CreateSnapshotScheduleResponse
from isi_sdk_8_0_1.models.create_storagepool_tier_response import CreateStoragepoolTierResponse
from isi_sdk_8_0_1.models.create_sync_reports_rotate_item_response import CreateSyncReportsRotateItemResponse
from isi_sdk_8_0_1.models.debug_stats import DebugStats
from isi_sdk_8_0_1.models.debug_stats_handler import DebugStatsHandler
from isi_sdk_8_0_1.models.debug_stats_unknown import DebugStatsUnknown
from isi_sdk_8_0_1.models.dedupe_dedupe_summary import DedupeDedupeSummary
from isi_sdk_8_0_1.models.dedupe_dedupe_summary_summary import DedupeDedupeSummarySummary
from isi_sdk_8_0_1.models.dedupe_report import DedupeReport
from isi_sdk_8_0_1.models.dedupe_report_extended import DedupeReportExtended
from isi_sdk_8_0_1.models.dedupe_reports import DedupeReports
from isi_sdk_8_0_1.models.dedupe_settings import DedupeSettings
from isi_sdk_8_0_1.models.dedupe_settings_extended import DedupeSettingsExtended
from isi_sdk_8_0_1.models.dedupe_settings_settings import DedupeSettingsSettings
from isi_sdk_8_0_1.models.diagnostics_gather_settings import DiagnosticsGatherSettings
from isi_sdk_8_0_1.models.diagnostics_gather_status import DiagnosticsGatherStatus
from isi_sdk_8_0_1.models.diagnostics_netlogger_settings import DiagnosticsNetloggerSettings
from isi_sdk_8_0_1.models.directory_query import DirectoryQuery
from isi_sdk_8_0_1.models.directory_query_scope import DirectoryQueryScope
from isi_sdk_8_0_1.models.directory_query_scope_conditions import DirectoryQueryScopeConditions
from isi_sdk_8_0_1.models.drives_drive_firmware import DrivesDriveFirmware
from isi_sdk_8_0_1.models.drives_drive_firmware_node import DrivesDriveFirmwareNode
from isi_sdk_8_0_1.models.drives_drive_firmware_node_drive import DrivesDriveFirmwareNodeDrive
from isi_sdk_8_0_1.models.drives_drive_firmware_update import DrivesDriveFirmwareUpdate
from isi_sdk_8_0_1.models.drives_drive_firmware_update_item import DrivesDriveFirmwareUpdateItem
from isi_sdk_8_0_1.models.drives_drive_firmware_update_node import DrivesDriveFirmwareUpdateNode
from isi_sdk_8_0_1.models.drives_drive_firmware_update_node_status import DrivesDriveFirmwareUpdateNodeStatus
from isi_sdk_8_0_1.models.drives_drive_format_item import DrivesDriveFormatItem
from isi_sdk_8_0_1.models.drives_drive_purpose_item import DrivesDrivePurposeItem
from isi_sdk_8_0_1.models.empty import Empty
from isi_sdk_8_0_1.models.error import Error
from isi_sdk_8_0_1.models.event_alert_condition import EventAlertCondition
from isi_sdk_8_0_1.models.event_alert_conditions import EventAlertConditions
from isi_sdk_8_0_1.models.event_alert_conditions_alert_condition import EventAlertConditionsAlertCondition
from isi_sdk_8_0_1.models.event_categories import EventCategories
from isi_sdk_8_0_1.models.event_category import EventCategory
from isi_sdk_8_0_1.models.event_channel import EventChannel
from isi_sdk_8_0_1.models.event_channel_extended_extended import EventChannelExtendedExtended
from isi_sdk_8_0_1.models.event_channel_parameters import EventChannelParameters
from isi_sdk_8_0_1.models.event_channels import EventChannels
from isi_sdk_8_0_1.models.event_channels_extended import EventChannelsExtended
from isi_sdk_8_0_1.models.event_event import EventEvent
from isi_sdk_8_0_1.models.event_eventgroup_definitions import EventEventgroupDefinitions
from isi_sdk_8_0_1.models.event_eventgroup_definitions_eventgroup_definition import EventEventgroupDefinitionsEventgroupDefinition
from isi_sdk_8_0_1.models.event_eventgroup_occurrence import EventEventgroupOccurrence
from isi_sdk_8_0_1.models.event_eventgroup_occurrences import EventEventgroupOccurrences
from isi_sdk_8_0_1.models.event_eventgroup_occurrences_eventgroup import EventEventgroupOccurrencesEventgroup
from isi_sdk_8_0_1.models.event_eventlist import EventEventlist
from isi_sdk_8_0_1.models.event_eventlist_event import EventEventlistEvent
from isi_sdk_8_0_1.models.event_eventlists import EventEventlists
from isi_sdk_8_0_1.models.event_settings import EventSettings
from isi_sdk_8_0_1.models.event_settings_settings import EventSettingsSettings
from isi_sdk_8_0_1.models.event_settings_settings_maintenance import EventSettingsSettingsMaintenance
from isi_sdk_8_0_1.models.file_filter_settings import FileFilterSettings
from isi_sdk_8_0_1.models.file_filter_settings_extended import FileFilterSettingsExtended
from isi_sdk_8_0_1.models.file_filter_settings_settings import FileFilterSettingsSettings
from isi_sdk_8_0_1.models.filepool_default_policy import FilepoolDefaultPolicy
from isi_sdk_8_0_1.models.filepool_default_policy_default_policy import FilepoolDefaultPolicyDefaultPolicy
from isi_sdk_8_0_1.models.filepool_default_policy_default_policy_action import FilepoolDefaultPolicyDefaultPolicyAction
from isi_sdk_8_0_1.models.filepool_default_policy_extended import FilepoolDefaultPolicyExtended
from isi_sdk_8_0_1.models.filepool_policies import FilepoolPolicies
from isi_sdk_8_0_1.models.filepool_policies_extended import FilepoolPoliciesExtended
from isi_sdk_8_0_1.models.filepool_policy import FilepoolPolicy
from isi_sdk_8_0_1.models.filepool_policy_action import FilepoolPolicyAction
from isi_sdk_8_0_1.models.filepool_policy_action_extended import FilepoolPolicyActionExtended
from isi_sdk_8_0_1.models.filepool_policy_extended import FilepoolPolicyExtended
from isi_sdk_8_0_1.models.filepool_policy_extended_extended import FilepoolPolicyExtendedExtended
from isi_sdk_8_0_1.models.filepool_policy_file_matching_pattern import FilepoolPolicyFileMatchingPattern
from isi_sdk_8_0_1.models.filepool_policy_file_matching_pattern_or_criteria_item import FilepoolPolicyFileMatchingPatternOrCriteriaItem
from isi_sdk_8_0_1.models.filepool_policy_file_matching_pattern_or_criteria_item_and_criteria_item import FilepoolPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem
from isi_sdk_8_0_1.models.filepool_template import FilepoolTemplate
from isi_sdk_8_0_1.models.filepool_template_action import FilepoolTemplateAction
from isi_sdk_8_0_1.models.filepool_templates import FilepoolTemplates
from isi_sdk_8_0_1.models.fsa_result import FsaResult
from isi_sdk_8_0_1.models.fsa_results import FsaResults
from isi_sdk_8_0_1.models.fsa_settings import FsaSettings
from isi_sdk_8_0_1.models.fsa_settings_settings import FsaSettingsSettings
from isi_sdk_8_0_1.models.ftp_settings import FtpSettings
from isi_sdk_8_0_1.models.ftp_settings_extended import FtpSettingsExtended
from isi_sdk_8_0_1.models.ftp_settings_settings import FtpSettingsSettings
from isi_sdk_8_0_1.models.group_members import GroupMembers
from isi_sdk_8_0_1.models.groupnet_subnet import GroupnetSubnet
from isi_sdk_8_0_1.models.groupnet_subnets import GroupnetSubnets
from isi_sdk_8_0_1.models.hardening_apply_item import HardeningApplyItem
from isi_sdk_8_0_1.models.hardening_resolve_item import HardeningResolveItem
from isi_sdk_8_0_1.models.hardening_state import HardeningState
from isi_sdk_8_0_1.models.hardening_state_state import HardeningStateState
from isi_sdk_8_0_1.models.hardening_status import HardeningStatus
from isi_sdk_8_0_1.models.hardening_status_status import HardeningStatusStatus
from isi_sdk_8_0_1.models.hardware_fcport import HardwareFcport
from isi_sdk_8_0_1.models.hardware_fcports import HardwareFcports
from isi_sdk_8_0_1.models.hardware_fcports_node import HardwareFcportsNode
from isi_sdk_8_0_1.models.hardware_fcports_node_fcport import HardwareFcportsNodeFcport
from isi_sdk_8_0_1.models.hardware_tape_name_params import HardwareTapeNameParams
from isi_sdk_8_0_1.models.hardware_tapes import HardwareTapes
from isi_sdk_8_0_1.models.hardware_tapes_devices import HardwareTapesDevices
from isi_sdk_8_0_1.models.hdfs_log_level import HdfsLogLevel
from isi_sdk_8_0_1.models.hdfs_proxyuser import HdfsProxyuser
from isi_sdk_8_0_1.models.hdfs_proxyuser_create_params import HdfsProxyuserCreateParams
from isi_sdk_8_0_1.models.hdfs_proxyusers import HdfsProxyusers
from isi_sdk_8_0_1.models.hdfs_rack import HdfsRack
from isi_sdk_8_0_1.models.hdfs_racks import HdfsRacks
from isi_sdk_8_0_1.models.hdfs_ranger_plugin_settings import HdfsRangerPluginSettings
from isi_sdk_8_0_1.models.hdfs_ranger_plugin_settings_settings import HdfsRangerPluginSettingsSettings
from isi_sdk_8_0_1.models.hdfs_settings import HdfsSettings
from isi_sdk_8_0_1.models.hdfs_settings_settings import HdfsSettingsSettings
from isi_sdk_8_0_1.models.healthcheck_checklist import HealthcheckChecklist
from isi_sdk_8_0_1.models.healthcheck_checklist_item import HealthcheckChecklistItem
from isi_sdk_8_0_1.models.healthcheck_checklist_item_parameter import HealthcheckChecklistItemParameter
from isi_sdk_8_0_1.models.healthcheck_checklist_item_thresholds import HealthcheckChecklistItemThresholds
from isi_sdk_8_0_1.models.healthcheck_checklists import HealthcheckChecklists
from isi_sdk_8_0_1.models.healthcheck_evaluation import HealthcheckEvaluation
from isi_sdk_8_0_1.models.healthcheck_evaluation_create_params import HealthcheckEvaluationCreateParams
from isi_sdk_8_0_1.models.healthcheck_evaluation_detail import HealthcheckEvaluationDetail
from isi_sdk_8_0_1.models.healthcheck_evaluation_extended import HealthcheckEvaluationExtended
from isi_sdk_8_0_1.models.healthcheck_evaluation_override import HealthcheckEvaluationOverride
from isi_sdk_8_0_1.models.healthcheck_evaluations import HealthcheckEvaluations
from isi_sdk_8_0_1.models.healthcheck_item import HealthcheckItem
from isi_sdk_8_0_1.models.healthcheck_item_parameter import HealthcheckItemParameter
from isi_sdk_8_0_1.models.healthcheck_items import HealthcheckItems
from isi_sdk_8_0_1.models.healthcheck_parameter import HealthcheckParameter
from isi_sdk_8_0_1.models.healthcheck_parameter_extended import HealthcheckParameterExtended
from isi_sdk_8_0_1.models.healthcheck_parameters import HealthcheckParameters
from isi_sdk_8_0_1.models.healthcheck_parameters_extended import HealthcheckParametersExtended
from isi_sdk_8_0_1.models.histogram_stat_by import HistogramStatBy
from isi_sdk_8_0_1.models.histogram_stat_by_breakout import HistogramStatByBreakout
from isi_sdk_8_0_1.models.history_file import HistoryFile
from isi_sdk_8_0_1.models.history_file_statistic import HistoryFileStatistic
from isi_sdk_8_0_1.models.http_settings import HttpSettings
from isi_sdk_8_0_1.models.http_settings_settings import HttpSettingsSettings
from isi_sdk_8_0_1.models.id_resolution_path import IdResolutionPath
from isi_sdk_8_0_1.models.id_resolution_paths import IdResolutionPaths
from isi_sdk_8_0_1.models.job_event import JobEvent
from isi_sdk_8_0_1.models.job_events import JobEvents
from isi_sdk_8_0_1.models.job_job import JobJob
from isi_sdk_8_0_1.models.job_job_avscan_params import JobJobAvscanParams
from isi_sdk_8_0_1.models.job_job_changelistcreate_params import JobJobChangelistcreateParams
from isi_sdk_8_0_1.models.job_job_create_params import JobJobCreateParams
from isi_sdk_8_0_1.models.job_job_domainmark_params import JobJobDomainmarkParams
from isi_sdk_8_0_1.models.job_job_extended import JobJobExtended
from isi_sdk_8_0_1.models.job_job_prepair_params import JobJobPrepairParams
from isi_sdk_8_0_1.models.job_job_smartpoolstree_params import JobJobSmartpoolstreeParams
from isi_sdk_8_0_1.models.job_job_snaprevert_params import JobJobSnaprevertParams
from isi_sdk_8_0_1.models.job_job_summary import JobJobSummary
from isi_sdk_8_0_1.models.job_job_summary_summary import JobJobSummarySummary
from isi_sdk_8_0_1.models.job_jobs import JobJobs
from isi_sdk_8_0_1.models.job_policies import JobPolicies
from isi_sdk_8_0_1.models.job_policy import JobPolicy
from isi_sdk_8_0_1.models.job_policy_interval import JobPolicyInterval
from isi_sdk_8_0_1.models.job_recent import JobRecent
from isi_sdk_8_0_1.models.job_reports import JobReports
from isi_sdk_8_0_1.models.job_statistics import JobStatistics
from isi_sdk_8_0_1.models.job_statistics_job import JobStatisticsJob
from isi_sdk_8_0_1.models.job_statistics_job_node import JobStatisticsJobNode
from isi_sdk_8_0_1.models.job_statistics_job_node_cpu import JobStatisticsJobNodeCpu
from isi_sdk_8_0_1.models.job_statistics_job_node_io import JobStatisticsJobNodeIo
from isi_sdk_8_0_1.models.job_statistics_job_node_io_read import JobStatisticsJobNodeIoRead
from isi_sdk_8_0_1.models.job_statistics_job_node_io_write import JobStatisticsJobNodeIoWrite
from isi_sdk_8_0_1.models.job_statistics_job_node_memory import JobStatisticsJobNodeMemory
from isi_sdk_8_0_1.models.job_statistics_job_node_memory_physical import JobStatisticsJobNodeMemoryPhysical
from isi_sdk_8_0_1.models.job_statistics_job_node_memory_virtual import JobStatisticsJobNodeMemoryVirtual
from isi_sdk_8_0_1.models.job_statistics_job_node_worker import JobStatisticsJobNodeWorker
from isi_sdk_8_0_1.models.job_type import JobType
from isi_sdk_8_0_1.models.job_types import JobTypes
from isi_sdk_8_0_1.models.license_license import LicenseLicense
from isi_sdk_8_0_1.models.license_license_create_params import LicenseLicenseCreateParams
from isi_sdk_8_0_1.models.license_licenses import LicenseLicenses
from isi_sdk_8_0_1.models.mapping_dump import MappingDump
from isi_sdk_8_0_1.models.mapping_identities import MappingIdentities
from isi_sdk_8_0_1.models.mapping_identities_create_params import MappingIdentitiesCreateParams
from isi_sdk_8_0_1.models.mapping_identities_target import MappingIdentitiesTarget
from isi_sdk_8_0_1.models.mapping_identity import MappingIdentity
from isi_sdk_8_0_1.models.mapping_identity_target import MappingIdentityTarget
from isi_sdk_8_0_1.models.mapping_import import MappingImport
from isi_sdk_8_0_1.models.mapping_users_lookup import MappingUsersLookup
from isi_sdk_8_0_1.models.mapping_users_lookup_mapping_item import MappingUsersLookupMappingItem
from isi_sdk_8_0_1.models.mapping_users_lookup_mapping_item_group import MappingUsersLookupMappingItemGroup
u'DigPattern_RefTrig_Pattern': 8583,
u'DigPattern_RefTrig_Src': 5175,
u'DigPattern_RefTrig_When': 5176,
u'DigPattern_StartTrig_Pattern': 8582,
u'DigPattern_StartTrig_Src': 5136,
u'DigPattern_StartTrig_When': 5137,
u'Exported_10MHzRefClk_OutputTerm': 8814,
u'Exported_20MHzTimebase_OutputTerm': 5719,
u'Exported_AIConvClk_OutputTerm': 5767,
u'Exported_AIConvClk_Pulse_Polarity': 5768,
u'Exported_AIHoldCmpltEvent_OutputTerm': 6381,
u'Exported_AIHoldCmpltEvent_PulsePolarity': 6382,
u'Exported_AdvCmpltEvent_Delay': 5975,
u'Exported_AdvCmpltEvent_OutputTerm': 5713,
u'Exported_AdvCmpltEvent_Pulse_Polarity': 5714,
u'Exported_AdvCmpltEvent_Pulse_Width': 5716,
u'Exported_AdvTrig_OutputTerm': 5701,
u'Exported_AdvTrig_Pulse_Polarity': 5702,
u'Exported_AdvTrig_Pulse_Width': 5704,
u'Exported_AdvTrig_Pulse_WidthUnits': 5703,
u'Exported_ChangeDetectEvent_OutputTerm': 8599,
u'Exported_ChangeDetectEvent_Pulse_Polarity': 8963,
u'Exported_CtrOutEvent_OutputBehavior': 5967,
u'Exported_CtrOutEvent_OutputTerm': 5911,
u'Exported_CtrOutEvent_Pulse_Polarity': 5912,
u'Exported_CtrOutEvent_Toggle_IdleState': 6250,
u'Exported_DataActiveEvent_Lvl_ActiveLvl': 5684,
u'Exported_DataActiveEvent_OutputTerm': 5683,
u'Exported_DividedSampClkTimebase_OutputTerm': 8609,
u'Exported_HshkEvent_Delay': 8892,
u'Exported_HshkEvent_Interlocked_AssertOnStart': 8894,
u'Exported_HshkEvent_Interlocked_AssertedLvl': 8893,
u'Exported_HshkEvent_Interlocked_DeassertDelay': 8895,
u'Exported_HshkEvent_OutputBehavior': 8891,
u'Exported_HshkEvent_OutputTerm': 8890,
u'Exported_HshkEvent_Pulse_Polarity': 8896,
u'Exported_HshkEvent_Pulse_Width': 8897,
u'Exported_PauseTrig_Lvl_ActiveLvl': 5654,
u'Exported_PauseTrig_OutputTerm': 5653,
u'Exported_RdyForStartEvent_Lvl_ActiveLvl': 5969,
u'Exported_RdyForStartEvent_OutputTerm': 5641,
u'Exported_RdyForXferEvent_DeassertCond': 10595,
u'Exported_RdyForXferEvent_DeassertCondCustomThreshold': 10596,
u'Exported_RdyForXferEvent_Lvl_ActiveLvl': 8886,
u'Exported_RdyForXferEvent_OutputTerm': 8885,
u'Exported_RefTrig_OutputTerm': 1424,
u'Exported_RefTrig_Pulse_Polarity': 1425,
u'Exported_SampClkTimebase_OutputTerm': 6393,
u'Exported_SampClk_DelayOffset': 8644,
u'Exported_SampClk_OutputBehavior': 6251,
u'Exported_SampClk_OutputTerm': 5731,
u'Exported_SampClk_Pulse_Polarity': 5732,
u'Exported_StartTrig_OutputTerm': 1412,
u'Exported_StartTrig_Pulse_Polarity': 1413,
u'Exported_SyncPulseEvent_OutputTerm': 8764,
u'Exported_WatchdogExpiredEvent_OutputTerm': 8618,
u'ExtCal_LastTemp': 6247,
u'ExtCal_RecommendedInterval': 6248,
u'HshkTrig_Type': 8887,
u'Hshk_DelayAfterXfer': 8898,
u'Hshk_SampleInputDataWhen': 8900,
u'Hshk_StartCond': 8899,
u'Implicit_UnderflowBehavior': 12029,
u'Interlocked_HshkTrig_AssertedLvl': 8889,
u'Interlocked_HshkTrig_Src': 8888,
u'Logging_FilePath': 11972,
u'Logging_FilePreallocationSize': 12230,
u'Logging_FileWriteSize': 12227,
u'Logging_Mode': 11973,
u'Logging_Pause': 12259,
u'Logging_SampsPerFile': 12260,
u'Logging_TDMS_GroupName': 11974,
u'Logging_TDMS_Operation': 11975,
u'MasterTimebase_Rate': 5269,
u'MasterTimebase_Src': 4931,
u'OnDemand_SimultaneousAOEnable': 8608,
u'PauseTrig_Term': 12064,
u'PauseTrig_Type': 4966,
u'PersistedChan_AllowInteractiveDeletion': 8914,
u'PersistedChan_AllowInteractiveEditing': 8913,
u'PersistedChan_Author': 8912,
u'PersistedScale_AllowInteractiveDeletion': 8918,
u'PersistedScale_AllowInteractiveEditing': 8917,
u'PersistedScale_Author': 8916,
u'PersistedTask_AllowInteractiveDeletion': 8910,
u'PersistedTask_AllowInteractiveEditing': 8909,
u'PersistedTask_Author': 8908,
u'PhysicalChanName': 6389,
u'PhysicalChan_AI_InputSrcs': 12248,
u'PhysicalChan_AI_SupportedMeasTypes': 12247,
u'PhysicalChan_AI_TermCfgs': 9026,
u'PhysicalChan_AO_ManualControlAmplitude': 10783,
u'PhysicalChan_AO_ManualControlEnable': 10782,
u'PhysicalChan_AO_ManualControlFreq': 10784,
u'PhysicalChan_AO_ManualControl_ShortDetected': 11971,
u'PhysicalChan_AO_SupportedOutputTypes': 12249,
u'PhysicalChan_AO_SupportedPowerUpOutputTypes': 12366,
u'PhysicalChan_AO_TermCfgs': 10659,
u'PhysicalChan_CI_SupportedMeasTypes': 12250,
u'PhysicalChan_CO_SupportedOutputTypes': 12251,
u'PhysicalChan_DI_ChangeDetectSupported': 10662,
u'PhysicalChan_DI_PortWidth': 10660,
u'PhysicalChan_DI_SampClkSupported': 10661,
u'PhysicalChan_DI_SampModes': 12256,
u'PhysicalChan_DO_PortWidth': 10663,
u'PhysicalChan_DO_SampClkSupported': 10664,
u'PhysicalChan_DO_SampModes': 12257,
u'PhysicalChan_TEDS_BitStream': 8671,
u'PhysicalChan_TEDS_MfgID': 8666,
u'PhysicalChan_TEDS_ModelNum': 8667,
u'PhysicalChan_TEDS_SerialNum': 8668,
u'PhysicalChan_TEDS_TemplateIDs': 8847,
u'PhysicalChan_TEDS_VersionLetter': 8670,
u'PhysicalChan_TEDS_VersionNum': 8669,
u'ReadWaitMode': 8754,
u'Read_AccessoryInsertionOrRemovalDetected': 12144,
u'Read_AutoStart': 6182,
u'Read_AvailSampPerChan': 4643,
u'Read_ChangeDetect_HasOverflowed': 8596,
u'Read_ChannelsToRead': 6179,
u'Read_CommonModeRangeErrorChans': 10905,
u'Read_CommonModeRangeErrorChansExist': 10904,
u'Read_CurrReadPos': 4641,
u'Read_DevsWithInsertedOrRemovedAccessories': 12145,
u'Read_DigitalLines_BytesPerChan': 8572,
u'Read_NumChans': 8571,
u'Read_Offset': 6411,
u'Read_OpenCurrentLoopChans': 10762,
u'Read_OpenCurrentLoopChansExist': 10761,
u'Read_OpenThrmcplChans': 10903,
u'Read_OpenThrmcplChansExist': 10902,
u'Read_OverWrite': 4625,
u'Read_OvercurrentChans': 10727,
u'Read_OvercurrentChansExist': 10726,
u'Read_OverloadedChans': 8565,
u'Read_OverloadedChansExist': 8564,
u'Read_RawDataWidth': 8570,
u'Read_ReadAllAvailSamp': 4629,
u'Read_RelativeTo': 6410,
u'Read_SleepTime': 8880,
u'Read_TotalSampPerChanAcquired': 6442,
u'Read_WaitMode': 8754,
u'RealTime_ConvLateErrorsToWarnings': 8942,
u'RealTime_NumOfWarmupIters': 8941,
u'RealTime_ReportMissedSamp': 8985,
u'RealTime_WaitForNextSampClkWaitMode': 8943,
u'RealTime_WriteRecoveryMode': 8986,
u'RefClk_Rate': 4885,
u'RefClk_Src': 4886,
u'RefTrig_AutoTrigEnable': 11969,
u'RefTrig_AutoTriggered': 11970,
u'RefTrig_Delay': 5251,
u'RefTrig_PretrigSamples': 5189,
u'RefTrig_Term': 12063,
u'RefTrig_Type': 5145,
u'SampClkTimebase_Term': 12060,
u'SampClk_ActiveEdge': 4865,
u'SampClk_DigFltr_Enable': 8734,
u'SampClk_DigFltr_MinPulseWidth': 8735,
u'SampClk_DigFltr_TimebaseRate': 8737,
u'SampClk_DigFltr_TimebaseSrc': 8736,
u'SampClk_DigSync_Enable': 8738,
u'SampClk_MaxRate': 8904,
u'SampClk_OverrunBehavior': 12028,
u'SampClk_Rate': 4932,
u'SampClk_Src': 6226,
u'SampClk_Term': 12059,
u'SampClk_TimebaseDiv': 6379,
u'SampClk_Timebase_ActiveEdge': 6380,
u'SampClk_Timebase_MasterTimebaseDiv': 4869,
u'SampClk_Timebase_Rate': 4867,
u'SampClk_Timebase_Src': 4872,
u'SampClk_UnderflowBehavior': 10593,
u'SampQuant_SampMode': 4864,
u'SampQuant_SampPerChan': 4880,
u'SampTimingEngine': 10790,
u'SampTimingType': 4935,
u'Scale_Descr': 4646,
u'Scale_Lin_Slope': 4647,
u'Scale_Lin_YIntercept': 4648,
u'Scale_Map_PreScaledMax': 4657,
u'Scale_Map_PreScaledMin': 4658,
u'Scale_Map_ScaledMax': 4649,
u'Scale_Map_ScaledMin': 4656,
u'Scale_Poly_ForwardCoeff': 4660,
u'Scale_Poly_ReverseCoeff': 4661,
u'Scale_PreScaledUnits': 6391,
u'Scale_ScaledUnits': 6427,
u'Scale_Table_PreScaledVals': 4663,
u'Scale_Table_ScaledVals': 4662,
u'Scale_Type': 6441,
u'SelfCal_LastTemp': 6244,
u'SelfCal_Supported': 6240,
u'StartTrig_Delay': 6230,
u'StartTrig_DelayUnits': 6344,
u'StartTrig_Retriggerable': 6415,
u'StartTrig_Term': 12062,
u'StartTrig_Type': 5011,
u'SwitchChan_AnlgBusSharingEnable': 12190,
u'SwitchChan_Bandwidth': 1600,
u'SwitchChan_Impedance': 1601,
u'SwitchChan_MaxACCarryCurrent': 1608,
u'SwitchChan_MaxACCarryPwr': 1602,
u'SwitchChan_MaxACSwitchCurrent': 1606,
u'SwitchChan_MaxACSwitchPwr': 1604,
u'SwitchChan_MaxACVoltage': 1617,
u'SwitchChan_MaxDCCarryCurrent': 1607,
u'SwitchChan_MaxDCCarryPwr': 1603,
u'SwitchChan_MaxDCSwitchCurrent': 1605,
u'SwitchChan_MaxDCSwitchPwr': 1609,
u'SwitchChan_MaxDCVoltage': 1616,
u'SwitchChan_Usage': 6372,
u'SwitchChan_WireMode': 6373,
u'SwitchDev_AutoConnAnlgBus': 6106,
u'SwitchDev_NumColumns': 6378,
u'SwitchDev_NumRelays': 6374,
u'SwitchDev_NumRows': 6377,
u'SwitchDev_NumSwitchChans': 6376,
u'SwitchDev_PwrDownLatchRelaysAfterSettling': 8923,
u'SwitchDev_RelayList': 6108,
u'SwitchDev_Settled': 4675,
u'SwitchDev_SettlingTime': 4676,
u'SwitchDev_SwitchChanList': 6375,
u'SwitchDev_Temperature': 12314,
u'SwitchDev_Topology': 6461,
u'SwitchScan_BreakMode': 4679,
u'SwitchScan_RepeatMode': 4680,
u'SwitchScan_WaitingForAdv': 6105,
u'SyncClk_Interval': 12158,
u'SyncPulse_MinDelayToStart': 8767,
u'SyncPulse_ResetDelay': 12157,
u'SyncPulse_ResetTime': 12156,
u'SyncPulse_Src': 8765,
u'SyncPulse_SyncTime': 8766,
u'SyncPulse_Term': 12165,
u'Sys_DevNames': 6459,
u'Sys_GlobalChans': 4709,
u'Sys_NIDAQMajorVersion': 4722,
u'Sys_NIDAQMinorVersion': 6435,
u'Sys_NIDAQUpdateVersion': 12066,
u'Sys_Scales': 4710,
u'Sys_Tasks': 4711,
u'Task_Channels': 4723,
u'Task_Complete': 4724,
u'Task_Devices': 8974,
u'Task_Name': 4726,
u'Task_NumChans': 8577,
u'Task_NumDevices': 10682,
u'Trigger_SyncType': 12160,
u'Val_100MHzTimebase': 15857,
u'Val_10MHzRefClock': 12536,
u'Val_20MHzTimebase': 12537,
u'Val_20MHzTimebaseClock': 12486,
u'Val_2Wire': 2,
u'Val_2point5V': 14620,
u'Val_3Wire': 3,
u'Val_3point3V': 14621,
u'Val_4Wire': 4,
u'Val_5V': 14619,
u'Val_5Wire': 5,
u'Val_80MHzTimebase': 14636,
u'Val_8MHzTimebase': 16023,
u'Val_A': 12513,
u'Val_AC': 10045,
u'Val_AHighBHigh': 10040,
u'Val_AHighBLow': 10041,
u'Val_AI': 10100,
u'Val_AIConvertClock': 12484,
u'Val_AIHoldCmpltEvent': 12493,
u'Val_ALowBHigh': 10042,
u'Val_ALowBLow': 10043,
u'Val_AM': 14756,
u'Val_AO': 10102,
u'Val_AOSeries': 14647,
u'Val_AandB': 12515,
u'Val_AboveLvl': 10093,
u'Val_AccelUnit_g': 10186,
u'Val_Accelerometer': 10356,
u'Val_Acquired_Into_Buffer': 1,
u'Val_Action_Cancel': 1,
u'Val_Action_Commit': 0,
u'Val_ActiveDrive': 12573,
u'Val_ActiveHigh': 10095,
u'Val_ActiveLow': 10096,
u'Val_AdvCmpltEvent': 12492,
u'Val_AdvanceTrigger': 12488,
u'Val_AllowRegen': 10097,
u'Val_Amps': 10342,
u'Val_AnlgEdge': 10099,
u'Val_AnlgLvl': 10101,
u'Val_AnlgWin': 10103,
u'Val_Auto': -1,
u'Val_B': 12514,
u'Val_BSeriesDAQ': 14662,
u'Val_B_Type_TC': 10047,
u'Val_Bar': 15880,
u'Val_BelowLvl': 10107,
u'Val_Best50HzRejection': 14713,
u'Val_Best60HzRejection': 14714,
u'Val_Bit_CouplingTypes_AC': 1,
u'Val_Bit_CouplingTypes_DC': 2,
u'Val_Bit_CouplingTypes_Ground': 4,
u'Val_Bit_CouplingTypes_HFReject': 8,
u'Val_Bit_CouplingTypes_LFReject': 16,
u'Val_Bit_CouplingTypes_NoiseReject': 32,
u'Val_Bit_TermCfg_Diff': 4,
u'Val_Bit_TermCfg_NRSE': 2,
u'Val_Bit_TermCfg_PseudoDIFF': 8,
u'Val_Bit_TermCfg_RSE': 1,
u'Val_Bit_TriggerUsageTypes_Advance': 1,
u'Val_Bit_TriggerUsageTypes_ArmStart': 32,
u'Val_Bit_TriggerUsageTypes_Handshake': 16,
u'Val_Bit_TriggerUsageTypes_Pause': 2,
u'Val_Bit_TriggerUsageTypes_Reference': 4,
u'Val_Bit_TriggerUsageTypes_Start': 8,
u'Val_Bits': 10109,
u'Val_BreakBeforeMake': 10110,
u'Val_Bridge': 15908,
u'Val_BuiltIn': 10200,
u'Val_BurstHandshake': 12548,
u'Val_CI': 10131,
u'Val_CO': 10132,
u'Val_CSeriesModule': 14659,
u'Val_CartesianShearStrainXY': 15976,
u'Val_CartesianStrainX': 15974,
u'Val_CartesianStrainY': 15975,
u'Val_Cfg_Default': -1,
u'Val_Chan': 10113,
u'Val_ChanForAllLines': 1,
u'Val_ChanPerLine': 0,
u'Val_ChangeDetection': 12504,
u'Val_ChangeDetectionEvent': 12511,
u'Val_ChannelCurrent': 1,
u'Val_ChannelHighImpedance': 2,
u'Val_ChannelVoltage': 0,
u'Val_ClearExpiration': 1,
u'Val_Closed': 10438,
u'Val_CompactDAQ': 14637,
u'Val_CompactDAQChassis': 14658,
u'Val_ConstVal': 10116,
u'Val_Cont': 10117,
u'Val_ContSamps': 10123,
u'Val_CountDown': 10124,
u'Val_CountEdges': 10125,
u'Val_CountUp': 10128,
u'Val_CounterOutputEvent': 12494,
u'Val_Create': 15848,
u'Val_CreateOrReplace': 15847,
u'Val_CurrReadPos': 10425,
u'Val_CurrWritePos': 10430,
u'Val_Current': 10134,
u'Val_CurrentRMS': 10351,
u'Val_Custom': 10137,
u'Val_DC': 10050,
u'Val_DI': 10151,
u'Val_DMA': 10054,
u'Val_DO': 10153,
u'Val_Default': -1,
u'Val_DegC': 10143,
u'Val_DegF': 10144,
u'Val_DegR': 10145,
u'Val_Degrees': 10146,
u'Val_DeltaRosette': 15969,
u'Val_Diff': 10106,
u'Val_DigEdge': 10150,
u'Val_DigLvl': 10152,
u'Val_DigPattern': 10398,
u'Val_DigitalIO': 14648,
u'Val_DoNotAllowRegen': 10158,
u'Val_DoNotInvertPolarity': 0,
u'Val_DoNotOverwriteUnreadSamps': 10159,
u'Val_DoNotWrite': 12540,
u'Val_DynamicSignalAcquisition': 14649,
u'Val_ESeriesDAQ': 14642,
u'Val_E_Type_TC': 10055,
u'Val_EnteringWin': 10163,
u'Val_EverySample': 10164,
u'Val_ExtControlled': 10326,
u'Val_External': 10167,
u'Val_FM': 14757,
u'Val_Falling': 10171,
u'Val_FallingSlope': 10171,
u'Val_Finite': 10172,
u'Val_FiniteSamps': 10178,
u'Val_FirstPretrigSamp': 10427,
u'Val_FirstSample': 10424,
u'Val_Fixed50PercentDutyCycle': 14711,
u'Val_FixedHighFreq': 14709,
u'Val_FixedLowFreq': 14710,
u'Val_FootPounds': 15884,
u'Val_Force_Bridge': 15899,
u'Val_Force_IEPESensor': 15895,
u'Val_Freq': 10179,
u'Val_Freq_Voltage': 10181,
u'Val_FromCustomScale': 10065,
u'Val_FromTEDS': 12516,
u'Val_FullBridge': 10182,
u'Val_FullBridgeI': 10183,
u'Val_FullBridgeII': 10184,
u'Val_FullBridgeIII': 10185,
u'Val_FuncGen': 14750,
u'Val_GND': 10066,
u'Val_GPS_Timestamp': 10362,
u'Val_Ground': 2,
u'Val_GroupByChannel': 0,
u'Val_GroupByScanNumber': 1,
u'Val_HWTimedSinglePoint': 12522,
u'Val_HalfBridge': 10187,
u'Val_HalfBridgeI': 10188,
u'Val_HalfBridgeII': 10189,
u'Val_HaltOutputAndError': 14615,
u'Val_Handshake': 10389,
u'Val_HandshakeTriggerAsserts': 12552,
u'Val_HandshakeTriggerDeasserts': 12553,
u'Val_High': 10192,
u'Val_HighFreq2Ctr': 10157,
u'Val_HighImpedance': 12527,
u'Val_HighResolution': 10195,
u'Val_HighSpeed': 14712,
u'Val_Hz': 10373,
u'Val_IRIGB': 10070,
u'Val_IgnoreOverruns': 15863,
u'Val_Immediate': 10198,
u'Val_Implicit': 10451,
u'Val_InchOunces': 15882,
u'Val_InchPounds': 15883,
u'Val_Inches': 10379,
u'Val_InchesPerSecond': 15960,
u'Val_InchesPerSecondSquared': 12471,
u'Val_InsideWin': 10199,
u'Val_Interlocked': 12549,
u'Val_Internal': 10200,
u'Val_Interrupts': 10204,
u'Val_InvertPolarity': 1,
u'Val_J_Type_TC': 10072,
u'Val_K_Type_TC': 10073,
u'Val_Kelvins': 10325,
u'Val_KilogramForce': 15877,
u'Val_LargeRng2Ctr': 10205,
u'Val_LeavingWin': 10208,
u'Val_LeftJustified': 10209,
u'Val_Linear': 10447,
u'Val_Load': 10440,
u'Val_Log': 15844,
u'Val_LogAndRead': 15842,
u'Val_Loopback0': 0,
u'Val_Loopback180': 1,
u'Val_LosslessPacking': 12555,
u'Val_LossyLSBRemoval': 12556,
u'Val_Low': 10214,
u'Val_LowFreq1Ctr': 10105,
u'Val_Lvl': 10210,
u'Val_MSeriesDAQ': 14643,
u'Val_MaintainExistingValue': 12528,
u'Val_MapRanges': 10448,
u'Val_Master': 15888,
u'Val_MaxShearStrain': 15977,
u'Val_MaxShearStrainAngle': 15978,
u'Val_Meters': 10219,
u'Val_MetersPerSecond': 15959,
u'Val_MetersPerSecondSquared': 12470,
u'Val_MilliVoltsPerInchPerSecond': 15964,
u'Val_MillivoltsPerMillimeterPerSecond': 15963,
u'Val_MostRecentSamp': 10428,
u'Val_Multicycle': 14614,
u'Val_NIELVIS': 14755,
u'Val_NRSE': 10078,
u'Val_N_Type_TC': 10077,
u'Val_NetworkDAQ': 14829,
u'Val_NewtonMeters': 15881,
u'Val_Newtons': 15875,
u'Val_NoAction': 10227,
u'Val_NoBridge': 10228,
u'Val_NoChange': 10160,
u'Val_None': 10230,
u'Val_Off': 10231,
u'Val_Ohms': 10384,
u'Val_OnBrdMemEmpty': 10235,
u'Val_OnBrdMemHalfFullOrLess': 10239,
u'Val_OnBrdMemMoreThanHalfFull': 10237,
u'Val_OnBrdMemNotEmpty': 10241,
u'Val_OnBrdMemNotFull': 10242,
u'Val_OnDemand': 10390,
u'Val_OnbrdMemCustomThreshold': 12577,
u'Val_OnbrdMemFull': 10236,
u'Val_OnbrdMemMoreThanHalfFull': 10237,
u'Val_Once': 10244,
u'Val_Open': 10437,
u'Val_OpenCollector': 12574,
u'Val_OpenOrCreate': 15846,
u'Val_OutsideWin': 10251,
u'Val_OverwriteUnreadSamps': 10252,
u'Val_PCCard': 12585,
u'Val_PCI': 12582,
u'Val_PCIe': 13612,
u'Val_PPS': 10080,
u'Val_PXI': 12583,
u'Val_PXIe': 14706,
u'Val_Pascals': 10081,
u'Val_PathStatus_AlreadyExists': 10432,
u'Val_PathStatus_Available': 10431,
u'Val_PathStatus_ChannelInUse': 10434,
u'Val_PathStatus_ChannelReservedForRouting': 10436,
u'Val_PathStatus_SourceChannelConflict': 10435,
u'Val_PathStatus_Unsupported': 10433,
u'Val_PatternDoesNotMatch': 10253,
u'Val_PatternMatches': 10254,
u'Val_PauseUntilDataAvailable': 14616,
u'Val_Period': 10256,
u'Val_PipelinedSampClk': 14668,
u'Val_Poll': 12524,
u'Val_Polynomial': 10449,
u'Val_Position_AngEncoder': 10360,
u'Val_Position_EddyCurrentProximityProbe': 14835,
u'Val_Position_LVDT': 10352,
u'Val_Position_LinEncoder': 10361,
u'Val_Position_RVDT': 10353,
u'Val_Pounds': 15876,
u'Val_PoundsPerSquareInch': 15879,
u'Val_Pressure_Bridge': 15902,
u'Val_PrincipalStrain1': 15971,
u'Val_PrincipalStrain2': 15972,
u'Val_PrincipalStrainAngle': 15973,
u'Val_ProgrammedIO': 10264,
u'Val_PseudoDiff': 12529,
u'Val_Pt3750': 12481,
u'Val_Pt3851': 10071,
u'Val_Pt3911': 12482,
u'Val_Pt3916': 10069,
u'Val_Pt3920': 10053,
u'Val_Pt3928': 12483,
u'Val_PullDown': 15951,
u'Val_PullUp': 15950,
u'Val_Pulse': 10265,
u'Val_PulseFrequency': 15864,
u'Val_PulseTicks': 15866,
u'Val_PulseTime': 15865,
u'Val_PulseWidth': 10359,
u'Val_Pulse_Freq': 10119,
u'Val_Pulse_Ticks': 10268,
u'Val_Pulse_Time': 10269,
u'Val_QuarterBridge': 10270,
u'Val_QuarterBridgeI': 10271,
u'Val_QuarterBridgeII': 10272,
u'Val_R1': 12465,
u'Val_R2': 12466,
u'Val_R3': 12467,
u'Val_R4': 14813,
u'Val_RSE': 10083,
u'Val_R_Type_TC': 10082,
u'Val_Radians': 10273,
u'Val_RectangularRosette': 15968,
u'Val_RefTrig': 10426,
u'Val_ReferenceTrigger': 12490,
u'Val_ReservedForRouting': 10441,
u'Val_ResetTimer': 0,
u'Val_Resistance': 10278,
u'Val_RightJustified': 10279,
u'Val_Rising': 10280,
u'Val_RisingSlope': 10280,
u'Val_Rosette_Strain_Gage': 15980,
u'Val_SCC': 14707,
u'Val_SCCConnectorBlock': 14704,
u'Val_SCCModule': 14705,
u'Val_SCExpress': 15886,
u'Val_SCSeriesDAQ': 14645,
u'Val_SCXI': 12584,
u'Val_SCXI1124Range0to10V': 14631,
u'Val_SCXI1124Range0to1V': 14629,
u'Val_SCXI1124Range0to20mA': 14635,
u'Val_SCXI1124Range0to5V': 14630,
u'Val_SCXI1124RangeNeg10to10V': 14634,
u'Val_SCXI1124RangeNeg1to1V': 14632,
u'Val_SCXI1124RangeNeg5to5V': 14633,
u'Val_SCXIModule': 14660,
u'Val_SSeriesDAQ': 14644,
u'Val_S_Type_TC': 10085,
u'Val_SameAsMasterTimebase': 10282,
u'Val_SameAsSampTimebase': 10284,
u'Val_SampClk': 10388,
u'Val_SampClkActiveEdge': 14617,
u'Val_SampClkInactiveEdge': 14618,
u'Val_SampClkPeriods': 10286,
u'Val_SampleClock': 12487,
u'Val_SampleCompleteEvent': 12530,
u'Val_Save_AllowInteractiveDeletion': 4,
u'Val_Save_AllowInteractiveEditing': 2,
u'Val_Save_Overwrite': 1,
u'Val_Sawtooth': 14754,
u'Val_Seconds': 10364,
u'Val_SemiPeriod': 10289,
u'Val_Sine': 14751,
u'Val_SingleCycle': 14613,
u'Val_Slave': 15889,
u'Val_Sleep': 12547,
u'Val_Software': 10292,
u'Val_SoundPressure_Microphone': 10354,
u'Val_Source': 10439,
u'Val_Square': 14753,
u'Val_StartTrigger': 12491,
u'Val_StopTaskAndError': 15862,
u'Val_Strain': 10299,
u'Val_Strain_Gage': 10300,
u'Val_SwitchBlock': 15870,
u'Val_Switch_Topology_1127_Independent': u'1127/Independent',
u'Val_Switch_Topology_1128_Independent': u'1128/Independent',
u'Val_Switch_Topology_1130_Independent': u'1130/Independent',
u'Val_Switch_Topology_1160_16_SPDT': u'1160/16-SPDT',
u'Val_Switch_Topology_1161_8_SPDT': u'1161/8-SPDT',
u'Val_Switch_Topology_1166_16_DPDT': u'1166/16-DPDT',
u'Val_Switch_Topology_1166_32_SPDT': u'1166/32-SPDT',
u'Val_Switch_Topology_1167_Independent': u'1167/Independent',
u'Val_Switch_Topology_1169_100_SPST': u'1169/100-SPST',
u'Val_Switch_Topology_1169_50_DPST': u'1169/50-DPST',
u'Val_Switch_Topology_1192_8_SPDT': u'1192/8-SPDT',
u'Val_Switch_Topology_1193_Independent': u'1193/Independent',
u'Val_Switch_Topology_2510_Independent': u'2510/Independent',
u'Val_Switch_Topology_2512_Independent': u'2512/Independent',
u'Val_Switch_Topology_2514_Independent': u'2514/Independent',
u'Val_Switch_Topology_2515_Independent': u'2515/Independent',
u'Val_Switch_Topology_2520_80_SPST': u'2520/80-SPST',
u'Val_Switch_Topology_2521_40_DPST': u'2521/40-DPST',
u'Val_Switch_Topology_2522_53_SPDT': u'2522/53-SPDT',
u'Val_Switch_Topology_2523_26_DPDT': u'2523/26-DPDT',
u'Val_Switch_Topology_2527_Independent': u'2527/Independent',
u'Val_Switch_Topology_2530_Independent': u'2530/Independent',
u'Val_Switch_Topology_2548_4_SPDT': u'2548/4-SPDT',
u'Val_Switch_Topology_2558_4_SPDT': u'2558/4-SPDT',
u'Val_Switch_Topology_2564_16_SPST': u'2564/16-SPST',
u'Val_Switch_Topology_2564_8_DPST': u'2564/8-DPST',
u'Val_Switch_Topology_2565_16_SPST': u'2565/16-SPST',
u'Val_Switch_Topology_2566_16_SPDT': u'2566/16-SPDT',
u'Val_Switch_Topology_2566_8_DPDT': u'2566/8-DPDT',
u'Val_Switch_Topology_2567_Independent': u'2567/Independent',
u'Val_Switch_Topology_2568_15_DPST': u'2568/15-DPST',
u'Val_Switch_Topology_2568_31_SPST': u'2568/31-SPST',
u'Val_Switch_Topology_2569_100_SPST': u'2569/100-SPST',
u'Val_Switch_Topology_2569_50_DPST': u'2569/50-DPST',
u'Val_Switch_Topology_2570_20_DPDT': u'2570/20-DPDT',
u'Val_Switch_Topology_2570_40_SPDT': u'2570/40-SPDT',
| |
import pickle
from collections import defaultdict
from pathlib import Path
from typing import Optional, Callable
import numpy as np
import torch
import torch.utils.data as torchdata
from ignite.contrib.handlers import ProgressBar
from ignite.engine import create_supervised_evaluator, Events, Engine
from ignite.metrics import Accuracy, Loss
from torch import nn
from torch.nn import functional as F
from alr import ALRModel
from alr import MCDropout
from alr.acquisition import BALD
from alr.data import DataManager
from alr.data import RelabelDataset, PseudoLabelDataset, UnlabelledDataset
from alr.data.datasets import Dataset
from alr.training import Trainer
from alr.training.samplers import RandomFixedLengthSampler
from alr.training.utils import EarlyStopper, PLPredictionSaver
from alr.utils import eval_fwd_exp, timeop, manual_seed
from alr.utils._type_aliases import _DeviceType, _Loss_fn
class PseudoLabelManager:
def __init__(
self,
pool: UnlabelledDataset,
model: nn.Module,
threshold: float,
log_dir: Optional[str] = None,
device: _DeviceType = None,
**kwargs,
):
bs = kwargs.pop("batch_size", 1024)
shuffle = kwargs.pop("shuffle", False)
assert not shuffle
self._pool = pool
self._loader = torchdata.DataLoader(
pool, batch_size=bs, shuffle=shuffle, **kwargs
)
self._model = model
self._log_dir = log_dir
self._device = device
self._threshold = threshold
self.acquired_sizes = []
def attach(self, engine: Engine):
engine.add_event_handler(Events.STARTED, self._initialise)
# could also be EPOCH_COMPLETED since there's only one iteration in each epoch
engine.add_event_handler(Events.ITERATION_COMPLETED, self._load_labels)
def _load_labels(self, engine: Engine):
evaluator = create_supervised_evaluator(
self._model, metrics=None, device=self._device
)
plc = PseudoLabelCollector(
self._threshold,
log_dir=self._log_dir,
)
plc.attach(evaluator, batch_size=self._loader.batch_size)
plc.global_step_from_engine(engine)
evaluator.run(self._loader)
indices, pseudo_labels = (
evaluator.state.pl_indices.cpu().numpy(),
evaluator.state.pl_plabs.cpu().numpy(),
)
self.acquired_sizes.append(indices.shape[0])
if indices.shape[0]:
confident_points = torchdata.Subset(self._pool, indices)
if self._pool.debug:
# pool returns target labels too
engine.state.pseudo_labelled_dataset = RelabelDataset(
confident_points, pseudo_labels
)
else:
engine.state.pseudo_labelled_dataset = PseudoLabelDataset(
confident_points, pseudo_labels
)
else:
engine.state.pseudo_labelled_dataset = None
@staticmethod
def _initialise(engine: Engine):
engine.state.pseudo_labelled_dataset = None
class PseudoLabelCollector:
def __init__(
self,
threshold: float,
log_dir: Optional[str] = None,
pred_transform: Callable[[torch.Tensor], torch.Tensor] = lambda x: x.exp(),
):
self._indices = []
self._plabs = []
self._pred_transform = pred_transform
self._output_transform = lambda x: x
self._thresh = threshold
self._targets = []
self._preds = []
if log_dir:
self._saver = PLPredictionSaver(log_dir, pred_transform=pred_transform)
else:
self._saver = None
self._batch_size = None
def _parse(self, engine: Engine):
preds, targets = self._output_transform(engine.state.output)
# state.iteration starts with 1
iteration = engine.state.iteration - 1
offset = iteration * self._batch_size
with torch.no_grad():
preds = self._pred_transform(preds)
preds_max, plabs = torch.max(preds, dim=-1)
mask = torch.nonzero(preds_max >= self._thresh).flatten()
if mask.shape[0]:
# plabs = [N,]
self._plabs.append(plabs[mask])
self._indices.append(mask + offset)
def _flush(self, engine: Engine):
if self._indices and self._plabs:
engine.state.pl_indices = torch.cat(self._indices)
engine.state.pl_plabs = torch.cat(self._plabs)
else:
engine.state.pl_indices = torch.Tensor([])
engine.state.pl_plabs = torch.Tensor([])
self._indices = []
self._plabs = []
def attach(self, engine: Engine, batch_size: int, output_transform=lambda x: x):
r"""
Args:
engine (Engine): ignite engine object
batch_size (int): engine's batch size
output_transform (Callable): if engine.state.output is not (preds, target),
then output_transform should return aforementioned tuple.
Returns:
NoneType: None
"""
engine.add_event_handler(Events.ITERATION_COMPLETED, self._parse)
engine.add_event_handler(Events.COMPLETED, self._flush)
self._output_transform = output_transform
self._batch_size = batch_size
if self._saver:
self._saver.attach(engine, output_transform=output_transform)
def global_step_from_engine(self, engine: Engine):
if self._saver:
self._saver.global_step_from_engine(engine)
def _update_dataloader(
loader: torchdata.DataLoader,
dataset: torchdata.Dataset,
sampler: Optional[torchdata.Sampler] = None,
):
# attributes that usually go in dataloader's constructor
attrs = [k for k in loader.__dict__.keys() if not k.startswith("_")]
drop = ["dataset", "sampler", "batch_sampler", "dataset_kind"]
kwargs = {k: getattr(loader, k) for k in attrs if k not in drop}
if not isinstance(
loader.sampler,
(
torchdata.SequentialSampler,
torchdata.RandomSampler,
RandomFixedLengthSampler,
),
):
raise ValueError(
f"Only sequential, random, and random fixed length samplers "
f"are supported in _update_dataloader"
)
kwargs["dataset"] = dataset
# Sequential and Random will be automatically determined if sampler is None (depending on shuffle)
kwargs["sampler"] = sampler
return torchdata.DataLoader(**kwargs)
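# Illustrative usage sketch (not part of the original module; the names below are made up
# for demonstration): _update_dataloader rebuilds a DataLoader around a new dataset while
# reusing the old loader's public settings (batch size, num_workers, collate_fn, ...).
# The exact set of attributes copied this way can vary slightly between torch versions.
def _example_update_dataloader():
    base = torchdata.TensorDataset(torch.arange(10).float())
    extra = torchdata.TensorDataset(torch.arange(10, 20).float())
    loader = torchdata.DataLoader(base, batch_size=4, shuffle=False)
    merged = torchdata.ConcatDataset((base, extra))
    # batch_size=4 (and the other constructor kwargs) carry over from `loader`
    return _update_dataloader(loader, merged)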
def create_pseudo_label_trainer(
model: ALRModel,
loss: _Loss_fn,
optimiser: str,
train_loader: torchdata.DataLoader,
val_loader: torchdata.DataLoader,
pseudo_label_manager: PseudoLabelManager,
rfls_len: Optional[int] = None,
patience: Optional[int] = None,
reload_best: Optional[bool] = None,
epochs: Optional[int] = 1,
device: _DeviceType = None,
*args,
**kwargs,
):
def _step(engine: Engine, _):
# update loader accordingly: if pld is not none, concatenate them
new_loader = train_loader
pld = engine.state.pseudo_labelled_dataset
if pld is not None:
# only reset weights if engine.state.epoch != 1
model.reset_weights()
train_ds = torchdata.ConcatDataset((train_loader.dataset, pld))
# update dataloader's dataset attribute
if rfls_len:
new_loader = _update_dataloader(
train_loader,
train_ds,
RandomFixedLengthSampler(train_ds, length=rfls_len, shuffle=True),
)
else:
new_loader = _update_dataloader(train_loader, train_ds)
else:
assert engine.state.epoch == 1
# begin supervised training
trainer = Trainer(
model,
loss,
optimiser,
patience,
reload_best,
device=device,
*args,
**kwargs,
)
history = trainer.fit(
new_loader,
val_loader=val_loader,
epochs=epochs,
)
# if early stopping was applied with patience, then the representative train acc and loss
# are the entries `patience` steps before the end of the history, UNLESS the maximum number of epochs was reached.
if patience and len(history["train_loss"]) != epochs:
return history["train_loss"][-patience], history["train_acc"][-patience]
return history["train_loss"][-1], history["train_acc"][-1]
e = Engine(_step)
pseudo_label_manager.attach(e)
return e
class EphemeralTrainer:
def __init__(
self,
model: ALRModel,
pool: UnlabelledDataset,
loss: _Loss_fn,
optimiser: str,
threshold: float,
random_fixed_length_sampler_length: Optional[int] = None,
log_dir: Optional[str] = None,
patience: Optional[int] = None,
reload_best: Optional[bool] = False,
device: _DeviceType = None,
pool_loader_kwargs: Optional[dict] = None,
*args,
**kwargs,
):
self._pool = pool
self._model = model
self._loss = loss
self._optimiser = optimiser
self._patience = patience
self._reload_best = reload_best
self._device = device
self._args = args
self._kwargs = kwargs
self._threshold = threshold
self._log_dir = log_dir
self._pool_loader_kwargs = pool_loader_kwargs or {}
self._rfls_len = random_fixed_length_sampler_length
def fit(
self,
train_loader: torchdata.DataLoader,
val_loader: Optional[torchdata.DataLoader] = None,
iterations: Optional[int] = 1,
epochs: Optional[int] = 1,
):
if self._patience and val_loader is None:
raise ValueError(
"If patience is specified, then val_loader must be provided in .fit()."
)
val_evaluator = create_supervised_evaluator(
self._model,
metrics={"acc": Accuracy(), "loss": Loss(self._loss)},
device=self._device,
)
history = defaultdict(list)
pbar = ProgressBar()
def _log_metrics(engine: Engine):
# train_loss and train_acc are moving averages of the last epoch
# in the supervised training loop
train_loss, train_acc = engine.state.output
history[f"train_loss"].append(train_loss)
history[f"train_acc"].append(train_acc)
pbar.log_message(
f"Eph. iteration {engine.state.epoch}/{engine.state.max_epochs}\n"
f"\ttrain acc = {train_acc}, train loss = {train_loss}"
)
if val_loader is None:
return # job done
# val loader - save to history and print metrics. Also, add handlers to
# evaluator (e.g. early stopping, model checkpointing that depend on val_acc)
metrics = val_evaluator.run(val_loader).metrics
history[f"val_acc"].append(metrics["acc"])
history[f"val_loss"].append(metrics["loss"])
pbar.log_message(
f"\tval acc = {metrics['acc']}, val loss = {metrics['loss']}"
)
pseudo_label_manager = PseudoLabelManager(
pool=self._pool,
model=self._model,
threshold=self._threshold,
log_dir=self._log_dir,
device=self._device,
**self._pool_loader_kwargs,
)
trainer = create_pseudo_label_trainer(
model=self._model,
loss=self._loss,
optimiser=self._optimiser,
train_loader=train_loader,
val_loader=val_loader,
pseudo_label_manager=pseudo_label_manager,
rfls_len=self._rfls_len,
patience=self._patience,
reload_best=self._reload_best,
epochs=epochs,
device=self._device,
*self._args,
**self._kwargs,
)
# the trainer's output is the running average of train_loss and train_acc (from the
# last epoch of the supervised trainer)
pbar.attach(trainer, output_transform=lambda x: {"loss": x[0], "acc": x[1]})
if val_loader is not None and self._patience:
es = EarlyStopper(
self._model, self._patience, trainer, key="acc", mode="max"
)
es.attach(val_evaluator)
trainer.add_event_handler(Events.EPOCH_COMPLETED, _log_metrics)
trainer.run(
range(iterations),
max_epochs=iterations,
epoch_length=1,
)
if val_loader is not None and self._patience and self._reload_best:
es.reload_best()
history["train_size"] = np.array(pseudo_label_manager.acquired_sizes) + len(
train_loader.dataset
)
return history
def evaluate(self, data_loader: torchdata.DataLoader) -> dict:
evaluator = create_supervised_evaluator(
self._model,
metrics={"acc": Accuracy(), "loss": Loss(self._loss)},
device=self._device,
)
return evaluator.run(data_loader).metrics
def main(threshold: float, b: int):
manual_seed(42)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
kwargs = dict(num_workers=4, pin_memory=True)
BATCH_SIZE = 64
REPS = 3
ITERS = 14
VAL_SIZE = 5_000
MIN_TRAIN_LEN = 12_500
SSL_ITERATIONS = 200
EPOCHS = 200
accs = defaultdict(list)
template = f"thresh_{threshold}_b_{b}"
calib_metrics = Path("calib_metrics") / template
saved_models = Path("saved_models") / template
metrics = Path("metrics") / template
calib_metrics.mkdir(parents=True)
saved_models.mkdir(parents=True)
metrics.mkdir(parents=True)
train, pool, test = Dataset.MNIST.get_fixed()
val, pool = torchdata.random_split(pool, (VAL_SIZE, len(pool) - VAL_SIZE))
pool = UnlabelledDataset(pool)
test_loader = torchdata.DataLoader(test, batch_size=512, shuffle=False, **kwargs)
val_loader = torchdata.DataLoader(val, batch_size=512, shuffle=False, **kwargs)
for r in range(1, REPS + 1):
model = MCDropout(Dataset.MNIST.model, forward=20, fast=True).to(device)
bald = BALD(eval_fwd_exp(model), device=device, batch_size=512, **kwargs)
dm = DataManager(train, pool, bald)
dm.reset() # to reset pool
print(f"=== repeat #{r} of {REPS} ===")
for i in range(1, ITERS + 1):
# don't reset weights: let ephemeral trainer take care of it
# since we're collecting calibration metrics,
# make pool return targets too. (i.e. debug mode)
with dm.unlabelled.tmp_debug():
trainer = EphemeralTrainer(
model,
dm.unlabelled,
F.nll_loss,
"Adam",
threshold=threshold,
random_fixed_length_sampler_length=MIN_TRAIN_LEN,
log_dir=(calib_metrics / f"rep_{r}" / f"iter_{i}"),
patience=3,
reload_best=True,
device=device,
pool_loader_kwargs=kwargs,
)
train_loader = torchdata.DataLoader(
dm.labelled,
batch_size=BATCH_SIZE,
sampler=RandomFixedLengthSampler(
dm.labelled, MIN_TRAIN_LEN, shuffle=True
),
**kwargs,
)
with timeop() as t:
history = trainer.fit(
train_loader,
val_loader,
iterations=SSL_ITERATIONS,
epochs=EPOCHS,
)
# eval on test set
test_metrics = trainer.evaluate(test_loader)
accs[dm.n_labelled].append(test_metrics["acc"])
print(f"-- Iteration {i} of {ITERS} --")
print(
f"\ttrain: {dm.n_labelled}; pool: {dm.n_unlabelled}\n"
f"\t[test] acc: {test_metrics['acc']}; time: {t}"
)
# save stuff
with open(metrics / f"rep_{r}_iter_{i}.pkl", "wb") as fp:
payload = {
"history": history,
"test_metrics": test_metrics,
"labelled_classes": dm.unlabelled.labelled_classes,
"labelled_indices": dm.unlabelled.labelled_indices,
}
pickle.dump(payload, fp)
torch.save(model.state_dict(), saved_models / f"rep_{r}_iter_{i}.pth")
# finally, acquire points
dm.acquire(b)
with open(f"{template}_accs.pkl", "wb") as | |
"""
from scipy.ndimage import zoom
if os.path.isfile(fn):
segm = fits.getdata(fn)
segm_out = zoom(segm, zoom=0.5, order=1)
fits.writeto(fn_out, segm_out, overwrite=True)
else:
pass
def process_resampling(fn, bounds, obj_name, band,
pixel_scale=DF_pixel_scale, r_scale=12,
mag_limit=15, dir_measure='./', work_dir='./',
factor=1, verbose=True):
from .image import ImageList
# turn bounds into a 2d integer array
bounds = np.atleast_2d(bounds).astype(int)
if factor!=1:
if verbose:
logger.info('Resampling by a factor of {0:.1g}...'.format(factor))
scale = 1/factor
fn_rp = "{0}_{2}.{1}".format(*os.path.basename(fn).rsplit('.', 1) + ['rp'])
fn_rp = os.path.join(work_dir, fn_rp)
bounds_rp = np.array([np.round(b_*scale) for b_ in bounds], dtype=int)
# resample image if it does not exist
if not os.path.exists(fn_rp):
write_downsample_fits(fn, fn_rp, scale, order=3)
# construct Image List for original image
DF_Images = ImageList(fn, bounds, obj_name, band,
pixel_scale=pixel_scale)
# read faint stars info and brightness measurement
DF_Images.read_measurement_tables(dir_measure,
r_scale=r_scale,
mag_limit=mag_limit)
# new quantities and names
r_scale *= scale
pixel_scale /= scale
obj_name_rp = obj_name + '_rp'
if verbose:
logger.info('Transforming coordinates for measurement tables...')
for Img, bound, bound_rp in zip(DF_Images, bounds, bounds_rp):
# transform coordinates and write as new tables
old_range = 'X[{0:d}-{2:d}]Y[{1:d}-{3:d}]'.format(*bound)
new_range = 'X[{0:d}-{2:d}]Y[{1:d}-{3:d}]'.format(*bound_rp)
table_faint, table_norm = Img.table_faint, Img.table_norm
fn_catalog = os.path.join(dir_measure,
"%s-catalog_PS_%s_all.txt"%(obj_name_rp, band.lower()))
fn_norm = os.path.join(dir_measure, "%s-norm_%dpix_%smag%s_%s.txt"\
%(obj_name_rp, r_scale, band.lower(), mag_limit, new_range))
transform_table_coordinates(table_faint, fn_catalog, scale)
transform_table_coordinates(table_norm, fn_norm, scale)
# reproject segmentation
if verbose:
logger.info('Resampling segmentation for bounds: {}'.format(bound))
fn_seg = os.path.join(dir_measure,
"%s-segm_%s_catalog_%s.fits"\
%(obj_name, band.lower(), old_range))
fn_seg_out = os.path.join(dir_measure, "%s-segm_%s_catalog_%s.fits"\
%(obj_name_rp, band.lower(), new_range))
downsample_segmentation(fn_seg, fn_seg_out, scale)
else:
fn_rp, bounds_rp = fn, bounds
return fn_rp, bounds_rp
### Catalog / Data Manipulation Helper ###
def id_generator(size=6, chars=None):
if chars is None:
chars = string.ascii_uppercase + string.digits
return ''.join(random.choice(chars) for _ in range(size))
def crop_catalog(cat, bounds, keys=("X_IMAGE", "Y_IMAGE"), sortby=None):
Xmin, Ymin, Xmax, Ymax = bounds
A, B = keys
crop = (cat[A]>=Xmin) & (cat[A]<=Xmax) & (cat[B]>=Ymin) & (cat[B]<=Ymax)
if sortby is not None:
cat_crop = cat[crop]
cat_crop.sort(keys=sortby)
return cat_crop
else:
return cat[crop]
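# Minimal usage sketch (illustrative only, not in the original source; assumes astropy's
# Table is imported, as it is used elsewhere in this module): crop a catalog to a bounding
# box and sort the result. Column names follow crop_catalog's defaults ("X_IMAGE", "Y_IMAGE").
def _example_crop_catalog():
    tab = Table({'X_IMAGE': [10., 50., 90.],
                 'Y_IMAGE': [10., 50., 90.],
                 'MAG_AUTO': [18., 15., 20.]})
    # keeps only the first two rows and orders them by MAG_AUTO
    return crop_catalog(tab, bounds=(0, 0, 60, 60), sortby='MAG_AUTO')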
def crop_pad(image, pad):
""" Crop the padding of the image """
shape = image.shape
return image[pad:shape[0]-pad, pad:shape[1]-pad]
def crop_image(data, bounds, wcs=None, draw=False, **kwargs):
""" Crop the data (and segm map if given) with the given bouds.
Note boundaries are in 1-based pixel coordianates. """
Xmin, Ymin, Xmax, Ymax = bounds
# X, Y image size
nX, nY = (Xmax-Xmin, Ymax-Ymin)
# center in 1-based pixel coordinates
cen = (Xmin+(nX-1)/2., Ymin+(nY-1)/2.)
# make cutout
cutout = Cutout2D(data, cen, (nY, nX), wcs=wcs)
if draw:
from .plotting import draw_bounds
draw_bounds(data, bounds, **kwargs)
# also return cutout of wcs if given
if wcs is None:
return cutout.data
else:
return cutout.data, cutout.wcs
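# Minimal usage sketch (illustrative only, not in the original source): cut a
# 40 x 30 (ny x nx) region out of a 100 x 100 array using 1-based bounds.
def _example_crop_image():
    data = np.arange(100 * 100).reshape(100, 100)
    cutout = crop_image(data, bounds=(11, 21, 41, 61))  # (Xmin, Ymin, Xmax, Ymax)
    return cutout.shape  # expected (40, 30)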
def transform_coords2pixel(table, wcs, name='',
RA_key="RAJ2000", DE_key="DEJ2000", origin=1):
""" Transform the RA/DEC columns in the table into pixel coordinates given wcs"""
coords = np.vstack([np.array(table[RA_key]),
np.array(table[DE_key])]).T
pos = wcs.wcs_world2pix(coords, origin)
table.add_column(Column(np.around(pos[:,0], 4)*u.pix), name="X_CATALOG")
table.add_column(Column(np.around(pos[:,1], 4)*u.pix), name="Y_CATALOG")
table.add_column(Column(np.arange(len(table))+1, dtype=int),
index=0, name="ID"+'_'+name)
return table
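# Minimal usage sketch (illustrative only, not in the original source): a toy tangent-plane
# WCS whose reference value sits at the catalog position, so X_CATALOG/Y_CATALOG come out
# at the reference pixel (50, 50) with origin=1.
def _example_transform_coords2pixel():
    from astropy.wcs import WCS
    w = WCS(naxis=2)
    w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
    w.wcs.crval = [180.0, 0.0]
    w.wcs.crpix = [50.0, 50.0]
    w.wcs.cdelt = [-0.001, 0.001]
    tab = Table({'RAJ2000': [180.0], 'DEJ2000': [0.0]})
    return transform_coords2pixel(tab, w, name='demo')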
def merge_catalog(SE_catalog, table_merge, sep=5 * u.arcsec,
RA_key="RAJ2000", DE_key="DEJ2000", keep_columns=None):
""" Crossmatch and merge two catalogs by coordinates"""
c_SE = SkyCoord(ra=SE_catalog["X_WORLD"], dec=SE_catalog["Y_WORLD"])
c_tab = SkyCoord(ra=table_merge[RA_key], dec=table_merge[DE_key])
idx, d2d, d3d = c_SE.match_to_catalog_sky(c_tab)
match = d2d < sep
cat_SE_match = SE_catalog[match]
cat_tab_match = table_merge[idx[match]]
cat_tab_match.add_column(cat_SE_match["NUMBER"], index=0, name="NUMBER")
cat_match = join(cat_SE_match, cat_tab_match, keys='NUMBER')
if keep_columns is not None:
cat_match.keep_columns(keep_columns)
return cat_match
def read_measurement_table(dir_name, bounds0,
obj_name='', band='G',
pad=50, r_scale=12,
mag_limit=15):
""" Read measurement tables from the directory """
use_PS1_DR2 = 'PS2' in dir_name
# Magnitude name
b_name = band.lower()
mag_name = b_name+'MeanPSFMag' if use_PS1_DR2 else b_name+'mag'
# Clipped bounds
patch_Xmin0, patch_Ymin0, patch_Xmax0, patch_Ymax0 = bounds0
bounds = (patch_Xmin0+pad, patch_Ymin0+pad,
patch_Xmax0-pad, patch_Ymax0-pad)
## Read measurement for faint stars from catalog
# Faint star catalog name
fname_catalog = os.path.join(dir_name, "%s-catalog_PS_%s_all.txt"%(obj_name, b_name))
# Check if the file exist before read
assert os.path.isfile(fname_catalog), f"Table {fname_catalog} does not exist!"
logger.debug(f"Reading catalog {fname_catalog}.")
table_catalog = Table.read(fname_catalog, format="ascii")
mag_catalog = table_catalog[mag_name]
# stars fainter than the magnitude limit are fixed as background; those fainter than mag 22 are ignored
table_faint = table_catalog[(mag_catalog>=mag_limit) & (mag_catalog<22)]
table_faint = crop_catalog(table_faint,
keys=("X_CATALOG", "Y_CATALOG"),
bounds=bounds)
## Read measurement for bright stars
# Catalog name
range_str = "X[{:d}-{:d}]Y[{:d}-{:d}]"
range_str = range_str.format(patch_Xmin0, patch_Xmax0, patch_Ymin0, patch_Ymax0)
fname_norm = os.path.join(dir_name, "%s-norm_%dpix_%smag%s_%s.txt"\
%(obj_name, r_scale, b_name, mag_limit, range_str))
# Check if the file exist before read
assert os.path.isfile(fname_norm), f"Table {fname_norm} does not exist"
logger.debug(f"Reading catalog {fname_norm}.")
table_norm = Table.read(fname_norm, format="ascii")
# Crop the catalog
table_norm = crop_catalog(table_norm, bounds=bounds0)
# Do not use flagged measurement
Iflag = table_norm["Iflag"]
table_norm = table_norm[Iflag==0]
return table_faint, table_norm
def assign_star_props(ZP, sky_mean, image_shape, pos_ref,
table_norm, table_faint=None,
r_scale=12, mag_threshold=[13.5,12],
psf=None, keys='Imed', verbose=True,
draw=True, save=False, save_dir='./'):
""" Assign position and flux for faint and bright stars from tables. """
from .modeling import Stars
# Positions & Flux (estimate) of bright stars from measured norm
star_pos = np.vstack([table_norm["X_CATALOG"],
table_norm["Y_CATALOG"]]).T - pos_ref
mag = table_norm['MAG_AUTO_corr'] if 'MAG_AUTO_corr' in table_norm.colnames else table_norm['MAG_AUTO']
Flux = 10**((np.array(mag)-ZP)/(-2.5))
# Estimate of brightness I at r_scale (I = Intensity - BKG) and flux
z_norm = table_norm['Imed'].data - table_norm['Isky'].data
z_norm[z_norm<=0] = min(1, z_norm[z_norm>0].min())
# Convert and printout thresholds
Flux_threshold = 10**((np.array(mag_threshold) - ZP) / (-2.5))
if verbose:
msg = "Magnitude Thresholds: {0}, {1} mag"
msg = msg.format(*mag_threshold)
logger.info(msg)
msg = "Flux Thresholds: {0}, {1} ADU"
msg = msg.format(*np.around(Flux_threshold,2))
logger.info(msg)
try:
SB_threshold = psf.Flux2SB(Flux_threshold, BKG=sky_mean, ZP=ZP, r=r_scale)
msg = "Surface Brightness Thresholds: {0}, {1} mag/arcsec^2 "
msg = msg.format(*np.around(SB_threshold,1))
msg += "at {0} pix for sky = {1:.3f}".format(r_scale, sky_mean)
logger.info(msg)
except Exception:
pass
# Bright stars in model
stars_bright = Stars(star_pos, Flux, Flux_threshold=Flux_threshold,
z_norm=z_norm, r_scale=r_scale, BKG=sky_mean)
stars_bright = stars_bright.remove_outsider(image_shape, gap=[3*r_scale, r_scale])
stars_bright._info()
if (table_faint is not None) & ('MAG_AUTO_corr' in table_faint.colnames):
table_faint['FLUX_AUTO_corr'] = 10**((table_faint['MAG_AUTO_corr']-ZP)/(-2.5))
try:
ma = table_faint['FLUX_AUTO_corr'].data.mask
except AttributeError:
ma = np.isnan(table_faint['FLUX_AUTO_corr'])
# Positions & Flux of faint stars from catalog
star_pos_faint = np.vstack([table_faint["X_CATALOG"].data[~ma],
table_faint["Y_CATALOG"].data[~ma]]).T - pos_ref
Flux_faint = np.array(table_faint['FLUX_AUTO_corr'].data[~ma])
# Combine two samples, make sure they do not overlap
star_pos = np.vstack([star_pos, star_pos_faint])
Flux = np.concatenate([Flux, Flux_faint])
stars_all = Stars(star_pos, Flux, Flux_threshold, BKG=sky_mean)
if draw:
stars_all.plot_flux_dist(label='All', color='plum')
stars_bright.plot_flux_dist(label='Model', color='orange', ZP=ZP,
save=save, save_dir=save_dir)
plt.show()
return stars_bright, stars_all
def interp_I0(r, I, r0, r1, r2):
""" Interpolate I0 at r0 with I(r) between r1 and r2 """
range_intp = (r>r1) & (r<r2)
logI0 = np.interp(r0, r[range_intp], np.log10(I[range_intp]))
return 10**logI0
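# Worked example (illustrative only, not in the original source): for the toy profile
# I(r) = 100 / r**2, interpolating at r0 = 5 using only points with 3 < r < 8
# should return approximately 100 / 5**2 = 4.
def _example_interp_I0():
    r = np.linspace(1, 10, 91)
    I = 100.0 / r**2
    return interp_I0(r, I, r0=5.0, r1=3.0, r2=8.0)  # ~4.0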
def compute_mean_I(r, I, r1, r2):
""" Compute mean I under I(r) between r1 and r2 """
range_intg = (r>r1) & (r<r2)
r_range = r[range_intg]
return np.trapz(I[range_intg], r_range)/(r_range.max()-r_range.min())
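# Worked example (illustrative only, not in the original source): the trapezoidal mean
# of a constant profile over (r1, r2) is the constant itself.
def _example_compute_mean_I():
    r = np.linspace(0, 10, 101)
    I = np.full_like(r, 7.0)
    return compute_mean_I(r, I, r1=2.0, r2=8.0)  # ~7.0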
def fit_n0(dir_measure, bounds,
obj_name, band, BKG, ZP,
pixel_scale=DF_pixel_scale,
fit_range=[20,40], dr=0.1,
N_fit=15, mag_max=13, mag_limit=15,
I_norm=24, norm='intp',
r_scale=12, sky_std=3,
plot_brightest=True, draw=True,
save=False, save_dir="./"):
"""
Fit n0, the power-law index of the first component, using bright stars.
Parameters
----------
dir_measure : str
Directory storing the measurement
bounds : 1d list, [Xmin, Ymin, Xmax, Ymax]
Fitting boundary
band : str, 'g' 'G' 'r' or 'R'
Filter name
obj_name : str
Object name
BKG : float
Background value for profile measurement
ZP : float
Zero-point
pixel_scale : float, optional, default 2.5
Pixel scale in arcsec/pix
fit_range : 2-list, optional, default [20, 40]
Range for fitting in arcsec
dr : float, optional, default 0.1
Profile step parameter
N_fit : int, optional, default 15
Number of stars used to fit n0
mag_max : float, optional, default 13
Max magnitude of stars used to fit n0
I_norm : float, optional, default 24
SB at which profiles are normed
norm : 'intp' or 'intg', optional, default 'intp'
Normalization method used to scale the profiles:
'intg' uses the mean value, 'intp' uses the interpolated value.
r_scale : int, optional, default 12
Radius (in pix) at which the brightness is measured
Default is 30" for Dragonfly.
mag_limit : float, optional, default 15
Upper magnitude limit below which stars are measured
sky_std : float, optional, default 3
Sky stddev (for display only)
plot_brightest : bool, optional, default True
Whether to draw profile of the brightest star
draw : bool, optional, default True
Whether to draw profiles and fit process
| |
to plot the actual coeff, not the target
# should specify this in the paper
except AssertionError:
print('Clustering target not reached')
unsuccessful_flags_3.append(i)
peak_times_3[i] = np.nan
peak_heights_3[i] = np.nan
peak_heights_sd_3[i] = np.nan
period_prevalences_3[i] = np.nan
period_prevalences_sd_3[i] = np.nan
dirname_parent = os.path.dirname(__file__)
dirname = os.path.join(dirname_parent, 'Experiments', 'Paper', 'Cache')
id_params = (
n, p, p_i, mc_iterations, max_t, interval, seed, t_i, t_c, t_r, t_d, t_t, p_q, p_t, quarantine_time, resolution,
epsilon_clustering)
# normal hashes are salted between runs -> use something that is persistent
tag = str(hashlib.md5(str(id_params).encode('utf8')).hexdigest())
with open(os.path.join(dirname, tag + '_metrics.p'), 'wb') as f:
out = [Cs, unsuccessful_flags_1, peak_times_1, peak_heights_1, period_prevalences_1,
Cs, unsuccessful_flags_2, peak_times_2, peak_heights_2, period_prevalences_2,
Cs, unsuccessful_flags_3, peak_times_3, peak_heights_3, period_prevalences_3]
pickle.dump(out, f)
fig, axes = plt.subplots(3, 1, sharex=True, figsize=(14, 14 / 16 * 9))
# fig.subplots_adjust(wspace = 0.5)
ax1, ax2, ax3 = axes
ax1.plot(Cs, peak_times_1, Cs, peak_times_2, Cs, peak_times_3)
ax1.set_ylabel('Peak time')
ax2.plot(Cs, peak_heights_1, Cs, peak_heights_2, Cs, peak_heights_3)
ax2.set_ylabel('Peak prevalence')
ax3.plot(Cs, period_prevalences_1, Cs, period_prevalences_2, Cs, period_prevalences_3)
ax3.set_ylabel('Fraction of affected')
ax3.set_xlabel('C(g)')
# labels = [interval[0],] + list(['' for i in range(len(ps)-2)]) + [interval[1],]
ax3.set_xticks(Cs[1:-1], minor=True)
ax3.set_xticks([interval[0], interval[1]])
# plt.tick_params(
# axis='x', # changes apply to the x-axis
# which='minor', # both major and minor ticks are affected
# # bottom=False, # ticks along the bottom edge are off
# # top=False, # ticks along the top edge are off
# labelbottom=False) # labels along the bottom edge are off
# plt.xticks([interval[0],interval[1]])
plt.legend(['Vanilla', 'Quarantine', 'Tracing'])
parent = os.path.dirname(path)
fig.savefig(os.path.join(parent, 'Pics', 'Cvaried_n{}_C{}_comp'.format(
n, str(interval[0]) + 'to' + str(interval[1])) + '.png'), bbox_inches='tight')
return out # Cs, unsuccessful_flags, times, peaks, period_prev
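# Side note (sketch, not part of the original experiments): the cache tag above uses hashlib
# rather than Python's built-in hash() because hash() is salted per process (PYTHONHASHSEED),
# so its value would change between runs and break the file cache. An md5 digest of the
# stringified parameter tuple is stable across runs.
def _example_cache_tag(id_params):
    return str(hashlib.md5(str(id_params).encode('utf8')).hexdigest())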
# OLD, now this is in vary_C_pi_comp_corrected
# def vary_C_comp_corrected(res, n, p, p_i, mc_iterations, max_t, interval=None, seed=0, force_recompute=False,
# path=None):
# # BROKEN! Since martin's commit?
#
# # measure effect of clustering coeff on tracing effectiveness. Here we scale according to the vanilla outcome
#
# if not interval:
# # THEORY: the average clustering coeff of erdos renyi networks is p!
# # so I test around that to see what changed
# interval = (0.5 * p, 10 * p)
#
# Cs = np.linspace(interval[0], interval[1], endpoint=True, num=res)
#
# # the following two variables save the actual values that were achieved by the heuristic.
# # In theory, these should be approximately the same in each net
# achieved_clusterings = np.zeros((3, res))
# achieved_disps = np.zeros((3, res))
#
# # vanilla
# peak_times_1 = np.ndarray(res)
# peak_heights_1 = np.ndarray(res)
# peak_heights_sd_1 = np.ndarray(res)
# period_prevalences_1 = np.ndarray(res)
# period_prevalences_sd_1 = np.ndarray(res)
# unsuccessful_flags_1 = []
# for i, C in tqdm(enumerate(Cs), total=res,desc='Vanilla'):
# net, mean_counts, sd_counts, t_peak, mean_peak, sd_peak, mean_prevalence, sd_prevalence, equilib_flag , achieved_clustering, achieved_disp = \
# simple_experiment(n, p, p_i, mc_iterations, max_t, seed=seed + i, mode='vanilla',
# force_recompute=force_recompute,
# path=path, clustering=C)
#
# assert equilib_flag, 'Sim not complete?'
#
# peak_times_1[i] = t_peak
# peak_heights_1[i] = mean_peak
# peak_heights_sd_1[i] = sd_peak
# period_prevalences_1[i] = mean_prevalence
# period_prevalences_sd_1[i] = sd_prevalence
#
# achieved_clusterings[0, i] = achieved_clustering
# achieved_disps[0, i] = achieved_disp
#
#
# # exposed = counts[EXP_STATE, :]
# # infected = counts[INF_STATE, :]
# # ep_curve = exposed + infected
# #
# # exposed_sd = sd[EXP_STATE, :]
# # infected_sd = sd[INF_STATE, :]
# # ep_curve_sd = exposed_sd + infected_sd
# #
# # # these are the point prevalence +- sd
# # upper_alpha = (ep_curve[t_peak] + ep_curve_sd[t_peak])/n
# # lower_alpha = (ep_curve[t_peak] - ep_curve_sd[t_peak])/n
# #
# # recovered = counts[REC_STATE, :]
# # recovered_sd = sd[REC_STATE, :]
# #
# #
# # upper_beta = recovered[-1]-recovered_sd/n
#
#
# # quarantine
# peak_times_2 = np.ndarray(res)
# peak_heights_2 = np.ndarray(res)
# peak_heights_sd_2 = np.ndarray(res)
# period_prevalences_2 = np.ndarray(res)
# period_prevalences_sd_2 = np.ndarray(res)
# unsuccessful_flags_2 = []
# for i, C in tqdm(enumerate(Cs), total=res,desc='Quarantine'):
# net, mean_counts, sd_counts, t_peak, mean_peak, sd_peak, mean_prevalence, sd_prevalence, equilib_flag , achieved_clustering, achieved_disp = \
# simple_experiment(n, p, p_i, mc_iterations, max_t, seed=seed + i + res, mode='quarantine',
# force_recompute=force_recompute,
# path=path, clustering=C)
#
# assert equilib_flag, 'Sim not complete?'
#
# peak_times_2[i] = t_peak
# peak_heights_2[i] = mean_peak / peak_heights_1[i]
# peak_heights_sd_2[i] = sd_peak
# period_prevalences_2[i] = mean_prevalence / period_prevalences_1[i]
# period_prevalences_sd_2[i] = sd_prevalence
#
# achieved_clusterings[1, i] = achieved_clustering
# achieved_disps[1, i] = achieved_disp
#
#
#
#
# # tracing
# peak_times_3 = np.ndarray(res)
# peak_heights_3 = np.ndarray(res)
# peak_heights_sd_3 = np.ndarray(res)
# period_prevalences_3 = np.ndarray(res)
# period_prevalences_sd_3 = np.ndarray(res)
# unsuccessful_flags_3 = []
# for i, C in tqdm(enumerate(Cs), total=res,desc='Tracing'):
# net, mean_counts, sd_counts, t_peak, mean_peak, sd_peak, mean_prevalence, sd_prevalence, equilib_flag , achieved_clustering, achieved_disp = \
# simple_experiment(n, p, p_i, 2*mc_iterations, max_t, seed=seed + i + 2 * res, mode='tracing',
# force_recompute=force_recompute,
# path=path, clustering=C)
#
# assert equilib_flag, 'Sim not complete?'
#
# peak_times_3[i] = t_peak
# peak_heights_3[i] = mean_peak / peak_heights_1[i]
# peak_heights_sd_3[i] = sd_peak
# period_prevalences_3[i] = mean_prevalence / period_prevalences_1[i]
# period_prevalences_3_sd_2[i] = sd_prevalencea
#
# achieved_clusterings[2, i] = achieved_clustering
# achieved_disps[2, i] = achieved_disp
#
# dirname_parent = os.path.dirname(__file__)
# dirname = os.path.join(dirname_parent, 'Experiments', 'Paper', 'Cache')
#
# id_params = (
# n, p, p_i, mc_iterations, max_t, interval, seed, t_i, t_c, t_r, t_d, t_t, p_q, p_t, quarantine_time, resolution,
# epsilon_clustering)
# # normal hashes are salted between runs -> use something that is persistent
# tag = str(hashlib.md5(str(id_params).encode('utf8')).hexdigest())
#
# with open(os.path.join(dirname, tag + '_metrics_corrected.p'), 'wb') as f:
# out = [Cs, unsuccessful_flags_1, peak_times_1, peak_heights_1, period_prevalences_1,
# Cs, unsuccessful_flags_2, peak_times_2, peak_heights_2, period_prevalences_2,
# Cs, unsuccessful_flags_3, peak_times_3, peak_heights_3, period_prevalences_3,
# achieved_clusterings, achieved_disps]
#
# pickle.dump(out, f)
#
# # two modes for visualization
# show_both = False
# if show_both:
# fig, axes = plt.subplots(2, 1, sharex=True, figsize=(14, 14 / 16 * 9))
#
# # fig.subplots_adjust(wspace = 0.5)
# ax2, ax3 = axes
#
# # ax1.plot(Cs, peak_times_1,Cs, peak_times_2,Cs, peak_times_3)
# # ax1.set_ylabel('Peak time')
#
# ax2.plot(Cs, peak_heights_2, 'C1')
# ax2.plot(Cs, peak_heights_3, 'C2')
# ax2.set_ylabel('Scaled peak height')
#
# ax3.plot(Cs, period_prevalences_2, 'C1')
# ax3.plot(Cs, period_prevalences_3, 'C2')
# ax3.set_ylabel('Scaled period prevalence')
# ax3.set_xlabel('C(g)')
# # labels = [interval[0],] + list(['' for i in range(len(ps)-2)]) + [interval[1],]
# ax3.set_xticks(Cs, minor=False)
# ax3.xaxis.set_major_formatter(FormatStrFormatter('%.3f'))
#
# # ax3.set_xticks([interval[0], interval[1]])
#
# # plt.tick_params(
# # axis='x', # changes apply to the x-axis
# # which='minor', # both major and minor ticks are affected
# # # bottom=False, # ticks along the bottom edge are off
# # # top=False, # ticks along the top edge are off
# # labelbottom=False) # labels along the bottom edge are off
#
# ax_upper_axis = ax2.twiny()
#
# ax_upper_axis.set_xlim(ax3.get_xlim())
# ax_upper_axis.set_xticks(Cs)
# ax_upper_axis.set_xticklabels(["{:.3f}".format(a) for a in achieved_disps.mean(axis=0)])
# # ax_upper_axis.xaxis.set_major_formatter(FormatStrFormatter('%.3f'))
# ax_upper_axis.set_xlabel('D(g)')
#
# # plt.xticks([interval[0],interval[1]])
# ax3.legend(['Quarantine', 'Tracing'])
#
# parent = os.path.dirname(path)
# fig.savefig(os.path.join(parent, 'Pics', 'Cvaried_n{}_C{}_comp_corrected'.format(
# n, str(interval[0]) + 'to' + str(interval[1])) + '.png'), bbox_inches='tight')
# else:
# fig, axes = plt.subplots(2, 1, sharex=True, figsize=(14, 14 / 16 * 9))
#
# # fig.subplots_adjust(wspace = 0.5)
# ax2, ax3 = axes
#
# # ax1.plot(Cs, peak_times_1,Cs, peak_times_2,Cs, peak_times_3)
# # ax1.set_ylabel('Peak time')
#
# ax2.plot(Cs, peak_heights_3, 'C2')
# ax2.set_ylabel('Scaled peak height')
#
# ax3.plot(Cs, period_prevalences_3, 'C2')
# ax3.set_ylabel('Scaled period prevalence')
# ax3.set_xlabel('C(g)')
# ax3.set_xticks(Cs, minor=False)
# ax3.xaxis.set_major_formatter(FormatStrFormatter('%.3f'))
#
# # ax3.set_xticks(Cs[1:-1], minor=True)
# # ax3.set_xticks([interval[0], interval[1]])
# # ax3.set_xticks(Cs, minor=True)
#
# ax_upper_axis = ax2.twiny()
#
# ax_upper_axis.set_xlim(ax3.get_xlim())
# ax_upper_axis.set_xticks(Cs)
# ax_upper_axis.set_xticklabels(["{:.3f}".format(a) for a in achieved_disps.mean(axis=0)])
# # ax_upper_axis.xaxis.set_major_formatter(FormatStrFormatter('%.3f'))
# ax_upper_axis.set_xlabel('D(g)')
#
# # plt.legend(['Quarantine', 'Tracing'])
# ax3.legend(['Tracing', ])
#
# parent = os.path.dirname(path)
# fig.savefig(os.path.join(parent, 'Pics', 'Cvaried_n{}_C{}_comp_corrected_tracing'.format(
# n, str(interval[0]) + 'to' + str(interval[1])) + '.png'), bbox_inches='tight')
#
# return out
def vary_C_pi_comp_corrected(res, n, p, p_is: tuple, mc_iterations, max_t, interval=None, seed=0, force_recompute=False,
path=None):
# measure effect of clustering coeff on tracing effectiveness. Here we scale according to the vanilla outcome
# Several values of the infectivity p_i are used
Cs = np.linspace(interval[0], interval[1], endpoint=True, num=res)
n_p_i = len(p_is)
assert n_p_i <= 5, 'Fewer values for p_i should be selected for visibility'
# the following two variables save the actual values that were achieved by the heuristic.
# In theory, these should be approximately the same in each net
achieved_clusterings = np.zeros((3 * n_p_i, res))
achieved_disps = np.zeros((3 * n_p_i, res))
# vanilla
peak_times_1 = np.ndarray((res, n_p_i))
peak_heights_1 = np.ndarray((res, n_p_i))
peak_heights_sd_1 = np.ndarray((res, n_p_i))
period_prevalences_1 = np.ndarray((res, n_p_i))
period_prevalences_sd_1 = np.ndarray((res, n_p_i))
for i, C in tqdm(enumerate(Cs), total=res, desc='Vanilla'):
for j, p_inf in enumerate(p_is):
net, mean_counts, sd_counts, t_peak, mean_peak, sd_peak, mean_prevalence, sd_prevalence, equilib_flag, achieved_clustering, achieved_disp = \
simple_experiment(n, p, p_inf, mc_iterations, max_t, seed=j * 156484 + seed + i, mode='vanilla',
force_recompute=force_recompute,
path=path, clustering=C)
assert equilib_flag, 'Sim not complete?'
peak_times_1[i, j] = t_peak
peak_heights_1[i, j] = mean_peak
peak_heights_sd_1[i, j] = sd_peak
period_prevalences_1[i, j] = mean_prevalence
period_prevalences_sd_1[i, j] = sd_prevalence
achieved_clusterings[j, i] = achieved_clustering
achieved_disps[j, i] = achieved_disp
# quarantine
peak_times_2 = np.ndarray((res, n_p_i))
peak_heights_2 = np.ndarray((res, n_p_i))
peak_heights_sd_2 = np.ndarray((res, n_p_i))
period_prevalences_2 = np.ndarray((res, n_p_i))
period_prevalences_sd_2 = np.ndarray((res, n_p_i))
for i, C in tqdm(enumerate(Cs), total=res, desc='Quarantine'):
for j, p_inf in enumerate(p_is):
net, mean_counts, sd_counts, t_peak, mean_peak, sd_peak, mean_prevalence, sd_prevalence, equilib_flag, achieved_clustering, achieved_disp = \
simple_experiment(n, p, p_inf, mc_iterations, max_t, seed=j * 84265 + seed + i + res, mode='quarantine',
force_recompute=force_recompute,
path=path, clustering=C)
assert equilib_flag, 'Sim not complete?'
peak_times_2[i, j] = t_peak
| |
#!/usr/bin/python3
# -*- encoding: utf-8 -*-
__version__ = 1.2
from tkinter.ttk import *
from tkinter.messagebox import *
from tkinter.scrolledtext import *
from tkinter import *
from bs4 import BeautifulSoup
from urllib.request import urlopen
from mailcomposer import MailComposer
from threading import Thread
import os
import sys
import sqlite3
import webbrowser
import ttips
import subprocess
import socket
listaRastreio = []
listaPendentes = []
listaEntregues = []
listaTodos = []
listaSepararEntregues = []
listaSepararPendentes = []
# Prevents the program from being opened again while it is already running
pid = os.getpid()
pidfile = '/tmp/aprastreio.pid'
if not os.path.isfile(pidfile):
os.system(f'touch {pidfile}')
os.system(f'echo {pid} >> {pidfile}')
else:
sys.exit(-1)
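# Sketch (suggestion only, not part of the original program): the pid file above is never
# removed, so a crash would leave a stale lock behind and block future launches. A cleanup
# hook along these lines would release the lock on normal exit:
#
#   import atexit
#   atexit.register(lambda: os.path.isfile(pidfile) and os.remove(pidfile))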
# Creates the database if it does not exist
db = os.path.expanduser('~/Dropbox/aprastreio/banco/')
if not os.path.exists(db):
os.makedirs(db)
banco = os.path.join(os.path.dirname(db), 'rastreios.db')
conexao = sqlite3.connect(banco, check_same_thread=False)
c = conexao.cursor()
c.execute('CREATE TABLE IF NOT EXISTS rastreio (id INTEGER PRIMARY KEY AUTOINCREMENT,'
'codrastreio TEXT VARCHAR(13) UNIQUE NOT NULL, objeto TEXT VARCHAR(50) NOT NULL)')
c.execute('CREATE TABLE IF NOT EXISTS entregues (id INTEGER PRIMARY KEY AUTOINCREMENT,'
'codrastreio TEXT VARCHAR(13) UNIQUE NOT NULL, objeto TEXT VARCHAR(50) NOT NULL)')
c.execute('CREATE TABLE IF NOT EXISTS pendentes (id INTEGER PRIMARY KEY AUTOINCREMENT,'
'codrastreio TEXT VARCHAR(13) UNIQUE NOT NULL, objeto TEXT VARCHAR(50) NOT NULL)')
# Checks for new versions of the software
def CheckUpdates(event=None):
janela.unbind('<Enter>')
versao = urlopen('https://www.dropbox.com/s/61rpf1xg8qr1vh1/version_linux.txt?dl=true').read()
if float(versao) > float(__version__):
subprocess.call(
['notify-send', 'AP - Rastreio Correios', 'Há uma nova versão disponível. Baixe agora!'])
showinfo(title='Atualização', message='Há uma nova versão disponível. Baixe agora!')
webbrowser.open('https://github.com/Alexsussa/aprastreio/releases/')
class Rastreio:
def __init__(self, master=None, rastreio='', objeto=''):
self.rastreio = rastreio
self.objeto = objeto
self.c1 = Frame(master)
self.c1['padx'] = 5
self.c1['pady'] = 3
self.c1.pack()
self.c2 = Frame(master)
self.c2.pack()
self.c3 = Frame(master)
self.c3.pack()
self.c4 = Frame(master)
self.c4.pack()
self.c5 = Frame(master)
self.c5.pack()
# Top menu bar
menubar = Menu(janela)
arquivo = Menu(menubar, tearoff=0)
menubar.add_cascade(label='Arquivo', menu=arquivo)
menubar.add_separator()
arquivo.add_command(label='Sincronizar rastreios...',
command=lambda: Thread(target=self.NotifAltStatus).start(), accelerator='Ctrl+R')
# arquivo.add_command(label='Arquivar entregues', command=lambda: Thread(target=self.arquivarEntregues).start(), accelerator='Ctrl+B')
arquivo.add_command(label='Mover para entregues', command=lambda: Thread(target=self.arquivarRastreio).start(),
accelerator='Ctrl+B')
arquivo.add_command(label='Salvar', command=lambda: Thread(target=self.Cadastrar).start(), accelerator='Ctrl+S')
arquivo.add_command(label='Atualizar', command=lambda: Thread(target=self.Atualizar).start(),
accelerator='Ctrl+U')
arquivo.add_command(label='Deletar', command=lambda: Thread(target=self.Deletar).start(), accelerator='Ctrl+D')
arquivo.add_separator()
arquivo.add_command(label='Mostrar todos os rastreios',
command=lambda: {self.txtObjeto.config(values=self.listaTodos(event='<Button-1>')),
janela.bind('<<ComboboxSelected>>', self.BuscaTodos)})
arquivo.add_command(label='Mostrar apenas os entregues',
command=lambda: {self.txtObjeto.config(values=self.listaEntregues(event='<Button-1>')),
janela.bind('<<ComboboxSelected>>', self.BuscaEntregues)})
"""arquivo.add_command(label='Mostar apenas os pendentes',
command=lambda: {self.txtObjeto.config(values=self.listaPendentes(event='<Button-1>')),
janela.bind('<<ComboboxSelected>>', self.BuscaPendentes)})"""
arquivo.add_separator()
arquivo.add_command(label='Sair', command=janela.destroy, accelerator='Ctrl+Q')
janela.bind('<Control-q>', self.JanExit)
janela.bind('<Control-Q>', self.JanExit)
ajuda = Menu(menubar, tearoff=0)
menubar.add_cascade(label='Ajuda', menu=ajuda)
ajuda.add_command(label='GitHub AP Rastreio...', command=lambda: Thread(
target=self.NavLink('https://github.com/Alexsussa/aprastreio/')).start(), accelerator='Ctrl+G')
ajuda.add_command(label='Checar atualizações...', command=lambda: Thread(target=CheckUpdates).start(),
accelerator='Ctrl+K')
ajuda.add_separator()
ajuda.add_command(label='Sobre', command=self.Sobre, accelerator='Ctrl+H')
janela.bind('<Control-h>', self.Sobre)
janela.bind('<Control-H>', self.Sobre)
janela.bind('<Control-g>', lambda e: Thread(target=self.NavLink('https://github.com/Alexsussa/aprastreio/')))
janela.bind('<Control-G>', lambda e: Thread(target=self.NavLink('https://github.com/Alexsussa/aprastreio/')))
janela.bind('<Control-k>', CheckUpdates)
janela.bind('<Control-K>', CheckUpdates)
janela.bind('<Control-b>', lambda e: Thread(target=self.arquivarRastreio).start())
janela.bind('<Control-B>', lambda e: Thread(target=self.arquivarRastreio).start())
janela.config(menu=menubar)
# Program layout
self.lbRastreio = Label(self.c1, text='RASTREIO:', fg='black')
self.lbRastreio.pack(side=LEFT)
self.txtRastreio = Entry(self.c1, width=14, bg='white', fg='black', selectbackground='blue',
selectforeground='white')
self.txtRastreio.pack(side=LEFT, padx=2)
self.lbObjeto = Label(self.c1, text='OBJETO:', fg='black')
self.lbObjeto.pack(side=LEFT)
self.txtObjeto = Combobox(self.c1, width=32, background='white', foreground='black',
values=self.listaTodos(event='<Button-1>'))
self.txtObjeto.pack(side=LEFT, padx=2)
janela.bind('<<ComboboxSelected>>', self.BuscaTodos)
self.btnRastrear = Button(self.c1, text='RASTREAR', fg='black',
command=lambda: {Thread(target=self.Rastrear).start(), self.BuscaRastreio()})
self.btnRastrear.pack(side=LEFT, padx=2)
janela.bind('<Return>', lambda e: {Thread(target=self.Rastrear).start(), self.BuscaRastreio()})
janela.bind('<KP_Enter>', lambda e: {Thread(target=self.Rastrear).start(), self.BuscaRastreio()})
self.campo = ScrolledText(self.c2, width=77, height=30, bg='lightgray', fg='black', state='disable',
selectbackground='blue', font=('sans-serif', '10'))
self.campo.pack(fill='both', expand=True, pady=5)
self.whatsappimg = PhotoImage(file='imagens/WhatsApp.png')
self.emailimg = PhotoImage(file='imagens/Email.png')
self.salvarimg = PhotoImage(file='imagens/Salvar.png')
self.atualizarimg = PhotoImage(file='imagens/Atualizar.png')
self.deletarimg = PhotoImage(file='imagens/Lixeira.png')
self.btnWhatsapp = Button(image=self.whatsappimg, command=lambda: Thread(target=self.WhatsApp).start())
self.btnWhatsapp.pack(side=RIGHT)
ttips.Create(self.btnWhatsapp, text='Enviar por WhatsApp, Ctrl+W')
janela.bind('<Control-w>', lambda e: Thread(target=self.WhatsApp).start())
janela.bind('<Control-W>', lambda e: Thread(target=self.WhatsApp).start())
self.btnEmail = Button(image=self.emailimg, command=lambda: Thread(target=self.Email).start())
self.btnEmail.pack(side=RIGHT)
ttips.Create(self.btnEmail, text='Enviar por Email, Ctrl+E')
janela.bind('<Control-e>', lambda e: Thread(target=self.Email).start())
janela.bind('<Control-E>', lambda e: Thread(target=self.Email).start())
self.btnSalvar = Button(image=self.salvarimg, command=lambda: [self.RastreioExiste(), self.Cadastrar()])
self.btnSalvar.pack(side=LEFT, padx=1)
ttips.Create(self.btnSalvar, text='Salvar, Ctrl+S')
janela.bind('<Control-s>', lambda e: Thread(target=self.Cadastrar).start())
janela.bind('<Control-S>', lambda e: Thread(target=self.Cadastrar).start())
self.btnAtualizar = Button(image=self.atualizarimg, command=self.Atualizar)
self.btnAtualizar.pack(side=LEFT, padx=1)
ttips.Create(self.btnAtualizar, text='Atualizar, Ctrl+U')
janela.bind('<Control-u>', lambda e: Thread(target=self.Atualizar).start())
janela.bind('<Control-U>', lambda e: Thread(target=self.Atualizar).start())
self.btnDeletar = Button(image=self.deletarimg, command=self.Deletar)
self.btnDeletar.pack(side=LEFT, padx=1)
ttips.Create(self.btnDeletar, text='Deletar, Ctrl+D')
janela.bind('<Control-d>', lambda e: Thread(target=self.Deletar).start())
janela.bind('<Control-D>', lambda e: Thread(target=self.Deletar).start())
self.lbCreditos = Label(text='AP Correios - 2020')
self.lbCreditos.pack(side=TOP)
self.lbCreditos = Label(text='Software criado por <NAME>')
self.lbCreditos.pack(side=BOTTOM)
self.mouseMenu = Menu(janela, tearoff=0)
self.mouseMenu.add_command(label='Recortar')
self.mouseMenu.add_command(label='Copiar')
self.mouseMenu.add_command(label='Colar')
janela.bind('<Control-L>', self.Limpar)
janela.bind('<Enter>', lambda e: Thread(target=CheckUpdates).start())  # run once on first mouse-enter; CheckUpdates unbinds itself
janela.bind('<Control-r>', lambda e: Thread(target=self.NotifAltStatus).start())
janela.bind('<Control-R>', lambda e: Thread(target=self.NotifAltStatus).start())
# Moves a tracking code to the delivered list
def arquivarRastreio(self):
rastreio = self.txtRastreio.get()
objeto = self.txtObjeto.get()
if rastreio == '' or objeto == '':
showwarning(title='Aviso', message='Selecione um rastreio para mover.')
else:
# parameterized queries avoid SQL injection through the tracking code
c.execute('SELECT codrastreio FROM rastreio WHERE codrastreio = ?', (rastreio,))
c.execute('INSERT INTO entregues SELECT * FROM rastreio WHERE codrastreio = ?', (rastreio,))
c.execute('DELETE FROM rastreio WHERE codrastreio = ?', (rastreio,))
conexao.commit()
listaTodos.clear()
self.txtObjeto.config(values=self.listaTodos())
self.Limpar()
showinfo(title='Status', message=f'Rastreio {rastreio} arquivado.')
# Closes the main program
def JanExit(self, event=None):
janela.destroy()
def NavLink(self, url):
webbrowser.open_new_tab(url)
def Sobre(self, event=None):
popup = Toplevel()
sobre = Label(popup, text='AP - Rastreios v1.2')
sobre.pack(pady=20)
logo = PhotoImage(file='imagens/sobre.png')
bgimg = Label(popup, image=logo)
bgimg.pack()
bgimg.image = logo
mit = Label(popup, text='Licença\n', fg='blue', cursor='hand2')
mit.pack()
github = Label(popup, text='GitHub\n', fg='blue', cursor='hand2')
github.pack()
popup.title('Sobre')
popup.geometry('400x300')
popup.resizable(False, False)
popup.grab_set()
popup.focus_force()
popup.transient(janela)
mit.bind('<Button-1>', lambda e: Thread(
target=self.NavLink('https://github.com/Alexsussa/aprastreio/blob/master/LICENSE')).start())
github.bind('<Button-1>',
lambda e: Thread(target=self.NavLink('https://github.com/Alexsussa/aprastreio/')).start())
# Notification of tracking status changes
def NotifAltStatus(self, event=None):
try:
info = askyesno(title='ATUALIZANDO RASTREIOS',
message='Atualizando status dos rastreios...',
detail='Clique em SIM e aguarde até os objetos não entregues aparecerem na tela principal\nou clique em NÃO para atualizar manualmente mais tarde.')
if info == False:
pass
else:
janela.after(3600000, lambda: Thread(target=self.NotifAltStatus).start())
subprocess.call(['notify-send', 'AP - Rastreio Correios',
'Atualizando status dos rastreios...\n\nPor favor, aguarde...'])
c.execute('SELECT * FROM rastreio ORDER BY codrastreio')
self.Limpar()
for cod in c:
linkcorreios = urlopen(f'https://www.linkcorreios.com.br/?id={cod[1]}')
soup = BeautifulSoup(linkcorreios, 'html.parser')
lastStatus = soup.find('ul', attrs={'class': 'linha_status'})
last = lastStatus.text.strip().upper()
self.campo.delete(1.0, END)
if last[0:39] != 'STATUS: OBJETO ENTREGUE AO DESTINATÁRIO':
self.campo.config(state='normal')
self.campo.insert(INSERT, '-' * 80)
self.campo.insert(INSERT, '\n\nALTERAÇÃO DE STATUS')
self.campo.insert(INSERT, f'\n\n{cod[2]}\n{cod[1]}\n\n{last}\n\n', '-' * 80)
self.campo.config(state='disable')
subprocess.call(
['notify-send', 'AP - Rastreio Correios', f'ALTERAÇÃO DE STATUS\n\n{cod[2]}\n\n{last}\n\n'])
subprocess.call(['notify-send', 'AP - Rastreio Correios',
'Todos os objetos não entregues estão na tela principal.'])
except socket.error:
subprocess.call(['notify-send', 'AP - Rastreio Correios',
'Tempo de resposta do servidor excedido.\n\nSem conexão com a internet.'])
showerror(title='AVISO', message='Tempo de resposta do servidor excedido.\n\nSem conexão com a internet.')
def MenuMouse(self, event):
w = event.widget
self.mouseMenu.entryconfigure("Recortar", command=lambda: w.event_generate('<<Cut>>'))
self.mouseMenu.entryconfigure("Copiar", command=lambda: w.event_generate('<<Copy>>'))
self.mouseMenu.entryconfigure("Colar", command=lambda: w.event_generate('<<Paste>>'))
self.mouseMenu.tk_popup(event.x_root, event.y_root)
def Rastrear(self, event=None):
rastreio = self.txtRastreio.get()
objeto = self.txtObjeto.get()
if rastreio == '':
showwarning(title='AVISO', message='Digite um código de rastreio para rastrear.')
elif len(rastreio) != 13:
showwarning(title='AVISO', message='Rastreio deve conter 13 dígitos\nsendo duas letras iniciais e '
'duas letras finais, como no\nexemplo abaixo:\n\n "OJ123456789BR"')
else:
try:
subprocess.call(['notify-send', 'AP - Rastreio Correios', 'Rastreando encomenda...'])
linkcorreios = urlopen(f'https://www.linkcorreios.com.br/?id={rastreio}', timeout=20)
soup = BeautifulSoup(linkcorreios, 'html.parser')
status = soup.find('div', attrs={'class': 'singlepost'})
retorno = ''
if status:
retorno = status.text.strip().upper()
else:
retorno = 'O rastreamento não está disponível no momento:\n\n' \
'- Verifique se o código do objeto está correto;\n' \
'- O objeto pode demorar até 24 horas (após postagem) para ser rastreado no\nsistema dos Correios.'.strip().upper()
# print(retorno)
self.campo.config(state='normal')
self.campo.delete(1.0, END)
self.campo.insert(INSERT, retorno)
self.campo.config(state='disable')
lastStatus = soup.find('ul', attrs={'class': 'linha_status'})
if lastStatus:
last = lastStatus.text.strip().upper()
else:
last = 'O rastreamento não está disponível no momento:\n\n' \
'- Verifique se o código do objeto está correto;\n' \
'- O objeto pode demorar até 24 horas (após postagem) para ser rastreado no sistema dos Correios.'.strip().upper()
subprocess.call(['notify-send', 'AP - Rastreio Correios', f'{objeto}\n\n{last}'])
except socket.error:
subprocess.call(['notify-send', 'AP - Rastreio Correios',
'Tempo de resposta do servidor excedido.\n\nSem conexão com a internet.'])
showerror(title='AVISO',
message='Tempo de resposta do servidor excedido.\n\nSem conexão com a internet.')
"""except socket.timeout:
subprocess.call(
['notify-send', 'AP - Rastreio Correios', 'Tempo de resposta do servidor excedido.'])
showerror(title='AVISO', message='Tempo de resposta do servidor excedido.')"""
def WhatsApp(self):
rastreio = self.txtRastreio.get().strip().upper()
if rastreio == '':
showerror(title='AVISO', message='Para fazer o envio pelo WhatsApp, primeiro busque pelo rastreio.')
elif len(rastreio) != 13:
showwarning(title='AVISO', message='Rastreio deve conter 13 dígitos\nsendo duas letras iniciais e '
'duas letras finais, como no\nexemplo abaixo:\n\n "OJ123456789BR"')
else:
rastreio = self.txtRastreio.get()
webbrowser.open(
f'https://web.whatsapp.com/send?phone=&text=Ol%c3%a1.%20Clique%20no%20link%20para%20rastrear%20o%20objeto%20c%c3%b3digo%20{rastreio}%0ahttps%3a%2f%2fwww.linkcorreios.com.br%2f{rastreio}%3fw%3d1&source=&data=')
def Email(self):
rastreio = self.txtRastreio.get().strip().upper()
if not os.path.exists('/usr/bin/thunderbird') and not os.path.exists('/usr/bin/evolution'):
showwarning(title='AVISO', message='Nenhum cliente de email está instalado em seu computador.')
else:
rastreio = self.txtRastreio.get().strip().upper()
if rastreio == '':
showerror(title='AVISO', message='Para fazer o envio pelo Email, primeiro busque pelo rastreio.')
elif len(rastreio) != 13:
showwarning(title='AVISO', message='Rastreio deve
<filename>DeepLearning/RunNN.py
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 8 22:05:38 2018
@author: Chandar_S
"""
from cnn import cnn
from fnn import fnn
from rnn import rnn
from nn_utilities_py import nn_utilities
import tensorflow as tf
from scipy.misc import imread
import os
import numpy as np
import pylab
import matplotlib.pyplot as plt
import sys
import PIL
data_path = os.path.abspath('E:\MLData\\')
#data_path = os.path.abspath('./')+"/"
nn_utilities_obj = nn_utilities(data_path)
#letters = { 1: 'A', 2: 'B', 3: 'C', 4: 'D', 5: 'E', 6: 'F', 7: 'G', 8: 'H', 9: 'I', 10: 'J',
#11: 'K', 12: 'L', 13: 'M', 14: 'N', 15: 'O', 16: 'P', 17: 'Q', 18: 'R', 19: 'S', 20: 'T',
#21: 'U', 22: 'V', 23: 'W', 24: 'X', 25: 'Y', 26: 'Z', 27: '-'}
alphadigit = { 0:'0', 1:'1', 2:'2', 3:'3', 4:'4', 5:'5', 6:'6', 7:'7', 8:'8', 9:'9', 10:'A', 11:'B', 12:'C', 13:'D', 14:'E', 15:'F', 16:'G', 17:'H', 18:'I', 19:'J', 20:'K', 21:'L', 22:'M', 23:'N', 24:'O', 25:'P', 26:'Q', 27:'R', 28:'S', 29:'T', 30:'U', 31:'V', 32:'W', 33:'X', 34:'Y', 35:'Z', 36:'a', 37:'b', 38:'d', 39:'e', 40:'f', 41:'g', 42:'h', 43:'n', 44:'q', 45:'r', 46:'t'}
def run_test():
input_data = nn_utilities_obj.load_emnist_alphadigit_data()
# input_data = nn_utilities_obj.load_emnist_letters_data()
# nn_utilities_obj.load_PneumothoraxDataset()
# nn_utilities_obj.load_fashion_data()
# input_data = nn_utilities_obj.load_mnist_digit_data()
# nn_utilities_obj.prepare_digits_image_inputs()
print (input_data["x_train"][0])
print (input_data["y_train"].shape)
print (alphadigit[np.argmax(input_data["y_train"][100])])
pylab.imshow(input_data["x_train"][100].reshape(28,28), cmap='gray')
pylab.axis('off')
pylab.show()
# print (letters[np.argmax(input_data["y_train"][1100]) + 1])
def run_fnn():
fnn_obj = fnn(data_path)
# Flag makes it run with new simplified code and does not run validation accuracy for quicker response
legacy_run = False
## GET INPUT DATA
# input_data = nn_utilities_obj.prepare_digits_image_inputs()
input_data = nn_utilities_obj.load_mnist_digit_data()
# input_data = nn_utilities_obj.load_fashion_data()
## 2 LAYER FNN INPUTS
hiddenlayer_1_width = 256
hiddenlayer_2_width = 256
## Override the default learning rate
fnn_obj.learning_rate_var = 0.001
if legacy_run == True:
## CREATE FNN MODEL
optimizer, cost, accuracy, fnn_model = fnn_obj.create_model(fnn_obj.x, input_data["x_train"].shape[1], hiddenlayer_1_width, hiddenlayer_2_width, input_data["y_train"].shape[1])
else:
## CREATE FNN MODEL
optimizer, cost, accuracy, fnn_model = fnn_obj.create_simplified_model(fnn_obj.x, input_data["x_train"].shape[1], hiddenlayer_1_width, hiddenlayer_2_width, input_data["y_train"].shape[1] )
## TRAIN THE MODEL AND TEST PREDICTION
run_nn(fnn_obj, input_data, optimizer, cost, accuracy, fnn_model, "fnn/"+input_data["name"])
def run_cnn():
cnn_obj = cnn(data_path)
# Flag makes it run with new simplified code and does not run validation accuracy for quicker response
legacy_run = False
training = True
''' WE NEED THIS FOR LOOKING AT HEAT MAP OVER IMAGE'''
single_layer_fnn = True
## GET INPUT DATA
# input_data = nn_utilities_obj.prepare_digits_image_inputs()
# input_data = nn_utilities_obj.load_mnist_digit_data()
# input_data = nn_utilities_obj.load_emnist_alphadigit_data()
# input_data = nn_utilities_obj.load_emnist_letters_data()
# input_data = nn_utilities_obj.load_fashion_data()
input_data = nn_utilities_obj.load_PneumothoraxDataset()
## Override the default learning rate
cnn_obj.learning_rate_var = 0.0001
## 2 LAYER FNN INPUTS
hiddenlayer_1_width = 500
hiddenlayer_2_width = 500
## Assuming it's a SQUARE IMAGE
image_height = int(np.sqrt(input_data["x_train"].shape[1]))
image_width = image_height
if legacy_run == True:
## CREATE CNN & DNN MODEL
optimizer, cost, accuracy, cnn_fnn_model = cnn_obj.create_model([image_height, image_width], hiddenlayer_1_width, hiddenlayer_2_width, input_data["y_train"].shape[1], single_layer_fnn)
else:
## CREATE CNN & DNN MODEL
optimizer, cost, accuracy, cnn_fnn_model = cnn_obj.create_simplified_model([image_height, image_width], hiddenlayer_1_width, hiddenlayer_2_width, input_data["y_train"].shape[1], single_layer_fnn)
## TRAIN THE MODEL AND TEST PREDICTION
run_nn(cnn_obj, input_data, optimizer, cost, accuracy, cnn_fnn_model, "cnn\\" + input_data["name"] + "\\" + input_data["name"], True, training)
def run_rnn():
rnn_obj = rnn(data_path)
## GET INPUT DATA
input_data = nn_utilities_obj.prepare_digits_image_inputs()
# input_data = nn_utilities_obj.load_fashion_data()
## Override the default learning rate
rnn_obj.learning_rate_var = 0.0005
## Assuming it's a SQUARE IMAGE
image_height = int(np.sqrt(input_data["x_train"].shape[1]))
image_width = image_height
# Network Parameters
num_input = image_height # MNIST data input (img shape: 28*28)
timesteps = image_width # timesteps
num_hidden = 128 # hidden layer num of features
num_classes = 10 # MNIST total classes (0-9 digits)
## CREATE RNN MODEL
optimizer, cost, accuracy, rnn_model = rnn_obj.create_model(num_input, timesteps, num_hidden, num_classes)
input_data["x_train"] = np.reshape(input_data["x_train"],[input_data["x_train"].shape[0], timesteps,num_input])
input_data["x_validation"] = np.reshape(input_data["x_validation"],[input_data["x_validation"].shape[0], timesteps,num_input])
## TRAIN THE MODEL AND TEST PREDICTION
run_nn(rnn_obj, input_data, optimizer, cost, accuracy, rnn_model, "rnn/"+input_data["name"])
def run_nn(obj, input_data, optimizer, cost, accuracy, model, model_name=None, run_validation_accuracy=True, Training=True):
if Training:
# Python optimisation variables
training_epochs = 5
display_step = 100
batch_size = 200
quick_training = True
print ("Starting session")
#### TRAIN AND TEST NN
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# TRAIN
trained_model = obj.train_model(sess, model, training_epochs, display_step, batch_size, optimizer, cost, accuracy, input_data["x_train"], input_data["x_train_4D"], input_data["y_train"], input_data["x_validation"], input_data["y_validation"], quick_training, model_name, run_validation_accuracy)
else:
if input_data["name"] == "emnist_alpha_digit_data":
## RUN AN EXAMPLE AND SEE HOW PREDICTION WORKS ##
# images = ['Number-0.tif', 'Number-1.tif', 'Number-2.tif', 'Number-3.tif', 'Number-4.tif', 'Number-5.tif', 'Number-6.1.tif', 'Number-6.2.tif', 'Number-7.tif', 'Number-8.1.tif', 'Number-8.2.tif','Number-9.tif']
import cv2
cv2.imwrite("E:\MLData\Test\images\input1.jpg", 255 - input_data["x_validation"][11].reshape(28,28) )
cv2.imwrite("E:\MLData\Test\images\input2.jpg", 255 - input_data["x_validation"][100].reshape(28,28) )
cv2.imwrite("E:\MLData\Test\images\input3.jpg", 255 - input_data["x_validation"][8800].reshape(28,28) )
print (alphadigit[np.argmax(input_data["y_validation"][11])], alphadigit[np.argmax(input_data["y_validation"][100])], alphadigit[np.argmax(input_data["y_validation"][8800])])
images = ["input1.jpg", "input2.jpg", "input3.jpg", 'TestSheet1_Section1_VSubSection1_HSubSection8.jpg', 'TestSheet1_Section1_VSubSection1_HSubSection9.jpg', 'TestSheet1_Section1_VSubSection1_HSubSection10.jpg', 'TestSheet1_Section1_VSubSection1_HSubSection11.jpg','TestSheet1_Section1_VSubSection1_HSubSection12.jpg','TestSheet1_Section1_VSubSection1_HSubSection13.jpg','TestSheet1_Section1_VSubSection1_HSubSection14.jpg','TestSheet1_Section1_VSubSection1_HSubSection15.jpg','TestSheet1_Section1_VSubSection1_HSubSection16.jpg']
cols_count = int(len(images)/2) + (len(images) - int(len(images)/2)*2)
f, a = plt.subplots(nrows=2, ncols=cols_count, figsize=(8, 3),
sharex=True, sharey=True, squeeze=False)
img_nbr = 0
i = 0
for image_name in images:
img, prediction, prediction_confidence = test_mnist_model(model_name="cnn\\"+input_data["name"],img_name=image_name)
a[i][img_nbr].imshow(img, cmap='gray')
a[i][img_nbr].axis('off')
# title = str(prediction)
# print(prediction)
title = str(alphadigit[prediction]) + " (" + str(int(prediction_confidence)) + "%)"
a[i][img_nbr].set_title(title, fontsize=10)
img_nbr += 1
''' New row'''
if (img_nbr == cols_count):
i = i + 1
img_nbr = 0
f.show()
plt.draw()
plt.waitforbuttonpress()
elif input_data["name"] == "mnist_digit_data":
''''' TESTING '''
test = input_data["test"]
if (test is not None):
img_name = obj.rng.choice(test.filename)
filepath = os.path.join(data_path, 'Image', 'Numbers', 'Images', 'test', img_name)
img = imread(filepath, flatten=True)
# convert list to ndarray and PREP AS PER INPUT FORMAT
x_test = np.stack(img)
if len(input_data["x_train"].shape) == 2:
x_test = x_test.reshape(-1, input_data["x_train"].shape[1])
else:
x_test = x_test.reshape(-1, input_data["x_train"].shape[1], input_data["x_train"].shape[2])
## PREDICT AND VALIDATE
predicted_test = obj.predictvalue(trained_model, x_test)
print("Prediction is: ", predicted_test[0])
pylab.imshow(img, cmap='gray')
pylab.axis('off')
pylab.show()
'''' TESTING END'''
print ("Ending session")
elif input_data["name"] == "Pneumothorax_data":
## DO MIT CAM Analysis to print the Heatmap
CAM_analysis = True
if (CAM_analysis == True):
load_Pneumothorax_model(model_name, obj, input_data)
def test_mnist_model(model_name, img_name):
filepath = os.path.join(data_path, 'Test', 'Images', img_name)
from PIL import Image
basewidth = 28
img = Image.open(filepath).convert('L')
wpercent = (basewidth / float(img.size[0]))
hsize = int((float(img.size[1]) * float(wpercent)))
img_resized = 255 - np.array(img.resize((basewidth, hsize), PIL.Image.ANTIALIAS))
# convert list to ndarray and PREP AS PER INPUT FORMAT
x_test = np.stack(img_resized)
sess=tf.Session()
graph = tf.get_default_graph()
saver = tf.train.Saver()
print ("Restoring Model")
saver.restore(sess, data_path + "SavedModel\\"+model_name+".ckpt")
x = graph.get_tensor_by_name("x:0")
keep_prob = graph.get_tensor_by_name("keep_probability:0")
#Now, access the op that you want to run.
model = graph.get_tensor_by_name("fnn/fnn_Out/BiasAdd:0")
## PREDICT AND VALIDATE
try:
x_test_1 = x_test.reshape(-1, x_test.shape[0] * x_test.shape[1])
feed_dict ={x:x_test_1, keep_prob:1.0}
except:
x_test_2 = x_test.reshape(-1, x_test.shape[0], x_test.shape[1])
feed_dict ={x:x_test_2, keep_prob:1.0}
# predict = tf.argmax(model , 1)
predicted_pct = tf.nn.softmax(model) * 100
with sess:
# predicted_test = predict.eval(feed_dict)
# predicted_values = model.eval(feed_dict)
predicted_confidence = np.round(predicted_pct.eval(feed_dict), 0)
predicted_test = np.argmax(predicted_confidence, 1)
# print (predicted_test, predicted_confidence)
# print("Prediction is: ", predicted_test[0])
# pylab.imshow(img_resized, cmap='gray')
# pylab.title('Prediction is ' + str(predicted_test[0]))
# pylab.axis('off')
# pylab.show()
return img_resized, predicted_test[0], np.max(predicted_confidence)
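# --- Hypothetical usage sketch (assumes the checkpoint and test image exist) ---
#   img, pred, confidence = test_mnist_model(model_name='cnn\\emnist_alpha_digit_data',
#                                             img_name='input1.jpg')
#   print(alphadigit[pred], confidence)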
def load_Pneumothorax_model(model_name, obj, input_data):
with tf.Session() as sess:
saver = tf.train.Saver()
print ("Restoring Model")
saver.restore(sess, data_path + "SavedModel\\"+model_name+".ckpt")
print ("Starting with CAM Analysis")
"""DOING CAM Heatmaps Analysis"""
'''extract the features and weights using the function defined directly above '''
(feature_maps, dense_weights) = extract_features_weights(sess, obj) #TODO
# print("Feature Maps: "+str(feature_maps))
# print("Dense Weights: "+str(dense_weights))
'''TODO: compute the CAM for a pneumothorax detection using the function above'''
WHICH_OPTION_INDEX = 1
cam = compute_cam(WHICH_OPTION_INDEX, feature_maps, dense_weights)
## Assuming it's a SQUARE IMAGE
image_height = int(np.sqrt(input_data["x_train"].shape[1]))
image_width = image_height
''' upsample the CAM Tensor to a 28x28 image '''
cam_upsampled = tf.image.resize_bilinear(cam, [image_height,image_width])
inds = []
for check_index in range (1,20):
if np.argmax(input_data["y_validation"][check_index]) == WHICH_OPTION_INDEX:
inds.extend([check_index])
print (inds)
# inds= [79, 31]
input_data["y_validation"] = np.stack(input_data["y_validation"])
# print (type(input_data["x_validation"][1]))
# print (input_data["y_validation"][1])
for im, cl in zip(input_data["x_validation"][inds], input_data["y_validation"][inds]):
heatmap = sess.run(
cam_upsampled,
feed_dict={
obj.x: im[np.newaxis,:],
})
vis_cam(im, np.squeeze(heatmap), input_data)
"""DOING CAM Heatmaps Analysis"""
''' Extract the last Layer weights of CNN and FNN for CAM manipulation'''
def extract_features_weights(sess, cnn_obj):
#access feature map activations directly from the model declaration
feature_maps = cnn_obj.cnn_output
# graph = tf.get_default_graph()
# for op in graph.get_operations():
# print(op.name)
# we have implemented 2 different methods, so handling both scenarios
try:
#access the weights by searching by name
dense_weights = sess.graph.get_tensor_by_name('fnn/FNN_Output_Weight:0')
except:
#access the weights by searching by name
dense_weights = sess.graph.get_tensor_by_name('fnn/dense_layer/kernel:0')
return (feature_maps, dense_weights)
''' Forms a CAM operation given a class name, feature maps, and weights
Params:
- class_index: index of the class to measure
- fmap: (1 x h x w x d) tf.Tensor of activations from the final convolutional layer
- weights: (features
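# --- Hedged sketch of the truncated compute_cam (not from the original source) ---
# The body of compute_cam is cut off above.  A standard class-activation-map
# implementation consistent with how it is called in load_Pneumothorax_model
# (cam = compute_cam(class_index, feature_maps, dense_weights), later upsampled
# with tf.image.resize_bilinear) would weight each feature map by the dense-layer
# weight of the chosen class:
#
#   def compute_cam(class_index, fmap, weights):
#       class_weights = tf.gather(weights, class_index, axis=1)          # shape (d,)
#       cam = tf.reduce_sum(fmap * class_weights, axis=3, keepdims=True) # (1, h, w, 1)
#       return cam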
response = self._webclient.APICall('GET', u'object/%s/scenejs/' % objectpk, timeout=timeout)
geometries = []
for encodedGeometry in response['geometries']:
geometry = {}
positions = numpy.fromstring(base64.b64decode(encodedGeometry['positions_base64']), dtype=float)
positions.resize(len(positions) // 3, 3)
geometry['positions'] = positions
indices = numpy.fromstring(base64.b64decode(encodedGeometry['indices_base64']), dtype=numpy.uint32)
indices.resize(len(indices) // 3, 3)
geometry['indices'] = indices
geometries.append(geometry)
return geometries
#
# Sensor mappings related
#
def GetSceneSensorMapping(self, scenepk=None, usewebapi=True, timeout=5):
"""Return the mapping of camerafullname to cameraid. e.g. {'sourcecamera/ensenso_l_rectified': '150353', 'sourcecamera/ensenso_r_rectified':'150353_Right' ...}
"""
assert(usewebapi)
if scenepk is None:
scenepk = self.scenepk
instobjects = self._webclient.APICall('GET', u'scene/%s/instobject/' % scenepk, fields='attachedsensors,connectedBodies,object_pk,name', params={'limit': 0}, timeout=timeout)['objects']
sensormapping = {}
for instobject in instobjects:
if len(instobject.get('attachedsensors', [])) > 0:
attachedsensors = self._webclient.APICall('GET', u'robot/%s/attachedsensor/' % instobject['object_pk'])['attachedsensors']
for attachedsensor in attachedsensors:
camerafullname = '%s/%s' % (instobject['name'], attachedsensor['name'])
if 'hardware_id' in attachedsensor['sensordata']:
sensormapping[camerafullname] = attachedsensor['sensordata']['hardware_id']
else:
sensormapping[camerafullname] = None
log.warn(u'attached sensor %s does not have hardware_id', camerafullname)
if len(instobject.get('connectedBodies', [])) > 0:
connectedBodies = self._webclient.APICall('GET', u'robot/%s/connectedBody/' % instobject['object_pk'])['connectedBodies']
for connectedBody in connectedBodies:
connectedBodyScenePk = GetPrimaryKeyFromURI(connectedBody['url'])
connectedBodyInstObjects = self._webclient.APICall('GET', u'scene/%s/instobject/' % connectedBodyScenePk, fields='attachedsensors,object_pk,name', params={'limit': 0}, timeout=timeout)['objects']
for connectedBodyInstObject in connectedBodyInstObjects:
if len(connectedBodyInstObject.get('attachedsensors', [])) == 0:
continue
attachedsensors = self._webclient.APICall('GET', u'robot/%s/attachedsensor/' % connectedBodyInstObject['object_pk'])['attachedsensors']
for attachedsensor in attachedsensors:
camerafullname = '%s/%s_%s' % (instobject['name'], connectedBody['name'], attachedsensor['name'])
if 'hardware_id' in attachedsensor['sensordata']:
sensormapping[camerafullname] = attachedsensor['sensordata']['hardware_id']
else:
sensormapping[camerafullname] = None
log.warn(u'attached sensor %s does not have hardware_id', camerafullname)
return sensormapping
def SetSceneSensorMapping(self, sensormapping, scenepk=None, usewebapi=True, timeout=5):
"""
:param sensormapping: The mapping of camerafullname to cameraid. e.g. {'sourcecamera/ensenso_l_rectified': '150353', 'sourcecamera/ensenso_r_rectified':'150353_Right' ...}
"""
assert(usewebapi)
if scenepk is None:
scenepk = self.scenepk
instobjects = self._webclient.APICall('GET', u'scene/%s/instobject/' % scenepk, params={'limit': 0}, fields='attachedsensors,connectedBodies,object_pk,name', timeout=timeout)['objects']
cameracontainernames = set([camerafullname.split('/', 1)[0] for camerafullname in sensormapping.keys()])
sensormapping = dict(sensormapping)
for instobject in instobjects:
if instobject['name'] not in cameracontainernames:
continue
if len(instobject.get('attachedsensors', [])) > 0:
attachedsensors = self._webclient.APICall('GET', u'robot/%s/attachedsensor/' % instobject['object_pk'])['attachedsensors']
for attachedsensor in attachedsensors:
camerafullname = '%s/%s' % (instobject['name'], attachedsensor['name'])
if camerafullname in sensormapping.keys():
hardwareId = attachedsensor['sensordata'].get('hardware_id', None)
if hardwareId != sensormapping[camerafullname]:
self._webclient.APICall('PUT', u'robot/%s/attachedsensor/%s' % (instobject['object_pk'], attachedsensor['pk']), data={'sensordata': {'hardware_id': str(sensormapping[camerafullname])}})
del sensormapping[camerafullname]
if len(instobject.get('connectedBodies', [])) > 0:
connectedBodies = self._webclient.APICall('GET', u'robot/%s/connectedBody/' % instobject['object_pk'])['connectedBodies']
for connectedBody in connectedBodies:
connectedBodyScenePk = GetPrimaryKeyFromURI(connectedBody['url'])
connectedBodyInstObjects = self._webclient.APICall('GET', u'scene/%s/instobject/' % connectedBodyScenePk, params={'limit': 0}, fields='attachedsensors,object_pk,name', timeout=timeout)['objects']
for connectedBodyInstObject in connectedBodyInstObjects:
if len(connectedBodyInstObject.get('attachedsensors', [])) == 0:
continue
attachedsensors = self._webclient.APICall('GET', u'robot/%s/attachedsensor/' % connectedBodyInstObject['object_pk'])['attachedsensors']
for attachedsensor in attachedsensors:
camerafullname = '%s/%s_%s' % (instobject['name'], connectedBody['name'], attachedsensor['name'])
if camerafullname in sensormapping.keys():
hardwareId = attachedsensor['sensordata'].get('hardware_id', None)
if hardwareId != sensormapping[camerafullname]:
self._webclient.APICall('PUT', u'robot/%s/attachedsensor/%s' % (connectedBodyInstObject['object_pk'], attachedsensor['pk']), data={'sensordata': {'hardware_id': str(sensormapping[camerafullname])}})
del sensormapping[camerafullname]
if sensormapping:
raise ControllerClientError(_('some sensors are not found in scene: %r') % sensormapping.keys())
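# --- Hypothetical usage sketch (assumes `client` is an authenticated instance of this
# class; the camera name and hardware ids are made-up examples) ---
#
#   mapping = client.GetSceneSensorMapping()
#   # e.g. {'sourcecamera/ensenso_l_rectified': '150353', ...}
#   mapping['sourcecamera/ensenso_l_rectified'] = '150354'
#   client.SetSceneSensorMapping(mapping)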
#
# File related
#
def UploadFile(self, f, filename=None, timeout=10):
"""Uploads a file managed by file handle f
Returns:
(dict) json response
"""
data = {}
if filename:
data['filename'] = filename
response = self._webclient.Request('POST', '/fileupload', files={'file': f}, data=data, timeout=timeout)
if response.status_code in (200,):
try:
return response.json()
except Exception as e:
log.exception('failed to upload file: %s', e)
raise ControllerClientError(response.content.decode('utf-8'))
def DeleteFile(self, filename, timeout=10):
response = self._webclient.Request('POST', '/file/delete/', data={'filename': filename}, timeout=timeout)
if response.status_code in (200,):
try:
return response.json()['filename']
except Exception as e:
log.exception('failed to delete file: %s', e)
raise ControllerClientError(response.content.decode('utf-8'))
def DeleteFiles(self, filenames, timeout=10):
response = self._webclient.Request('POST', '/file/delete/', data={'filenames': filenames}, timeout=timeout)
if response.status_code in (200,):
try:
return response.json()['filenames']
except Exception as e:
log.exception('failed to delete file: %s', e)
raise ControllerClientError(response.content.decode('utf-8'))
def ListFiles(self, dirname='', timeout=2):
response = self._webclient.Request('GET', '/file/list/', params={'dirname': dirname}, timeout=timeout)
if response.status_code in (200, 404):
try:
return response.json()
except Exception as e:
log.exception('failed to list files: %s', e)
raise ControllerClientError(response.content.decode('utf-8'))
def FileExists(self, path, timeout=5):
"""Check if a file exists on server
"""
response = self._webclient.Request('HEAD', u'/u/%s/%s' % (self.controllerusername, path.rstrip('/')), timeout=timeout)
if response.status_code not in [200, 301, 404]:
raise ControllerClientError(response.content.decode('utf-8'))
return response.status_code != 404
def DownloadFile(self, filename, ifmodifiedsince=None, timeout=5):
"""Downloads a file given filename
:return: A streaming response
"""
headers = {}
if ifmodifiedsince:
headers['If-Modified-Since'] = _FormatHTTPDate(ifmodifiedsince)
response = self._webclient.Request('GET', u'/u/%s/%s' % (self.controllerusername, filename), headers=headers, stream=True, timeout=timeout)
if ifmodifiedsince and response.status_code == 304:
return response
if response.status_code != 200:
raise ControllerClientError(response.content.decode('utf-8'))
return response
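# --- Hypothetical usage sketch: stream a download to local disk ---
# DownloadFile returns a streaming requests response, so the body can be written
# out in chunks without loading it all into memory (file names are made up):
#
#   response = client.DownloadFile('detection/visualization.png')
#   with open('/tmp/visualization.png', 'wb') as f:
#       for chunk in response.iter_content(chunk_size=65536):
#           f.write(chunk)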
def FlushAndDownloadFile(self, filename, timeout=5):
"""Flush pending cache changes and download the given file.
:return: A streaming response
"""
response = self._webclient.Request('GET', '/file/download/', params={'filename': filename}, stream=True, timeout=timeout)
if response.status_code != 200:
raise ControllerClientError(response.content.decode('utf-8'))
return response
def FlushAndHeadFile(self, filename, timeout=5):
"""Flush and perform a HEAD operation on the given filename to retrieve metadata.
:return: A dict containing "modified (datetime.datetime)" and "size (int)"
"""
response = self._webclient.Request('HEAD', '/file/download/', params={'filename': filename}, timeout=timeout)
if response.status_code != 200:
raise ControllerClientError(response.content.decode('utf-8'))
return {
'modified': datetime.datetime(*email.utils.parsedate(response.headers['Last-Modified'])[:6]),
'size': int(response.headers['Content-Length']),
}
def HeadFile(self, filename, timeout=5):
"""Perform a HEAD operation on the given filename to retrieve metadata.
:return: A dict containing "modified (datetime.datetime)" and "size (int)"
"""
path = u'/u/%s/%s' % (self.controllerusername, filename.rstrip('/'))
response = self._webclient.Request('HEAD', path, timeout=timeout)
if response.status_code not in [200]:
raise ControllerClientError(response.content.decode('utf-8'))
return {
'modified': datetime.datetime(*email.utils.parsedate(response.headers['Last-Modified'])[:6]),
'size': int(response.headers['Content-Length']),
}
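# --- Hypothetical usage sketch: conditional download via If-Modified-Since ---
#   meta = client.HeadFile('detection/visualization.png')
#   response = client.DownloadFile('detection/visualization.png', ifmodifiedsince=meta['modified'])
#   if response.status_code == 304:
#       pass  # the local copy is still up to date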
def FlushCache(self, timeout=5):
"""Flush pending changes in cache to disk
"""
response = self._webclient.Request('POST', '/flushcache/', timeout=timeout)
if response.status_code != 200:
raise ControllerClientError(response.content.decode('utf-8'))
#
# Log related
#
def GetUserLog(self, category, level='DEBUG', keyword=None, limit=None, cursor=None, includecursor=False, forward=False, timeout=2):
"""Get the user log from the controller.
"""
params = {
'keyword': (keyword or '').strip(),
'cursor': (cursor or '').strip(),
'includecursor': 'true' if includecursor else 'false',
'forward': 'true' if forward else 'false',
'limit': str(limit or 0),
'level': level,
}
response = self._webclient.Request('GET', '/log/user/%s/' % category, params=params, timeout=timeout)
if response.status_code != 200:
raise ControllerClientError(_('Failed to retrieve user log, status code is %d') % response.status_code)
return response.json()
#
# Query list of scenepks based on barcode field
#
def QueryScenePKsByBarcodes(self, barcodes, timeout=2):
response = self._webclient.Request('GET', '/query/barcodes/', params={'barcodes': ','.join(barcodes)})
if response.status_code != 200:
raise ControllerClientError(_('Failed to query scenes based on barcode, status code is %d') % response.status_code)
return response.json()
#
# Report stats to registration controller
#
def ReportStats(self, data, timeout=5):
response = self._webclient.Request('POST', '/stats/', data=json.dumps(data), headers={'Content-Type': 'application/json'}, timeout=timeout)
if response.status_code != 200:
raise ControllerClientError(_('Failed to upload stats, status code is %d') % response.status_code)
#
# Config.
#
def GetConfig(self, filename=None, timeout=5):
"""Retrieve configuration file content from controller.
:param filename: optional, can be one of controllersystem.conf, binpickingsystem.conf, teachworkersystem.conf, robotbridges.conf.json
:return: configuration file content dictionary
"""
path = '/config/'
if filename:
path = '/config/%s/' % filename
response = self._webclient.Request('GET', path, timeout=timeout)
if response.status_code != 200:
raise ControllerClientError(_('Failed to retrieve configuration from controller, status code is %d') % response.status_code)
return response.json()
def SetConfig(self, data, filename=None, timeout=5):
"""Set configuration file content to controller.
:param data: content dictionary in its entirety
:param filename: optional, can be one of controllersystem.conf, binpickingsystem.conf, teachworkersystem.conf, robotbridges.conf.json
"""
path = '/config/'
if filename:
path = '/config/%s/' % filename
response = self._webclient.Request('PUT', path, data=json.dumps(data), headers={'Content-Type': 'application/json'}, timeout=timeout)
if response.status_code not in (200, 202):
raise ControllerClientError(_('Failed to set configuration to controller, status code is %d') % response.status_code)
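# --- Hypothetical usage sketch: read-modify-write of a config file ---
# GetConfig/SetConfig exchange the whole content dictionary, so the usual pattern
# is to fetch, patch and write back (the key below is a made-up example):
#
#   conf = client.GetConfig('binpickingsystem.conf')
#   conf['someParameter'] = 42
#   client.SetConfig(conf, 'binpickingsystem.conf')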
def GetSystemInfo(self, timeout=3):
response = self._webclient.Request('GET', '/systeminfo/')
if response.status_code != 200:
raise ControllerClientError(_('Failed to retrieve system info from controller, status code is %d') % response.status_code)
return response.json()
#
# Reference Object PKs.
#
def ModifySceneAddReferenceObjectPK(self, scenepk, referenceobjectpk, timeout=5):
return self.ModifySceneAddReferenceObjectPKs(scenepk, [referenceobjectpk], timeout=timeout)
def ModifySceneAddReferenceObjectPKs(self, scenepk, referenceobjectpks, timeout=5):
"""
Add multiple referenceobjectpks to the scene.
"""
response = self._webclient.Request('POST', '/referenceobjectpks/add/', data=json.dumps({
'scenepk': scenepk,
'referenceobjectpks': referenceobjectpks,
}), headers={'Content-Type': 'application/json'}, timeout=timeout)
if response.status_code != 200:
raise ControllerClientError(_('Failed to add referenceobjectpks %r to scene %r, status code is %d') % (referenceobjectpks, scenepk, response.status_code))
def ModifySceneRemoveReferenceObjectPK(self, scenepk, referenceobjectpk, timeout=5):
return self.ModifySceneRemoveReferenceObjectPKs(scenepk, [referenceobjectpk], timeout=timeout)
def ModifySceneRemoveReferenceObjectPKs(self, scenepk, referenceobjectpks, timeout=5):
"""
Remove multiple referenceobjectpks from the scene.
"""
response = self._webclient.Request('POST', '/referenceobjectpks/remove/', data=json.dumps({
'scenepk': scenepk,
'referenceobjectpks': referenceobjectpks,
}), headers={'Content-Type': 'application/json'}, timeout=timeout)
if response.status_code != 200:
raise ControllerClientError(_('Failed to remove referenceobjectpks %r from scene %r, status code is %d') % (referenceobjectpks, scenepk, response.status_code))
#
# ITL program related
#
def GetITLPrograms(self, fields=None, offset=0, limit=0, usewebapi=True, timeout=5, **kwargs):
assert(usewebapi)
params = {
'offset': offset,
'limit': limit,
}
params.update(kwargs)
return self.ObjectsWrapper(self._webclient.APICall('GET', u'itl/', fields=fields, timeout=timeout, params=params))
def GetITLProgram(self, programName, fields=None, usewebapi=True, timeout=5):
assert(usewebapi)
return self._webclient.APICall('GET', u'itl/%s/' % programName, fields=fields, timeout=timeout)
def CreateITLProgram(self, data, fields=None, usewebapi=True, timeout=5):
assert(usewebapi)
return self._webclient.APICall('POST', u'itl/', data=data, fields=fields, timeout=timeout)
def SetITLProgram(self,
<filename>gae_app/lib/fixture/loadable/loadable.py
"""Abstract classes for :class:`fixture.base.Fixture` descendants that load / unload data
See :ref:`Using LoadableFixture<using-loadable-fixture>` for examples.
"""
# from __future__ import with_statement
__all__ = ['LoadableFixture', 'EnvLoadableFixture', 'DBLoadableFixture', 'DeferredStoredObject']
import sys, types
from fixture.base import Fixture
from fixture.util import ObjRegistry, _mklog
from fixture.style import OriginalStyle
from fixture.dataset import Ref, dataset_registry, DataRow, is_rowlike
from fixture.exc import UninitializedError, LoadError, UnloadError, StorageMediaNotFound
import logging
log = _mklog("fixture.loadable")
treelog = _mklog("fixture.loadable.tree")
class StorageMediumAdapter(object):
"""common interface for working with storable objects.
"""
def __init__(self, medium, dataset):
self.medium = medium
self.dataset = dataset
self.transaction = None
def __getattr__(self, name):
return getattr(self.obj, name)
def __repr__(self):
return "%s at %s for %s" % (
self.__class__.__name__, hex(id(self)), self.medium)
def clear(self, obj):
"""Must clear the stored object.
"""
raise NotImplementedError
def clearall(self):
"""Must clear all stored objects.
"""
log.info("CLEARING stored objects for %s", self.dataset)
for obj in self.dataset.meta._stored_objects:
try:
self.clear(obj)
except Exception, e:
etype, val, tb = sys.exc_info()
raise UnloadError(etype, val, self.dataset,
stored_object=obj), None, tb
def save(self, row, column_vals):
"""Given a DataRow, must save it somehow.
column_vals is an iterable of (column_name, column_value)
"""
raise NotImplementedError
def visit_loader(self, loader):
"""A chance to visit the LoadableFixture object.
By default it does nothing.
"""
pass
class LoadQueue(ObjRegistry):
"""Keeps track of what class instances were loaded.
"level" is used like so:
The lower the level, the lower that object is on the foreign key chain.
As the level increases, this means more foreign objects depend on the
local object. Thus, objects need to be unloaded starting at the lowest
level and working up. Also, since objects can appear multiple times in
foreign key chains, the queue only acknowledges the object at its
highest level, since this will ensure all dependencies get unloaded
before it.
"""
def __init__(self):
ObjRegistry.__init__(self)
self.tree = {}
self.limit = {}
def __repr__(self):
return "<%s at %s>" % (
self.__class__.__name__, hex(id(self)))
def _pushid(self, id, level):
if id in self.limit:
# only store the object at its highest level:
if level > self.limit[id]:
self.tree[self.limit[id]].remove(id)
del self.limit[id]
else:
return
self.tree.setdefault(level, [])
self.tree[level].append(id)
self.limit[id] = level
def clear(self):
"""clear internal registry"""
ObjRegistry.clear(self)
# this is an attempt to free up refs to database connections:
self.tree = {}
self.limit = {}
def register(self, obj, level):
"""register this object as "loaded" at level
"""
id = ObjRegistry.register(self, obj)
self._pushid(id, level)
return id
def referenced(self, obj, level):
"""tell the queue that this object was referenced again at level.
"""
id = self.id(obj)
self._pushid(id, level)
def to_unload(self):
"""yields a list of objects in an order suitable for unloading.
"""
level_nums = self.tree.keys()
level_nums.sort()
treelog.info("*** unload order ***")
for level in level_nums:
unload_queue = self.tree[level]
verbose_obj = []
for id in unload_queue:
obj = self.registry[id]
verbose_obj.append(obj.__class__.__name__)
yield obj
treelog.info("%s. %s", level, verbose_obj)
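# --- Illustrative note (not in the original source) on the LoadQueue levels ---
# If dataset A (level 1) references B (level 2) and B references C (level 3),
# while A also references C directly (which would give level 2), C is kept only
# at its highest level (3).  to_unload() then yields the lowest levels first, so
# A is cleared before B and B before C -- dependents are always unloaded before
# the datasets they depend on.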
class LoadableFixture(Fixture):
"""
knows how to load data into something useful.
This is an abstract class and cannot be used directly. You can use a
LoadableFixture that already knows how to load into a specific medium,
such as SQLAlchemyFixture, or create your own to load
DataSet objects into custom storage media.
Keyword Arguments:
dataclass
class to instantiate with datasets (defaults to that of Fixture)
style
a Style object to translate names with (defaults to NamedDataStyle)
medium
optional LoadableFixture.StorageMediumAdapter to store DataSet
objects with
"""
style = OriginalStyle()
dataclass = Fixture.dataclass
def __init__(self, style=None, medium=None, **kw):
Fixture.__init__(self, loader=self, **kw)
if style:
self.style = style
if medium:
self.Medium = medium
self.loaded = None
StorageMediumAdapter = StorageMediumAdapter
Medium = StorageMediumAdapter
StorageMediaNotFound = StorageMediaNotFound
LoadQueue = LoadQueue
def attach_storage_medium(self, ds):
"""attach a :class:`StorageMediumAdapter` to DataSet"""
raise NotImplementedError
def begin(self, unloading=False):
"""begin loading"""
if not unloading:
self.loaded = self.LoadQueue()
def commit(self):
"""commit load transaction"""
raise NotImplementedError
def load(self, data):
"""load data"""
def loader():
for ds in data:
self.load_dataset(ds)
self.wrap_in_transaction(loader, unloading=False)
def load_dataset(self, ds, level=1):
"""load this dataset and all its dependent datasets.
level is essentially the order of processing (going from dataset to
dependent datasets). Child datasets are always loaded before the
parent. The level is important for visualizing the chain of
dependencies : 0 is the bottom, and thus should be the first set of
objects unloaded
"""
is_parent = level==1
levsep = is_parent and "/--------" or "|__.."
treelog.info(
"%s%s%s (%s)", level * ' ', levsep, ds.__class__.__name__,
(is_parent and "parent" or level))
for ref_ds in ds.meta.references:
r = ref_ds.shared_instance(default_refclass=self.dataclass)
new_level = level+1
self.load_dataset(r, level=new_level)
self.attach_storage_medium(ds)
if ds in self.loaded:
# keep track of its order but don't actually load it...
self.loaded.referenced(ds, level)
return
log.info("LOADING rows in %s", ds)
ds.meta.storage_medium.visit_loader(self)
registered = False
for key, row in ds:
try:
self.resolve_row_references(ds, row)
if not isinstance(row, DataRow):
row = row(ds)
def column_vals():
for c in row.columns():
yield (c, self.resolve_stored_object(getattr(row, c)))
obj = ds.meta.storage_medium.save(row, column_vals())
ds.meta._stored_objects.store(key, obj)
# save the instance in place of the class...
ds._setdata(key, row)
if not registered:
self.loaded.register(ds, level)
registered = True
except Exception, e:
etype, val, tb = sys.exc_info()
raise LoadError(etype, val, ds, key=key, row=row), None, tb
def resolve_row_references(self, current_dataset, row):
"""resolve this DataRow object's referenced values.
"""
def resolved_rowlike(rowlike):
key = rowlike.__name__
if rowlike._dataset is type(current_dataset):
return DeferredStoredObject(rowlike._dataset, key)
loaded_ds = self.loaded[rowlike._dataset]
return loaded_ds.meta._stored_objects.get_object(key)
def resolve_stored_object(candidate):
if is_rowlike(candidate):
return resolved_rowlike(candidate)
else:
# then it is the stored object itself. this would happen if
# there is a reciprocal foreign key (i.e. organization has a
# parent organization)
return candidate
for name in row.columns():
val = getattr(row, name)
if type(val) in (types.ListType, types.TupleType):
# i.e. categories = [python, ruby]
setattr(row, name, map(resolve_stored_object, val))
elif is_rowlike(val):
# i.e. category = python
setattr(row, name, resolved_rowlike(val))
elif isinstance(val, Ref.Value):
# i.e. category_id = python.id.
ref = val.ref
# now the ref will return the attribute from a stored object
# when __get__ is invoked
ref.dataset_obj = self.loaded[ref.dataset_class]
def rollback(self):
"""rollback load transaction"""
raise NotImplementedError
def then_finally(self, unloading=False):
"""called in a finally block after load transaction has begun"""
pass
def unload(self):
"""unload data"""
if self.loaded is None:
raise UninitializedError(
"Cannot unload data because it has not yet been loaded in this "
"process. Call data.setup() before data.teardown()")
def unloader():
for dataset in self.loaded.to_unload():
self.unload_dataset(dataset)
self.loaded.clear()
dataset_registry.clear()
self.wrap_in_transaction(unloader, unloading=True)
def unload_dataset(self, dataset):
"""unload data stored for this dataset"""
dataset.meta.storage_medium.clearall()
def wrap_in_transaction(self, routine, unloading=False):
"""call routine in a load transaction"""
self.begin(unloading=unloading)
try:
try:
routine()
except:
self.rollback()
raise
else:
self.commit()
finally:
self.then_finally(unloading=unloading)
class EnvLoadableFixture(LoadableFixture):
"""An abstract fixture that can resolve DataSet objects from an env.
Keyword "env" should be a dict or a module if not None.
According to the style rules, the env will be used to find objects by name.
"""
def __init__(self, env=None, **kw):
LoadableFixture.__init__(self, **kw)
self.env = env
def attach_storage_medium(self, ds):
"""Lookup a storage medium in the ``env`` and attach it to a DataSet.
A storage medium is looked up by name. If a specific name has not been declared in the DataSet
then it will be guessed using the :meth:`Style.guess_storable_name <fixture.style.Style.guess_storable_name>` method.
Once a name is found (typically the name of a DataSet class, say, EmployeeData) then it is looked up
in the ``env`` which is expected to be a dict or module like object.
The method first tries ``env.get('EmployeeData')`` then ``getattr(env, 'EmployeeData')``.
The return value is the storage medium (i.e. a data mapper for the Employees table)
Note that a :mod:`style <fixture.style>` might translate a name to maintain a consistent
naming scheme between DataSet classes and data mappers.
"""
if ds.meta.storage_medium is not None:
# already attached...
return
storable = ds.meta.storable
if not storable:
if not ds.meta.storable_name:
ds.meta.storable_name = self.style.guess_storable_name(
ds.__class__.__name__)
if
lbl='입체/종이퍼즐'),
1058: SChildCategory(b=array([24]), m=array([73]), lbl='키보드/마우스 세트'),
1059: SChildCategory(b=array([33]), m=array([368]), lbl='일반세탁기'),
1060: SChildCategory(b=array([41]), m=array([369]), lbl='탄산음료'),
1061: SChildCategory(b=array([27]), m=array([370]), lbl='치즈'),
1062: SChildCategory(b=array([37]), m=array([178]), lbl='헤어왁스'),
1063: SChildCategory(b=array([6]), m=array([274]), lbl='야구복'),
1064: SChildCategory(b=array([26]), m=array([321]), lbl='전원식 비데'),
1065: SChildCategory(b=array([43]), m=array([215]), lbl='단열용품'),
1066: SChildCategory(b=array([15]), m=array([285]), lbl='멀티형 에어컨'),
1067: SChildCategory(b=array([13]), m=array([101]), lbl='수영가방'),
1068: SChildCategory(b=array([22]), m=array([202]), lbl='부츠'),
1069: SChildCategory(b=array([4]), m=array([4]), lbl='돌침대'),
1070: SChildCategory(b=array([24]), m=array([31]), lbl='포토프린터'),
1071: SChildCategory(b=array([51]), m=array([371]), lbl='반찬류'),
1072: SChildCategory(b=array([39]), m=array([372]), lbl='외장 광학드라이브'),
1073: SChildCategory(b=array([5]), m=array([276]), lbl='사각/드로즈팬티'),
1074: SChildCategory(b=array([2]), m=array([300]), lbl='오뚝이'),
1075: SChildCategory(b=array([20]), m=array([60]), lbl='유색보석 귀걸이'),
1076: SChildCategory(b=array([42]), m=array([373]), lbl='일반가스레인지'),
1077: SChildCategory(b=array([5]), m=array([64]), lbl='아동 잠옷'),
1078: SChildCategory(b=array([7]), m=array([28]), lbl='정장자켓'),
1079: SChildCategory(b=array([7]), m=array([88]), lbl='밀리터리/항공점퍼'),
1080: SChildCategory(b=array([2]), m=array([113]), lbl='기타 역할놀이'),
1081: SChildCategory(b=array([16]), m=array([244]), lbl='식기건조대'),
1082: SChildCategory(b=array([10]), m=array([9]), lbl='온습도계'),
1083: SChildCategory(b=array([27]), m=array([270]), lbl='햄통조림'),
1084: SChildCategory(b=array([27]), m=array([211]), lbl='초콜릿'),
1085: SChildCategory(b=array([33]), m=array([374]), lbl='디지털도어락'),
1086: SChildCategory(b=array([24]), m=array([73]), lbl='타블렛'),
1087: SChildCategory(b=array([43]), m=array([109]), lbl='각티슈'),
1088: SChildCategory(b=array([16]), m=array([258]), lbl='수세미/주방솔'),
1089: SChildCategory(b=array([1]), m=array([375]), lbl='바이올린'),
1090: SChildCategory(b=array([18]), m=array([21]), lbl='트러블케어'),
1091: SChildCategory(b=array([18]), m=array([315]), lbl='남성 스킨'),
1092: SChildCategory(b=array([13]), m=array([293]), lbl='스키/보드복 자켓'),
1093: SChildCategory(b=array([40]), m=array([191]), lbl='휴지걸이/디스펜서'),
1094: SChildCategory(b=array([40]), m=array([125]), lbl='세탁솔'),
1095: SChildCategory(b=array([3]), m=array([3]), lbl='이어캡'),
1096: SChildCategory(b=array([40]), m=array([148]), lbl='울/실크세제'),
1097: SChildCategory(b=array([42]), m=array([299]), lbl='양문형냉장고'),
1098: SChildCategory(b=array([12]), m=array([326]), lbl='기타가방'),
1099: SChildCategory(b=array([51]), m=array([240]), lbl='멸치'),
1100: SChildCategory(b=array([44]), m=array([117]), lbl='자전거 모자/두건'),
1101: SChildCategory(b=array([23]), m=array([288]), lbl='유아 가운'),
1102: SChildCategory(b=array([34]), m=array([262]), lbl='차량용 보호필름'),
1103: SChildCategory(b=array([15]), m=array([268]), lbl='에어워셔'),
1104: SChildCategory(b=array([23]), m=array([29]), lbl='유아 크림'),
1105: SChildCategory(b=array([13]), m=array([152]), lbl='셔틀콕'),
1106: SChildCategory(b=array([15]), m=array([18]), lbl='돈풍기'),
1107: SChildCategory(b=array([12]), m=array([170]), lbl='나침반'),
1108: SChildCategory(b=array([11]), m=array([220]), lbl='인테리어 파티션'),
1109: SChildCategory(b=array([9]), m=array([111]), lbl='트위드자켓'),
1110: SChildCategory(b=array([27]), m=array([211]), lbl='젤리/푸딩'),
1111: SChildCategory(b=array([50]), m=array([350]), lbl='관상어사료'),
1112: SChildCategory(b=array([33]), m=array([269]), lbl='정수기 렌탈'),
1113: SChildCategory(b=array([4]), m=array([275]), lbl='화장대의자'),
1114: SChildCategory(b=array([40]), m=array([183]), lbl='유리닦이'),
1115: SChildCategory(b=array([42]), m=array([377]), lbl='스탠드형 김치냉장고'),
1116: SChildCategory(b=array([28]), m=array([58]), lbl='에어렌치'),
1117: SChildCategory(b=array([51]), m=array([378]), lbl='곶감/반건시'),
1118: SChildCategory(b=array([16]), m=array([305]), lbl='컵받침/컵뚜껑'),
1119: SChildCategory(b=array([2]), m=array([199]), lbl='헬기'),
1120: SChildCategory(b=array([7, 9]), m=array([233]), lbl='면/리넨조끼'),
1121: SChildCategory(b=array([28]), m=array([86]), lbl='컷소'),
1122: SChildCategory(b=array([9]), m=array([89]), lbl='모직 스커트'),
1123: SChildCategory(b=array([6]), m=array([231]), lbl='테니스화'),
1124: SChildCategory(b=array([23]), m=array([379]), lbl='유모차 완구'),
1125: SChildCategory(b=array([9]), m=array([39]), lbl='더플코트'),
1126: SChildCategory(b=array([32]), m=array([57]), lbl='카메라배터리'),
1127: SChildCategory(b=array([24]), m=array([255]), lbl='외장하드'),
1128: SChildCategory(b=array([2]), m=array([23]), lbl='헝겊책'),
1129: SChildCategory(b=array([1]), m=array([346]), lbl='우쿨렐레'),
1130: SChildCategory(b=array([39]), m=array([98]), lbl='VGA용 쿨러'),
1131: SChildCategory(b=array([7]), m=array([85]), lbl='카라넥니트'),
1132: SChildCategory(b=array([13]), m=array([380]), lbl='스키/보드 고글'),
1133: SChildCategory(b=array([2]), m=array([23]), lbl='멜로디완구'),
1134: SChildCategory(b=array([12]), m=array([172]), lbl='등산장갑'),
1135: SChildCategory(b=array([24]), m=array([206]), lbl='KVM/모니터케이블'),
1136: SChildCategory(b=array([28]), m=array([65]), lbl='산업용저울'),
1137: SChildCategory(b=array([2]), m=array([300]), lbl='걸음마완구'),
1138: SChildCategory(b=array([34]), m=array([381]), lbl='오일필터'),
1139: SChildCategory(b=array([38]), m=array([147]), lbl='사운드 바'),
1140: SChildCategory(b=array([30]), m=array([195]), lbl='골프벨트'),
1141: SChildCategory(b=array([2]), m=array([199]), lbl='미니카'),
1142: SChildCategory(b=array([34]), m=array([136]), lbl='차량용 바닥매트'),
1143: SChildCategory(b=array([9]), m=array([43]), lbl='오프숄더 티셔츠'),
1144: SChildCategory(b=array([9]), m=array([89]), lbl='니트 스커트'),
1145: SChildCategory(b=array([34]), m=array([262]), lbl='차량용 보수용품'),
1146: SChildCategory(b=array([38]), m=array([280]), lbl='리모컨'),
1147: SChildCategory(b=array([30]), m=array([195]), lbl='골프우산'),
1148: SChildCategory(b=array([48]), m=array([382]), lbl='3단계분유'),
1149: SChildCategory(b=array([16]), m=array([298]), lbl='디캔터/와인쿨러'),
1150: SChildCategory(b=array([21]), m=array([106]), lbl='틈새수납장'),
1151: SChildCategory(b=array([18]), m=array([315]), lbl='남성 메이크업'),
1152: SChildCategory(b=array([39]), m=array([98]), lbl='케이스용 쿨러'),
1153: SChildCategory(b=array([35]), m=array([330]), lbl='손부채'),
1154: SChildCategory(b=array([20]), m=array([174]), lbl='부토니에'),
1155: SChildCategory(b=array([37]), m=array([159]), lbl='헤어세트'),
1156: SChildCategory(b=array([43]), m=array([109]), lbl='키친타월'),
1157: SChildCategory(b=array([16]), m=array([222]), lbl='티포크/티스푼'),
1158: SChildCategory(b=array([14]), m=array([124]), lbl='넥워머'),
1159: SChildCategory(b=array([15]), m=array([171]), lbl='탁상용/USB 선풍기'),
1160: SChildCategory(b=array([18]), m=array([315]), lbl='남성 클렌징'),
1161: SChildCategory(b=array([23]), m=array([162]), lbl='바지/레깅스'),
1162: SChildCategory(b=array([15]), m=array([285]), lbl='스탠드형 에어컨'),
1163: SChildCategory(b=array([45]), m=array([163]), lbl='책꽂이'),
1164: SChildCategory(b=array([35]), m=array([384]), lbl='풍선/풍선용품'),
1165: SChildCategory(b=array([20]), m=array([161]), lbl='진주 펜던트'),
1166: SChildCategory(b=array([27]), m=array([385]), lbl='제과제빵믹스'),
1167: SChildCategory(b=array([40]), m=array([183]), lbl='매직블럭/스펀지'),
1168: SChildCategory(b=array([44]), m=array([351]), lbl='모터싸이클 헬멧'),
1169: SChildCategory(b=array([10]), m=array([236]), lbl='일회용/황사마스크'),
1170: SChildCategory(b=array([ 6, 22]), m=array([202, 287]), lbl='아쿠아슈즈'),
1171: SChildCategory(b=array([15]), m=array([285]), lbl='벽걸이 에어컨'),
1172: SChildCategory(b=array([8]), m=array([8]), lbl='여름자리'),
1173: SChildCategory(b=array([13]), m=array([263]), lbl='요가매트'),
1174: SChildCategory(b=array([2]), m=array([66]), lbl='자석놀이'),
1175: SChildCategory(b=array([28]), m=array([58]), lbl='에어 레규레이터'),
1176: SChildCategory(b=array([36]), m=array([79]), lbl='리클라이너소파'),
1177: SChildCategory(b=array([50]), m=array([201]), lbl='애견미용용품'),
1178: SChildCategory(b=array([35]), m=array([386]), lbl='포스트잇'),
1179: SChildCategory(b=array([15]), m=array([387]), lbl='초음파식 가습기'),
1180: SChildCategory(b=array([18]), m=array([237]), lbl='스크럽/필링젤'),
1181: SChildCategory(b=array([2]), m=array([300]), lbl='아기체육관'),
1182: SChildCategory(b=array([13]), m=array([380]), lbl='스키/보드 장갑'),
1183: SChildCategory(b=array([45]), m=array([185]), lbl='키즈 책장'),
1184: SChildCategory(b=array([36]), m=array([135]), lbl='인테리어의자'),
1185: SChildCategory(b=array([30]), m=array([273]), lbl='여성골프하의'),
1186: SChildCategory(b=array([19]), m=array([308]), lbl='홍삼액/홍삼정'),
1187: SChildCategory(b=array([8]), m=array([81]), lbl='바디필로우'),
1188: SChildCategory(b=array([15, 28]), m=array([18, 47]), lbl='열풍기'),
1189: SChildCategory(b=array([6]), m=array([231]), lbl='야구화'),
1190: SChildCategory(b=array([15]), m=array([82]), lbl='천장형 냉온풍기'),
1191: SChildCategory(b=array([27]), m=array([211]), lbl='케이크'),
1192: SChildCategory(b=array([2]), m=array([134]), lbl='물로켓/에어로켓'),
1193: SChildCategory(b=array([11]), m=array([223]), lbl='학습용스탠드'),
1194: SChildCategory(b=array([32]), m=array([57]), lbl='카메라배터리그립'),
1195: SChildCategory(b=array([28]), m=array([389]), lbl='파이프커터/확관기'),
1196: SChildCategory(b=array([30]), m=array([195]), lbl='골프양말'),
1197: SChildCategory(b=array([42]), m=array([104]), lbl='전기오븐'),
1198: SChildCategory(b=array([20]), m=array([187]), lbl='진주세트'),
1199: SChildCategory(b=array([43]), m=array([357]), lbl='해충퇴치램프'),
1200: SChildCategory(b=array([25]), m=array([177]), lbl='마이크 액세서리'),
1201: SChildCategory(b=array([30]), m=array([195]), lbl='골프장갑'),
1202: SChildCategory(b=array([5]), m=array([310]), lbl='임산부 브라/팬티 세트'),
1203: SChildCategory(b=array([16]), m=array([218]), lbl='양념/소스통'),
1204: SChildCategory(b=array([2]), m=array([23]), lbl='가베'),
1205: SChildCategory(b=array([16]), m=array([258]), lbl='기타주방잡화'),
1206: SChildCategory(b=array([27]), m=array([392]), lbl='봉지라면'),
1207: SChildCategory(b=array([42]), m=array([146]), lbl='핸드블랜더'),
1208: SChildCategory(b=array([50]), m=array([256]), lbl='고양이간식'),
1209: SChildCategory(b=array([51]), m=array([334]), lbl='물김치'),
1210: SChildCategory(b=array([38]), m=array([96]), lbl='프로젝터 액세서리'),
1211: SChildCategory(b=array([23]), m=array([29]), lbl='유아 로션'),
1212: SChildCategory(b=array([45]), m=array([205]), lbl='강연대'),
1213: SChildCategory(b=array([44]), m=array([137]), lbl='자전거 속도계'),
1214: SChildCategory(b=array([12]), m=array([153]), lbl='의자'),
1215: SChildCategory(b=array([21]), m=array([139]), lbl='벽선반'),
1216: SChildCategory(b=array([40]), m=array([235]), lbl='때타월/퍼프'),
1217: SChildCategory(b=array([14]), m=array([13]), lbl='여성 퀼팅백'),
1218: SChildCategory(b=array([6]), m=array([30]), lbl='스포츠 패션소품'),
1219: SChildCategory(b=array([14]), m=array([49]), lbl='행거치프'),
1220: SChildCategory(b=array([43]), m=array([357]), lbl='캐노피/원터치모기장'),
1221: SChildCategory(b=array([27]), m=array([367]), lbl='청국장'),
1222: SChildCategory(b=array([24]), m=array([200]), lbl='모니터 공유/분배기'),
1223: SChildCategory(b=array([40]), m=array([235]), lbl='목욕가운/터번'),
1224: SChildCategory(b=array([40]), m=array([125]), lbl='빨래집게'),
1225: SChildCategory(b=array([1]), m=array([336]), lbl='디지털피아노'),
1226: SChildCategory(b=array([35]), m=array([395]), lbl='앨범'),
1227: SChildCategory(b=array([13]), m=array([263]), lbl='줄넘기'),
1228: SChildCategory(b=array([41]), m=array([396]), lbl='탄산수'),
1229: SChildCategory(b=array([15]), m=array([397]), lbl='전기요'),
1230: SChildCategory(b=array([43]), m=array([109]), lbl='냅킨'),
1231: SChildCategory(b=array([13, 22]), m=array([221, 242]), lbl='보호장비'),
1232: SChildCategory(b=array([14]), m=array([232]), lbl='여권/보조가방'),
1233: SChildCategory(b=array([15]), m=array([171]), lbl='휴대용 선풍기'),
1234: SChildCategory(b=array([41]), m=array([128]), lbl='프라페/버블티 파우더'),
1235: SChildCategory(b=array([44]), m=array([335]), lbl='인라인스케이트'),
1236: SChildCategory(b=array([11]), m=array([398]), lbl='벽거울'),
1237: SChildCategory(b=array([27]), m=array([284]), lbl='즉석수프'),
1238: SChildCategory(b=array([36]), m=array([213]), lbl='다용도테이블'),
1239: SChildCategory(b=array([44]), m=array([366]), lbl='산악용 MTB'),
1240: SChildCategory(b=array([2]), m=array([399]), lbl='붕붕카'),
1241: SChildCategory(b=array([23]), m=array([115]), lbl='부스터시트/안전의자'),
1242: SChildCategory(b=array([14]), m=array([13]), lbl='비치/왕골가방'),
1243: SChildCategory(b=array([43]), m=array([401]), lbl='탈취제'),
1244: SChildCategory(b=array([23]), m=array([239]), lbl='트레이닝 젓가락'),
1245: SChildCategory(b=array([12]), m=array([129]), lbl='그늘막'),
1246: SChildCategory(b=array([3]), m=array([402]), lbl='스마트워치'),
1247: SChildCategory(b=array([1]), m=array([97]), lbl='인형DIY'),
1248: SChildCategory(b=array([38]), m=array([324]), lbl='스마트TV'),
1249: SChildCategory(b=array([42]), m=array([283]), lbl='전기그릴'),
1250: SChildCategory(b=array([14]), m=array([232]), lbl='중/대형캐리어'),
1251: SChildCategory(b=array([51]), m=array([403]), lbl='쌀'),
1252: SChildCategory(b=array([23]), m=array([190]), lbl='폴더매트'),
1253: SChildCategory(b=array([19]), m=array([404]), lbl='선식'),
1254: SChildCategory(b=array([12]), m=array([93]), lbl='아웃도어샌들'),
1255: SChildCategory(b=array([30]), m=array([317]), lbl='유틸리티우드'),
1256: SChildCategory(b=array([16]), m=array([218]), lbl='도자기 용기'),
1257: SChildCategory(b=array([19]), m=array([41]), lbl='감마리놀렌산'),
1258: SChildCategory(b=array([33]), m=array([333]), lbl='의류건조기'),
1259: SChildCategory(b=array([35]), m=array([330]), lbl='스탬프/잉크패드'),
1260: SChildCategory(b=array([20]), m=array([72]), lbl='유색보석 반지'),
1261: SChildCategory(b=array([9]), m=array([7]), lbl='점프수트'),
1262: SChildCategory(b=array([39]), m=array([230]), lbl='하드랙'),
1263: SChildCategory(b=array([19]), m=array([252]), lbl='호박즙/야채즙'),
1264: SChildCategory(b=array([49]), m=array([193]), lbl='태블릿PC 거치대'),
1265: SChildCategory(b=array([12]), m=array([172]), lbl='등산양말'),
1266: SChildCategory(b=array([22]), m=array([242]), lbl='스키/보드용품'),
1267: SChildCategory(b=array([20]), m=array([187]), lbl='실버세트'),
1268: SChildCategory(b=array([27]), m=array([45]), lbl='식초'),
1269: SChildCategory(b=array([17]), m=array([56]), lbl='통굽부츠'),
1270: SChildCategory(b=array([11]), m=array([223]), lbl='벽등'),
1271: SChildCategory(b=array([2]), m=array([328]), lbl='미끄럼틀'),
1272: SChildCategory(b=array([28]), m=array([47]), lbl='탁상 드릴링 머신'),
1273: SChildCategory(b=array([16]), m=array([184]), lbl='건지기/채반'),
1274: SChildCategory(b=array([18]), m=array([345]), lbl='수면팩'),
1275: SChildCategory(b=array([12, 13]), m=array([170, 212]), lbl='기타용품'),
1276: SChildCategory(b=array([39]), m=array([98]), lbl='HDD용 쿨러'),
1277: SChildCategory(b=array([2]), m=array([66]), lbl='태그북'),
1278: SChildCategory(b=array([7]), m=array([233]), lbl='캐주얼조끼'),
1279: SChildCategory(b=array([20]), m=array([348]), lbl='유아동 목걸이/펜던트'),
1280: SChildCategory(b=array([41]), m=array([318]), lbl='차/곡물음료'),
1281: SChildCategory(b=array([41]), m=array([128]), lbl='녹차'),
1282: SChildCategory(b=array([21]), m=array([35]), lbl='압축팩'),
1283: SChildCategory(b=array([8]), m=array([78]), lbl='패브릭 냅킨'),
1284: SChildCategory(b=array([18]), m=array([42]), lbl='볼터치'),
1285: SChildCategory(b=array([26]), m=array([34]), lbl='이발기'),
1286: SChildCategory(b=array([14]), m=array([290]), lbl='니트/털장갑'),
1287: SChildCategory(b=array([49]), m=array([261]), lbl='휴대용 (10~13형)'),
1288: SChildCategory(b=array([5]), m=array([272]), lbl='스포츠브라/팬티세트'),
1289: SChildCategory(b=array([23]), m=array([379]), lbl='유모차 홀더/수납'),
1290: SChildCategory(b=array([15]), m=array([397]), lbl='온수매트'),
1291: SChildCategory(b=array([12]), m=array([11]), lbl='버너'),
1292: SChildCategory(b=array([24]), m=array([99]), lbl='멀티탭'),
1293: SChildCategory(b=array([36]), m=array([79]), lbl='소파베드'),
1294: SChildCategory(b=array([27]), m=array([284]), lbl='즉석죽'),
1295: SChildCategory(b=array([35]), m=array([330]), lbl='사인보드'),
1296: SChildCategory(b=array([40]), m=array([183]), lbl='변기솔'),
1297: SChildCategory(b=array([49]), m=array([301]), lbl='노트북 전원/배터리'),
1298: SChildCategory(b=array([9]), m=array([67]), lbl='가오리 니트'),
1299: SChildCategory(b=array([16]), m=array([218]), lbl='쌀통'),
1300: SChildCategory(b=array([48]), m=array([405]), lbl='천기저귀'),
1301: SChildCategory(b=array([3]), m=array([402]), lbl='스마트워치 액세서리'),
1302: SChildCategory(b=array([15]), m=array([82]), lbl='벽걸이형 냉온풍기'),
1303: SChildCategory(b=array([36]), m=array([135]), lbl='안락/흔들의자'),
1304: SChildCategory(b=array([45]), m=array([163]), lbl='책상서랍'),
1305: SChildCategory(b=array([35]), m=array([173]), lbl='색연필'),
1306: SChildCategory(b=array([18]), m=array([237]), lbl='클렌징 크림'),
1307: SChildCategory(b=array([38]), m=array([147]), lbl='리시버'),
1308: SChildCategory(b=array([34]), m=array([406]), lbl='차량용 선풍기'),
1309: SChildCategory(b=array([7]), m=array([28]), lbl='체크/패턴정장바지'),
1310: SChildCategory(b=array([8]), m=array([78]), lbl='러너'),
1311: SChildCategory(b=array([27]), m=array([320]), lbl='한과/화과자'),
1312: SChildCategory(b=array([40]), m=array([191]), lbl='욕실장'),
1313: SChildCategory(b=array([24]), m=array([206]), lbl='USB케이블'),
1314: SChildCategory(b=array([13]), m=array([338]), lbl='테니스라켓'),
1315: SChildCategory(b=array([16]), m=array([304]), lbl='주방용 칼'),
1316: SChildCategory(b=array([16]), m=array([218]), lbl='도시락/가방'),
1317: SChildCategory(b=array([13]), m=array([123]), lbl='거꾸리'),
1318: SChildCategory(b=array([49]), m=array([301]), lbl='노트북 파우치'),
1319: SChildCategory(b=array([37]), m=array([277]), lbl='공용향수'),
1320: SChildCategory(b=array([25]), m=array([177]), lbl='무선마이크'),
1321: SChildCategory(b=array([9]), m=array([89]), lbl='코듀로이(골덴) 스커트'),
1322: SChildCategory(b=array([40]), m=array([125]), lbl='다리미판'),
1323: SChildCategory(b=array([41]), m=array([128]), lbl='차 선물세트'),
1324: SChildCategory(b=array([27]), m=array([45]), lbl='물엿/올리고당'),
1325: SChildCategory(b=array([50]), m=array([256]), lbl='고양이배변용품'),
1326: SChildCategory(b=array([20]), m=array([196]), lbl='커플목걸이'),
1327: SChildCategory(b=array([34]), m=array([381]), lbl='엔진오일'),
1328: SChildCategory(b=array([51]), m=array([407]), lbl='표고버섯'),
1329: SChildCategory(b=array([37]), m=array([167]), lbl='바디 클렌져'),
1330: SChildCategory(b=array([18]), m=array([237]), lbl='립앤아이 리무버'),
1331: SChildCategory(b=array([23]), m=array([228]), lbl='수유패드'),
1332: SChildCategory(b=array([25]), m=array([46]), lbl='일체형오디오'),
1333: SChildCategory(b=array([23]), m=array([265]), lbl='신생아 블랭킷'),
1334: SChildCategory(b=array([39]), m=array([230]), lbl='3.5형 HDD'),
1335: SChildCategory(b=array([9]), m=array([89]), lbl='청/데님 스커트'),
1336: SChildCategory(b=array([10]), m=array([9]), lbl='체온계'),
1337: SChildCategory(b=array([8]), m=array([245]), lbl='요이불세트'),
1338: SChildCategory(b=array([39]), m=array([169]), lbl='Micro-ATX 파워'),
1339: SChildCategory(b=array([2]), m=array([44]), lbl='목각 인형'),
1340: SChildCategory(b=array([34]), m=array([409]), lbl='트렁크정리함'),
1341: SChildCategory(b=array([42]), m=array([373]), lbl='레인지후드'),
1342: SChildCategory(b=array([35]), m=array([329]), lbl='플래너/스케쥴러'),
1343: SChildCategory(b=array([23]), m=array([379]), lbl='깔개/시트'),
1344: SChildCategory(b=array([40]), m=array([118]), lbl='목욕의자'),
1345: SChildCategory(b=array([1]), m=array([309]), lbl='자동차 RC'),
1346: SChildCategory(b=array([24]), m=array([204]), lbl='바코드스캐너'),
1347: SChildCategory(b=array([18]), m=array([345]), lbl='필오프팩'),
1348: SChildCategory(b=array([18]), m=array([21]), lbl='기능성세트'),
1349: SChildCategory(b=array([37]), m=array([178]), lbl='순간증모제'),
1350: SChildCategory(b=array([22]), m=array([202]), lbl='슬리퍼/실내화'),
1351: SChildCategory(b=array([23]), m=array([295]), lbl='캐리어'),
1352: SChildCategory(b=array([15]), m=array([397]), lbl='전기방석'),
1353: SChildCategory(b=array([17]), m=array([340]), lbl='베이직부티'),
1354: SChildCategory(b=array([13]), m=array([257]), lbl='스포츠 팔토시'),
# -*- coding: iso-8859-15 -*-
"""
Diese Klasse ist die Hauptsteuerroutine des Frameworks.
Sie steuert das gesamte Verhalten.
Es werden die Subklassen instanziert
"""
import cgi
import os
import sys
import traceback
from urlparse import *
from cgi import FieldStorage
from cgi import MiniFieldStorage
from conf.menu import Menu
from conf.config import Config
from conf.authen import Authen
from conf.sidebox import Sidebox
from templateengine import TemplateEngine
from dbaccess.core import *
from session import Session
from version import Version
# ### Main class of the framework
#
class Mframe:
form=None                  # current form (populated from CGI)
webini=None                # path to the default helper directory WEB-INF
menu=None                  # menu object
homeurl='start.py'         # HOME URL
path=None                  # current path taken from the CGI variable "path"
db=None                    # database handle
session=None               # session object
authen=None                # authentication module
container=Config.defaulttemplate  # default template from Config
tplateengine = None        # template engine
flash=''                   # message line
useAuthen = True           # flag controlling whether authentication is used
sidebox = None             # sidebox handling
config = None              # configuration object
showSideboxes = False      # True if sideboxes should be displayed
sideboxes = ''             # container holding the sidebox HTML snippet
version = None             # version information
mframeversion = None       # version number of mFrame
logintemplate=Config.defaultlogintemplate  # default login template
templateparam = {}         # dictionary for the template
javascript = ''            # default JavaScript
stylesheet = ''            # stylesheet
query_string = ''          # query string from the CGI
defaultMimeHeader = "Content-Type: text/html"
def __init__(self,webini='../../WEB-INF',sessionlifetime=180,usecgi=True):
"""
Konstruktor:
@param webini Pfad auf WEB-INI Verzeichnis
@param sessionlifetime Minuten welche die Session gueltig ist
@param usecgi True: es wird die cgi Classe fuer das Interfacehandling benutzt
False: es wurd nur der QUERY_STRING geparst
Das Standardverhalten ist usecgi=True.
"""
self.usecgi = usecgi
self.config = Config()
# read path from the CGI
self.query_string = os.environ['QUERY_STRING']
self.path = self.getPathFromQueryString()
# configuration
self.homeurl=self.config.homeurl
self.webini=webini
self.menu=Menu(self.config)
self.configMenu()
self.sidebox = Sidebox(config=self.config)
self.session = Session(self.config.sessionpath,sessionlifetime=sessionlifetime)
self.mframeversion = Version().VERSION
self.version = '0.1-E001'
self.connectDb()
self.authen = Authen(session=self.session,db=self.db)
self.menu.authen = self.authen
self.tplateengine = TemplateEngine(self.config)
self.writelog("-- usecgi: {0}".format(usecgi))
if usecgi:
self.form=cgi.FieldStorage(keep_blank_values=1)
self.path = self.cgiparam(name='path',nvl='/')
@staticmethod
def getCgiParameter(param,nvl=''):
"""
Liefert den Inhalt eines CGI Parmeters basierend auf den QUERY_STRINGS
@param param Name des CGI Parameters
@param nvl Null-Value wird zurueckgeliefert, wenn der
Parameter nicht vorhanden ist.
Vorgabewert ''
HINT:
Es wird nur das 1. Vorkommen des Parameters ausgewertet!
"""
query_string = os.environ['QUERY_STRING']
parsed = parse_qs(query_string)
retval = parsed.get(param)
if retval is None:
return nvl
else:
return retval[0]
def loadCgiParameter(self):
"""
Laed den Inhalt des CGI in Abhaengigkeit des Flags usecgi.
usecgi
True: Es wird die Lib cgi verwendet
False: Es wird QUERY_STRING verwendet
HINT:
In bestimmten Situationen z.B. wenn im HTTP Body nur daten uebertragen werden.
verhaelt sich das CGI Modul so, dass es einen Ausnahmebedingung wirft.
Der Flag usecgi ermoeglicht das Abschalten des Moduls. Die CGI Parameter werden
aus dem URL extrahiert und so verspeichert, dass sie mit der Methode cgiparam
wiedergewonnen werden koennen.
"""
if self.usecgi:
self.form=cgi.FieldStorage(keep_blank_values=1)
self.path = self.cgiparam(name='path',nvl='/')
else:
# fetch the form contents
qs = self.query_string
parsed = parse_qs(qs)
self.writelog ("-- QUERY_STRING: {0}".format(qs))
self.writelog ("-- Parsed:")
self.form = dict()
for key in parsed.keys():
for val in parsed.get(key):
self.writelog(" {0} -> {1}".format(key,val))
self.form[key] = val
try:
self.path=parsed.get('path')[0]
except:
self.form = {'path':'/root'}
self.path = self.cgiparam('path','/root')
self.writelog("-- path (cgiparam): {0}".format(self.cgiparam('path')))
def getParsedQueryString(self):
"""
Liefert den geparsten Query String
"""
return cgi.parse_qs(self.query_string)
def getPathFromQueryString(self):
"""
Liefert aus dem Query String den Path eintrag.
HINT:
Wenn nicht gefunden, wird None zurueckgeliefert
"""
qs = self.getParsedQueryString()
path = [''] if qs.get('path',None) is None else qs.get('path')
return path[0]
def cgiparam(self,name=None,nvl='',noneifnotused=False):
"""
Liefert aus dem CGI einen benannten Parameter
@param name Name des Cgiparmeters
@param nvl NullValue wird geliefert,
wenn der Parameter nicht uebergeben wurde
HINT:
Es wird geprueft, ob self.form ein Dict oder FieldStorage ist.
Je nach Type wird der Inhalt geliefert.
"""
if self.form is None:
return nvl
# was special CGI handling requested?
if isinstance(self.form,dict):
return self.form.get(name,nvl)
# if the parameter is not defined,
# return the null value
if name not in self.form:
if noneifnotused:
return None
else:
return nvl
value = self.form.getvalue(name)
if value is None:
value = nvl
else:
if isinstance(value,list):
try:
value = value[0]
except: value = nvl
return value
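# Usage sketch (editorial illustration; parameter names are hypothetical):
#   action = self.cgiparam('action', nvl='list')
#   user_id = self.cgiparam('id', noneifnotused=True)   # None if 'id' was not sent
# This works both with the cgi.FieldStorage form (usecgi=True) and with the
# plain dict built from QUERY_STRING (usecgi=False).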
def writelog(self,*args):
print >>sys.stderr,' '.join([str(a) for a in args])
sys.stderr.flush()
def connectDb(self):
# connect to the database
# if a database file name was given
if self.config.dbtype == 'sqlite':
if self.config.sqlitefilename is not None:
self.db = Database('sqlite',self.config.sqlitefilename)
else: pass
elif self.config.dbtype == 'mysql':
mysqlConn = self.config.mysql
self.db = Database('mysql',mysqlConn['host'],mysqlConn['port'],mysqlConn['username'],mysqlConn['password'],mysqlConn['schema'])
elif self.config.dbtype == 'oracle':
try:
ocrConn = self.config.oracle
except:
raise ValueError, "No ORACLE connection parameters were declared in config (oracle : {...})"
self.db = Database('oracle',
ocrConn['username'],
ocrConn['password'],
ocrConn['sid'],
ocrConn['host'],
ocrConn['port'])
def setContainer(self,name=None):
"""
Setzt den zu verwendenden Container.
Ein Container befindet sich normalerweise als Datei
in ./WEB-INF/temlate und stellt den aeusseren Rahmen
in HTML dar. Er enhaelt Platzhalter in denen die Werte
aus dem Framework eingetragen werden.
@param name Name des Containerfiles
"""
self.tplateengine.tplFileName = name
def setAttribute(self,name=None,value=None):
"""
Setzt ein Attribut in der Session.
@param name Name des Attributes
@param value Wert des Attributes
"""
self.session.setAttribute(name,value)
def getAttribute(self,name=None):
"""
Liefert den Wert eines Attributes oder None
wenn dieses nicht gefunden wurde aus der Session.
@param name Attrbutname
@return Attributwert
"""
return self.session.getAttribute(name)
# deprecated
def start_html(self):
print "<html><body>"
# deprecated
def end_html(self):
print "</body></html>"
def setEntryDisplay(self,path=None,mode=True):
"""
Setzte den Displaystatus
@param path Patheintrag
@param mode True/False (Vorgabewert True
"""
self.menu.setDisplay(path,mode)
def setEntryText(self,path=None,text=''):
"""
Setzte den Text
@param path Patheintrag
@param text Texteintrag
"""
self.menu.setText(path,text)
def setEntryController(self,path=None,controller=''):
"""
Setzte den Text
@param path Patheintrag
@param contrroller Controllereintrag
"""
self.menu.setController(path,controller)
def setEntryParam(self,path=None,param=[]):
"""
Setzte zusaetzliche Parameter in Entry
@param path Patheintrag
@param param Zusaetzliche Parameter als Liste
"""
self.menu.setParam(path,param)
def addEntry(self,
path='/',
controller=None,
text=None,
addparam=None
):
"""
Fuegt in die Menueeintraege einen Eintrag dynamisch hinzu
@param path Der Path unter dem der Eintrag eingetragen
werden soll
@param controller Controller Name
Beginnt der Controller name mit "@url:"
wird ein Link mit dem Inhalt nach @url: erzeugt
@param text Anzeigetext (darf nicht leer sein da sonst keine
Anzeige erfogt.
@param addparam eine Liste mit Parameter, welche dem
Link hinzugefuegt werden. z.B. ['action=list-edit']
"""
myPath = path
if myPath == '@current':
myPath = self.path
if not myPath.endswith ('/'): myPath = myPath + '/'
aux = {}
aux['text']=text
aux['path']=myPath
if controller is not None: aux['controller']=controller
if addparam is not None:
aux['addparam'] = addparam
else:
aux['addparam'] = []
self.menu.addentries.append(aux)
def configMenu(self,container=None,entry=None):
"""
Liefert ein gerendetes Menue
Konfigurieren des Menues:
@param container Ein Container in dem die Menueeintraege
eingefuegt werden
Beispiel: '<ul>{0}</ul>'
@param entry Ein Format fuer einen Menueeintrag
'Beispiel: <a href={0}?path={1}>{2}</a><br />'
Parameter:
0 URL
1 aktueller Pfad
2 Anzeigetext
"""
if container is None:
container = '<ul class="menu">\n%(entry)s</ul>\n'
self.menu.tplMenuContainer = container
if entry is not None:
self.menu.tplMenuEntry = entry
def reload(self,path):
""""
Liefert einen http-equiv Fuer in Browserreload
@param path Pfadeintrag
@return gerenderte Eintrag
"""
retval = 'meta http-equiv="refresh" content="0;url="{url}">'.format(url=path)
return retval
def redirect(self,path,other=None):
"""
Liefert einen Redirect mittels Statuscode 303 (moved permantly)
HINT:
gibt immer False zurueck um einfacher in einem Controller
verwendet werden zu koennen.
@param path Pfad
@param other weiter CGI Parameter (optional)
"""
moreparam = '&'+other if other is not None else ''
#raise Exception("Refresh: 0; url={0}?path={1}{2}".format(self.config.homeurl,path,moreparam))
print "Refresh: 0; url={0}?path={1}{2}".format(self.config.homeurl,path,moreparam)
return True
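# Example (derived from the method above; path and parameters are hypothetical):
#   self.redirect('/users/list', other='action=edit&id=7')
# prints "Refresh: 0; url=<homeurl>?path=/users/list&action=edit&id=7"
# and returns True, so a controller can simply end with this call.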
def reloadmask(self,msg=''):
lstUrl = list()
for fld in self.form.keys():
val = self.form.getfirst(fld)
if isinstance(val,list):
for v in val:
lstUrl.append('{key}={val}'.format(key=fld,val=v))
else:
lstUrl.append('{key}={val}'.format(key=fld,val=val))
return '&'.join(lstUrl)
def init(self):
"""
Initaialsierungsroutine des Frameworks
HINT:
Prueft ob der Benutzer eingelogt ist
Ist dies nicht der Fall, wird die Einloggmaske verwendet
"""
self.menu.path = self.path
self.menu.homeurl=self.homeurl
if self.useAuthen:
# if nobody is logged in yet
if not self.authen.isAuthenticated():
self.setContainer(name=self.logintemplate)
self.tplateengine.readTemplateFile()
def appException(self,controllerfilename,message):
"""
Ausgabe der Standard Fehlermeldung
@param controllerfilename Name des Controllers
@param message Nachrichtentext
"""
return """Content-Type: text/html\n\n
<html><body>
<div style="border:2px solid red;font-family:Tahoma,sans-serif;padding:16px;">
<h1 style="background-color:red;color:white;margin:0;font-size:1em;">Oops...</h1>
<p>
An unexpected event occurred in the application
</p>
<p>
Controller: <strong>%(filename)s</strong><br />
</p>
<p>
Message:
<strong>%(meldung)s</strong>
</p>
<p>
Please send this message to the application developer
</p>
| |
# Code for plan-net
# <NAME>, Feb 2019
# Keras
import keras
from keras import backend as K
from keras.models import Sequential, Model
from keras.metrics import binary_accuracy
from keras.layers import Convolution1D, Dense, MaxPooling1D, Flatten, Input, Lambda, Wrapper, merge, concatenate
from keras.engine import InputSpec
from keras.layers.core import Dense, Dropout, Activation, Layer, Lambda, Flatten
from keras.regularizers import l2
from keras.optimizers import RMSprop, Adadelta, adam
from keras.layers.advanced_activations import LeakyReLU
from keras import initializers
import tensorflow as tf
from itertools import product
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
from sklearn import metrics, neighbors
from sklearn.preprocessing import MinMaxScaler
# From: https://github.com/yaringal/ConcreteDropout/blob/master/concrete-dropout-keras.ipynb
class ConcreteDropout(Wrapper):
"""This wrapper allows to learn the dropout probability for any given input Dense layer.
```python
# as the first layer in a model
model = Sequential()
model.add(ConcreteDropout(Dense(8), input_shape=(16)))
# now model.output_shape == (None, 8)
# subsequent layers: no need for input_shape
model.add(ConcreteDropout(Dense(32)))
# now model.output_shape == (None, 32)
```
`ConcreteDropout` can be used with arbitrary layers which have 2D
kernels, not just `Dense`. However, Conv2D layers require different
weighing of the regulariser (use SpatialConcreteDropout instead).
# Arguments
layer: a layer instance.
weight_regularizer:
A positive number which satisfies
$weight_regularizer = l**2 / (\tau * N)$
with prior lengthscale l, model precision $\tau$ (inverse observation noise),
and N the number of instances in the dataset.
Note that kernel_regularizer is not needed.
dropout_regularizer:
A positive number which satisfies
$dropout_regularizer = 2 / (\tau * N)$
with model precision $\tau$ (inverse observation noise) and N the number of
instances in the dataset.
Note the relation between dropout_regularizer and weight_regularizer:
$weight_regularizer / dropout_regularizer = l**2 / 2$
with prior lengthscale l. Note also that the factor of two should be
ignored for cross-entropy loss, and used only for the euclidean loss.
"""
def __init__(self, layer, weight_regularizer=1e-6, dropout_regularizer=1e-5,
init_min=0.1, init_max=0.1, is_mc_dropout=True, **kwargs):
assert 'kernel_regularizer' not in kwargs
super(ConcreteDropout, self).__init__(layer, **kwargs)
self.weight_regularizer = weight_regularizer
self.dropout_regularizer = dropout_regularizer
self.is_mc_dropout = is_mc_dropout
self.supports_masking = True
self.p_logit = None
self.p = None
self.init_min = np.log(init_min) - np.log(1. - init_min)
self.init_max = np.log(init_max) - np.log(1. - init_max)
def build(self, input_shape=None):
self.input_spec = InputSpec(shape=input_shape)
if not self.layer.built:
self.layer.build(input_shape)
self.layer.built = True
super(ConcreteDropout, self).build() # this is very weird.. we must call super before we add new losses
# initialise p
self.p_logit = self.layer.add_weight(name='p_logit',
shape=(1,),
initializer=initializers.RandomUniform(self.init_min, self.init_max),
trainable=True)
self.p = K.sigmoid(self.p_logit[0])
# initialise regulariser / prior KL term
assert len(input_shape) == 2, 'this wrapper only supports Dense layers'
input_dim = np.prod(input_shape[-1]) # we drop only last dim
weight = self.layer.kernel
kernel_regularizer = self.weight_regularizer * K.sum(K.square(weight)) / (1. - self.p)
dropout_regularizer = self.p * K.log(self.p)
dropout_regularizer += (1. - self.p) * K.log(1. - self.p)
dropout_regularizer *= self.dropout_regularizer * input_dim
regularizer = K.sum(kernel_regularizer + dropout_regularizer)
self.layer.add_loss(regularizer)
def compute_output_shape(self, input_shape):
return self.layer.compute_output_shape(input_shape)
def concrete_dropout(self, x):
'''
Concrete dropout - used at training time (gradients can be propagated)
:param x: input
:return: approx. dropped out input
'''
eps = K.cast_to_floatx(K.epsilon())
temp = 0.1
unif_noise = K.random_uniform(shape=K.shape(x))
drop_prob = (
K.log(self.p + eps)
- K.log(1. - self.p + eps)
+ K.log(unif_noise + eps)
- K.log(1. - unif_noise + eps)
)
drop_prob = K.sigmoid(drop_prob / temp)
random_tensor = 1. - drop_prob
retain_prob = 1. - self.p
x *= random_tensor
x /= retain_prob
return x
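# Editorial note on concrete_dropout above: drop_prob is the Concrete
# (Gumbel-Softmax style) relaxation of a Bernoulli(p) dropout mask,
#   z = sigmoid((log p - log(1-p) + log u - log(1-u)) / temp),  u ~ Uniform(0,1),
# which is differentiable in p, so the dropout probability itself can be
# learned by gradient descent; dividing by retain_prob keeps the expected
# activation scale unchanged.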
def call(self, inputs, training=None):
if self.is_mc_dropout:
return self.layer.call(self.concrete_dropout(inputs))
else:
def relaxed_dropped_inputs():
return self.layer.call(self.concrete_dropout(inputs))
return K.in_train_phase(relaxed_dropped_inputs,
self.layer.call(inputs),
training=training)
# This is the BNN class which learns cholesky.
class BNNModel_het_chol:
"""
Builds basic BNN model around training data
"""
def __init__(self, X: np.array, Y: np.array, architecture: list, dropout = 0.1, T = 10,
tau = 1.0, lengthscale = 1., base_lr = 5e-2, gamma = 0.0001*0.25, ens_num = 0, train_flag = True):
"""
:X: training data X -> so far only implemented for 1D data, needs to be of shape (n,1) or (1,n)
:Y: training data y, needs to be passed as array of shape (n,1);
:param architecture: list of perceptrons per layer, as long as network deep
:param dropout: probability of perceptron being dropped out
:param T: number of samples from posterior of weights during test time
:param tau: precision of prior
:param lengthscale: lengthscale
:param base_lr: initial learning rate for SGD optimizer
:param gamma: parameter for decay of initial learning rate according to default SGD learning schedule
"""
if np.shape(X)[0] == len(Y):
assert np.shape(X)[1] >= 1
else:
assert np.shape(X)[1] == len(Y)
X = np.transpose(X)
self.X = X
# assert np.shape(Y)[1] == 1
self.Y = Y
D = self.Y.shape[-1]
self.ens_num = ens_num
self.dropout = dropout
self.T = T
self.tau = tau
self.lengthscale = lengthscale
# Eq. 3.17 Gal thesis:
self.weight_decay = ((1-self.dropout)*self.lengthscale**2)/(self.X.shape[0]*self.tau) # Don't need to divide by two as we are using squared error
self.architecture = architecture
self.train_flag = train_flag
if K.backend() == 'tensorflow':
K.clear_session()
N = self.X.shape[0]
wd = self.lengthscale**2. / N
dd = 2. / N
inp = Input(shape=(np.shape(self.X)[1],))
x = inp
x = ConcreteDropout(Dense(architecture[0], activation='relu'), weight_regularizer=wd, dropout_regularizer=dd)(x)
for jj in range(1,(len(architecture))):
x = ConcreteDropout(Dense(architecture[jj], activation='relu'), weight_regularizer=wd, dropout_regularizer=dd)(x)
mean = ConcreteDropout(Dense(self.Y.shape[-1]), weight_regularizer=wd, dropout_regularizer=dd)(x)
log_var = ConcreteDropout(Dense(int(D * (D+1)/2)), weight_regularizer=wd, dropout_regularizer=dd)(x)
out = concatenate([mean, log_var])
self.model = Model(inp, out)
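# Editorial note on the loss below: the network emits D mean values plus
# D*(D+1)/2 numbers parameterizing a lower-triangular matrix L (its diagonal
# is exponentiated to keep it positive). S_inv = L L^T is then a valid
# precision matrix, and the per-sample negative log-likelihood of the
# multivariate Gaussian is, up to constants and a factor of 1/2,
# (y - mean)^T S_inv (y - mean) - 2 * sum(log diag(L)).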
def heteroscedastic_loss(true, pred):
mean = pred[:, :D]
L = pred[:, D:]
N = tf.shape(true)[0]
# Slow:
k = 1
inc = 0
Z = []
diag = []
for d in range(D):
# for j in range(k):
# L[:,k-1] = K.exp(L[:,k-1]) # constrain diagonal to be positive
if k == 1:
Z.append(tf.concat([tf.exp(tf.reshape(L[:,inc:inc+k],[N,k])),tf.zeros((N,D-k))],1))
else:
Z.append(tf.concat([tf.reshape(L[:,inc:inc+k-1],[N,k-1]),tf.exp(tf.reshape(L[:,inc+k-1],[N,1])),tf.zeros((N,D-k))],1))
diag.append(K.exp(L[:,inc+k-1]))
inc += k
k+=1
diag = tf.concat(tf.expand_dims(diag,-1),-1)
lower = tf.reshape(tf.concat(Z,-1),[N,D,D])
S_inv = tf.matmul(lower,tf.transpose(lower,perm=[0,2,1]))
x = tf.expand_dims((true - mean),-1)
quad = tf.matmul(tf.matmul(tf.transpose(x,perm=[0,2,1]),S_inv),x)
log_det = - 2 * K.sum(K.log(diag),0)
# - 0.5 * [log det + quadratic term] = log likelihood
# remove minus sign as we want to minimise NLL
return K.mean(tf.squeeze(quad,-1) + log_det, 0)
self.model.compile(optimizer='adam', loss=heteroscedastic_loss)
# assert len(model.layers[1].trainable_weights) == 3 # kernel, bias, and dropout prob
# assert len(model.losses) == 5 # a loss for each Concrete Dropout layer
# hist = model.fit(X, Y, nb_epoch=nb_epoch, batch_size=batch_size, verbose=0)
# loss = hist.history['loss'][-1]
# return model, -0.5 * loss # return ELBO up to const.
def train(self, epochs = 100, batch_size = 128, validation_data = ()):
"""
Trains model
:param epochs: defines how many times each training point is revisited during training time
:param batch_size: defines how big batch size used is
"""
# Might want to save model check points?!
weights_file_std = './ens_folder_models/ensemble_'+str(self.ens_num)+'_check_point_weights_het_loss.h5'
model_checkpoint = keras.callbacks.ModelCheckpoint(weights_file_std, monitor='val_loss', save_best_only=True,
save_weights_only=True, mode='auto',verbose=0)
Early_Stop = keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=50, verbose=1, mode='auto')
if self.train_flag:
self.historyBNN = self.model.fit(self.X, self.Y, epochs=epochs,
batch_size=batch_size, verbose=2,
validation_data = validation_data, callbacks=[Early_Stop,model_checkpoint])
self.model.load_weights(weights_file_std)
# tl,vl = historyBNN.history['loss'], historyBNN.history['val_loss']
def predict(self, X_test):
D = self.Y.shape[-1]
Yt_hat = np.array([self.model.predict(X_test, batch_size=500, verbose=0) for _ in range(self.T)])
# Yt_hat = Yt_hat * self.std_y_train + self.mean_y_train
mean = Yt_hat[:, :, :D] # K x N x D
logvar = Yt_hat[:, :, D:]
MC_pred = np.mean(mean, 0)
return MC_pred, mean, logvar
def evaluate(self, x_test, y_test):
# rmse = np.mean((y_test.squeeze() - MC_pred.squeeze())**2.)**0.5
_, mean, logvar = self.predict(x_test)
# We compute the test log-likelihood
LL = np.zeros((x_test.shape[0],mean.shape[0]))
for t in range(mean.shape[0]):
Z = []
diag = []
inc = 0
k=1
N = x_test.shape[0]
D = y_test.shape[1]
for d in range(D):
# for j in range(k):
logvar[t,:,k-1] = np.exp(logvar[t,:,k-1]) # constrain diagonal to be positive
Z.append(np.hstack([np.reshape(logvar[t,:,inc:inc+k],[N,k]),np.zeros((N,D-k))]))
diag.append(logvar[t,:,k-1])
inc += k
k+=1
diag = np.hstack(np.expand_dims(diag,-1))
lower = np.reshape(np.hstack(Z),[N,D,D])
S_inv = np.matmul(lower,np.transpose(lower,axes=[0,2,1]))
x = np.expand_dims(((np.squeeze(mean[t]) - y_test)**2),-1)
quad = np.matmul(np.matmul(np.transpose(x,axes=[0,2,1]),S_inv),x)
log_det = np.sum(- np.log(diag**2),1)
# - 0.5 * [log det + quadratic term] = log likelihood
# remove minus sign as we want to minimise NLL
LL[:,t] = np.squeeze(quad) + log_det
test_ll = np.sum(np.sum(LL,-1),-1)
rmse = np.mean((np.mean(mean, 0) - y_test)**2.)**0.5
return test_ll/N, rmse
def logsumexp(a):
a_max = a.max(axis=0)
return np.log(np.sum(np.exp(a - a_max), axis=0)) + a_max
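# Illustration (not part of the original code; the helper name is made up):
# rebuild, in NumPy, the lower-triangular Cholesky factor and the implied
# precision matrix S_inv = L L^T from one row of the `logvar` array returned
# by BNNModel_het_chol.predict, mirroring the indexing used in the loss above.
def build_precision_from_chol(logvar_row, D):
    L = np.zeros((D, D))
    inc = 0
    for d in range(D):
        # the first d entries of the block are the off-diagonal elements,
        # the last entry is the log of the (positive) diagonal element
        L[d, :d] = logvar_row[inc:inc + d]
        L[d, d] = np.exp(logvar_row[inc + d])
        inc += d + 1
    return L.dot(L.T)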
# HELA PLOTTING FUNCTION taken from https://github.com/exoclime/HELA
def posterior_matrix(estimations, y, names, ranges, colors, soft_colors=None):
cmaps = [LinearSegmentedColormap.from_list("MyReds", [(1, 1, 1), c], N=256)
for c in colors]
ranges = np.array(ranges)
if soft_colors is None:
soft_colors = colors
num_dims = estimations.shape[1]
fig, axes = | |
"""
Interp-level definition of frequently used functionals.
"""
import sys
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault
from pypy.interpreter.typedef import TypeDef
from rpython.rlib import jit, rarithmetic
from rpython.rlib.objectmodel import specialize
from rpython.rlib.rarithmetic import r_uint, intmask
from rpython.rlib.rbigint import rbigint
def get_len_of_range(space, lo, hi, step):
"""
Return number of items in range/xrange (lo, hi, step).
Raise ValueError if step == 0 and OverflowError if the true value is too
large to fit in a signed long.
"""
# If lo >= hi, the range is empty.
# Else if n values are in the range, the last one is
# lo + (n-1)*step, which must be <= hi-1. Rearranging,
# n <= (hi - lo - 1)/step + 1, so taking the floor of the RHS gives
# the proper value. Since lo < hi in this case, hi-lo-1 >= 0, so
# the RHS is non-negative and so truncation is the same as the
# floor. Letting M be the largest positive long, the worst case
# for the RHS numerator is hi=M, lo=-M-1, and then
# hi-lo-1 = M-(-M-1)-1 = 2*M. Therefore unsigned long has enough
# precision to compute the RHS exactly.
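# Worked example: lo=0, hi=10, step=3 gives diff = 9 and n = 9 // 3 + 1 = 4,
# matching len(range(0, 10, 3)) == 4 ([0, 3, 6, 9]); if lo >= hi, n is 0.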
if step == 0:
raise oefmt(space.w_ValueError, "step argument must not be zero")
elif step < 0:
lo, hi, step = hi, lo, -step
if lo < hi:
uhi = r_uint(hi)
ulo = r_uint(lo)
diff = uhi - ulo - 1
n = intmask(diff // r_uint(step) + 1)
if n < 0:
raise oefmt(space.w_OverflowError, "result has too many items")
else:
n = 0
return n
@unwrap_spec(w_step=WrappedDefault(1))
def range_int(space, w_x, w_y=None, w_step=None):
"""Return a list of integers in arithmetic position from start (defaults
to zero) to stop - 1 by step (defaults to 1). Use a negative step to
get a list in decending order."""
if w_y is None:
w_start = space.newint(0)
w_stop = w_x
else:
w_start = w_x
w_stop = w_y
if space.isinstance_w(w_stop, space.w_float):
raise oefmt(space.w_TypeError,
"range() integer end argument expected, got float.")
if space.isinstance_w(w_start, space.w_float):
raise oefmt(space.w_TypeError,
"range() integer start argument expected, got float.")
if space.isinstance_w(w_step, space.w_float):
raise oefmt(space.w_TypeError,
"range() integer step argument expected, got float.")
w_start = space.int(w_start)
w_stop = space.int(w_stop)
w_step = space.int(w_step)
try:
start = space.int_w(w_start)
stop = space.int_w(w_stop)
step = space.int_w(w_step)
except OperationError as e:
if not e.match(space, space.w_OverflowError):
raise
return range_with_longs(space, w_start, w_stop, w_step)
howmany = get_len_of_range(space, start, stop, step)
if space.config.objspace.std.withliststrategies:
return range_withspecialized_implementation(space, start,
step, howmany)
res_w = [None] * howmany
v = start
for idx in range(howmany):
res_w[idx] = space.newint(v)
v += step
return space.newlist(res_w)
def range_withspecialized_implementation(space, start, step, length):
assert space.config.objspace.std.withliststrategies
from pypy.objspace.std.listobject import make_range_list
return make_range_list(space, start, step, length)
bigint_one = rbigint.fromint(1)
def range_with_longs(space, w_start, w_stop, w_step):
start = lo = space.bigint_w(w_start)
hi = space.bigint_w(w_stop)
step = st = space.bigint_w(w_step)
if not step.tobool():
raise oefmt(space.w_ValueError, "step argument must not be zero")
elif step.sign < 0:
lo, hi, st = hi, lo, st.neg()
if lo.lt(hi):
diff = hi.sub(lo).sub(bigint_one)
n = diff.floordiv(st).add(bigint_one)
try:
howmany = n.toint()
except OverflowError:
raise oefmt(space.w_OverflowError, "result has too many items")
else:
howmany = 0
res_w = [None] * howmany
v = start
for idx in range(howmany):
res_w[idx] = space.newlong_from_rbigint(v)
v = v.add(step)
return space.newlist(res_w)
min_jitdriver = jit.JitDriver(name='min',
greens=['has_key', 'has_item', 'w_type'], reds='auto')
max_jitdriver = jit.JitDriver(name='max',
greens=['has_key', 'has_item', 'w_type'], reds='auto')
@specialize.arg(3)
def min_max_sequence(space, w_sequence, w_key, implementation_of):
if implementation_of == "max":
compare = space.gt
jitdriver = max_jitdriver
else:
compare = space.lt
jitdriver = min_jitdriver
w_iter = space.iter(w_sequence)
w_type = space.type(w_iter)
has_key = w_key is not None
has_item = False
w_max_item = None
w_max_val = None
while True:
jitdriver.jit_merge_point(has_key=has_key, has_item=has_item,
w_type=w_type)
try:
w_item = space.next(w_iter)
except OperationError as e:
if not e.match(space, space.w_StopIteration):
raise
break
if has_key:
w_compare_with = space.call_function(w_key, w_item)
else:
w_compare_with = w_item
if (not has_item or
space.is_true(compare(w_compare_with, w_max_val))):
has_item = True
w_max_item = w_item
w_max_val = w_compare_with
if w_max_item is None:
raise oefmt(space.w_ValueError, "arg is an empty sequence")
return w_max_item
@specialize.arg(3)
@jit.look_inside_iff(lambda space, args_w, w_key, implementation_of:
jit.loop_unrolling_heuristic(args_w, len(args_w), 3))
def min_max_multiple_args(space, args_w, w_key, implementation_of):
# case of multiple arguments (at least two). We unroll it if there
# are 2 or 3 arguments.
if implementation_of == "max":
compare = space.gt
else:
compare = space.lt
w_max_item = args_w[0]
if w_key is not None:
w_max_val = space.call_function(w_key, w_max_item)
else:
w_max_val = w_max_item
for i in range(1, len(args_w)):
w_item = args_w[i]
if w_key is not None:
w_compare_with = space.call_function(w_key, w_item)
else:
w_compare_with = w_item
if space.is_true(compare(w_compare_with, w_max_val)):
w_max_item = w_item
w_max_val = w_compare_with
return w_max_item
@specialize.arg(2)
def min_max(space, args, implementation_of):
w_key = None
if bool(args.keywords):
kwds = args.keywords
if kwds[0] == "key" and len(kwds) == 1:
w_key = args.keywords_w[0]
else:
raise oefmt(space.w_TypeError,
"%s() got unexpected keyword argument",
implementation_of)
#
args_w = args.arguments_w
if len(args_w) > 1:
return min_max_multiple_args(space, args_w, w_key, implementation_of)
elif len(args_w):
return min_max_sequence(space, args_w[0], w_key, implementation_of)
else:
raise oefmt(space.w_TypeError,
"%s() expects at least one argument",
implementation_of)
def max(space, __args__):
"""max(iterable[, key=func]) -> value
max(a, b, c, ...[, key=func]) -> value
With a single iterable argument, return its largest item.
With two or more arguments, return the largest argument.
"""
return min_max(space, __args__, "max")
def min(space, __args__):
"""min(iterable[, key=func]) -> value
min(a, b, c, ...[, key=func]) -> value
With a single iterable argument, return its smallest item.
With two or more arguments, return the smallest argument.
"""
return min_max(space, __args__, "min")
class W_Enumerate(W_Root):
def __init__(self, w_iter_or_list, start, w_start):
# 'w_index' should never be a wrapped int here; if it would be,
# then it is actually None and the unwrapped int is in 'index'.
self.w_iter_or_list = w_iter_or_list
self.index = start
self.w_index = w_start
def descr___new__(space, w_subtype, w_iterable, w_start=None):
from pypy.objspace.std.listobject import W_ListObject
if w_start is None:
start = 0
else:
w_start = space.index(w_start)
if space.is_w(space.type(w_start), space.w_int):
start = space.int_w(w_start)
w_start = None
else:
start = -1
if start == 0 and type(w_iterable) is W_ListObject:
w_iter = w_iterable
else:
w_iter = space.iter(w_iterable)
self = space.allocate_instance(W_Enumerate, w_subtype)
self.__init__(w_iter, start, w_start)
return self
def descr___iter__(self, space):
return self
def descr_next(self, space):
from pypy.objspace.std.listobject import W_ListObject
w_index = self.w_index
w_iter_or_list = self.w_iter_or_list
w_item = None
if w_index is None:
index = self.index
if type(w_iter_or_list) is W_ListObject:
try:
w_item = w_iter_or_list.getitem(index)
except IndexError:
self.w_iter_or_list = None
raise OperationError(space.w_StopIteration, space.w_None)
self.index = index + 1
elif w_iter_or_list is None:
raise OperationError(space.w_StopIteration, space.w_None)
else:
try:
newval = rarithmetic.ovfcheck(index + 1)
except OverflowError:
w_index = space.newint(index)
self.w_index = space.add(w_index, space.newint(1))
self.index = -1
else:
self.index = newval
w_index = space.newint(index)
else:
self.w_index = space.add(w_index, space.newint(1))
if w_item is None:
w_item = space.next(self.w_iter_or_list)
return space.newtuple([w_index, w_item])
def descr___reduce__(self, space):
from pypy.interpreter.mixedmodule import MixedModule
w_mod = space.getbuiltinmodule('_pickle_support')
mod = space.interp_w(MixedModule, w_mod)
w_new_inst = mod.get('enumerate_new')
w_index = self.w_index
if w_index is None:
w_index = space.newint(self.index)
w_info = space.newtuple([self.w_iter_or_list, w_index])
return space.newtuple([w_new_inst, w_info])
# exported through _pickle_support
def _make_enumerate(space, w_iter_or_list, w_index):
if space.is_w(space.type(w_index), space.w_int):
index = space.int_w(w_index)
w_index = None
else:
index = -1
return W_Enumerate(w_iter_or_list, index, w_index)
W_Enumerate.typedef = TypeDef("enumerate",
__new__=interp2app(W_Enumerate.descr___new__.im_func),
__iter__=interp2app(W_Enumerate.descr___iter__),
next=interp2app(W_Enumerate.descr_next),
__reduce__=interp2app(W_Enumerate.descr___reduce__),
)
def reversed(space, w_sequence):
"""Return a iterator that yields items of sequence in reverse."""
w_reversed = None
if space.is_oldstyle_instance(w_sequence):
w_reversed = space.findattr(w_sequence, space.newtext("__reversed__"))
else:
w_reversed_descr = space.lookup(w_sequence, "__reversed__")
if w_reversed_descr is not None:
w_reversed = space.get(w_reversed_descr, w_sequence)
if w_reversed is not None:
return space.call_function(w_reversed)
return W_ReversedIterator(space, w_sequence)
class W_ReversedIterator(W_Root):
def __init__(self, space, w_sequence):
self.remaining = space.len_w(w_sequence) - 1
if not space.issequence_w(w_sequence):
raise oefmt(space.w_TypeError,
"argument to reversed() must be a sequence")
self.w_sequence = w_sequence
def descr___iter__(self, space):
return self
def descr_length(self, space):
return space.newint(0 if self.remaining == -1 else self.remaining + 1)
def descr_next(self, space):
if self.remaining >= 0:
w_index = space.newint(self.remaining)
try:
w_item = space.getitem(self.w_sequence, w_index)
except OperationError as e:
# Done
self.remaining = -1
self.w_sequence = None
if not (e.match(space, space.w_IndexError) or
e.match(space, space.w_StopIteration)):
raise
raise OperationError(space.w_StopIteration, space.w_None)
else:
self.remaining -= 1
return w_item
# Done
self.remaining = -1
self.w_sequence = None
raise OperationError(space.w_StopIteration, space.w_None)
def descr___reduce__(self, space):
from pypy.interpreter.mixedmodule import MixedModule
w_mod = space.getbuiltinmodule('_pickle_support')
mod = space.interp_w(MixedModule, w_mod)
w_new_inst = mod.get('reversed_new')
w_seq = space.w_None if self.w_sequence is None else self.w_sequence
info_w = [w_seq, space.newint(self.remaining)]
w_info = space.newtuple(info_w)
return space.newtuple([w_new_inst, w_info])
W_ReversedIterator.typedef = | |
# -*- coding: utf-8 -*-
"""
Logic for controlling worker processes
"""
import copy
import json
import logging
import os
import pickle
import signal
import time
from collections import Iterable, defaultdict
from datetime import timedelta
from functools import wraps
from multiprocessing import Process
from threading import Thread
from UserDict import IterableUserDict
from zmon_worker_monitor.zmon_worker.common.utils import get_process_cmdline
from .flags import (MONITOR_KILL_REQ, MONITOR_NONE, MONITOR_PING,
MONITOR_RESTART, flags2num, has_flag)
FLOAT_DIGITS = 5
class ProcessController(object):
"""
Class to handle a bunch of child processes
what can it do:
0. define a common target function for every process?
1. spawn N processes that execute the target function, store references to objects and its pid
2. spawn more process after some are running
3. terminate some process *(by pid?)
4. spawn a thread loop for checking the health of child processes *(and take some action if some process dies)?
5. dynamically change the policy on how to react to process dies *(use queue for incoming requests?)
"""
# TODO: keep? ... flags presented also as fields for easier access
MONITOR_RESTART = MONITOR_RESTART
MONITOR_PING = MONITOR_PING
MONITOR_KILL_REQ = MONITOR_KILL_REQ
MONITOR_NONE = MONITOR_NONE
def __init__(self, default_target=None, default_args=None, default_kwargs=None, default_flags=None,
max_processes=1000, start_action_loop=True):
self.logger = logging.getLogger(__name__)
# init default flags
default_flags = default_flags if default_flags is not None else MONITOR_NONE
# initialize the ProcessGroup
self.proc_group = ProcessGroup(group_name='main', default_target=default_target, default_args=default_args,
default_kwargs=default_kwargs, default_flags=default_flags,
default_kill_wait=0.5, max_processes=max_processes)
self.proc_groups = {} # TODO: allow creation of separated process groups ?
if start_action_loop:
self.start_action_loop()
def create_proc_group(self, name='main', default_target=None, default_args=None, default_kwargs=None,
default_flags=None, default_kill_wait=0.5, max_processes=1000):
# TODO: allow creation of separated process groups ?
self.proc_groups[name] = ProcessGroup(group_name='main', default_target=default_target,
default_args=default_args, default_kwargs=default_kwargs,
default_flags=default_flags, default_kill_wait=default_kill_wait,
max_processes=max_processes)
return self.proc_groups[name]
def spawn_process(self, target=None, args=None, kwargs=None, flags=None):
return self.proc_group.spawn_process(target=target, args=args, kwargs=kwargs, flags=flags)
def spawn_many(self, num, target=None, args=None, kwargs=None, flags=None):
return self.proc_group.spawn_many(num, target=target, args=args, kwargs=kwargs, flags=flags)
def terminate_process(self, proc_name, kill_wait=None):
return self.proc_group.terminate_process(proc_name=proc_name, kill_wait=kill_wait)
def terminate_all_processes(self, kill_wait=None):
self.proc_group.stop_action_loop() # stop action loop before starting to terminate child processes
self.proc_group.terminate_all(kill_wait=kill_wait)
self.logger.info("proc_stats after terminate_all_processes() : %s", self.proc_group.dead_stats)
return True
def list_running(self):
return [proc.to_dict(serialize_all=True) for proc in self.proc_group.values()]
def get_info(self, proc_name):
"""
Get all the info I can of this process, for example:
1. How long has it been running? *(Do I need an extra pid table for statistics?)
2. How much memory does it use?
"""
raise NotImplementedError('Method get_info not implemented yet')
def list_stats(self):
return [d['stats'] for d in self.list_running()]
def is_action_loop_running(self):
return self.proc_group.is_action_loop_running()
def mark_for_termination(self, pid):
self.proc_group.mark_for_termination(pids=[pid])
def ping(self, pid, data):
self.proc_group.add_ping(pid, data)
def add_events(self, pid, events):
self.proc_group.add_events(pid, events)
def processes_view(self):
return self.proc_group.processes_view()
def single_process_view(self, id, key=None):
key = str(key).lower()
proc = None
if key in ('name', 'proc_name'):
proc = self.proc_group.get_by_name(id)
elif key in ('pid', ) and str(id).isdigit():
proc = self.proc_group.get_by_pid(int(id))
if not proc:
return None
return proc.to_dict(serialize_all=True)
def status_view(self, interval=None):
return self.proc_group.status_view(interval=interval)
def health_state(self):
return self.proc_group.is_healthy()
def start_action_loop(self):
self.proc_group.start_action_loop()
def stop_action_loop(self):
self.proc_group.stop_action_loop()
class SimpleMethodCacheInMemory(object):
"""
Simple cache-like decorator for class methods (receiving self as first argument).
Do not use it for functions, classmethods or staticmethods.
We use it mostly for marking methods of ProcessGroup that will run in the action loop in certain intervals
and for limited caching of some methods without having to add another heavy dependency to the project.
"""
decorated_functions = defaultdict(set) # {region => set(func_id1, func_id2, ...)}
# { region => { class_instance_id => { func_id => { args_key => returned } } } }
returned = defaultdict(lambda: defaultdict(lambda: defaultdict(dict)))
# { region => { class_instance_id => { func_id => { args_key => timestamp } } } }
t_last_exec = defaultdict(lambda: defaultdict(lambda: defaultdict(dict)))
shortcut_cache = False # useful to deactivate all cache during testing
def __init__(self, region='', wait_sec=5, action_flag=None):
assert '-' not in region, "'-' char not allowed in regions"
self.region = region
self.wait_sec = wait_sec
self.action_flag = action_flag if action_flag else MONITOR_NONE
@classmethod
def make_args_key(cls, args, kwargs):
return '{}-{}'.format(args, sorted((k, v) for k, v in kwargs.items()))
def __call__(self, f):
id_f = id(f)
self.decorated_functions[self.region].add(id_f)
@wraps(f)
def wrapper(*args, **kwargs):
id_class_instance = id(args[0]) # TODO: detect case where f is not bounded to support functions
args_key = self.make_args_key(args[1:], kwargs)
t_last = self.t_last_exec[self.region][id_class_instance][id_f].get(args_key, 0)
if time.time() - t_last >= self.wait_sec or self.shortcut_cache:
r = f(*args, **kwargs)
self.returned[self.region][id_class_instance][id_f][args_key] = r
self.t_last_exec[self.region][id_class_instance][id_f][args_key] = time.time()
return r
else:
return self.returned[self.region][id_class_instance][id_f][args_key]
wrapper.action_flag = self.action_flag
wrapper.wrapped_func = f
return wrapper
@classmethod
def get_registered_by_obj(cls, obj, region=''):
methods = []
for name in dir(obj):
f = getattr(obj, name)
if callable(f) and hasattr(f, 'wrapped_func') and id(getattr(f, 'wrapped_func')) in \
cls.decorated_functions.get(region, set()):
methods.append(f)
return methods
@classmethod
def invalidate(cls, region='', obj=None, method=None):
assert obj if method else True, 'Need to pass the object the method is bound to'
if not obj: # invalidate a whole region
cls.t_last_exec.pop(region, None)
elif not method: # invalidate all methods from an object
cls.t_last_exec[region].pop(id(obj), None)
else: # invalidate just this method
cls.t_last_exec[region][id(obj)].pop(id(getattr(method, 'wrapped_func')), None)
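# Usage sketch (editorial illustration; the class and functions below are
# invented, using the `cache` alias defined right after this class):
#
#     class Worker(object):
#         @cache(region='demo', wait_sec=10, action_flag=MONITOR_PING)
#         def load_status(self, host):
#             return expensive_status_lookup(host)
#
# Within 10 seconds, repeated calls with the same arguments on the same
# instance return the memoized result; the action_flag marks the method so a
# process group's action loop can discover it via get_registered_by_obj().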
register = SimpleMethodCacheInMemory
cache = SimpleMethodCacheInMemory
class ProcessPlus(Process):
"""
A multiprocessing.Process class extended to include all information we attach to the process
"""
_pack_fields = ('target', 'args', 'kwargs', 'flags', 'tags', 'stats', 'name', 'pid', 'previous_proc',
'ping_status', 'actions_last_5', 'errors_last_5', 'task_counts', 'event_counts')
keep_pings = 3000 # covers approx. 24 hours if pings sent every 30 secs
keep_events = 200
initial_wait_pings = 120
_ping_template = {
'timestamp': 0,
'timedelta': 0,
'tasks_done': 0,
'percent_idle': 0,
'task_duration': 0.0,
}
default_status_interval = 60 * 5 # analyze only the pings received since now - this interval
default_ping_count_intervals = (60 * 5, 60 * 30, 60 * 60, 60 * 60 * 6)
default_event_count_intervals = (60 * 60 * 24, )
STATUS_OK = 'OK'
STATUS_OK_IDLE = 'OK-IDLE'
STATUS_OK_INITIATING = 'OK-INITIATING'
STATUS_BAD_NO_PINGS = 'BAD-NO-PINGS'
STATUS_WARN_LONG_TASK = 'WARN_LONG_TASK'
STATUS_BAD_MALFORMED = 'BAD-MALFORMED-PINGS'
STATUS_BAD_DEAD = 'BAD-DEAD'
STATUS_NOT_TRACKED = 'NOT-TRACKED'
_event_template = {
'origin': '',
'type': '',
'body': '',
'timestamp': 0,
'repeats': 0,
}
EVENT_TYPE_ACTION = 'ACTION'
EVENT_TYPE_ERROR = 'ERROR'
EVENT_TYPE_EXCEPTION = 'EXCEPTION'
event_types = (EVENT_TYPE_ACTION, EVENT_TYPE_ERROR, EVENT_TYPE_EXCEPTION)
def __init__(self, target=None, args=(), kwargs=None, flags=None, tags=None, **extra):
# passed info
self.target = target if callable(target) else self._str2func(target)
self.args = args or ()
self.kwargs = kwargs or {}
# flags = FLAG_A | FLAG_B | FLAG_X or flags = (FLAG_A, FLAG_B, FLAG_X)
self.flags = flags2num(flags) if isinstance(flags, Iterable) else (flags or MONITOR_NONE)
self.tags = tags
# extra info we generate
self.stats = {
'stats_closed': False,
'alive': True,
'rebel': False,
'abnormal_termination': False,
'start_time': None,
'end_time': None,
'start_time_str': '',
'end_time_str': '',
't_running_secs': 0,
'exitcode': 0,
'name': None,
'pid': None,
}
self.stored_pings = []
self.stored_events = []
self._rebel = False
self._termination_mark = False
# fields that can not be reused in new process (e.g. pid, name).
self.previous_proc = {
'dead_name': extra.get('name'),
'dead_pid': extra.get('pid'),
'dead_stats': extra.get('stats'),
'previous_deaths': extra['previous_proc']['previous_deaths'] + 1 if extra.get('previous_proc') else -1,
}
self.logger = logging.getLogger(__name__)
super(ProcessPlus, self).__init__(target=self.target, args=self.args, kwargs=self.kwargs)
@property
def abnormal_termination(self):
return self.stats['abnormal_termination']
@abnormal_termination.setter
def abnormal_termination(self, ab_state):
self.stats['abnormal_termination'] = ab_state
@property
def start_time(self):
return self.stats['start_time']
@property
def t_running_secs(self):
end_time = self.stats['end_time'] or time.time()
return end_time - self.stats['start_time'] if self.stats['start_time'] else 0
@property
def ping_status(self):
return self.get_ping_status()
@property
def actions_last_5(self):
return self.get_events(event_type=ProcessPlus.EVENT_TYPE_ACTION, limit=5)
@property
def errors_last_5(self):
return self.get_events(event_type=ProcessPlus.EVENT_TYPE_ERROR, limit=5)
@property
def exceptions_last_5(self):
return self.get_events(event_type=ProcessPlus.EVENT_TYPE_EXCEPTION, limit=5)
@property
def task_counts(self):
return self.get_ping_counts()
@property
def event_counts(self):
return self.get_event_counts()
def is_rebel(self):
return self._rebel
def mark_for_termination(self):
self._termination_mark = True
def should_terminate(self):
return self._termination_mark
def has_flag(self, flag):
return has_flag(self.flags, flag)
def add_event_explicit(self, origin, event_type, body, repeats=1):
event = dict(origin=origin, type=event_type, body=body, repeats=repeats, timestamp=time.time())
self.add_event(event)
def add_event(self, data):
self._assert_valid_event(data)
self.stored_events.append(data)
self.stored_events = self.stored_events[-self.keep_events:]
def get_events(self, event_type=None, interval=None, limit=-1):
tnow = time.time()
r = [e for e in self.stored_events if ((not interval or tnow - e['timestamp'] <= interval) and
(not event_type or e['type'] == event_type))]
return r[-limit:] if limit and limit > 0 else r
def add_ping(self, data):
self._assert_valid_ping(data)
self.stored_pings.append(data)
self.stored_pings = self.stored_pings[-self.keep_pings:]
def get_pings(self, interval=None, limit=-1):
r = self.stored_pings
if interval is not None:
tnow = time.time()
r = [p for p in self.stored_pings if tnow - p['timestamp'] <= interval]
return r[-limit:] if limit and limit > 0 else r
def get_ping_status(self, interval=None):
interval = interval if interval is not None else self.default_status_interval
if not self.is_alive():
return self.STATUS_BAD_DEAD
if not self.has_flag(MONITOR_PING):
return self.STATUS_NOT_TRACKED
if self.t_running_secs < self.initial_wait_pings:
return self.STATUS_OK_INITIATING
agg_data = self.aggregate_pings(interval=interval)
if agg_data['tasks_done'] < 0 | |
# Source repository: MarioBonse/hanabi-learning-environment
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from absl import app
from absl import flags
from absl import logging
from hanabi_learning_environment import rl_env
from hanabi_learning_environment import utility
import gin
from six.moves import range
import tensorflow as tf
#from tf_agents.agents.dqn import dqn_agent
from tf_agents.drivers import dynamic_episode_driver
from tf_agents.environments import tf_py_environment
from tf_agents.eval import metric_utils
from tf_agents.metrics import py_metrics
from tf_agents.metrics import tf_metrics
#from tf_agents.replay_buffers import tf_uniform_replay_buffer
from tf_agents.utils import common
#from tf_agents.policies import py_tf_policy
from functools import partial
gpus = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(gpus[0], True)
"""
Nota sul training:
Parlando con Sascha ho scoperto che l'implementazione di DeepMind di fatto sta eseguendo training
anche all'interno di un episodio "trained on a batch of 32 transitions in intervals of 4 environment steps"
(tenendo traccia all'inizio del fatto che non ci sono 32 transizioni nel RB quindi fai andare un po' di più).
Io sarei per passare ad un approccio più così (magari facendo tuning di batch size ed env steps) soprattutto per
via del Prioritized RB. E' inutile trasformare il RB in un dataset su cui poi iteri se di fatto a ogni iterazione
traini e sulla base della loss cambi la priorità con cui il RB dovrebbe samplare... Su questo però faccio due
ulteriori considerazioni rispettivamente contro e a favore del cambiamento:
1) Il nostro RB quando diventa dataset in realtà crea un dataset infinito contenente i numeri [0, +inf]
e mappa ogni numero che contiene col metodo self.get_next() (di fatto cestinando l'inutile numero che
passa come parametro). Per via di questo forse possiamo tenere il codice così com'è perchè forse/probabilmente
(va verificato però fino a che punto) se updatiamo le priority sul RB mentre stiamo iterando sul dataset
la cosa non diventa problematica perchè a ogni iterazione viene chiamata self.get_next() che esegue con le nuove
(e corrette) priority.
N.B.
probabilmente il metodo prefetch() applicato al dataset gioca un ruolo perchè le cose già fetchate a occhio non aggiornano
la priorità, ma di queste sottigliezze forse possiamo fregarcene e contare che non cambino molto
2) Implementare il cambiamento non sarebbe particolarmente fastidioso per il driver visto che potremmo usare il
DynamicStepDriver che come output ti dà lo stato delle cose all'ultimo step che poi puoi passare al
DynamicStepDriver stesso alla prossima iterazione perchè riprenda da lì (e questo dovrebbe mantenere consistency
per quanto riguarda metriche importanti come AverageReturn)
"""
flags.DEFINE_string('root_dir', os.getenv('TEST_UNDECLARED_OUTPUTS_DIR'),
'Root directory for writing logs/summaries/checkpoints.')
flags.DEFINE_multi_string('gin_files', [],
'List of paths to gin configuration files (e.g.'
'"configs/hanabi_rainbow.gin").')
flags.DEFINE_multi_string('gin_bindings', [],
'Gin bindings to override the values set in the config files '
'(e.g. "train_eval.num_iterations=100").')
FLAGS = flags.FLAGS
#TODO Very much unfinished function. it should run an episode stopping step by step
# and printing everything we might want to see.
def run_verbose_mode(agent_1, agent_2):
env = rl_env.make('Hanabi-Full-CardKnowledge', num_players=2)
tf_env = tf_py_environment.TFPyEnvironment(env)
state = tf_env.reset()
@gin.configurable
def train_eval(
root_dir,
num_iterations,
# Params for collect
collect_episodes_per_epoch,
# Number of steps for training update
num_steps,
# Params for decaying Epsilon
initial_epsilon,
decay_type,
decay_time,
reset_at_step,
# Params for train
train_steps_per_epoch,
batch_size,
# Params for eval
eval_interval,
num_eval_episodes,
# Params for checkpoints, summaries, and logging
train_checkpoint_interval,
policy_checkpoint_interval,
rb_checkpoint_interval,
summaries_flush_secs=10,
):
"""A simple train and eval for DQN."""
root_dir = os.path.expanduser(root_dir)
train_dir = os.path.join(root_dir, 'train')
eval_dir = os.path.join(root_dir, 'eval')
"""
FIXME Checkpointing doesn't synergize with tensorboard summaries, i.e. if you checkpoint
at some point, execute some epochs (which are not checkpointed), stop the program and run again
from the last saved checkpoint; then tensorboard will receive (and display) twice the summaries
relative to the epochs that had been executed, but not checkpointed. How to solve this? No idea.
"""
train_summary_writer = tf.summary.create_file_writer(
train_dir, flush_millis=summaries_flush_secs * 1000)
train_summary_writer.set_as_default()
eval_summary_writer = tf.summary.create_file_writer(
eval_dir, flush_millis=summaries_flush_secs * 1000)
# Profiler is used to trace computational resource utilization if required from tensorboard
# Note: "To profile multiple GPUs, install CUDA® Toolkit 10.2 or later. CUDA® Toolkit 10.1 supports only
# single GPU profiling." (from https://www.tensorflow.org/guide/profiler#install_the_profiler_and_gpu_prerequisites)
tf.profiler.experimental.server.start(6009)
"""
TODO use ParallelPyEnvironment to run envs in parallel and see how much we can speed up.
See: https://www.youtube.com/watch?v=U7g7-Jzj9qo&list=TLPQMDkwNDIwMjB-xXfzXt3B5Q&index=2 at minute 26:50
Note: it is more than likely that batching the environment might require also passing a different batch_size
parameter to the metrics and the replay buffer. Also note that the replay buffer actually stores batch_size*max_length
frames, so for example right now to have a RB with 50k capacity you would have batch_size=1, max_length=50k. This is probaably
done for parallelization and memory access issues, where one wants to be sure that the parallel runs don't access the same memory
slots of the RB... As such if you want to run envs in parallel and keep RB capacity fixed you should divide the desired capacity
by batch_size and use that as max_length parameter. Btw, a frame stored by the RB can be variable; if num_steps=2 (as right now)
then a frame is [time_step, action, next_time_step] (where time_step has all info including last reward). If you increase num_steps
then it's obvious how a frame would change, and also how this affects the *actual* number of transitions that the RB is storing.
Also note that if I ever actually manage to do the Prioritized RB, it won't support this batch parallelization. The issue lies with
the SumTree object (which I imported from the DeepMind framework) and the fact that it doesn't seem to me like this object could be
parallelized (meaning that all memory access issues are solved) in any way...
"""
# create the environment
env = utility.create_environment()
tf_env = tf_py_environment.TFPyEnvironment(env)
eval_py_env = tf_py_environment.TFPyEnvironment(utility.create_environment())
train_step_1 = tf.Variable(0, trainable=False, name='global_step_1', dtype=tf.int64)
train_step_2 = tf.Variable(0, trainable=False, name='global_step_2', dtype=tf.int64)
epoch_counter = tf.Variable(0, trainable=False, name='Epoch', dtype=tf.int64)
"""
  TODO If you want to load back a previous checkpoint, the current implementation of the decaying epsilon
essentially requires you to pass the same reset_at_step argument from the command line (or gin file)
every time after you pass it the first time (if you wish for consistent decaying behaviour).
Maybe implement some checkpointing of something in order to avoid this requirement...
The only negative side-effect of not having this implementation is that epsilon might become very low
all of a sudden if you forget to pass the reset_at_step argument after you passed it once.
"""
# Epsilon implementing decaying behaviour for the two agents
decaying_epsilon_1 = partial(utility.decaying_epsilon,
initial_epsilon=initial_epsilon,
train_step=epoch_counter,
decay_type=decay_type,
decay_time=decay_time,
reset_at_step=reset_at_step)
decaying_epsilon_2 = partial(utility.decaying_epsilon,
initial_epsilon=initial_epsilon,
train_step=epoch_counter,
decay_type=decay_type,
decay_time=decay_time,
reset_at_step=reset_at_step)
"""
TODO Performance Improvement: "When training on GPUs, make use of the TensorCore. GPU kernels use
the TensorCore when the precision is fp16 and input/output dimensions are divisible by 8 or 16 (for int8)"
(from https://www.tensorflow.org/guide/profiler#improve_device_performance). Maybe consider decreasing
precision to fp16 and possibly compensating with increased model complexity to not lose performance?
I mean if this allows us to use TensorCore then maybe it is worthwhile (computationally) to increase
model size and lower precision. Need to test what the impact on agent performance is.
See https://www.tensorflow.org/guide/keras/mixed_precision for more info
"""
# create an agent and a network
tf_agent_1 = utility.create_agent(
environment=tf_env,
n_step_update=num_steps-1, # num_steps parameter must differ by 1 between agent and replay_buffer.as_dataset() call
decaying_epsilon=decaying_epsilon_1,
train_step_counter=train_step_1)
  # Second agent. We can have as many as we want.
tf_agent_2 = utility.create_agent(
environment=tf_env,
n_step_update=num_steps-1, # num_steps parameter must differ by 1 between agent and replay_buffer.as_dataset() call
decaying_epsilon=decaying_epsilon_2,
train_step_counter=train_step_2)
# replay buffer
replay_buffer, prb_flag = utility.create_replay_buffer(
data_spec=tf_agent_1.collect_data_spec,
batch_size=tf_env.batch_size)
"""
#FIXME we haven't really looked at how train_metrics are managed in the driver when it's running
in particular it is unclear whether any issues come up because of the fact that now the driver
  is running two different policies (agents). In other words, we only modified the DynamicEpisodeDriver
  with what was strictly necessary to make it run with two different agents. We never checked what the
implications of this would be for logging, summaries and metrics. It seems reasonable though that all these
metrics effectively depend only on the environment and so are unaffected by what happens to the agent(s).
We thus do not expect any surprises here, but for example the metric AverageReturnMetric will most likely
be considering the rewards of the two agents together; this is actually desired (for now), as it tells us
how many cards they managed | |
FE7D ARABIC SHADDA MEDIAL FORM
FE7E ARABIC SUKUN ISOLATED FORM
FE7F ARABIC SUKUN MEDIAL FORM
FE80 ARABIC LETTER HAMZA ISOLATED FORM
FE81 ARABIC LETTER ALEF WITH MADDA ABOVE ISOLATED FORM
FE82 ARABIC LETTER ALEF WITH MADDA ABOVE FINAL FORM
FE83 ARABIC LETTER ALEF WITH HAMZA ABOVE ISOLATED FORM
FE84 ARABIC LETTER ALEF WITH HAMZA ABOVE FINAL FORM
FE85 ARABIC LETTER WAW WITH HAMZA ABOVE ISOLATED FORM
FE86 ARABIC LETTER WAW WITH HAMZA ABOVE FINAL FORM
FE87 ARABIC LETTER ALEF WITH HAMZA BELOW ISOLATED FORM
FE88 ARABIC LETTER ALEF WITH HAMZA BELOW FINAL FORM
FE89 ARABIC LETTER YEH WITH HAMZA ABOVE ISOLATED FORM
FE8A ARABIC LETTER YEH WITH HAMZA ABOVE FINAL FORM
FE8B ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM
FE8C ARABIC LETTER YEH WITH HAMZA ABOVE MEDIAL FORM
FE8D ARABIC LETTER ALEF ISOLATED FORM
FE8E ARABIC LETTER ALEF FINAL FORM
FE8F ARABIC LETTER BEH ISOLATED FORM
FE90 ARABIC LETTER BEH FINAL FORM
FE91 ARABIC LETTER BEH INITIAL FORM
FE92 ARABIC LETTER BEH MEDIAL FORM
FE93 ARABIC LETTER TEH MARBUTA ISOLATED FORM
FE94 ARABIC LETTER TEH MARBUTA FINAL FORM
FE95 ARABIC LETTER TEH ISOLATED FORM
FE96 ARABIC LETTER TEH FINAL FORM
FE97 ARABIC LETTER TEH INITIAL FORM
FE98 ARABIC LETTER TEH MEDIAL FORM
FE99 ARABIC LETTER THEH ISOLATED FORM
FE9A ARABIC LETTER THEH FINAL FORM
FE9B ARABIC LETTER THEH INITIAL FORM
FE9C ARABIC LETTER THEH MEDIAL FORM
FE9D ARABIC LETTER JEEM ISOLATED FORM
FE9E ARABIC LETTER JEEM FINAL FORM
FE9F ARABIC LETTER JEEM INITIAL FORM
FEA0 ARABIC LETTER JEEM MEDIAL FORM
FEA1 ARABIC LETTER HAH ISOLATED FORM
FEA2 ARABIC LETTER HAH FINAL FORM
FEA3 ARABIC LETTER HAH INITIAL FORM
FEA4 ARABIC LETTER HAH MEDIAL FORM
FEA5 ARABIC LETTER KHAH ISOLATED FORM
FEA6 ARABIC LETTER KHAH FINAL FORM
FEA7 ARABIC LETTER KHAH INITIAL FORM
FEA8 ARABIC LETTER KHAH MEDIAL FORM
FEA9 ARABIC LETTER DAL ISOLATED FORM
FEAA ARABIC LETTER DAL FINAL FORM
FEAB ARABIC LETTER THAL ISOLATED FORM
FEAC ARABIC LETTER THAL FINAL FORM
FEAD ARABIC LETTER REH ISOLATED FORM
FEAE ARABIC LETTER REH FINAL FORM
FEAF ARABIC LETTER ZAIN ISOLATED FORM
FEB0 ARABIC LETTER ZAIN FINAL FORM
FEB1 ARABIC LETTER SEEN ISOLATED FORM
FEB2 ARABIC LETTER SEEN FINAL FORM
FEB3 ARABIC LETTER SEEN INITIAL FORM
FEB4 ARABIC LETTER SEEN MEDIAL FORM
FEB5 ARABIC LETTER SHEEN ISOLATED FORM
FEB6 ARABIC LETTER SHEEN FINAL FORM
FEB7 ARABIC LETTER SHEEN INITIAL FORM
FEB8 ARABIC LETTER SHEEN MEDIAL FORM
FEB9 ARABIC LETTER SAD ISOLATED FORM
FEBA ARABIC LETTER SAD FINAL FORM
FEBB ARABIC LETTER SAD INITIAL FORM
FEBC ARABIC LETTER SAD MEDIAL FORM
FEBD ARABIC LETTER DAD ISOLATED FORM
FEBE ARABIC LETTER DAD FINAL FORM
FEBF ARABIC LETTER DAD INITIAL FORM
FEC0 ARABIC LETTER DAD MEDIAL FORM
FEC1 ARABIC LETTER TAH ISOLATED FORM
FEC2 ARABIC LETTER TAH FINAL FORM
FEC3 ARABIC LETTER TAH INITIAL FORM
FEC4 ARABIC LETTER TAH MEDIAL FORM
FEC5 ARABIC LETTER ZAH ISOLATED FORM
FEC6 ARABIC LETTER ZAH FINAL FORM
FEC7 ARABIC LETTER ZAH INITIAL FORM
FEC8 ARABIC LETTER ZAH MEDIAL FORM
FEC9 ARABIC LETTER AIN ISOLATED FORM
FECA ARABIC LETTER AIN FINAL FORM
FECB ARABIC LETTER AIN INITIAL FORM
FECC ARABIC LETTER AIN MEDIAL FORM
FECD ARABIC LETTER GHAIN ISOLATED FORM
FECE ARABIC LETTER GHAIN FINAL FORM
FECF ARABIC LETTER GHAIN INITIAL FORM
FED0 ARABIC LETTER GHAIN MEDIAL FORM
FED1 ARABIC LETTER FEH ISOLATED FORM
FED2 ARABIC LETTER FEH FINAL FORM
FED3 ARABIC LETTER FEH INITIAL FORM
FED4 ARABIC LETTER FEH MEDIAL FORM
FED5 ARABIC LETTER QAF ISOLATED FORM
FED6 ARABIC LETTER QAF FINAL FORM
FED7 ARABIC LETTER QAF INITIAL FORM
FED8 ARABIC LETTER QAF MEDIAL FORM
FED9 ARABIC LETTER KAF ISOLATED FORM
FEDA ARABIC LETTER KAF FINAL FORM
FEDB ARABIC LETTER KAF INITIAL FORM
FEDC ARABIC LETTER KAF MEDIAL FORM
FEDD ARABIC LETTER LAM ISOLATED FORM
FEDE ARABIC LETTER LAM FINAL FORM
FEDF ARABIC LETTER LAM INITIAL FORM
FEE0 ARABIC LETTER LAM MEDIAL FORM
FEE1 ARABIC LETTER MEEM ISOLATED FORM
FEE2 ARABIC LETTER MEEM FINAL FORM
FEE3 ARABIC LETTER MEEM INITIAL FORM
FEE4 ARABIC LETTER MEEM MEDIAL FORM
FEE5 ARABIC LETTER NOON ISOLATED FORM
FEE6 ARABIC LETTER NOON FINAL FORM
FEE7 ARABIC LETTER NOON INITIAL FORM
FEE8 ARABIC LETTER NOON MEDIAL FORM
FEE9 ARABIC LETTER HEH ISOLATED FORM
FEEA ARABIC LETTER HEH FINAL FORM
FEEB ARABIC LETTER HEH INITIAL FORM
FEEC ARABIC LETTER HEH MEDIAL FORM
FEED ARABIC LETTER WAW ISOLATED FORM
FEEE ARABIC LETTER WAW FINAL FORM
FEEF ARABIC LETTER ALEF MAKSURA ISOLATED FORM
FEF0 ARABIC LETTER ALEF MAKSURA FINAL FORM
FEF1 ARABIC LETTER YEH ISOLATED FORM
FEF2 ARABIC LETTER YEH FINAL FORM
FEF3 ARABIC LETTER YEH INITIAL FORM
FEF4 ARABIC LETTER YEH MEDIAL FORM
FEF5 ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE ISOLATED FORM
FEF6 ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE FINAL FORM
FEF7 ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE ISOLATED FORM
FEF8 ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE FINAL FORM
FEF9 ARABIC LIGATURE LAM WITH ALEF WITH HAMZA BELOW ISOLATED FORM
FEFA ARABIC LIGATURE LAM WITH ALEF WITH HAMZA BELOW FINAL FORM
FEFB ARABIC LIGATURE LAM WITH ALEF ISOLATED FORM
FEFC ARABIC LIGATURE LAM WITH ALEF FINAL FORM
FEFF ZERO WIDTH NO-BREAK SPACE
FF01 FULLWIDTH EXCLAMATION MARK
FF02 FULLWIDTH QUOTATION MARK
FF03 FULLWIDTH NUMBER SIGN
FF04 FULLWIDTH DOLLAR SIGN
FF05 FULLWIDTH PERCENT SIGN
FF06 FULLWIDTH AMPERSAND
FF07 FULLWIDTH APOSTROPHE
FF08 FULLWIDTH LEFT PARENTHESIS
FF09 FULLWIDTH RIGHT PARENTHESIS
FF0A FULLWIDTH ASTERISK
FF0B FULLWIDTH PLUS SIGN
FF0C FULLWIDTH COMMA
FF0D FULLWIDTH HYPHEN-MINUS
FF0E FULLWIDTH FULL STOP
FF0F FULLWIDTH SOLIDUS
FF10 FULLWIDTH DIGIT ZERO
FF11 FULLWIDTH DIGIT ONE
FF12 FULLWIDTH DIGIT TWO
FF13 FULLWIDTH DIGIT THREE
FF14 FULLWIDTH DIGIT FOUR
FF15 FULLWIDTH DIGIT FIVE
FF16 FULLWIDTH DIGIT SIX
FF17 FULLWIDTH DIGIT SEVEN
FF18 FULLWIDTH DIGIT EIGHT
FF19 FULLWIDTH DIGIT NINE
FF1A FULLWIDTH COLON
FF1B FULLWIDTH SEMICOLON
FF1C FULLWIDTH LESS-THAN SIGN
FF1D FULLWIDTH EQUALS SIGN
FF1E FULLWIDTH GREATER-THAN SIGN
FF1F FULLWIDTH QUESTION MARK
FF20 FULLWIDTH COMMERCIAL AT
FF21 FULLWIDTH LATIN CAPITAL LETTER A
FF22 FULLWIDTH LATIN CAPITAL LETTER B
FF23 FULLWIDTH LATIN CAPITAL LETTER C
FF24 FULLWIDTH LATIN CAPITAL LETTER D
FF25 FULLWIDTH LATIN CAPITAL LETTER E
FF26 FULLWIDTH LATIN CAPITAL LETTER F
FF27 FULLWIDTH LATIN CAPITAL LETTER G
FF28 FULLWIDTH LATIN CAPITAL LETTER H
FF29 FULLWIDTH LATIN CAPITAL LETTER I
FF2A FULLWIDTH LATIN CAPITAL LETTER J
FF2B FULLWIDTH LATIN CAPITAL LETTER K
FF2C FULLWIDTH LATIN CAPITAL LETTER L
FF2D FULLWIDTH LATIN CAPITAL LETTER M
FF2E FULLWIDTH LATIN CAPITAL LETTER N
FF2F FULLWIDTH LATIN CAPITAL LETTER O
FF30 FULLWIDTH LATIN CAPITAL LETTER P
FF31 FULLWIDTH LATIN CAPITAL LETTER Q
FF32 FULLWIDTH LATIN CAPITAL LETTER R
FF33 FULLWIDTH LATIN CAPITAL LETTER S
FF34 FULLWIDTH LATIN CAPITAL LETTER T
FF35 FULLWIDTH LATIN CAPITAL LETTER U
FF36 FULLWIDTH LATIN CAPITAL LETTER V
FF37 FULLWIDTH LATIN CAPITAL LETTER W
FF38 FULLWIDTH LATIN CAPITAL LETTER X
FF39 FULLWIDTH LATIN CAPITAL LETTER Y
FF3A FULLWIDTH LATIN CAPITAL LETTER Z
FF3B FULLWIDTH LEFT SQUARE BRACKET
FF3C FULLWIDTH REVERSE SOLIDUS
FF3D FULLWIDTH RIGHT SQUARE BRACKET
FF3E FULLWIDTH CIRCUMFLEX ACCENT
FF3F FULLWIDTH LOW LINE
FF40 FULLWIDTH GRAVE ACCENT
FF41 FULLWIDTH LATIN SMALL LETTER A
FF42 FULLWIDTH LATIN SMALL LETTER B
FF43 FULLWIDTH LATIN SMALL LETTER C
FF44 FULLWIDTH LATIN SMALL LETTER D
FF45 FULLWIDTH LATIN SMALL LETTER E
FF46 FULLWIDTH LATIN SMALL LETTER F
FF47 FULLWIDTH LATIN SMALL LETTER G
FF48 FULLWIDTH LATIN SMALL LETTER H
FF49 FULLWIDTH LATIN SMALL LETTER I
FF4A FULLWIDTH LATIN SMALL LETTER J
FF4B FULLWIDTH LATIN SMALL LETTER K
FF4C FULLWIDTH LATIN SMALL LETTER L
FF4D FULLWIDTH LATIN SMALL LETTER M
FF4E FULLWIDTH LATIN SMALL LETTER N
FF4F FULLWIDTH LATIN SMALL LETTER O
FF50 FULLWIDTH LATIN SMALL LETTER P
FF51 FULLWIDTH LATIN SMALL LETTER Q
FF52 FULLWIDTH LATIN SMALL LETTER R
FF53 FULLWIDTH LATIN SMALL LETTER S
FF54 FULLWIDTH LATIN SMALL LETTER T
FF55 FULLWIDTH LATIN SMALL LETTER U
FF56 FULLWIDTH LATIN SMALL LETTER V
FF57 FULLWIDTH LATIN SMALL LETTER W
FF58 FULLWIDTH LATIN SMALL LETTER X
FF59 FULLWIDTH LATIN SMALL LETTER Y
FF5A FULLWIDTH LATIN SMALL LETTER Z
FF5B FULLWIDTH LEFT CURLY BRACKET
FF5C FULLWIDTH VERTICAL LINE
FF5D FULLWIDTH RIGHT CURLY BRACKET
FF5E FULLWIDTH TILDE
FF5F FULLWIDTH LEFT WHITE PARENTHESIS
FF60 FULLWIDTH RIGHT WHITE PARENTHESIS
FF61 HALFWIDTH IDEOGRAPHIC FULL STOP
FF62 HALFWIDTH LEFT CORNER BRACKET
FF63 HALFWIDTH RIGHT CORNER BRACKET
FF64 HALFWIDTH IDEOGRAPHIC COMMA
FF65 HALFWIDTH KATAKANA MIDDLE DOT
FF66 HALFWIDTH KATAKANA LETTER WO
FF67 HALFWIDTH KATAKANA LETTER SMALL A
FF68 HALFWIDTH KATAKANA LETTER SMALL I
FF69 HALFWIDTH KATAKANA LETTER SMALL U
FF6A HALFWIDTH KATAKANA LETTER SMALL E
FF6B HALFWIDTH KATAKANA LETTER SMALL O
FF6C HALFWIDTH KATAKANA LETTER SMALL YA
FF6D HALFWIDTH KATAKANA LETTER SMALL YU
FF6E HALFWIDTH KATAKANA LETTER SMALL YO
FF6F HALFWIDTH KATAKANA LETTER SMALL TU
FF70 HALFWIDTH KATAKANA-HIRAGANA PROLONGED SOUND MARK
FF71 HALFWIDTH KATAKANA LETTER A
FF72 HALFWIDTH KATAKANA LETTER I
FF73 HALFWIDTH KATAKANA LETTER U
FF74 HALFWIDTH KATAKANA LETTER E
FF75 HALFWIDTH KATAKANA LETTER O
FF76 HALFWIDTH KATAKANA LETTER KA
FF77 HALFWIDTH KATAKANA LETTER KI
FF78 HALFWIDTH KATAKANA LETTER KU
FF79 HALFWIDTH KATAKANA LETTER KE
FF7A HALFWIDTH KATAKANA LETTER KO
FF7B HALFWIDTH KATAKANA LETTER SA
FF7C HALFWIDTH KATAKANA LETTER SI
FF7D HALFWIDTH KATAKANA LETTER SU
FF7E HALFWIDTH KATAKANA LETTER SE
FF7F HALFWIDTH KATAKANA LETTER SO
FF80 HALFWIDTH KATAKANA LETTER TA
FF81 HALFWIDTH KATAKANA LETTER TI
FF82 HALFWIDTH KATAKANA LETTER TU
FF83 HALFWIDTH KATAKANA LETTER TE
FF84 HALFWIDTH KATAKANA LETTER TO
FF85 HALFWIDTH KATAKANA LETTER NA
FF86 HALFWIDTH KATAKANA LETTER NI
FF87 HALFWIDTH KATAKANA LETTER NU
FF88 HALFWIDTH KATAKANA LETTER NE
FF89 HALFWIDTH KATAKANA LETTER NO
FF8A HALFWIDTH KATAKANA LETTER HA
FF8B HALFWIDTH KATAKANA LETTER HI
FF8C HALFWIDTH KATAKANA LETTER HU
FF8D HALFWIDTH KATAKANA LETTER HE
FF8E HALFWIDTH KATAKANA LETTER HO
FF8F HALFWIDTH KATAKANA LETTER MA
FF90 HALFWIDTH KATAKANA LETTER MI
FF91 HALFWIDTH KATAKANA LETTER MU
FF92 HALFWIDTH KATAKANA LETTER ME
FF93 HALFWIDTH KATAKANA LETTER MO
FF94 HALFWIDTH KATAKANA LETTER YA
FF95 HALFWIDTH KATAKANA LETTER YU
FF96 HALFWIDTH KATAKANA LETTER YO
FF97 HALFWIDTH KATAKANA LETTER RA
FF98 HALFWIDTH KATAKANA LETTER RI
FF99 HALFWIDTH KATAKANA LETTER RU
FF9A HALFWIDTH KATAKANA LETTER RE
FF9B HALFWIDTH KATAKANA LETTER RO
FF9C HALFWIDTH KATAKANA LETTER WA
FF9D HALFWIDTH KATAKANA LETTER N
FF9E HALFWIDTH KATAKANA VOICED SOUND MARK
FF9F HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK
FFA0 HALFWIDTH HANGUL FILLER
FFA1 HALFWIDTH HANGUL LETTER KIYEOK
FFA2 HALFWIDTH HANGUL LETTER SSANGKIYEOK
FFA3 HALFWIDTH HANGUL LETTER KIYEOK-SIOS
FFA4 HALFWIDTH HANGUL LETTER NIEUN
FFA5 HALFWIDTH HANGUL LETTER NIEUN-CIEUC
FFA6 HALFWIDTH HANGUL LETTER NIEUN-HIEUH
FFA7 HALFWIDTH HANGUL LETTER TIKEUT
FFA8 HALFWIDTH HANGUL LETTER SSANGTIKEUT
FFA9 HALFWIDTH HANGUL LETTER RIEUL
FFAA HALFWIDTH HANGUL LETTER RIEUL-KIYEOK
FFAB HALFWIDTH HANGUL LETTER RIEUL-MIEUM
FFAC HALFWIDTH HANGUL LETTER RIEUL-PIEUP
FFAD HALFWIDTH HANGUL LETTER RIEUL-SIOS
FFAE HALFWIDTH HANGUL LETTER RIEUL-THIEUTH
FFAF HALFWIDTH HANGUL LETTER RIEUL-PHIEUPH
FFB0 HALFWIDTH HANGUL LETTER RIEUL-HIEUH
FFB1 HALFWIDTH HANGUL LETTER MIEUM
FFB2 HALFWIDTH HANGUL LETTER PIEUP
FFB3 HALFWIDTH HANGUL LETTER SSANGPIEUP
FFB4 HALFWIDTH HANGUL LETTER PIEUP-SIOS
FFB5 HALFWIDTH HANGUL LETTER SIOS
FFB6 HALFWIDTH HANGUL LETTER SSANGSIOS
FFB7 HALFWIDTH HANGUL LETTER IEUNG
FFB8 HALFWIDTH HANGUL LETTER CIEUC
FFB9 HALFWIDTH HANGUL LETTER SSANGCIEUC
FFBA HALFWIDTH HANGUL LETTER CHIEUCH
FFBB HALFWIDTH HANGUL LETTER KHIEUKH
FFBC HALFWIDTH HANGUL LETTER THIEUTH
FFBD HALFWIDTH HANGUL LETTER PHIEUPH
FFBE HALFWIDTH HANGUL LETTER HIEUH
FFC2 HALFWIDTH HANGUL LETTER A
FFC3 HALFWIDTH HANGUL LETTER AE
FFC4 HALFWIDTH HANGUL LETTER YA
FFC5 HALFWIDTH HANGUL LETTER YAE
FFC6 HALFWIDTH HANGUL LETTER EO
FFC7 HALFWIDTH HANGUL LETTER E
FFCA HALFWIDTH HANGUL LETTER YEO
FFCB HALFWIDTH HANGUL LETTER YE
FFCC HALFWIDTH HANGUL LETTER O
FFCD HALFWIDTH HANGUL LETTER WA
FFCE HALFWIDTH HANGUL LETTER WAE
FFCF HALFWIDTH HANGUL LETTER OE
FFD2 HALFWIDTH HANGUL LETTER YO
FFD3 HALFWIDTH HANGUL LETTER U
FFD4 HALFWIDTH HANGUL LETTER WEO
FFD5 HALFWIDTH HANGUL LETTER WE
FFD6 HALFWIDTH HANGUL LETTER WI
FFD7 HALFWIDTH HANGUL LETTER YU
FFDA HALFWIDTH HANGUL LETTER EU
FFDB HALFWIDTH HANGUL LETTER YI
FFDC HALFWIDTH HANGUL LETTER I
FFE0 FULLWIDTH CENT SIGN
FFE1 FULLWIDTH POUND SIGN
FFE2 FULLWIDTH NOT SIGN
FFE3 FULLWIDTH MACRON
FFE4 FULLWIDTH BROKEN BAR
FFE5 FULLWIDTH YEN SIGN
FFE6 FULLWIDTH WON SIGN
FFE8 HALFWIDTH FORMS LIGHT VERTICAL
FFE9 HALFWIDTH LEFTWARDS ARROW
FFEA HALFWIDTH UPWARDS ARROW
FFEB HALFWIDTH RIGHTWARDS ARROW
FFEC HALFWIDTH DOWNWARDS ARROW
FFED HALFWIDTH BLACK SQUARE
FFEE HALFWIDTH WHITE CIRCLE
FFF9 INTERLINEAR ANNOTATION ANCHOR
FFFA INTERLINEAR ANNOTATION SEPARATOR
FFFB INTERLINEAR ANNOTATION TERMINATOR
FFFC OBJECT REPLACEMENT CHARACTER
FFFD REPLACEMENT CHARACTER
10000 LINEAR B SYLLABLE B008 A
10001 LINEAR B SYLLABLE B038 E
10002 LINEAR B SYLLABLE | |
get_energy(W, b, s, beta)))
joints /= np.sum(joints)
return joints
def get_theo_joints_pm(W, b, beta):
"""calculate the theoretical state distribution for a Boltzmann
machine
"""
N = len(b)
joints = []
states = get_states(N)
for s in states:
joints.append(np.exp(-1. * get_energy(W, b, (2. * s - 1.), beta)))
joints /= np.sum(joints)
return joints
def get_Z(W, b, beta):
return np.sum([np.exp(-get_energy(W, b, s, beta)) for s in get_states(len(b))])
def get_entropy(W, b, beta):
return entropy(get_theo_joints(W, b, beta))
def get_theo_joints_multi_bm(W, b, beta, M):
    N = len(b) // M
joints = []
for i in range(M):
joints.append(get_theo_joints(W[i * N:(i + 1) * N, i * N:(i + 1) * N], b[i * N:(i + 1) * N], beta))
if M == 1:
return joints[0]
else:
return joints
def get_conditionals_from_joints(joints, rvs, vals):
"""calculate a conditional distribution
N: number of random variables
joints: joint distribution
rvs: which random variables to condition on
vals: states of conditioned random variables
"""
N = int(np.log2(len(joints)))
states = get_states(N)
states_cond = []
cond = []
for i, s in enumerate(states):
if np.all(s[rvs] == vals):
states_cond.append(s)
cond.append(joints[i])
cond = np.array(cond) / np.sum(cond)
return states_cond, cond
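# Illustrative usage of get_conditionals_from_joints (not part of the original module; the
# weights, biases and inverse temperature below are made-up values). It conditions a small
# Boltzmann machine on its first unit being active.
def _example_conditionals():
    W = np.array([[0., 0.5, 0.],
                  [0.5, 0., -0.3],
                  [0., -0.3, 0.]])
    b = np.array([-0.2, 0.1, 0.])
    joints = get_theo_joints(W, b, 1.)
    # distribution over the remaining units given s_0 = 1
    states_cond, cond = get_conditionals_from_joints(joints, [0], [1])
    return states_cond, cond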
def get_marginal_dist_from_joints(joints, rvs):
"""calculate the marginal distribution over rvs
N: total number of rvs
joints: joint distribution
rvs: which rvs to calculate marginal form
"""
N = int(np.log2(len(joints)))
joints_states = get_states(N)
marginal_states = joints_states[:, rvs]
marginals_dict = collections.defaultdict(float)
for i, s in enumerate(marginal_states):
marginals_dict[tuple(s)] += joints[i]
marginals = []
marginals_states = []
for s in sorted(marginals_dict.keys()):
marginals_states.append(s)
marginals.append(marginals_dict[s])
    assert(abs(np.sum(marginals) - 1.) < 1e-12), 'Marginal distribution not normalized.'
return marginals_states, np.array(marginals)
def get_marginals_from_joints(N, joints, rvs):
"""calculate marginal distributions
N: number of random variables
joints: joint distribution
rvs: compute marginals for these random variables
"""
states = get_states(N)
m = []
for i in rvs:
m.append(np.sum(joints[states[:, i] == 1]))
return rvs, m
def get_theo_marginals(W, b, beta):
"""calculate marginal distributions of all random variables"""
N = len(b)
joints = get_theo_joints(W, b, beta)
rvs = np.arange(0, N)
return get_marginals_from_joints(N, joints, rvs)
def get_theo_rates_and_covariances(W, b, beta):
"""calculate rate and covariances for Boltzmann machine from
connectivity and biases"""
N = len(b)
joints = get_theo_joints(W, b, beta)
return get_theo_rates_and_covariances_from_joints(N, joints)
def get_theo_rates_and_covariances_from_joints(N, joints):
states = get_states(N)
rvs, m = get_marginals_from_joints(N, joints, np.arange(0, N))
cov = np.zeros((N, N))
for i in range(N):
for j in range(N):
cov[i, j] = np.sum(joints[np.logical_and(
states[:, i] == 1, states[:, j] == 1)]) - m[i] * m[j]
return m, cov
def get_joints_sparse(N, a_s, steps_warmup, prior=None):
"""create joint distribution of network states from recorded state
array. expected sparse representation of network state."""
return get_joints(np.unpackbits(a_s, axis=1)[:, :N], steps_warmup, prior)
def get_rates_and_covariances(N, a_s, steps_warmup):
a_s_full = get_all_states_from_sparse(N, a_s)
rates = np.mean(a_s_full, axis=0)
cov = np.cov(a_s_full.T)
return rates, cov
def get_sigma2(mu):
"""
returns variance of activity mu
"""
return mu * (1. - mu)
def get_sigma(mu):
"""
returns standard deviation of activity mu
"""
return np.sqrt(get_sigma2(mu))
def get_sigma_input_from_beta(beta):
"""returns standard deviation of input given inverse temperature
beta, by requiring matching of erfc and sigmoidal activation
functions at zero
"""
return np.sqrt(8. / (np.pi * beta ** 2))
def get_sigma_input_from_beta_int(beta):
"""return standard deviation of input given inverse temperature beta,
by requiring matching of Taylor expansion of integral of
activation functions"""
return np.log(2.) * np.sqrt(2. * np.pi) / beta
def get_beta_from_sigma_input(sigma_input):
"""returns inverse temperature beta from standard deviation of
input, by requiring matching of erfc and sigmoidal activation
functions at zero
"""
return np.sqrt(8. / (np.pi * sigma_input ** 2))
def get_beta_from_sigma_input_int(sigma_input):
"""returns inverse temperature beta from standard deviation of
input, by requiring matching of erfc and sigmoidal activation
functions at zero
"""
return np.log(2.) * np.sqrt(2. * np.pi) / sigma_input
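# Small numerical sanity check for the matching used above (illustrative only, not part of
# the original module): the sigmoid with inverse temperature beta and the erfc activation
# 0.5 * erfc(-sqrt(pi) * beta / 4 * x) both equal 0.5 at x = 0 and share the slope beta / 4,
# which is exactly the condition behind get_sigma_input_from_beta.
def _check_activation_matching(beta=2., eps=1e-6):
    sigmoid_slope = (1. / (1. + np.exp(-beta * eps)) - 1. / (1. + np.exp(beta * eps))) / (2. * eps)
    erfc_slope = (scipy.special.erfc(-np.sqrt(np.pi) * beta / 4. * eps)
                  - scipy.special.erfc(np.sqrt(np.pi) * beta / 4. * eps)) / (4. * eps)
    return sigmoid_slope, erfc_slope, beta / 4.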
def get_steps_warmup(rNrec, Twarmup, tau):
Nrec = rNrec[1] - rNrec[0]
assert(Nrec >= 0)
return int(np.ceil(1. * Nrec * Twarmup / tau))
def get_joints(a_s, steps_warmup, prior=None):
"""create joint distribution of network states from recorded state
array. expected array representation of network state."""
steps_tot = len(a_s[steps_warmup:])
N = len(a_s[0])
possible_states = get_states(N)
state_counter = {}
if prior is None:
for s in possible_states:
state_counter[tuple(s)] = 0.
elif prior == 'uniform':
for s in possible_states:
state_counter[tuple(s)] = 1.
steps_tot += len(possible_states)
else:
raise NotImplementedError('Unknown prior.')
for s in a_s[steps_warmup:]:
state_counter[tuple(s)] += 1
hist = np.zeros(2 ** N)
for i, s in enumerate(possible_states):
hist[i] = state_counter[tuple(s)]
return 1. * hist / np.sum(hist)
def get_joints_multi_bm(a_s, steps_warmup, M, prior=None):
    N = len(a_s[0, :]) // M
    a_joints = np.empty((M, 2 ** N))
    for i in range(M):
a_joints[i] = get_joints(a_s[:, i * N:(i + 1) * N], steps_warmup, prior)
return a_joints
def get_joints_sparse_multi_bm(N, a_s, steps_warmup, M, prior=None):
return get_joints_multi_bm(np.unpackbits(a_s, axis=1)[:, :N * M], steps_warmup, M, prior)
def get_marginals(a_s, steps_warmup):
"""calculate marginals for each unit for a list of states."""
return np.mean(a_s[steps_warmup:], axis=0)
def get_marginals_multi_bm(a_s, steps_warmup, M):
    N = len(a_s[0, :]) // M
a_marginals = np.empty((M, N))
    for j in range(M):
        a_marginals[j, :] = get_marginals(a_s[:, j * N:(j + 1) * N], steps_warmup)
if M == 1:
return a_marginals[0]
else:
return a_marginals
def get_all_states_from_sparse(N, a_s):
"""create array representation of list of network states from sparse
representation."""
return np.unpackbits(a_s, axis=1)[:, :N]
def get_euclidean_distance(x, y):
"""calculate the euclidean distance of two vectors."""
return np.linalg.norm(x - y)
def get_DKL(p, q):
"""returns the Kullback-Leibler divergence of distributions p and q
"""
    assert(abs(np.sum(p) - 1.) < 1e-12), 'Distributions must be normalized.'
    assert(abs(np.sum(q) - 1.) < 1e-12), 'Distributions must be normalized.'
if not np.all(p > 0.) or not np.all(q > 0.):
print(p, q)
assert(np.all(p > 0.)), 'Invalid values in distribution.'
assert(np.all(q > 0.)), 'Invalid values in distribution.'
return np.sum(p * np.log(p / q))
def get_DKL_multi_bm(p, q, M):
assert(np.shape(p) == np.shape(q))
DKL = []
for j in range(M):
DKL.append(get_DKL(p[j], q[j]))
return DKL
def theta(x):
"""heaviside function."""
return int(x >= 0)
def Ftheta(x, beta=1.):
"""deterministic activation function (McCulloch-Pitts)"""
return int(x >= 0)
def sigma(x, beta=1.):
"""sigmoid function"""
return 1. / (1. + np.exp(-beta * x))
def Fsigma(x, beta=1.):
"""sigmoid activation function (Ginzburg)"""
return int(sigma(x, beta) > np.random.rand())
def Fdiscrete_factory(Nbm, Nnoise, gamma, g, w, Knoise, mu_target):
def Fdiscrete(x, beta=1.):
KEnoise = int(gamma * Knoise)
KInoise = int(Knoise - KEnoise)
enoise = w * np.random.binomial(KEnoise, mu_target)
inoise = -g * w * np.random.binomial(KInoise, mu_target)
return int(x + enoise + inoise >= 0.)
return Fdiscrete
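# Illustrative usage of Fdiscrete_factory (not part of the original module; all parameter
# values below are made up). It builds a threshold unit whose input is jittered by binomial
# excitatory/inhibitory background input and samples it a few times.
def _example_Fdiscrete():
    F = Fdiscrete_factory(Nbm=2, Nnoise=100, gamma=0.8, g=6., w=0.1,
                          Knoise=100, mu_target=0.1)
    return [F(0.2) for _ in range(10)]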
def erfc_noise(x, beta=1.):
return 0.5 * scipy.special.erfc(-np.sqrt(np.pi) * beta / 4. * x)
def erfc_noise_int(x, beta=1.):
return 0.5 * scipy.special.erfc(-beta / (2. * np.log(2) * np.sqrt(np.pi)) * x)
def erfc_noise_sigma(x, sigma):
return 0.5 * scipy.special.erfc(-1. * x / (np.sqrt(2.) * sigma))
@jit
def numba_sigma(x, beta):
return 1. / (1. + np.exp(-beta * x))
@jit
def numba_Fsigma(x, beta=1.):
"""sigmoid activation function (Ginzburg)"""
return int(numba_sigma(x, beta) > np.random.rand())
def erfc_noise_sigma(mu, sigma):
return 0.5 * scipy.special.erfc(-mu / (np.sqrt(2) * sigma))
def Ferfc_noise(x, beta=1.):
"""activation function from complementary error function for
stochastic binary neurons (McCulloch-Pitts + white noise)"""
return int(erfc_noise(x, beta) > np.random.rand())
def Ferfc_noise_int(x, beta=1.):
"""activation function from complementary error function for
stochastic binary neurons (McCulloch-Pitts + white noise)"""
return int(erfc_noise_int(x, beta) > np.random.rand())
def sigmainv(y, beta=1.):
"""returns bias b that leads to mean activity y of a stochastic binary
neuron by inverting sigmoidal activation function
"""
return 1. / beta * np.log(1. / (1. / y - 1.))
def get_mu_input(epsilon, N, gamma, g, w, mu):
"""returns mean input for given connection statistics and presynaptic
activity
"""
return get_mu_input_fixed_indegree(int(epsilon * N), gamma, g, w, mu)
def get_mu_input_fixed_indegree(K, gamma, g, w, mu):
"""returns mean input for given connection statistics and presynaptic
activity
"""
return (gamma - (1. - gamma) * g) * K * w * mu
def get_sigma_input(epsilon, N, gamma, g, w, mu):
"""returns standard deviation of input for given connection statistics
and presynaptic activity
"""
return get_sigma_input_fixed_indegree(int(epsilon * N), gamma, g, w, mu)
def get_sigma_input_fixed_indegree(K, gamma, g, w, mu):
"""returns standard deviation of input for given connection statistics
and presynaptic activity
"""
sigma2 = get_sigma2(mu)
return np.sqrt((gamma + (1. - gamma) * g ** 2) * K * w ** 2 * sigma2)
def get_adjusted_weights_and_bias(W, b, b_eff, beta_eff, beta):
"""return adjusted weights | |
import sys
import pygame as pg
import logging
import Project_GameState
from settings import *
# What each module does
# sys - Used to set the recursion limit so that recursive algorithms don't run forever.
# settings - This will import the settings file in the current directory.
# Importing the GameState which will be used purely as the GUI for the application. As it
# As it stands right now, we draw the GUI information from a mix of this file and
# the GameState. In the next update the DisplayState will have more of that responsibility.
from Project_GameState import GameState as DisplayState
# set which version of the GameState you will use for each Player in the game
from Project_GameState import GameState as P1GameState
from Project_GameState import GameState as P2GameState
# set which Player object you will use for each Player in the game
P1Player = Project_GameState.Player_AlphaBeta(1, 0)
P2Player = Project_GameState.Player_AlphaBeta(2, 0) # Project_GameState.Player_AlphaBeta(2, 0)
# The basic Checkers class.
class Checkers:
# The init function where we initalize important information about pygame and checkers.
def __init__(self):
# print("+INITIALIZED77777+")
pg.init() # This initializes pygame, must be done.
pg.display.set_caption(TITLE) # Sets title of the window as defined in settings.
self.clock = pg.time.Clock() # Used to set the FPS.
self.display_state = DisplayState(BOARD_ROWS, BOARD_COLS) # Used to display the GUI
self.width = self.display_state.cols() * TILESIZE # Width of screen.
self.height = self.display_state.rows() * TILESIZE + 40 # Height of screen.
self.screen = pg.display.set_mode( (self.width, self.height) ) # Window Size.
self.font = pg.font.SysFont(FONTNAME, FONTSIZE, bold=FONTBOLD) # Used later.
self.winner = PLAYER_NONE # Won't need to worry about this for now.
self.text_position = (10, self.height-35) # Used later.
self.player_states = [P1GameState(BOARD_ROWS, BOARD_COLS), P2GameState(BOARD_ROWS, BOARD_COLS)]
self.players = [P1Player, P2Player]
# Variables used to create the checkerboard pattern background.
self.flip_color = True # Used to switch background colors when drawing the board.
# The main game update loop of the application
def update(self):
# This sets a limit on how fast our computers process the drawing code.\
self.dt = self.clock.tick(FPS) / 1000
self.do_turn()
self.events() # This will check for any input.
self.draw() # Draw everything on the screen.
# This will draw everything on the screen.
def draw(self):
# Add another parameter for king color.
self.draw_board() # Draw the basic checkerboard for the background.
# Determine if there's a winner.
player = self.display_state.player_to_move()
if (self.winner == PLAYER_NONE):
self.draw_text(PLAYER_NAMES[player] + (": Human" if self.players[player] == None else ": AI Thinking"), self.text_position, PIECECOLOR[player])
else:
self.draw_text(GAME_RESULT_STRING[self.winner], self.text_position, PIECECOLOR[self.winner])
self.draw_piece_list(self.screen, self.display_state.red_piece_list, RED, 2) # Draw all the red pieces.
self.draw_piece_list(self.screen, self.display_state.black_piece_list, BLACK, 2) # Draw all the black pieces.
# If a player has pressed down on a piece then highlight potential moves.
self.draw_piece_list(self.screen, self.display_state.red_piece_potential_move_list, WHITE, 2) # Draw all potential red moves on board.
self.draw_piece_list(self.screen, self.display_state.black_piece_potential_move_list, WHITE, 2) # Draw all potential red moves on board.
pg.display.flip() # Paint the graphics to the screen.
# This will draw the checkered background of the checkers screen.
def draw_board(self):
# This must always be reinitialized or else colors will constantly be flashing.
self.flip_color = True
self.screen.fill(BG_COLOR_1) # Fill the Background to BG Colour 2.
# Draw all the tiles on the screen.
# NOTE: We don't use drawrect to create a rectangle but we instead fill the part
# of the screen(like paintbucket in MS Paint/Photoshop) to fill in the checkerboard
# design.
for c in range(self.display_state.cols()):
for r in range(self.display_state.rows()):
# Draw a colored tile on the screen depending on flip_color value.
if (self.flip_color == True):
self.screen.fill(BG_COLOR_1, (c*TILESIZE, r*TILESIZE, TILESIZE*1, TILESIZE*1))
self.flip_color = False # Draw the next tile a different color.
else:
self.screen.fill(BG_COLOR_2, (c*TILESIZE, r*TILESIZE, TILESIZE*1, TILESIZE*1))
self.flip_color = True # Draw the next tile a different color.
# Flip the color again so the next column starts with a different color.
self.flip_color = not self.flip_color
# This will draw a list of pieces on a board using a list of tuples.
def draw_piece_list(self, surface, piece_list, color, border):
# For every piece in given list, draw a piece at that row and column.
for piece in piece_list:
row, col = self.display_state.rows() - 1 - piece[0], piece[1]
if (piece in self.display_state.red_king_piece_list) or (piece in self.display_state.black_king_piece_list):
pg.draw.circle(surface, color, (col*TILESIZE+TILESIZE//2, row*TILESIZE+TILESIZE//2), TILESIZE//2-PIECEPAD)
pg.draw.circle(surface, GOLD, (col*TILESIZE+TILESIZE//2, row*TILESIZE+TILESIZE//2), TILESIZE//2-PIECEPAD, border)
else:
pg.draw.circle(surface, color, (col*TILESIZE+TILESIZE//2, row*TILESIZE+TILESIZE//2), TILESIZE//2-PIECEPAD)
# draw some text with the given arguments
def draw_text(self, text, pos, color):
label = self.font.render(text, 1, color)
self.screen.blit(label, pos)
# reset the game to a the default state board
def reset(self):
# print("Reset")
self.winner = PLAYER_NONE
self.display_state = DisplayState(BOARD_ROWS, BOARD_COLS)
self.player_states[0] = P1GameState(BOARD_ROWS, BOARD_COLS)
self.player_states[1] = P2GameState(BOARD_ROWS, BOARD_COLS)
# This will execute a move when passed a new row/column location.
def do_move(self, move):
# print("about to do move")
player = self.display_state.player_to_move()
# print("do move player is ", player)
# print("self.players[player] is ", self.players[player])
# print("move is ", move)
# This if statement is used to change the selected index to the one alpha beta
# generated when it found the best move.
if self.players[player] != None:
# # print("AI temp_best_just_done_move is ", self.players[player].temp_best_just_done_move_B)
# # print("AI self.players[player].temp_best_selected_piece is ", self.players[player].temp_best_selected_piece_B)
# # print("AI self.players[player].temp_red_pieces_to_remove_list is ", self.players[player].temp_red_pieces_to_remove_list_B)
# print("move is ", move)
self.display_state.selected_piece = self.players[player].temp_best_selected_piece_B
self.player_states[0].selected_piece = self.players[player].temp_best_selected_piece_B
self.player_states[1].selected_piece = self.players[player].temp_best_selected_piece_B
# Updating the red pieces to remove list.
self.display_state.red_pieces_to_remove_list = self.players[player].temp_red_pieces_to_remove_list_B
self.player_states[0].red_pieces_to_remove_list = self.players[player].temp_red_pieces_to_remove_list_B
self.player_states[1].red_pieces_to_remove_list = self.players[player].temp_red_pieces_to_remove_list_B
# Updating the black pieces to remove list.
self.display_state.black_pieces_to_remove_list = self.players[player].temp_black_pieces_to_remove_list
self.player_states[0].black_pieces_to_remove_list = self.players[player].temp_black_pieces_to_remove_list
self.player_states[1].black_pieces_to_remove_list = self.players[player].temp_black_pieces_to_remove_list
# print("do move")
# Check for winner and do move.
self.winner = self.display_state.winner()
self.display_state.do_move(move)
self.player_states[0].do_move(move)
self.player_states[1].do_move(move)
# This function will do a basic move
def do_turn(self):
# # print("do turn")
self.winner = self.display_state.winner()
if self.winner == PLAYER_NONE: # there is no winner yet, so get the next move from the AI
player = self.display_state.player_to_move() # get the next player to move from the state
# # print("------ ", player)
if self.players[player] != None: # if the current player is an AI, get its move
# print("About to do turn")
if (player == 0):
# NOTE: If both uncommented, program will break.
# Uncomment out this line if you want a AB move.
# self.do_move(self.players[player].get_move(self.player_states[player])) # Get an alpha beta move.
# Uncomment out this line if you want a random move
self.do_move(self.players[player].get_random_move(self.player_states[player])) # Get a random move.
elif (player == 1):
# NOTE: If both uncommented, program will break.
# Uncomment out this line if you want a AB move.
self.do_move(self.players[player].get_move(self.player_states[player])) # Get an alpha beta move.
# Uncomment out this line if you want a random move
# self.do_move(self.players[player].get_random_move(self.player_states[player])) # Get a random mov
# Returns the tile (r,c) on the grid underneath a given mouse position in pixels
def get_tile(self, mpos):
return (mpos[1] // TILESIZE, mpos[0] // TILESIZE)
# This function will handle all user input handling.
def events(self):
# Loop through every event occuring.
for event in pg.event.get():
# If user hit the X button on window, then quit.
if event.type == pg.QUIT:
pg.quit()
quit()
# Check if a key is pressed down.
if event.type == pg.KEYDOWN:
# Reset board to starting state.
if event.key == pg.K_r: self.reset()
# ALL DEBUGGING STUFF.
# If left key pressed, move a black piece.
if event.key == pg.K_LEFT:
print("LEFT")
# self.do_move_by_index(self.black_piece_list, 9, LEGAL_BLACK_ACTIONS[1])
# If left key pressed, move a red piece.
if event.key == pg.K_RIGHT:
print("RIGHT")
# self.do_move_by_index(self.red_piece_list, 9, LEGAL_RED_ACTIONS[1])
# If D is pressed down, print debuging information
if event.key == pg.K_d:
# print("Debugging is cool")
player = self.display_state.player_to_move()
# # print("-- random is ", self.players[0].get_random_move(self.player_states[player]))
# print("Display state red pieces are ", self.display_state.red_piece_list)
# Check if a mousebutton is pressed down.
if event.type == pg.MOUSEBUTTONDOWN:
if pg.mouse.get_pressed()[0]:
move = self.get_tile(event.pos)
repositioned_row = move[0] - (BOARD_ROWS - 1)
move = (abs(repositioned_row), move[1])
# print("Move pressed is ", move)
red_p_moves = self.display_state.red_piece_potential_move_list
black_p_moves = self.display_state.black_piece_potential_move_list
# If player clicked on a potential move then go to | |
if False in checks.values():
raise ValueError('i2c read check failed: ' + repr(checks))
# current and max values
return int.from_bytes(ba[6:8], 'big'), int.from_bytes(ba[4:6], 'big')
@classmethod
def get_display_info(cls, display: Optional[Union[int, str]] = None) -> List[dict]:
'''
Returns information about detected displays by querying the various I2C buses
Args:
display (str or int): [*Optional*] The monitor to return info about.
Pass in the serial number, name, model, interface, edid or index.
This is passed to `filter_monitors`
Returns:
list: list of dicts
Example:
```python
import screen_brightness_control as sbc
# get info about all monitors
info = sbc.linux.I2C.get_display_info()
# EG output: [{'name': 'Benq GL2450H', 'model': 'GL2450H', 'manufacturer': 'BenQ', 'edid': '00ffff...'}]
# get info about the primary monitor
primary_info = sbc.linux.I2C.get_display_info(0)[0]
# get info about a monitor called 'Benq GL2450H'
benq_info = sbc.linux.I2C.get_display_info('Benq GL2450H')[0]
```
'''
all_displays = __cache__.get('i2c_display_info')
if all_displays is None:
all_displays = []
index = 0
for i2c_path in glob.glob('/dev/i2c-*'):
if not os.path.exists(i2c_path):
continue
try:
# open the I2C device using the host read address
device = cls.I2CDevice(i2c_path, cls.HOST_ADDR_R)
# read some 512 bytes from the device
data = device.read(512)
except IOError:
continue
# search for the EDID header within our 512 read bytes
start = data.find(bytes.fromhex('00 FF FF FF FF FF FF 00'))
if start < 0:
continue
# grab 128 bytes of the edid
edid = data[start: start + 128]
# parse the EDID
manufacturer_id, manufacturer, model, name, serial = EDID.parse(edid)
# convert edid to hex string
edid = ''.join(f'{i:02x}' for i in edid)
all_displays.append(
{
'name': name,
'model': model,
'manufacturer': manufacturer,
'manufacturer_id': manufacturer_id,
'serial': serial,
'method': cls,
'index': index,
'edid': edid,
'i2c_bus': i2c_path
}
)
index += 1
if all_displays:
__cache__.store('i2c_display_info', all_displays, expires=2)
if display is not None:
return filter_monitors(display=display, haystack=all_displays, include=['i2c_bus'])
return all_displays
@classmethod
def get_brightness(cls, display: Optional[int] = None) -> List[int]:
'''
Gets the brightness for a display by querying the I2C bus
Args:
display (int): The specific display you wish to query.
Returns:
list: list of ints (0 to 100)
Example:
```python
import screen_brightness_control as sbc
# get the current display brightness
current_brightness = sbc.linux.I2C.get_brightness()
# get the brightness of the primary display
primary_brightness = sbc.linux.I2C.get_brightness(display = 0)[0]
# get the brightness of the secondary display
secondary_brightness = sbc.linux.I2C.get_brightness(display = 1)[0]
```
'''
all_displays = cls.get_display_info()
if display is not None:
all_displays = [all_displays[display]]
results = []
for device in all_displays:
interface = cls.DDCInterface(device['i2c_bus'])
value, max_value = interface.getvcp(0x10)
            # make sure display's max brightness is cached
cache_ident = '%s-%s-%s' % (device['name'], device['model'], device['serial'])
if cache_ident not in cls._max_brightness_cache:
cls._max_brightness_cache[cache_ident] = max_value
if max_value != 100:
# if max value is not 100 then we have to adjust the scale to be
# a percentage
value = int((value / max_value) * 100)
results.append(value)
return results
@classmethod
def set_brightness(cls, value: int, display: Optional[int] = None):
'''
Sets the brightness for a display by writing to the I2C bus
Args:
value (int): Set the brightness to this value
display (int): The specific display you wish to adjust.
Example:
```python
import screen_brightness_control as sbc
# set the brightness to 50%
sbc.linux.I2C.set_brightness(50)
# set the primary display brightness to 75%
sbc.linux.I2C.set_brightness(75, display = 0)
# set the secondary display brightness to 25%
sbc.linux.I2C.set_brightness(25, display = 1)
```
'''
all_displays = cls.get_display_info()
if display is not None:
all_displays = [all_displays[display]]
for device in all_displays:
# make sure display brightness max value is cached
cache_ident = '%s-%s-%s' % (device['name'], device['model'], device['serial'])
if cache_ident not in cls._max_brightness_cache:
cls.get_brightness(display=device['index'])
# scale the brightness value according to the max brightness
max_value = cls._max_brightness_cache[cache_ident]
if max_value != 100:
value = int((value / 100) * max_value)
interface = cls.DDCInterface(device['i2c_bus'])
interface.setvcp(0x10, value)
class Light:
'''collection of screen brightness related methods using the light executable'''
executable: str = 'light'
'''the light executable to be called'''
@classmethod
def get_display_info(cls, display: Optional[Union[int, str]] = None) -> List[dict]:
'''
Returns information about detected displays as reported by Light.
It works by taking the output of `SysFiles.get_display_info` and
filtering out any displays that aren't supported by Light
Args:
display (str or int): [*Optional*] The monitor to return info about.
Pass in the serial number, name, model, interface, edid or index.
This is passed to `filter_monitors`
Returns:
list: list of dicts
Example:
```python
import screen_brightness_control as sbc
# get info about all monitors
info = sbc.linux.Light.get_display_info()
# EG output: [{'name': 'edp-backlight', 'path': '/sys/class/backlight/edp-backlight', edid': '00ffff...'}]
# get info about the primary monitor
primary_info = sbc.linux.Light.get_display_info(0)[0]
# get info about a monitor called 'edp-backlight'
edp_info = sbc.linux.Light.get_display_info('edp-backlight')[0]
```
'''
light_output = check_output([cls.executable, '-L']).decode()
displays = []
index = 0
for device in SysFiles.get_display_info():
# SysFiles scrapes info from the same place that Light used to
# so it makes sense to use that output
if device['path'].replace('/sys/class', 'sysfs') in light_output:
del device['scale']
device['light_path'] = device['path'].replace('/sys/class', 'sysfs')
device['method'] = cls
device['index'] = index
displays.append(device)
index += 1
if display is not None:
displays = filter_monitors(display=display, haystack=displays, include=['path', 'light_path'])
return displays
@classmethod
def set_brightness(cls, value: int, display: Optional[int] = None):
'''
Sets the brightness for a display using the light executable
Args:
value (int): Sets the brightness to this value
display (int): The specific display you wish to query.
Example:
```python
import screen_brightness_control as sbc
# set the brightness to 50%
sbc.linux.Light.set_brightness(50)
# set the primary display brightness to 75%
sbc.linux.Light.set_brightness(75, display = 0)
# set the secondary display brightness to 25%
sbc.linux.Light.set_brightness(25, display = 1)
```
'''
info = cls.get_display_info()
if display is not None:
info = [info[display]]
for i in info:
check_output(f'{cls.executable} -S {value} -s {i["light_path"]}'.split(" "))
@classmethod
def get_brightness(cls, display: Optional[int] = None) -> List[int]:
'''
Gets the brightness for a display using the light executable
Args:
display (int): The specific display you wish to query.
Returns:
list: list of ints (0 to 100)
Example:
```python
import screen_brightness_control as sbc
# get the current display brightness
current_brightness = sbc.linux.Light.get_brightness()
# get the brightness of the primary display
primary_brightness = sbc.linux.Light.get_brightness(display = 0)[0]
# get the brightness of the secondary display
edp_brightness = sbc.linux.Light.get_brightness(display = 1)[0]
```
'''
info = cls.get_display_info()
if display is not None:
info = [info[display]]
results = []
for i in info:
results.append(
check_output([cls.executable, '-G', '-s', i['light_path']])
)
results = [int(round(float(i.decode()), 0)) for i in results]
return results
class XRandr:
'''collection of screen brightness related methods using the xrandr executable'''
executable: str = 'xrandr'
'''the xrandr executable to be called'''
@classmethod
def get_display_info(cls, display: Optional[Union[int, str]] = None, brightness: bool = False) -> List[dict]:
'''
Returns info about all detected monitors as reported by xrandr
Args:
display (str or int): [*Optional*] The monitor to return info about.
Pass in the serial number, name, model, interface, edid or index.
This is passed to `filter_monitors`
brightness (bool): whether to include the current brightness
in the returned info
Returns:
list: list of dicts
Example:
```python
import screen_brightness_control as sbc
info = sbc.linux.XRandr.get_display_info()
for i in info:
print('================')
for key, value in i.items():
print(key, ':', value)
# get information about the first XRandr addressable monitor
primary_info = sbc.linux.XRandr.get_display_info(0)[0]
# get information about a monitor with a specific name
benq_info = sbc.linux.XRandr.get_display_info('BenQ GL2450HM')[0]
```
'''
def check_display(display):
if display:
if 'line' in display:
del display['line']
return display['serial'] is None or '\\x' not in display['serial']
return False
xrandr_output = check_output([cls.executable, '--verbose']).decode().split('\n')
valid_displays = []
display_count = 0
tmp_display = {}
for line_index, line in enumerate(xrandr_output):
if line == '':
continue
if not line.startswith((' ', '\t')) and 'connected' in line and 'disconnected' not in line:
if check_display(tmp_display):
valid_displays.append(tmp_display)
tmp_display = {
'interface': line.split(' ')[0],
'name': line.split(' ')[0],
'line': line,
'method': cls,
'index': display_count,
'model': None,
'serial': None,
'manufacturer': None,
'manufacturer_id': None,
'edid': None
}
display_count += 1
elif 'EDID:' in line:
# extract the edid from the chunk of the output that will contain the edid
edid = ''.join(
i.replace('\t', '') for i in xrandr_output[line_index + 1: line_index + 9]
)
tmp_display['edid'] = edid
| |
# Why is there a difference between training and inference (forward-pass prediction)?
# In the earlier code that used the tf.layers.batch_normalization function, we told each layer whether we were training the network by passing in a 'training' parameter
# eg:batch_normalized_output = tf.layers.batch_normalization(linear_output, training=self.is_training)
# As a result, we have to supply a self.is_training value in the feed_dict whenever we feed data into the network, for example in NeuralNet's train method:
# session.run(train_step, feed_dict={self.input_layer: batch_xs,
#                                    labels: batch_ys,
#                                    self.is_training: True})
# If you look closely at the lower-level implementation that does not use tf.layers.batch_normalization (i.e. the one using tf.nn.batch_normalization),
# training and inference differ there as well. But where does that difference come from?
# First, let's look at what happens when there is no difference between training and inference.
# The function below is similar to the earlier train_and_test, but this time we only test one network and, instead of plotting its accuracy, we run 200 predictions on the test inputs, one input at a time.
# We can use the test_training_accuracy parameter to control whether the network behaves as if it is training or inferring (equivalent to passing True or False for the is_training parameter in the feed_dict).
import tensorflow as tf
import tqdm
import numpy as np
import matplotlib.pyplot as plt
# Import MNIST data so we have something for our experiments
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
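# Minimal sketch of the training-vs-inference difference described in the comments above
# (illustrative only, never called by the class below): during training the batch's own mean
# and variance are fed to tf.nn.batch_normalization, while at inference time the population
# estimates accumulated during training are used instead. In the real class `is_training` is
# a placeholder, so tf.cond would be needed rather than a Python if.
def _batch_norm_train_vs_inference(linear_output, pop_mean, pop_var, gamma, beta, is_training):
    epsilon = 1e-3
    if is_training:
        batch_mean, batch_var = tf.nn.moments(linear_output, [0])
        return tf.nn.batch_normalization(linear_output, batch_mean, batch_var, beta, gamma, epsilon)
    return tf.nn.batch_normalization(linear_output, pop_mean, pop_var, beta, gamma, epsilon)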
# The network is built with a NeuralNet class that is the same as the NeuralNet class in the Batch_Normalization_Lesson_with_tf_layers file
class NeuralNet:
def __init__(self, initial_weights, activation_fn, use_batch_norm):
"""
Initializes this object, creating a TensorFlow graph using the given parameters.
:param initial_weights: list of NumPy arrays or Tensors
Initial values for the weights for every layer in the network. We pass these in
so we can create multiple networks with the same starting weights to eliminate
training differences caused by random initialization differences.
The number of items in the list defines the number of layers in the network,
and the shapes of the items in the list define the number of nodes in each layer.
e.g. Passing in 3 matrices of shape (784, 256), (256, 100), and (100, 10) would
create a network with 784 inputs going into a hidden layer with 256 nodes,
followed by a hidden layer with 100 nodes, followed by an output layer with 10 nodes.
:param activation_fn: Callable
The function used for the output of each hidden layer. The network will use the same
activation function on every hidden layer and no activate function on the output layer.
e.g. Pass tf.nn.relu to use ReLU activations on your hidden layers.
:param use_batch_norm: bool
Pass True to create a network that uses batch normalization; False otherwise
Note: this network will not use batch normalization on layers that do not have an
activation function.
"""
# Keep track of whether or not this network uses batch normalization.
self.use_batch_norm = use_batch_norm
self.name = "With Batch Norm" if use_batch_norm else "Without Batch Norm"
# Batch normalization needs to do different calculations during training and inference,
# so we use this placeholder to tell the graph which behavior to use.
self.is_training = tf.placeholder(tf.bool, name="is_training")
# This list is just for keeping track of data we want to plot later.
# It doesn't actually have anything to do with neural nets or batch normalization.
self.training_accuracies = []
# Create the network graph, but it will not actually have any real values until after you
# call train or test
self.build_network(initial_weights, activation_fn)
def build_network(self, initial_weights, activation_fn):
"""
Build the graph. The graph still needs to be trained via the `train` method.
:param initial_weights: list of NumPy arrays or Tensors
See __init__ for description.
:param activation_fn: Callable
See __init__ for description.
"""
self.input_layer = tf.placeholder(tf.float32, [None, initial_weights[0].shape[0]])
        # If initial_weights is e.g. a list with shapes [(784, 256), (256, 100), (100, 10)], then initial_weights[0].shape = (784, 256)
        layer_in = self.input_layer  # 784 is the number of units in the first (input) layer
for weights in initial_weights[:-1]:
layer_in = self.fully_connected(layer_in, weights, activation_fn)
        # Since this call happens inside the for loop and uses initial_weights[:-1],
        # we need the definition of initial_weights as well as of fully_connected and its return value:
        # the value fully_connected returns for this layer is passed to the next layer as its input.
        # The last layer is defined separately below; initial_weights[-1] gives the number of output-layer units.
self.output_layer = self.fully_connected(layer_in, initial_weights[-1])
def fully_connected(self, layer_in, initial_weights, activation_fn=None):
"""
Creates a standard, fully connected layer. Its number of inputs and outputs will be
defined by the shape of `initial_weights`, and its starting weight values will be
taken directly from that same parameter. If `self.use_batch_norm` is True, this
layer will include batch normalization, otherwise it will not.
:param layer_in: Tensor
The Tensor that feeds into this layer. It's either the input to the network or the output
of a previous layer.
:param initial_weights: NumPy array or Tensor
Initial values for this layer's weights. The shape defines the number of nodes in the layer.
e.g. Passing in 3 matrix of shape (784, 256) would create a layer with 784 inputs and 256
outputs.
:param activation_fn: Callable or None (default None)
The non-linearity used for the output of the layer. If None, this layer will not include
batch normalization, regardless of the value of `self.use_batch_norm`.
e.g. Pass tf.nn.relu to use ReLU activations on your hidden layers.
"""
# Since this class supports both options, only use batch normalization when
# requested. However, do not use it on the final layer, which we identify
# by its lack of an activation function.
if self.use_batch_norm and activation_fn:
# Batch normalization uses weights as usual, but does NOT add a bias term. This is because
# its calculations include gamma and beta variables that make the bias term unnecessary.
# (See later in the notebook for more details.)
weights = tf.Variable(initial_weights)
            linear_output = tf.matmul(layer_in, weights)  # linear output
# Apply batch normalization to the linear combination of the inputs and weights
batch_normalized_output = tf.layers.batch_normalization(linear_output, training=self.is_training)
# Now apply the activation function, *after* the normalization.
return activation_fn(batch_normalized_output)
else:
# When not using batch normalization, create a standard layer that multiplies
# the inputs and weights, adds a bias, and optionally passes the result
# through an activation function.
weights = tf.Variable(initial_weights)
            biases = tf.Variable(tf.zeros([initial_weights.shape[-1]]))  # initial_weights.shape[-1] is the number of output units of this layer
linear_output = tf.add(tf.matmul(layer_in, weights), biases)
            # Return the linear output directly if there is no activation function; otherwise apply the activation function first
return linear_output if not activation_fn else activation_fn(linear_output)
def train(self, session, learning_rate, training_batches, batches_per_sample, save_model_as=None):
"""
Trains the model on the MNIST training dataset.
:param session: Session
Used to run training graph operations.
:param learning_rate: float
Learning rate used during gradient descent.
:param training_batches: int
Number of batches to train.
:param batches_per_sample: int
How many batches to train before sampling the validation accuracy.
:param save_model_as: string or None (default None)
Name to use if you want to save the trained model.
"""
# This placeholder will store the target labels for each mini batch
labels = tf.placeholder(tf.float32, [None, 10])
# Define loss and optimizer
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=self.output_layer))
# Define operations for testing
correct_prediction = tf.equal(tf.argmax(self.output_layer, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
if self.use_batch_norm:
# If we don't include the update ops as dependencies on the train step, the
# tf.layers.batch_normalization layers won't update their population statistics,
# which will cause the model to fail at inference time
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
else:
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
# Train for the appropriate number of batches. (tqdm is a progress-bar module, used only for a nice timing display)
for i in tqdm.tqdm(range(training_batches)):
# We use batches of 60 just because the original paper did. You can use any size batch you like.
batch_xs, batch_ys = mnist.train.next_batch(60)
# The session is passed into train() so that ops can be run directly on the caller's current session
session.run(train_step, feed_dict={self.input_layer: batch_xs,
labels: batch_ys,
self.is_training: True})
# Periodically test accuracy against the 5k validation images and store it for plotting later.
if i%batches_per_sample == 0:
test_accuracy = session.run(accuracy, feed_dict={self.input_layer: mnist.validation.images,
labels: mnist.validation.labels,
self.is_training: False})
self.training_accuracies.append(test_accuracy)
# After training, report accuracy against test data
test_accuracy = session.run(accuracy, feed_dict={self.input_layer: mnist.validation.images,
labels: mnist.validation.labels,
self.is_training: False})
print('{}: After training, final accuracy on validation set = {}'.format(self.name, test_accuracy))
# If you want to use this model later for inference instead of having to retrain it,
# just construct it with the same parameters and then pass this file to the 'test' function
if save_model_as:
tf.train.Saver().save(session, save_model_as)
# Save the model only if save_model_as was provided
def test(self, session, test_training_accuracy=False, include_individual_predictions=False, restore_from=None):
"""
Tests a trained model on the MNIST testing dataset.
:param session: Session
Used to run the testing graph operations.
:param test_training_accuracy: bool (default False)
If True, perform inference with batch normalization using batch mean and variance;
if False, perform inference with batch normalization using estimated population mean and variance.
Note: in real life, *always* perform inference using the population mean and variance.
This parameter exists just to support demonstrating what happens if | |
if domain+"--"+slot_type in dom_type_list:
break
# not labeled
last_turn = turn["context"].split("<system>")[-1]
sys_utt, user_utt = last_turn.split("<user>")
slots_string = ", ".join([" ".join(slot) for slot in turn["slots"]])
if domain == "hotel":
if "hotel" not in last_turn and ("guesthouse" in user_utt or "guest house" in user_utt):
# works 14 for user
pass
self.update_tmp_log(idx, turn, last_turn, domain, slot_type, "guesthouse")
elif "hotel" in user_utt \
and not ("guesthouse" in last_turn or "guest house" in last_turn):
if not ("the hotel" in last_turn \
and "the hotel should" not in user_utt \
and "looking for the hotel" not in user_utt) \
and "car " not in last_turn \
and "taxi" not in last_turn \
and "type" not in last_turn :
# works for 134
self.update_tmp_log(idx, turn, last_turn, domain, slot_type, "hotel")
elif "hotel" in sys_utt \
and not ("guesthouse" in last_turn or "guest house" in last_turn):
if "the hotel" not in last_turn \
and "that hotel" not in last_turn \
and "taxi" not in last_turn:
# works 107 for sys
self.update_tmp_log(idx, turn, last_turn, domain, slot_type, "hotel")
if domain == "attraction":
# work for 146 turns; for user side 39; for sys side 107
poss_value = {"museum":"museum",
"college":"college",
"nightclub":"nightclub",
"architecture":"architecture",
"entertainment":"entertainment",
"theatre":"theatre",
"park":"park",
"swimmingpool":"swimmingpool",
"boat":"boat",
"cinema":"cinema",
"multiple sports":"multiple sports",
"concerthall":"concerthall",
"hiking":"hiking",
"night club":"nightclub",
"theater":"theatre",
"swimming pool":"swimmingpool",
"architectural":"architecture",
"entertain":"entertainment",
"concert hall":"concerthall",
}
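# The keys above are surface forms that may appear in an utterance and the values are the canonical
# attraction types used as labels, e.g. both "night club" and "nightclub" map to "nightclub".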
flag = 0
for value in poss_value:
if value in user_utt \
and "taxi" not in last_turn \
and "parking" not in last_turn:
flag = 1
# pass
label_value = poss_value[value]
self.update_tmp_log(idx, turn, last_turn, domain, slot_type, label_value)
# break
if flag == 1:
continue
for value in poss_value:
if value in sys_utt \
and "parking" not in last_turn \
and not ("leisure park" in last_turn or "park street" in last_turn or "parkside" in last_turn):
label_value = poss_value[value]
self.update_tmp_log(idx, turn, last_turn, domain, slot_type, label_value)
if idx in self.tmp_log and "|" in self.tmp_log[idx]["add_slot"]:
slot_values = self.tmp_log[idx]["add_slot"].split(" | ")
slot_values[0] = slot_values[0].split()[-1]
if "attraction name" in " ".join([" ".join(slot) for slot in turn["slots"]]):
if idx == "sng1105.json-2":
# self.tmp_log[idx]["add_slot"] = f"attraction type entertainment"
self.update_tmp_log(idx, turn, last_turn, domain, slot_type, "entertainment")
if idx == "mul2466.json-3":
# self.tmp_log[idx]["add_slot"] = f"attraction type nightclub"
self.update_tmp_log(idx, turn, last_turn, domain, slot_type, "nightclub")
else:
if idx == "pmul2272.json-5":
# self.tmp_log[idx]["add_slot"] = "attraction type cinema"
self.update_tmp_log(idx, turn, last_turn, domain, slot_type, "cinema")
elif idx not in ["pmul1455.json-1"]:
del self.tmp_log[idx]
# self.tmp_log[idx]["add_slot"] = f"{domain} {slot_type} "
self._save_tmp_log()
def Count_Dest_Depa_Info(self):
wi_type_set = {
"taxi": set(),
"train": set(),
}
wo_type_set = {
"taxi": set(),
"train": set(),
}
tmp = {
"taxi": set(),
"train": set(),
}
for domain in tmp:
for slot_type in ["departure", "destination"]:
self.tmp_log = {}
self.tmp_log_path = ".".join(self.data_path.split(".")[:-1]) + f"_{domain}_{slot_type}.json"
self._load_otgy()
poss_value = self.otgy[f"{domain}--{slot_type}"]
for idx, turn in self.data.items():
slots_str = ", ".join([" ".join(slot) for slot in turn["slots"]])
for dom in [domain]:
if dom in turn["domains"]:
if dom+" "+slot_type in slots_str:
wi_type_set[dom].add(turn["dial_id"])
else:
wo_type_set[dom].add(turn["dial_id"])
tmp_union = set()
for dom in tmp:
tmp[dom] = wo_type_set[dom] - wi_type_set[dom]
# tmp[dom] = wi_type_set[dom]
tmp_union = tmp_union.union(tmp[dom])
tmp_union = sorted(list(tmp_union))
domain_related = sorted(list(wo_type_set[domain].union(wi_type_set[domain])))
for dial_id in domain_related:
for turn_num in range(30):
idx = dial_id + "-" + str(turn_num)
if idx in self.data:
turn = self.data[idx]
else:
continue
# already labeled
dom_type_list = [slot[0]+"--"+slot[1] for slot in turn["slots"]]
if domain+"--"+slot_type in dom_type_list:
break
# not labeled
last_turn = turn["context"].split("<system>")[-1]
sys_utt, user_utt = last_turn.split("<user>")
slots_string = ", ".join([" ".join(slot) for slot in turn["slots"]])
flag = 0
if domain == "train" and slot_type == "departure":
# in total for 11
for value in poss_value:
label_value = value
if " "+value in user_utt:
if value not in slots_string \
and re.search(r"((leave|depart)s?|from) "+value, user_utt):
# # work for 2
flag = 1
pass
self.update_tmp_log(idx, turn, last_turn, domain, slot_type, label_value)
if flag == 1:
continue
for value in poss_value:
label_value = value
if value in sys_utt:
if value not in slots_string \
and re.search(r"((leave|depart)s?|from) "+value, sys_utt):
# # work for 9
pass
self.update_tmp_log(idx, turn, last_turn, domain, slot_type, label_value)
if domain == "train" and slot_type == "destination":
# in total for 2
for value in poss_value:
label_value = value
if value in user_utt:
if value not in slots_string \
and "to "+value in user_utt:
# # work for 0
flag = 1
pass
self.update_tmp_log(idx, turn, last_turn, domain, slot_type, label_value)
if flag == 1:
continue
for value in poss_value:
label_value = value
if value in sys_utt:
if value not in slots_string \
and re.search(r"(arrives?|arrives? (in|at)|to) "+value, sys_utt):
# # work for 2
pass
self.update_tmp_log(idx, turn, last_turn, domain, slot_type, label_value)
if domain == "taxi" and slot_type == "departure":
# in total for
for value in poss_value:
label_value = value
if " "+value in user_utt:
if value not in slots_string \
and re.search(r"((leave|depart)s?|from) "+value, user_utt):
# # work for 2
if value == "the hotel" or value == "the restaurant":
ref_dom = value.split()[-1]
for slot in turn["slots"]:
if slot[0] == ref_dom and slot[1] == "name":
label_value = slot[2]
flag = 1
pass
self.update_tmp_log(idx, turn, last_turn, domain, slot_type, label_value)
break
else:
flag = 1
pass
self.update_tmp_log(idx, turn, last_turn, domain, slot_type, label_value)
if flag == 1:
continue
for value in poss_value:
label_value = value
if value in sys_utt:
if value not in slots_string \
and 'taxi' in sys_utt \
and re.search(r"((leave|depart)s?|from) "+value, sys_utt):
# # work for 9
if value == "the hotel" or value == "the restaurant":
ref_dom = value.split()[-1]
for slot in turn["slots"]:
if slot[0] == ref_dom and slot[1] == "name":
label_value = slot[2]
flag = 1
pass
self.update_tmp_log(idx, turn, last_turn, domain, slot_type, label_value)
break
else:
pass
self.update_tmp_log(idx, turn, last_turn, domain, slot_type, label_value)
if domain == "taxi" and slot_type == "destination":
poss_value["the hotel"] = 5
# in total for
weird_value_list = ["star", "ask", "cambridge"]
for value in poss_value:
label_value = value
if value in user_utt:
if value not in slots_string \
and value not in weird_value_list \
and poss_value[value] > 1 \
and 'taxi' in user_utt \
and "to "+value in user_utt:
# # work for
if value == "the hotel" or value == "the restaurant":
ref_dom = value.split()[-1]
for slot in turn["slots"]:
if slot[0] == ref_dom and slot[1] == "name":
label_value = slot[2]
flag = 1
pass
self.update_tmp_log(idx, turn, last_turn, domain, slot_type, label_value)
break
else:
flag = 1
pass
self.update_tmp_log(idx, turn, last_turn, domain, slot_type, label_value)
if flag == 1:
continue
for value in poss_value:
label_value = value
if value in sys_utt:
if value not in slots_string \
and value not in weird_value_list \
and poss_value[value] > 1 \
and 'taxi' in sys_utt \
and re.search(r"(arrives?|arrives? (in|at)| to) "+value, sys_utt):
# # work for 2
if value == "the hotel" or value == "the restaurant":
ref_dom = value.split()[-1]
for slot in turn["slots"]:
if slot[0] == ref_dom and slot[1] == "name":
label_value = slot[2]
flag = 1
pass
self.update_tmp_log(idx, turn, last_turn, domain, slot_type, label_value)
break
else:
pass
self.update_tmp_log(idx, turn, last_turn, domain, slot_type, label_value)
self._save_tmp_log()
def Count_Area_Info(self):
wi_type_set = {
"attraction": set(),
"hotel": set(),
"restaurant": set(),
}
wo_type_set = {
"attraction": set(),
"hotel": set(),
"restaurant": set(),
}
tmp = {
"attraction": set(),
"hotel": set(),
"restaurant": set(),
}
for domain in tmp:
slot_type = "area"
self.tmp_log = {}
self.tmp_log_path = ".".join(self.data_path.split(".")[:-1]) + f"_{domain}_{slot_type}.json"
for idx, turn in self.data.items():
slots_str = ", ".join([" ".join(slot) for slot in turn["slots"]])
for dom in [domain]:
if dom in turn["domains"]:
if dom+" "+slot_type in slots_str:
wi_type_set[dom].add(turn["dial_id"])
else:
wo_type_set[dom].add(turn["dial_id"])
tmp_union = set()
for dom in tmp:
tmp[dom] = wo_type_set[dom] - wi_type_set[dom]
# tmp[dom] = wi_type_set[dom]
tmp_union = tmp_union.union(tmp[dom])
tmp_union = sorted(list(tmp_union))
domain_related = sorted(list(wo_type_set[domain].union(wi_type_set[domain])))
for dial_id in domain_related:
for turn_num in range(30):
idx = dial_id + "-" + str(turn_num)
if idx in self.data:
turn = self.data[idx]
else:
continue
# already labeled
dom_type_list = [slot[0]+"--"+slot[1] for slot in turn["slots"]]
if domain+"--"+slot_type in dom_type_list:
break
# not labeled
last_turn = turn["context"].split("<system>")[-1]
sys_utt, user_utt = last_turn.split("<user>")
slots_string = ", ".join([" ".join(slot) | |
self.tunnel["destination"]:
if self.rtr.isCPE():
id = "%s-%s-%s" % (
"tunnel",
self.tunnel["destination"],
self.tunnel["source"],
)
else:
id = "%s-%s-%s" % (
"tunnel",
self.tunnel["source"],
self.tunnel["destination"],
)
return id
## Procedure to override Product info based on extra parameters
## this should come after all the info about the interface, router, PE is known
def UpdateProductInfo(self):
self.product_obj.ProductByInterface(self.intf)
self.product_obj.ProductByIp(self.ip)
# self._find_product_by_pe_interface()
# override automatically found product info
if "ppp_user" in self.vdsl2:
self.product_obj.UpdateProduct("transmission", "VDSL", "'VDSL2 user info'")
## try to determine the product or interface type based on pe interface
## only needed for PE to PE connections so skip it if this is a CPE interface
## TODO: CORRECT, only for PE to PE???
def UpdateProductInfoPE(self):
if self.rtr.isCPE():
log.debug("CPE interface - no need to guess product based on PE interface")
return
else:
self.product_obj.ProductByPEIntf(self.rtr.GetProp("hostname"), self.intf)
# if not p: return
# self.product = p['product']
# self.type = p['type']
# self.function = p['function']
## main parser function
def parse(self, intfobj):
m = self.reIntf.match(intfobj.text)
if not (m and len(m.groups()) > 0):
log.error("interface was not found in '%s'" % intfobj.text)
else:
# self._parse_interface_name(m.groups()[0])
self._parse_interface_name(m.group("INTF"))
for l in intfobj.children:
log.debug("** INTERFACE CONFIG LINE: {}".format(l))
# description
m = self.reDescr.match(l.text)
if m:
self._parse_description(m.group(1))
continue
# bandwidth
m = self.reBW.match(l.text)
if m:
self._parse_intf_bw(m.group(1))
continue
# ip address
m = self.reIp.match(l.text)
if m:
self._parse_ipaddr(l.text, m)
continue
# VRF
m = self.reVRF.match(l.text)
if m:
self._parse_vrf(m.group("VRF"))
continue
# service-policy
m = self.rePolicy.match(l.text)
if m:
self._parse_service_policy(l.text, m)
continue
# dot1q
m = self.reDot1q.match(l.text)
if m:
self._parse_dot1q(m.group(1))
continue
# interface speed (auto-10-100)
m = self.reSpeed.match(l.text)
if m:
self._parse_speed(m.group(1))
continue
# interface duplex
# m = self.reDuplex.match(l.text)
# if m:
# self._parse_duplex(m.group(1))
# continue
# HSRP standby
m = self.reStandby.match(l.text)
if m:
self._parse_standby(l.text, m)
continue
# ACL access-group
m = self.reAccessGroup.match(l.text)
if m:
self._parse_access_group(l.text, m)
continue
# NAT
m = self.reNat.match(l.text)
if m:
self._parse_nat(l.text, m)
continue
m = self.reVDSL2SharedVlanPPP.match(l.text)
if m:
self._parse_vdls2_shared_ppp(l.text, m)
continue
m = self.reVDSL2SharedVlanPPPuser.match(l.text)
if m:
self._parse_vdls2_shared_ppp_user(l.text, m)
continue
m = self.reVDSL2SharedVlanPPPpassword.match(l.text)
if m:
self._parse_vdls2_shared_ppp_password(l.text, m)
continue
m = self.reIpHelper.match(l.text)
if m:
self._parse_iphelper(m)
continue
m = self.rePvc.match(l.text)
if m:
self._parse_pvc(m)
continue
m = self.reTunnel.match(l.text)
if m:
self._parse_tunnel(l.text)
continue
m = self.reTunnelHubIp.match(l.text)
if m:
self._parse_nhrp_hubip(m)
continue
else:
## catch-all rule for debugging
log.debug("XXX skip: %s" % l.text)
# for VDSL PPP Sessions it's possible that there is no IP address information
# inside the virtual-template (IP is obtained dynamically)
# In this case we will assume that the received IP is a /30 and we recalculate
# the ip address information here
if self.ip and self.vdsl2.get("ppp_user", None) and not self.network:
log.debug(
"IP subnet {}/30 is assumed because VDSL PPP ip was received dynamically".format(
self.ip
)
)
calculated_ip = "ip address {} 255.255.255.252".format(self.ip)
m = self.reIp.match(calculated_ip)
if m:
self._parse_ipaddr(calculated_ip, m)
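# A /30 mask (255.255.255.252) leaves exactly two usable host addresses, which is why it is a
# reasonable assumption for a point-to-point PPP link whose peer address is negotiated dynamically.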
## parse ip nhrp info
def _parse_nhrp_hubip(self, m):
self.tunnel["hubip"] = m.group("IP")
log.debug("--> tunnel hub ip found: %s" % m.group("IP"))
## parse tunnel info
def _parse_tunnel(self, line):
m = self.reTunnelVrf.match(line)
if m:
self.tunnel["vrf"] = m.group("VRF")
log.debug("--> tunnel vrf found: %s" % m.group("VRF"))
return
m = self.reTunnelKey.match(line)
if m:
self.tunnel["key"] = m.group("KEY")
log.debug("--> tunnel key found: %s" % m.group("KEY"))
return
m = self.reTunnelSource.match(line)
if m:
self.tunnel["source"] = m.group("SRC")
log.debug("--> tunnel source found: %s" % m.group("SRC"))
return
m = self.reTunnelDestination.match(line)
if m:
self.tunnel["destination"] = m.group("DST")
log.debug("--> tunnel destination found: %s" % m.group("DST"))
return
m = self.reTunnelIpsecProfile.match(line)
if m:
self.tunnel["ipsecprofile"] = m.group("PROFILE")
log.debug("--> tunnel ipsec profile found: %s" % m.group("PROFILE"))
return
## parse PVC
def _parse_pvc(self, m):
self.pvc = m.group("PVC")
log.debug("--> PVC found: %s" % m.group("PVC"))
## parse IP Helpers
def _parse_iphelper(self, m):
self.iphelpers.append(m.group("HELPER"))
log.debug("--> IP Helper address found: %s" % (m.group("HELPER")))
## parse VDSL2 shared vlan ppp
def _parse_vdls2_shared_ppp(self, text, m):
self.vdsl2["ppp_user"] = m.group("USER")
self.vdsl2["ppp_pass"] = m.group("PASS")
log.debug(
"--> VDLS2 shared vlan PPP found: %s (%s)"
% (self.vdsl2["ppp_user"], self.vdsl2["ppp_pass"])
)
## parse VDSL2 shared vlan ppp username
def _parse_vdls2_shared_ppp_user(self, text, m):
self.vdsl2["ppp_user"] = m.group("USER")
log.debug(
"--> VDLS2 shared vlan PPP username found: {}".format(
self.vdsl2["ppp_user"]
)
)
## parse VDSL2 shared vlan ppp password
def _parse_vdls2_shared_ppp_password(self, text, m):
self.vdsl2["ppp_pass"] = m.group("PASS")
log.debug(
"--> VDLS2 shared vlan PPP password found: {}".format(
self.vdsl2["ppp_pass"]
)
)
## parse NAT
def _parse_nat(self, text, m):
if m.group("DIR") == "inside":
self.nat = self.nat | self.NAT_IN
# self.nat_in = True
else:
# self.nat_out = True
self.nat = self.nat | self.NAT_OUT
log.debug("--> NAT found: %s" % self.nat)
## parse ACL access-group
def _parse_access_group(self, text, m):
if m.group("DIR") == "in":
self.acl_in = m.group("ACL")
log.debug("--> ACL INBOUND found: %s" % self.acl_in)
else:
self.acl_out = m.group("ACL")
log.debug("--> ACL OUTBOUND found: %s" % self.acl_out)
## parse HSRP standby group
def _parse_standby(self, text, m):
if not m.group("ID") in self.standby:
self.standby[m.group("ID")] = {}
self.standby[m.group("ID")][m.group("CMD")] = m.group("PARM")
log.debug(
"--> HSRP line found: id=%s cmd=%s parm=%s"
% (m.group("ID"), m.group("CMD"), m.group("PARM"))
)
## parse interface duplex
def _parse_duplex(self, text):
self.duplex = text
log.debug("--> interface duplex: %s" % self.duplex)
## parse interface speed
def _parse_speed(self, text):
self.speed = text
log.debug("--> interface speed: %s" % self.speed)
## parse dot1q
def _parse_dot1q(self, text):
self.vlan = text
log.debug("--> DOT1Q/vlan id found: %s" % self.vlan)
## parse service-policy
def _parse_service_policy(self, text, m):
if m.group("DIR") == "input":
self.policy_in = m.group("POL")
log.debug("--> INPUT QOS Policy found: %s" % self.policy_in)
else:
self.policy_out = m.group("POL")
log.debug("--> OUTPUT QOS Policy found: %s" % self.policy_out)
## parse VRF info
def _parse_vrf(self, text):
self.vrf = text
log.debug("--> VRF found: %s" % self.vrf)
## parse IP addresses
def _parse_ipaddr(self, text, m):
# print(m.group('IP'), m.group('MASK'))
# net = IPv4Network("%s/%s" % (m.group('IP'), m.group('MASK')))
net = ipaddress.ip_interface("{}/{}".format(m.group("IP"), m.group("MASK")))
log.debug("--> ip address found:")
ip = "%s/%s" % (net.ip, net.network.prefixlen)
if not m.group("SEC"):
self.ip = ip
self.network = net.network
self.mask = net.netmask
log.debug("----> IP: %s" % self.ip)
log.debug("----> NETWORK: %s" % self.network)
log.debug("----> MASK: %s" % self.mask)
else:
self.secip.append(ip)
log.debug("----> SECONDARY IP: %s" % ip)
## parse the interface name (FastEthernet0/0, ATM0.200, ...)
def _parse_interface_name(self, text):
self.intf = text
log.debug("-> interface found: %s" % self.intf)
# check for vlan id
m = re.match("vlan *([0-9]+)", self.intf, re.IGNORECASE)
if m:
self.vlan = m.group(1)
log.debug("--> vlan found: %s" % self.vlan)
# check for subinterface
m = re.match(".*\.(.*)", self.intf)
if m:
self.subint = m.group(1)
log.debug("--> subinterface found: %s" % self.subint)
## parse the interface description
def _parse_description(self, text):
self.descr = text
log.debug("--> description found: %s" % self.descr)
## try to find hostname in description
m = re.match(self.reDescrHostnameGuess, text)
if m:
h = m.group("HOSTNAME")
if self.rtr.isCPE():
self.pe_guess = h
else:
self.hostname_guess = h
log.debug("--> alternative hostname found in description: %s" % h)
## get all VT references
m = re.findall(self.reDescrVtRef, text)
if m:
self.vt = m
log.debug("--> found VT references in description: %s" % m)
## get all the VT references based on the PE objects
def GetVTFromPEInterfaces(self):
allvt = []
# [ allvt.append(o.vt) for o in self.pe_intf_objects ]
[allvt.append(vt) for o in self.pe_intf_objects for vt in o.vt]
return allvt
## parse interface bandwidth
def _parse_intf_bw(self, text):
self.intfbw = text
log.debug("--> bandwidth statement: %s" % self.intfbw)
## add PE interface object
def add_pe_intf(self, obj):
## TODO: check for duplicates
self.pe_intf_objects.append(obj)
log.debug(
"---> ADD PE data to CPE interface object: network=%s, PE=%s"
% (self.network, obj.rtr.GetProp("hostname"))
)
## update the hostname_guess value (just overwrite it every time a value is found)
if obj.hostname_guess:
log.debug(
"---> OVERRIDE HOSTNAME_GUESS value: old=%s, new=%s"
% (self.hostname_guess, obj.hostname_guess)
)
self.hostname_guess = obj.hostname_guess
## class to separate code for guessing the Mobistar Product based on IP address, description, hostname, etc
class Product(object):
rfc1918 = ["10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"]
def __init__(self):
log.debug("new Product object")
self.function = "NONE" # MGMT/LAN/WAN/NNI/OTHER
self.type = "NONE" # P2P/LOOPBACK/IPSEC/IPSEC-MGMT/OTHER
self.product = "NONE" # IP-VPN/CI/IP-UPLINK/VOIPT
self.transmission = "NONE" # ATM/ETHERNET/VDSL/EXPLORE/3G/4G/INTERNET
def UpdateProduct(self, key, value, reason):
log.debug("update product info by %reason: %s = %s" % (reason, key, value))
setattr(self, key, value)
def ProductByInterface(self, intf):
transmission = {
"loopback": {"transmission": "VIRTUAL", "type": "LOOPBACK"},
"dialer": {"transmission": "VIRTUAL", "type": "DIALER"},
"atm": {"transmission": "ATM"},
"cellular": {"transmission": "MOBILE", "type": "CELLULAR"},
"ppp": {"transmission": "VDSL", "type": "PPP"},
"ethernet": {"transmission": "ETHERNET"},
"bundle-ether": {"transmission": "ETHERNET"},
"tengigE": {"transmission": "ETHERNET"},
"port-channel": {"transmission": | |
# coding: utf-8
"""
validateapi
The validation APIs help you validate data. Check if an E-mail address is real. Check if a domain is real. Check up on an IP address, and even where it is located. All this and much more is available in the validation API. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from cloudmersive_validate_api_client.api_client import ApiClient
class EmailApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def email_address_get_servers(self, email, **kwargs): # noqa: E501
"""Partially check whether an email address is valid # noqa: E501
Validate an email address by identifying whether its parent domain has email servers defined. This call is less limited than syntaxOnly but not as comprehensive as address/full. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.email_address_get_servers(email, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str email: Email address to validate, e.g. \"<EMAIL>\". The input is a string so be sure to enclose it in double-quotes. (required)
:return: AddressGetServersResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.email_address_get_servers_with_http_info(email, **kwargs) # noqa: E501
else:
(data) = self.email_address_get_servers_with_http_info(email, **kwargs) # noqa: E501
return data
def email_address_get_servers_with_http_info(self, email, **kwargs): # noqa: E501
"""Partially check whether an email address is valid # noqa: E501
Validate an email address by identifying whether its parent domain has email servers defined. This call is less limited than syntaxOnly but not as comprehensive as address/full. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.email_address_get_servers_with_http_info(email, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str email: Email address to validate, e.g. \"<EMAIL>\". The input is a string so be sure to enclose it in double-quotes. (required)
:return: AddressGetServersResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['email'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method email_address_get_servers" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'email' is set
if ('email' not in params or
params['email'] is None):
raise ValueError("Missing the required parameter `email` when calling `email_address_get_servers`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'email' in params:
body_params = params['email']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json', 'application/xml', 'text/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'text/json']) # noqa: E501
# Authentication setting
auth_settings = ['Apikey'] # noqa: E501
return self.api_client.call_api(
'/validate/email/address/servers', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AddressGetServersResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def email_full_validation(self, email, **kwargs): # noqa: E501
"""Fully validate an email address # noqa: E501
Performs a full validation of the email address. Checks for syntactic correctness, identifies the mail server in question if any, and then contacts the email server to validate the existence of the account - without sending any emails. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.email_full_validation(email, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str email: Email address to validate, e.g. \"<EMAIL>\". The input is a string so be sure to enclose it in double-quotes. (required)
:return: FullEmailValidationResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.email_full_validation_with_http_info(email, **kwargs) # noqa: E501
else:
(data) = self.email_full_validation_with_http_info(email, **kwargs) # noqa: E501
return data
def email_full_validation_with_http_info(self, email, **kwargs): # noqa: E501
"""Fully validate an email address # noqa: E501
Performs a full validation of the email address. Checks for syntactic correctness, identifies the mail server in question if any, and then contacts the email server to validate the existence of the account - without sending any emails. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.email_full_validation_with_http_info(email, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str email: Email address to validate, e.g. \"<EMAIL>\". The input is a string so be sure to enclose it in double-quotes. (required)
:return: FullEmailValidationResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['email'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method email_full_validation" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'email' is set
if ('email' not in params or
params['email'] is None):
raise ValueError("Missing the required parameter `email` when calling `email_full_validation`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'email' in params:
body_params = params['email']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json', 'application/xml', 'text/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'text/json']) # noqa: E501
# Authentication setting
auth_settings = ['Apikey'] # noqa: E501
return self.api_client.call_api(
'/validate/email/address/full', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FullEmailValidationResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
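# Typical usage sketch for this swagger-codegen generated client (the API key below is a placeholder,
# and Configuration is assumed to be exposed by the same generated package):
# config = cloudmersive_validate_api_client.Configuration()
# config.api_key['Apikey'] = 'YOUR-API-KEY'
# api = EmailApi(ApiClient(config))
# result = api.email_full_validation('someone@example.com')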
def email_post(self, value, **kwargs): # noqa: E501
"""Validate email adddress for syntactic correctness only # noqa: E501
Validate whether a given email address is syntactically correct via a limited local-only check. Use the address/full API to do a full validation. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.email_post(value, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str value: Email address to validate, e.g. \"<EMAIL>\". The input is a string so be sure to enclose it in double-quotes. (required)
:return: AddressVerifySyntaxOnlyResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.email_post_with_http_info(value, **kwargs) # noqa: E501
else:
(data) = self.email_post_with_http_info(value, **kwargs) # noqa: E501
return data
def email_post_with_http_info(self, value, **kwargs): # noqa: E501
"""Validate email adddress for syntactic correctness only # noqa: E501
Validate whether a given email address is syntactically correct via a limited local-only check. Use the address/full API to do a full validation. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.email_post_with_http_info(value, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str value: Email address to validate, e.g. \"<EMAIL>\". The input is a string so be sure to enclose it in double-quotes. (required)
:return: AddressVerifySyntaxOnlyResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['value'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method email_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'value' is set
if ('value' not in params or
params['value'] is None):
raise ValueError("Missing the required parameter `value` when calling `email_post`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'value' in params:
body_params | |
was here????!
nvoice.autoProg(self._cprog, self._progc, self._csize, ncount, tweights, mweights) ### ncount wasnt here?!?
self._voices[mtype] = self._voices[mtype] + (nvoice,)
self._sorting = self._sorting + (tvindicator(mtype, len(self._voices[mtype])-1),)
def addVoiceAsIs(self, voic): # Copies voice (note: the voice should have the same parameters as the theme)
nvoice = copy.deepcopy(voic)
self._voices[nvoice._mtype] = self._voices[nvoice._mtype] + (nvoice,)
self._sorting = self._sorting + (tvindicator(voic._mtype, len(self._voices[voic._mtype])-1),)
def previewAudio(self, bpm): # return audio of all voices' first prog
total = (len(self._cprog)*self._csize*bpmToBeat(bpm)) + 3000
sound = AudioSegment.silent(total)
for mtype in list(self._voices):
for voic in self._voices[mtype]:
sound = sound.overlay(voic._progs[0].getAudio(voic._inst, bpm))
return sound
def infoToFolder(self, bpm, folder): # Exports various information about this theme to a folder
try:
filezart.makeTextFile(folder + "/prog.txt", str(self._cprog))
filezart.makeTextFile(folder + "/sorting.txt", self.sortingString())
(self.previewAudio(bpm)).export(folder + "/preview.mp3", format = "mp3")
except:
print("Could not fully export theme")
for mtype in list(self._voices):
thisfolder = folder + "/" + mtype
filezart.makeFolder(thisfolder)
for i in range(len(self._voices[mtype])):
try:
vfolder = thisfolder+ "/" + str(i)
voic = self._voices[mtype][i]
filezart.makeFolder(vfolder)
filezart.makeTextFile(vfolder + "/tab.txt", voic.toTab())
(voic._progs[0].getAudio(voic._inst, bpm)).export(vfolder + "/preview.mp3", format = "mp3")
except:
print("Could not fully export voice")
def sortingString(self):
string = ""
for tvi in self._sorting:
string = string + tvi.indicationStr(self) + "\n"
return string
def countVoices(self): # Get number of voices in sorting
return len(self._sorting)
def baseDurForStruct(self, size, bpm): # Duration to be requested by a markovzart2 Part
beat = bpmToBeat(bpm) #duration of a beat
progdur = beat*self._csize*len(self._cprog) #duration of a progression
partialProg = math.ceil(size*self._progc) #number of progressions
base = partialProg*progdur
return base
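# Worked example with hypothetical values: for bpm=120 (beat = 500 ms), self._csize=4 and a
# 4-chord self._cprog, one progression lasts 500*4*4 = 8000 ms; with size=0.5 and self._progc=6,
# ceil(0.5*6) = 3 progressions are requested, so the base duration returned is 24000 ms.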
def resetSort(self):
self._sorting = ()
for mt in list(self._voices):
for i in range(len(self._voices[mt])):
self._sorting = self._sorting + (tvindicator(mt, i),)
def shuffleSort(self):
self.resetSort()
l = list(self._sorting)
random.shuffle(l)
self._sorting = tuple(l)
def findAndDelete(self, voic): # locate voice, delete it from voices and from tvindicators
found = False
for i in range(len(self._sorting)):
tvi = self._sorting[i]
if tvi.getVoice(self) == voic:
self._sorting = self._sorting[:i] + self._sorting[i+1:]
found = True
break
if not found:
print("Warning: the voice you are deleting was not found on tvindicators")
found = False
for tip in list(self._voices):
for i in range(len(self._voices[tip])):
tvc = self._voices[tip][i]
if tvc == voic:
self._voices[tip] = self._voices[tip][:i] + self._voices[tip][i+1:]
found = True
vtip = tip
vid = i
break
if found:
break
if not found:
print("Warning: the voice you are deleting was not found anywhere!!")
return
for i in range(len(self._sorting)): #lower the indexes of tvis above the deleted voice
tvi = self._sorting[i]
if tvi._mtype == vtip:
if tvi._index > vid:
tvi._index = tvi._index-1
# TVIndicator class, pointer for voice in theme
# Initialized with voice mtype and voice id (index on theme mtype list)
class tvindicator:
def __init__(self, mtype, index):
if not mtype in ("generic", "chordic", "smelodic", "lmelodic", "percussion"):
raise ValueError ("Invalid mtype: "+str(mtype))
self._mtype = mtype
self._index = index
def getVoice(self, them): # get corresponding voice in theme
return them._voices[self._mtype][self._index]
def indicationStr(self, them):
return "["+self._mtype+"_"+str(self._index)+str(them._voices[self._mtype][self._index])+"]"
def __repr__(self):
return "indicator:"+self._mtype+"-"+str(self._index)
def uppercase(text):
ret = ""
for i in text:
if i in "abcdefghijklmnopqrstuvwxyz" + chr(231): #chr(231) is lowercase cedilha c
ret = ret + ("ABCDEFGHIJKLMNOPQRSTUVWXYZ" + chr(199))[("abcdefghijklmnopqrstuvwxyz"+ chr(231)).index(i)]
else: ret = ret+i
return ret
def lowercase(text):
ret = ""
for i in text:
if i in "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + chr(199): #chr(199) is uppercase cedilha C
ret = ret + ("abcdefghijklmnopqrstuvwxyz"+ chr(231))[("ABCDEFGHIJKLMNOPQRSTUVWXYZ"+ chr(199)).index(i)]
else: ret = ret+i
return ret
# rselect RandomSelect returns random element of list
def rselect(lista):
return random.choice(lista)
# wselect WeightedSelect returns element of dictionary based on dict weights {element:weight}
def wselect(dicti):
if len(list(dicti))==0:
raise ValueError ("cannot select from empty dict")
total=0
for i in list(dicti):
total = total + dicti[i]
if total <= 0:
raise ValueError ("total must be larger than zero")
indice = total*random.random()
for i in list(dicti):
if dicti[i]>=indice:
return i
indice = indice - dicti[i]
raise ValueError ("something went wrong")
# removeEls ElementRemoval returns list without elements that are true for lamb(e, element)
def removeEls(lista, element, lamb):
size = len(lista)
for i in range(len(lista)):
j = size - (i+1)
if lamb(lista[j], element):
lista = lista[:j] + lista[j+1:]
return lista
# filterCopyDict returns copy of dict without elements that are true for lamb(e, element)
def filterCopyDict(dic, element, lamb):
new = {}
for i in list(dic):
if not lamb(i, element):
new[i] = dic[i]
return new
# removeElsList ListComparisonElementRemoval returns list without elements that are true for lamb(e, element), where element is every element of list2
def removeElsList(lista, lista2, lamb):
size = len(lista)
for i in range(len(lista)):
j = size - (i+1)
for i in lista2:
if lamb(lista[j], i):
lista = lista[:j] + lista[j+1:]
break
return lista
#returns string cut after first instance of find. cutafter("43211234", "12") -> "34"
def cutafter(string, find):
def aux(s1, s2):
for i in range(len(s2)):
if s1[i] != s2[i]:
return False
return True
for i in range(len(string)):
if string[i] == find[0]:
if aux(string[i:], find):
return string[i+len(find):]
raise ValueError("no find in string")
# firstOctave returns list of notes in first octave, "C0" to "B0"
def firstOctave():
lista = ()
for i in range(12):
lista = lista + (mnote(i),)
return lista
# Adapted simplified normal distribution probability for note distances (for wselects)
def adaptedNormal(center, value):
return 10*(2.718281828459045**((-1/20)*((center-value)**2)))
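# Equivalent to 10*exp(-(center-value)**2 / 20): adaptedNormal(0, 0) == 10.0 and
# adaptedNormal(0, 2) is roughly 8.19, so values closer to the center get higher weights.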
# Beats Per Minute to Beat time: duration of each beat, in milliseconds, for the given beats per minute
def bpmToBeat(bpm):
return (60 * 1000)/bpm
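# e.g. bpmToBeat(120) == 500.0 milliseconds per beat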
# Returns sorted copy of list of notes
def noteSort(notes):
if (len(notes) == 0 or len(notes) == 1):
return notes
under = ()
upper = ()
anchor = notes[0]
for i in notes[1:]:
if i._value < anchor._value:
under = under + (i,)
else:
upper = upper + (i,)
return noteSort(under) + (anchor,) + noteSort(upper)
# List available notes of instrument
def listNotes(inst):
if inst._type == "modulation":
r = inst._rang
notens = str.split(r, "-")
first = mnote.fromName(notens[0])
last = mnote.fromName(notens[1])
v = first._value
ns = ()
while v <= last._value:
ns = ns + (mnote(v),)
v = v+1
return ns
if (inst._noteslist == ()):
notes = ()
notenames = inst.getNoteNames()
for notename in notenames:
notes = notes + (mnote.fromName(notename),)
notes = noteSort(notes)
inst._noteslist = notes
return inst._noteslist
# Get Audiosegment object of instrument note
def getNote(instrument, note):
if instrument._type == "modulation":
base = instrument.getAudio(None)
aud = modulateAudio(base, mnote.fromName(instrument._base), note)
return aud
if instrument._type == "percussion":
return instrument.getAudio(None)
if instrument._type == "randper":
return instrument.getAudio(None)
if instrument._bmsp == "bemol":
typen = note._typename
elif instrument._bmsp == "sharp":
typen = note._sharptypename
elif instrument._bmsp == "esse":
typen = note._sharptypename
if "#" in typen:
i = typen.index("#")
typen = typen[:i] + "s" + typen[i+1:]
else:
raise ValueError(instrument._bmsp + " is not a valid sharpType!")
if instrument._case == "upper":
typen = uppercase(typen[0]) + typen[1:]
elif instrument._case == "lower":
typen = lowercase(typen)
else:
raise ValueError(instrument._case + " is not a valid caseType!")
if instrument._nclt == "name_octave":
final = typen + str(note._octave)
elif instrument._nclt == "octave_name":
final = str(note._octave) + typen
else:
raise ValueError(instrument._nclt + " is not a valid nomenclatureType!")
return instrument.getAudio(final)
# Side Play plays audio in a background thread
def sidePlay(audio):
threading.Thread(target=play, args=(audio,)).start()
#pool = Pool()
#pool.apply_async(play, (audio,))
# Modulate Audio, returns audio of audionote sped up or slowed down to become the objective note
def modulateAudio(audio, audionote, objective):
octaves = (objective._value - audionote._value)/12
new_sample_rate = int(audio.frame_rate * (2.0 ** octaves))
new = audio._spawn(audio.raw_data, overrides={'frame_rate': new_sample_rate})
new = new.set_frame_rate(44100)
return new
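# The pitch shift above is done by resampling: each octave of difference doubles (or halves) the
# frame rate, so shifting up 7 semitones multiplies the rate by 2**(7/12) (about 1.498), with mnote
# values counted in semitones (12 per octave).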
def sampleCProg(cprog, inst = filezart.getInfo()[2]):
audio = AudioSegment.silent((len(cprog)*1000) + 3000)
t = 0
for chord in cprog:
audio = audio.overlay(chord.sampleAudio(inst), t)
t = t + 1000
play(audio)
#XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX Song Parameters XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#Chordic Parameters
def chordicMWeights():
return {mmov("general", "repeat"):1,mmov("chordic", "repeat"):4, mmov("chordic", "rise"):4, mmov("chordic", "lower"):4, mmov("general", "rise"):1, mmov("general", "lower"):1}
def chordicTWeights():
return {"whole":4, "half":2, "quarter":1}
def chordicCWeights():
return {1:1, 2:2, 3:3, 4:20, 5:20, 6:20, 7:15, 8:14, 9:13, 10:3, 11:2, 12:1}
#Generic Parameters
def genericMWeights():
return {mmov("general", "repeat"):3,mmov("chordic", "repeat"):1, mmov("chordic", "rise"):1, mmov("chordic", "lower"):1, mmov("general", "rise"):3, mmov("general", "lower"):3}
def genericTWeights():
return {"whole":4, "half":2, "quarter":1}
def genericCWeights():
return {1:1, 2:2, 3:3, 4:20, 5:20, 6:20, 7:15, 8:14, 9:13, 10:3, 11:2, 12:1}
#Small Melodic Parameters
def smelodicMWeights():
return {mmov("general", "repeat"):2,mmov("chordic", "repeat"):4, mmov("chordic", "rise"):4, mmov("chordic", "lower"):4, mmov("general", "rise"):2, mmov("general", "lower"):2}
def smelodicTWeights():
return {"whole":4, | |
columns.index('Door to arterial puncture time (IQR)')
)
mechanical_thrombectomy_outcome_format = self._add_group_header_format(
workbook, '#BEBCBC'
)
self._add_group_text(
worksheet,
mechanical_thrombectomy_outcome_format,
'Outcome after mechanical thrombectomy (as per NIHSS score)',
columns.index('# of patients that improved after mechanical thrombectomy'),
columns.index('% of patients that died after mechanical thrombectomy')
)
complications_format = self._add_group_header_format(
workbook, '#C5D068'
)
self._add_group_text(
worksheet,
complications_format,
'Complications of reperfusion therapy (lysis and/or thrombectomy - expressed as a percentage of the number that received reperfusion therapy)',
columns.index('# of patients with non-fatal symptomatic intracranial haemorrhage (SICH)'),
columns.index('% of patients with failure to achieve reperfusion (<TICI 2b or 3)')
)
post_acute_treatment_format = self._add_group_header_format(
workbook, '#AA8739'
)
self._add_group_text(
worksheet,
post_acute_treatment_format,
'Post-Acute Treatment received for period of assessment (all patients)',
columns.index('# of patients who had a Dysphagia screen done'),
columns.index('% of patients who had Cardiac arrhythmia screening')
)
medical_treatment_format = self._add_group_header_format(
workbook, '#277650'
)
self._add_group_text(
worksheet,
medical_treatment_format,
'Medical treatment received',
columns.index('# of patients who received antihypertensive medication'),
columns.index('% of patients who received anticoagulation')
)
surgical_treatment_format = self._add_group_header_format(
workbook, '#AA5039'
)
self._add_group_text(
worksheet,
surgical_treatment_format,
'Surgical treatment',
columns.index('# of patients who received hemicraniectomy'),
columns.index('% of patients who received clot surgery')
)
# Hide all columns whose name starts with '#' and keep only the percentage values
for column in columns:
if column.startswith('#'):
index = columns.index(column)
workbook_index = xl_col_to_name(index)
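# xl_col_to_name converts a zero-based column index into an Excel column letter, e.g. 0 -> 'A', 27 -> 'AB'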
worksheet.set_column(
f'{workbook_index}:{workbook_index}', None, None, {'hidden': True})
workbook.close()
def _generate_presentation(self, df, filename, site_name=None):
''' Generate formatted statistics.
:param df: the calculated statistics
:type df: DataFrame
:param filename: the name of the created file
:type filename: str
:param site_name: the name of the site (default is None)
:type site_name: str
'''
script_dir = os.path.dirname(__file__)
master = os.path.normpath(os.path.join(script_dir, 'backgrounds', 'master.pptx'))
prs = Presentation(master)
# Add title to the main page
first_slide = prs.slides[0]
shape = first_slide.shapes[5]
text_frame = shape.text_frame
p = text_frame.paragraphs[0]
run = p.add_run()
# Set title of the first slide; for a site report the title contains the site name
if site_name is None:
run.text = f'{self.country_name}\nData Summary'
else:
run.text = f'{site_name}\nData Summary'
font = run.font
font.name = 'Century Gothic'
font.size = Pt(24)
font.color.rgb = RGBColor(250,250,250)
# Total number of patients
column = 'Total Patients'
graph_df = df[['Facility Name', column]].copy()
graph_df = graph_df.sort_values([column], ascending=True)
total_patients = graph_df.loc[graph_df['Facility Name'] == self.country_name, column].iloc[0]
graph_df = graph_df.loc[graph_df['Facility Name'] != self.country_name].copy()
title = f'Total Patients (n = {total_patients})'
self._create_graph(prs, graph_df, title=title, show_value_axis=False)
# Gender distribution
graph_df = df[['Facility Name', '% Male', '% Female']].copy()
graph_df = graph_df.sort_values(['% Male'], ascending=True)
legend = ['Male', 'Female']
self._create_graph(
prs, graph_df, title='Gender distribution (%)', graph_type='stacked', show_value_axis=True, legend=legend
)
# Total number of patients
column = '% CT or MRI done for stroke patients - Yes'
graph_df = df[['Facility Name', column]].copy()
graph_df = graph_df.sort_values([column], ascending=True)
title = 'CT or MRI done for stroke patients (%)'
self._create_graph(prs, graph_df, title=title, show_value_axis=False)
# Stroke complications for all patients at time of discharge
graph_df = df[[
'Facility Name',
'% of patients with no complications',
'% of patients with pneumonia',
'% of patients with DVT',
'% of patients with pulmonary embolus',
'% of patients with worsening of stroke',
'% of patients with drip sepsis',
'% of patients with UTI',
'% of patients with pressure sore',
'% of patients with death (overall mortality)',
'% of patients with other post-stroke complications'
]].copy()
graph_df = graph_df.sort_values(['% of patients with no complications'], ascending=True)
legend = [
'No complications',
'Pneumonia',
'DVT',
'Pulmonary embolus',
'Worsening of stroke',
'Drip sepsis',
'UTI',
'Pressure sore',
'Death (overall mortality)',
'Other'
]
self._create_graph(
prs,
graph_df,
title='Stroke complications (all patients) (%)',
graph_type='stacked',
show_value_axis=True,
legend=legend
)
# NIHSS score on arrival (median)
column = 'NIHSS score on arrival (median)'
graph_df = df[['Facility Name', column]].copy()
graph_df = graph_df.sort_values([column], ascending=True)
title = 'NIHSS score on arrival (median)'
self._create_graph(prs, graph_df, title=title, show_value_axis=False)
# NIHSS score on discharge (median)
column = 'NIHSS score on discharge (median)'
graph_df = df[['Facility Name', column]].copy()
graph_df = graph_df.sort_values([column], ascending=True)
title = 'NIHSS score on discharge (median)'
self._create_graph(prs, graph_df, title=title, show_value_axis=False)
# modified Ranking Score on discharge (median)
column = 'modified Ranking Score on discharge (median)'
graph_df = df[['Facility Name', column]].copy()
graph_df = graph_df.sort_values([column], ascending=True)
title = 'modified Ranking Score on discharge (median)'
self._create_graph(prs, graph_df, title=title, show_value_axis=False)
# Total number stroke patients treated with intravenous lysis for period of assessment
column = 'Total number stroke patients treated with intravenous lysis for period of assessment'
graph_df = df[['Facility Name', column]].copy()
graph_df = graph_df.sort_values([column], ascending=True)
total_patients = graph_df.loc[graph_df['Facility Name'] == self.country_name, column].iloc[0]
graph_df = graph_df.loc[graph_df['Facility Name'] != self.country_name].copy()
title = f'# of stroke patients treated with intravenous lysis (n = {total_patients})'
self._create_graph(prs, graph_df, title=title, show_value_axis=False)
# Total number of patients that arrived at hospital within 3 hours of symptom onset for period of assessment
column = 'Total number of patients that arrived at hospital within 3 hours of symptom onset for period of assessment'
graph_df = df[['Facility Name', column]].copy()
graph_df = graph_df.sort_values([column], ascending=True)
total_patients = graph_df.loc[graph_df['Facility Name'] == self.country_name, column].iloc[0]
graph_df = graph_df.loc[graph_df['Facility Name'] != self.country_name].copy()
title = f'# of patients that arrived at hospital within 3 hours of symptom onset (n = {total_patients})'
self._create_graph(prs, graph_df, title=title, show_value_axis=False)
# % of all patients that received Intravenous thrombolysis or mechanical thrombectomy - Yes
column = '% of all patients that received Intravenous thrombolysis or mechanical thrombectomy - Yes'
graph_df = df[['Facility Name', column]].copy()
graph_df = graph_df.sort_values([column], ascending=True)
title = f'Intravenous thrombolysis or mechanical thrombectomy received (%)'
self._create_graph(prs, graph_df, title=title, show_value_axis=False)
# Symptom onset to needle time (median)
column = 'Symptom onset to needle time (median)'
graph_df = df[['Facility Name', column]].copy()
graph_df = graph_df.sort_values([column], ascending=True)
title = 'Symptom onset to needle time (median)'
self._create_graph(prs, graph_df, title=title, show_value_axis=False)
# Door to needle time (median)
column = 'Door to needle time (median)'
graph_df = df[['Facility Name', column]].copy()
graph_df = graph_df.sort_values([column], ascending=True)
title = 'Door to needle time (median)'
self._create_graph(prs, graph_df, title=title, show_value_axis=False)
# Outcome after intravenous lysis (as per NIHSS score)
graph_df = df[[
'Facility Name',
'% of patients that improved after intravenous lysis',
'% of patients whose clinical condition did not change after intravenous lysis',
'% of patients who deteriorated in hospital after intravenous lysis']].copy()
graph_df = graph_df.sort_values(['% of patients that improved after intravenous lysis'], ascending=True)
legend = ['Improved', 'Did not change', 'Deteriorated']
self._create_graph(
prs, graph_df, title='Outcome after intravenous lysis (as per NIHSS score) (%)', graph_type='stacked', show_value_axis=True, legend=legend
)
# % of all patients that received Mechanical thrombectomy - Yes
column = '% of all patients that received Mechanical thrombectomy - Yes'
graph_df = df[['Facility Name', column]].copy()
graph_df = graph_df.sort_values([column], ascending=True)
title = 'Mechanical thrombectomy received (%)'
self._create_graph(prs, graph_df, title=title, show_value_axis=False)
# Symptom onset to arterial puncture time (median)
column = 'Symptom onset to arterial puncture time (median)'
graph_df = df[['Facility Name', column]].copy()
graph_df = graph_df.sort_values([column], ascending=True)
title = 'Symptom onset to arterial puncture time (median)'
self._create_graph(prs, graph_df, title=title, show_value_axis=False)
# Door to arterial puncture time (median)
column = 'Door to arterial puncture time (median)'
graph_df = df[['Facility Name', column]].copy()
graph_df = graph_df.sort_values([column], ascending=True)
title = 'Door to arterial puncture time (median)'
self._create_graph(prs, graph_df, title=title, show_value_axis=False)
# Outcome after mechanical thrombectomy (as per NIHSS score)
graph_df = df[[
'Facility Name',
'% of patients that improved after mechanical thrombectomy',
'% of patients whose clinical condition did not change after mechanical thrombectomy',
'% of patients who deteriorated in hospital after mechanical thrombectomy']].copy()
graph_df = graph_df.sort_values(['% of patients that improved after mechanical thrombectomy'], ascending=True)
legend = ['Improved', 'Did not change', 'Deteriorated']
self._create_graph(
prs, graph_df, title='Outcome after mechanical thrombectomy (as per NIHSS score) (%)', graph_type='stacked', show_value_axis=True,
legend=legend
)
# Complications of reperfusion therapy
graph_df = df[[
'Facility Name',
'% of patients with non-fatal symptomatic intracranial haemorrhage (SICH)',
'% of patients with fatal SICH',
'% of patients with remote cerebral haemorrhage',
'% of patients with vessel perforation',
'% of patients with procedure related dissection',
'% of patients with embolization to different vascular territory',
'% of patients with groin haematoma | |
of view for this particular renderer"""
return self._vfov
@property
def v_angles(self):
"""Vertical angular scale of the image frame."""
v_angles = self._v_angles[...]
v_angles.setflags(write=False)
return v_angles
@property
def calc(self):
"""Calc object used to calculate verticle rays."""
return self._calc
@property
def geod(self):
"""geod object used to distances between latitude and longitude coordinates."""
return self._geod
@property
def ds(self):
"""distance points."""
ds = self._ds[...]
ds.setflags(write=False)
return ds
def set_location(self,lat_obs,lon_obs):
"""This function can be used to change the position of the renderer.
Parameters
----------
lat_obs: float
new latitude of observer
lon_obs: float
new longitude of observer
"""
self._lat_obs = float(lat_obs)
self._lon_obs = float(lon_obs)
_check_gps(self._lat_obs,self._lon_obs)
def ray_diagram(self,ax,heading,heights=None,style="sphere_top",
eye_level=True,linewidth_rays=0.2,linewidth_earth=0.2,R0=6371008,h_min=0.01):
"""Side profile view showing the rays trajectories.
Parameters
----------
ax: matplotlib Axes object
this function plots lines on a graph, you can specify which graph to plot to by passing in the axes object for the graph you want to plot to.
heading: scalar
the heading along which to take the terrain slice for the ray paths.
heights: array_like, (N,), optional
list of values which represent the elevation profile along the ray trajectory (in addition to elevation)
style: str, optional
style to plot the graph. "flat": plot rays on a flat plane, "sphere_top": plot rays as if the earth is falling away from the observer, "sphere_side": plot rays with the 'bulge' in the middle.
eye_level: bool, optional
color the ray which is closest to eye level (0 degrees initial angle) orange.
linewidth_rays: float, optional
linewidth used for plotting rays in diagram
linewidth_earth: float, optional
linewidth used for plotting surface of the earth, both water and land.
h_min: float, optional
minimum value which should count as water when calculating hit locations.
"""
d = self._ds
if len(d) == 0 or not is_sorted(d):
raise ValueError("array 'd' must contain distance values in ascending order.")
d_max = self._ds.max()
h_angle = heading
rs = self._rs
if heights is None:
heights = np.zeros_like(d)
n_v = self.v_angles.size
water = np.zeros(n_v,dtype=bool)
land = np.zeros(n_v,dtype=bool)
sky = np.zeros(n_v,dtype=bool)
inds = np.zeros(n_v,dtype=np.int32)
i_horz = np.abs(self.v_angles).argmin()
if style == "sphere_top":
c = np.cos(np.pi/2-d/R0)
s = np.sin(np.pi/2-d/R0)
_ray_crossing_cpu(h_min,rs,heights,inds,water,land,sky)
rs = rs + R0
heights = heights + R0
for i in range(n_v):
if i == i_horz and eye_level:
i_max = inds[i]
ax.plot(rs[i,:i_max]*c[:i_max],
rs[i,:i_max]*s[:i_max]-R0,color="orange",linewidth=linewidth_rays)
continue
if water[i]:
i_max = inds[i]
ax.plot(rs[i,:i_max]*c[:i_max],
rs[i,:i_max]*s[:i_max]-R0,color="blue",linewidth=linewidth_rays)
continue
if land[i]:
i_max = inds[i]
ax.plot(rs[i,:i_max]*c[:i_max],
rs[i,:i_max]*s[:i_max]-R0,color="green",linewidth=linewidth_rays)
continue
if sky[i]:
ax.plot(rs[i,:]*c,rs[i,:]*s-R0,color="cyan",linewidth=linewidth_rays)
continue
ax.plot(c*heights,s*heights-R0,color="green",linewidth=linewidth_earth)
ax.plot(c*R0,s*R0-R0,color="blue",linewidth=linewidth_earth)
elif style == "sphere_side":
c = np.cos(np.pi/2-d/R0+d_max/(2*R0))
c -= c.min()
s = np.sin(np.pi/2-d/R0+d_max/(2*R0))
_ray_crossing_cpu(h_min,rs,heights,inds,water,land,sky)
rs = rs + R0
heights = heights + R0
for i in range(n_v):
if i == i_horz and eye_level:
i_max = inds[i]
ax.plot(rs[i,:i_max]*c[:i_max],
rs[i,:i_max]*s[:i_max]-R0*s.min(),color="orange",linewidth=linewidth_rays)
continue
if water[i]:
i_max = inds[i]
ax.plot(rs[i,:i_max]*c[:i_max],
rs[i,:i_max]*s[:i_max]-R0*s.min(),color="blue",linewidth=linewidth_rays)
continue
if land[i]:
i_max = inds[i]
ax.plot(rs[i,:i_max]*c[:i_max],
rs[i,:i_max]*s[:i_max]-R0*s.min(),color="green",linewidth=linewidth_rays)
continue
if sky[i]:
ax.plot(rs[i,:]*c,rs[i,:]*s-R0*s.min(),color="cyan",linewidth=linewidth_rays)
continue
ax.plot(c*heights,s*heights-R0*s.min(),color="green",linewidth=linewidth_earth)
ax.plot(c*R0,s*R0-R0*s.min(),color="blue",linewidth=linewidth_earth)
elif style == "flat":
_ray_crossing_cpu(h_min,rs,heights,inds,water,land,sky)
for i in range(n_v):
if i == i_horz and eye_level:
i_max = inds[i]
ax.plot(d[:i_max],rs[i,:i_max],color="orange",linewidth=linewidth_rays)
continue
if water[i]:
i_max = inds[i]
ax.plot(d[:i_max],rs[i,:i_max],color="blue",linewidth=linewidth_rays)
continue
if land[i]:
i_max = inds[i]
ax.plot(d[:i_max],rs[i,:i_max],color="green",linewidth=linewidth_rays)
continue
if sky[i]:
ax.plot(d,rs[i,:],color="cyan",linewidth=linewidth_rays)
continue
ax.plot(d,heights,color="green",linewidth=linewidth_earth)
ax.plot(d,np.zeros_like(d),color="blue",linewidth=linewidth_earth)
else:
raise ValueError("style must be 'flat', 'sphere_top' or 'sphere_side'.")
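# Illustrative usage sketch (hypothetical names: `ren` is an already-configured
# renderer instance; only the documented ray_diagram API above is assumed):
#
# import matplotlib.pyplot as plt
# fig, ax = plt.subplots()
# ren.ray_diagram(ax, heading=270.0, style="flat", eye_level=True)
# ax.set_xlabel("distance (m)")
# ax.set_ylabel("height (m)")
# plt.show()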
def render_scene(self,scene,image_names,heading_mins,heading_maxs,surface_color=None,background_color=None,
cfunc=None,cfunc_args=None,disp=False,eye_level=False,postprocess=None,h_min=0.01,gpu=False):
"""Renders a composites over a very wide horizontal field.
Parameters
----------
scene: Scene object
object which contains data which the renderer can extract and render
image_names: array_like (n,) or 'str'
name(s) for the image(s) being rendered.
heading_mins: array_like (n,) or float
minimum heading(s) at which the composite(s) start.
heading_maxs: array_like (n,) or float
maximum heading(s) at which the composite(s) end, paired element-wise with 'heading_mins'.
background_color: array_like, optional
3 element array containing a color via RGB color code (numbered 0 to 255)
default value: `[135,206,250]`
surface_color: array_like, optional
3 element array containing a color via RGB color code (numbered 0 to 255)
default value: `[0,80,120]`
cfunc: callable, optional
Callable function which is used to color the elevation data. The function takes
in two arrays, the first is distances from the observer the second is the elevation
above the surface of the earth.
cfunc_args: array_like, optional
extra arguments to pass into `cfunc`
disp: bool, optional
when rendering topographical data this will print out the heading slice which has been rendered.
eye_level: bool, optional
when rendering the image, an orange line is placed at eye level in the image.
postprocess: callable, optional
function which processes the final image before saving it.
h_min: float, optional
minimum value for a ray to count as crossing water.
gpu: bool, optional
utilize GPU to calculate 3D land model rendering.
"""
if surface_color is None:
surface_color = np.array([0,80,120])
else:
surface_color = np.fromiter(surface_color,dtype=np.uint8)
if background_color is None:
background_color = np.array([135,206,250])
else:
background_color = np.fromiter(background_color,dtype=np.uint8)
if cfunc is None and cfunc_args is None:
cfunc = _defualt_cfunc
cfunc_args = (background_color,)
elif cfunc is None and cfunc_args is not None:
raise Exception("cfunc_args given without cfunc.")
if cfunc_args is None:
cfunc_args = ()
n_v = self._rs.shape[0]
land_model = scene._land_model
heading_mins = np.atleast_1d(heading_mins).ravel()
heading_maxs = np.atleast_1d(heading_maxs).ravel()
image_names = np.atleast_1d(image_names).ravel()
tup = np.broadcast_arrays(heading_mins,heading_maxs,image_names)
for heading_min,heading_max,image_name in zip(*tup):
print(heading_min,heading_max,image_name)
h_angles = np.arange(heading_min,heading_max,self._dangles)
img_datas,ray_heights = _prep_scene(scene,h_angles,self._lat_obs,self._lon_obs,self._geod,self._sol)
png_data = np.empty((len(h_angles),n_v,3),dtype=np.uint8)
png_data[...] = 0
terrain_args = (self._lat_obs,self._lon_obs,land_model,cfunc,cfunc_args)
image_args = (img_datas,ray_heights)
if gpu:
_render_gpu(png_data,h_min,self._rs,self._ds,h_angles,surface_color,background_color,terrain_args,image_args,disp)
else:
_render_cpu(png_data,h_min,self._rs,self._ds,h_angles,surface_color,background_color,terrain_args,image_args,disp)
if eye_level:
i_horz = np.argwhere(np.abs(self._v_angles)<(self._vfov/800.0)).ravel()
png_data[:,i_horz,0] = 255
png_data[:,i_horz,1] = 100
png_data[:,i_horz,2] = 0
png_data = png_data.transpose((1,0,2))
png_data = png_data[::-1,:,:]
im = Image.fromarray(png_data,mode="RGB")
if postprocess is not None:
im = postprocess(im)
im.save(image_name)
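# Illustrative usage sketch (hypothetical names: `ren` is a configured renderer and
# `scene` a Scene already populated with elevation data; the output file name is made up):
#
# ren.render_scene(scene, "panorama.png", heading_mins=240.0, heading_maxs=300.0,
#                  eye_level=True, disp=True)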
class Scene(object):
"""
Simple wrapper which keeps track of data used to render an image.
"""
def __init__(self):
"""
This function initializes a `Scene` object.
"""
self._land_model = land_model()
self._image_dict = {}
@property
def land_model(self):
"""Object used to generate elevation along great circle."""
return self._land_model
def add_elevation_model(self,*args):
"""Add terrain data to the interpolated model.
Parameters
----------
args: tuple
tuple which contains elevation data:
if len(args) == 3: args = (lats,lons,elevation) which contains the arguments for scipy.interpolate.RegularGridInterpolator.
if len(args) == 2: args = (points,elevation) which contains the arguments for scipy.interpolate.LinearNDInterpolator.
if len(args) == 1: args = object that is instance of `land_model`.
"""
if len(args)==1 and isinstance(args[0],land_model):
self._land_model += args[0]
else:
self._land_model.add_elevation_data(*args)
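# The three accepted call forms, sketched with hypothetical data objects:
#
# scene.add_elevation_model(lats, lons, elevation)   # regular lat/lon grid
# scene.add_elevation_model(points, elevation)       # scattered (lat, lon) points
# scene.add_elevation_model(other_land_model)        # merge an existing land_model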
def add_image(self,image,image_pos,dimensions,direction=None):
"""Add image to scene.
Parameters
----------
image: str
string which contains path to image file.
image_pos: array_like
either (h_obj, lat, lon) or (lat, lon); h_obj is the height above the earth's surface
dimensions: tuple
contains the dimensions (width, height) of the image in meters. If either one has value `-1`
then that dimension is deduced from the picture's aspect ratio.
direction: float or tuple, optional
heading the image faces, given either directly or as a (lat, lon) point to face towards.
"""
if type(image) is str:
if image in self._image_dict:
im = self._image_dict[image][0]
else:
im = Image.open(image)
self._image_dict[image] = (im,[])
else:
raise ValueError("image must be a string containing name of file.")
px_width,px_height = im.size
aspect = float(px_height)/px_width
if len(image_pos) == 2:
lat,lon = image_pos
h = self._land_model(lat,lon)
elif len(image_pos) == 3:
h,lat,lon = image_pos
else:
raise ValueError("expecting image_pos to contain gps coordinates and optionally height of image.")
if direction is None:
heading = None
elif type(direction) is tuple:
if len(direction) != 2:
raise ValueError("direction must be either heading or tuple containing latitude and lonitude respectively.")
lat_dir,lon_dir = direction
_check_gps(lat_dir,lon_dir)
heading,b_az,dist = self._land_model._geod.inv(lon,lat,lon_dir,lat_dir) # heading from the image position towards the given point, via the land model's Geod
else:
heading = float(direction)
try:
width,height = dimensions
except ValueError:
raise ValueError("dimensions must contain only length and width of object.")
if width == -1 and height == -1:
raise ValueError("at least one height must be specifided to deduce size.")
if width == -1:
width = height/aspect
if height == -1:
height = width*aspect
self._image_dict[image][1].append((lat,lon,h,width,height,heading))
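# Illustrative call (file name, coordinates and dimensions are made up):
#
# scene.add_image("boat.png", (2.0, 44.2, -63.6), (-1, 30.0), direction=180.0)
# # places a 30 m tall image 2 m above the surface at (44.2, -63.6); the width is
# # deduced from the picture's aspect ratio and the image faces heading 180.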
class land_model(object):
"""
This class is used to grab slices of the terrain along great circle
slices of the earth.
"""
def __init__(self,ellps='sphere'):
"""
Simple wrapper which does the bilinear interpolation of terrain data.
Parameters
----------
ellps: str, optional
String representing which pyproj datum to use for the data being interpolated.
"""
self._geod = Geod(ellps=ellps)
self._terrain_list = []
self._d_min = np.inf
self._d_max = 0
self._h_max = 0
def get_terrain(self,lat,lon,heading,dist):
"""
Sample terrain along a geodesic defined by an initial latitude and longitude point along a heading at different distances.
Parameters
----------
lat: array_like
latitude(s) at which the starting point(s) begin
lon: array_like
longitude(s) at which the starting point(s) begin
1)
xy_stretched_ident = np.repeat(x_stretched_ident, y_stretch, 0)
indices = np.where(xy_stretched_ident == 1.0)
if rank == 2:
base_array[indices[0], indices[1]] = m
else:
arr = base_array[[[i] for i in middle_indices]][0]
arr[indices[0], indices[1]] = m
base_array[[[i] for i in middle_indices]] = arr
return tf.get_variable(name, initializer=base_array)
raise NotImplementedError('Initialization type ' + init_type +
' is not implemented.')
def get_input_tensor_with_stride(fixed_embeddings, linked_embeddings, stride):
"""Constructs an input tensor with a separate dimension for steps.
Args:
fixed_embeddings: list of NamedTensor objects for fixed feature channels
linked_embeddings: list of NamedTensor objects for linked feature channels
stride: int stride (i.e. beam * batch) to use to reshape the input
Returns:
a tensor of shape [stride, num_steps, D], where D is the total input
dimension of the concatenated feature channels
"""
input_tensor = get_input_tensor(fixed_embeddings, linked_embeddings)
shape = tf.shape(input_tensor)
return tf.reshape(input_tensor, [stride, -1, shape[1]])
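# Shape sketch (illustrative numbers): if the concatenated input tensor is
# [stride * num_steps, D] = [12, 8] and stride = 4, the reshape above returns a
# [4, 3, 8] tensor with an explicit num_steps dimension of 3.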
def convert_network_state_tensorarray(tensorarray):
"""Converts a source TensorArray to a source Tensor.
Performs a permutation between the steps * [stride, D] shape of a
source TensorArray and the (flattened) [stride * steps, D] shape of
a source Tensor.
The TensorArrays used during recurrence have an additional zeroth step that
needs to be removed.
Args:
tensorarray: TensorArray object to be converted.
Returns:
Tensor object after conversion.
"""
tensor = tensorarray.stack() # Results in a [steps, stride, D] tensor.
tensor = tf.slice(tensor, [1, 0, 0], [-1, -1, -1]) # Lop off the 0th step.
tensor = tf.transpose(tensor, [1, 0, 2]) # Switch steps and stride.
return tf.reshape(tensor, [-1, tf.shape(tensor)[2]])
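# Shape walkthrough (illustrative numbers): with 3 real steps plus the reserved 0th
# step, stride = 4 and D = 8, stack() yields [4, 4, 8]; slicing off step 0 gives
# [3, 4, 8]; the transpose gives [4, 3, 8]; the final reshape returns [12, 8].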
def pass_through_embedding_matrix(act_block, embedding_matrix, step_idx):
"""Passes the activations through the embedding_matrix.
Takes care to handle out of bounds lookups.
Args:
act_block: matrix of activations.
embedding_matrix: matrix of weights.
step_idx: vector containing step indices, with -1 indicating out of bounds.
Returns:
the embedded activations.
"""
# Indicator vector for out of bounds lookups.
step_idx_mask = tf.expand_dims(tf.equal(step_idx, -1), -1)
# Pad the last column of the activation vectors with the indicator.
act_block = tf.concat([act_block, tf.to_float(step_idx_mask)], 1)
return tf.matmul(act_block, embedding_matrix)
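# Dimension sketch (illustrative): if act_block is [B, D] and step_idx is [B], the
# indicator column makes act_block [B, D + 1], so embedding_matrix is expected to be
# [D + 1, E]; rows with step_idx == -1 get a 1 in the extra column, which adds the
# matrix's final row to their output and lets the model learn an out-of-bounds vector.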
def lookup_named_tensor(name, named_tensors):
"""Retrieves a NamedTensor by name.
Args:
name: Name of the tensor to retrieve.
named_tensors: List of NamedTensor objects to search.
Returns:
The NamedTensor in |named_tensors| with the |name|.
Raises:
KeyError: If the |name| is not found among the |named_tensors|.
"""
for named_tensor in named_tensors:
if named_tensor.name == name:
return named_tensor
raise KeyError('Name "%s" not found in named tensors: %s' % (name,
named_tensors))
def activation_lookup_recurrent(component, state, channel_id, source_array,
source_layer_size, stride):
"""Looks up activations from tensor arrays.
If the linked feature's embedding_dim is set to -1, the feature vectors are
not passed through (i.e. multiplied by) an embedding matrix.
Args:
component: Component object in which to look up the fixed features.
state: MasterState object for the live ComputeSession.
channel_id: int id of the fixed feature to look up.
source_array: TensorArray from which to fetch feature vectors, expected to
have size [steps + 1] elements of shape [stride, D] each.
source_layer_size: int length of feature vectors before embedding.
stride: int Tensor of current batch * beam size.
Returns:
NamedTensor object containing the embedding vectors.
"""
feature_spec = component.spec.linked_feature[channel_id]
with tf.name_scope('activation_lookup_recurrent_%s' % feature_spec.name):
# Linked features are returned as a pair of tensors, one indexing into
# steps, and one indexing within the activation tensor (beam x batch)
# stored for a step.
step_idx, idx = dragnn_ops.extract_link_features(
state.handle, component=component.name, channel_id=channel_id)
# We take the [steps, batch*beam, ...] tensor array, gather and concat
# the steps we might need into a [some_steps*batch*beam, ...] tensor,
# and flatten 'idx' to dereference this new tensor.
#
# The first element of each tensor array is reserved for an
# initialization variable, so we offset all step indices by +1.
#
# TODO(googleuser): It would be great to not have to extract
# the steps in their entirety, forcing a copy of much of the
# TensorArray at each step. Better would be to support a
# TensorArray.gather_nd to pick the specific elements directly.
# TODO(googleuser): In the interim, a small optimization would
# be to use tf.unique instead of tf.range.
step_min = tf.reduce_min(step_idx)
ta_range = tf.range(step_min + 1, tf.reduce_max(step_idx) + 2)
act_block = source_array.gather(ta_range)
act_block = tf.reshape(act_block,
tf.concat([[-1], tf.shape(act_block)[2:]], 0))
flat_idx = (step_idx - step_min) * stride + idx
act_block = tf.gather(act_block, flat_idx)
act_block = tf.reshape(act_block, [-1, source_layer_size])
if feature_spec.embedding_dim != -1:
embedding_matrix = component.get_variable(
linked_embeddings_name(channel_id))
act_block = pass_through_embedding_matrix(act_block, embedding_matrix,
step_idx)
dim = feature_spec.size * feature_spec.embedding_dim
else:
# If embedding_dim is -1, just output concatenation of activations.
dim = feature_spec.size * source_layer_size
return NamedTensor(
tf.reshape(act_block, [-1, dim]), feature_spec.name, dim=dim)
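# Indexing sketch (illustrative numbers): with stride = 3, step_idx = [1, 2] and
# idx = [0, 2], step_min = 1 and ta_range = [2, 3] (the +1 skips the initialization
# slot), so the gathered block flattens to 2 * 3 rows and
# flat_idx = (step_idx - step_min) * stride + idx = [0, 5] selects the right rows.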
def activation_lookup_other(component, state, channel_id, source_tensor,
source_layer_size):
"""Looks up activations from tensors.
If the linked feature's embedding_dim is set to -1, the feature vectors are
not passed through (i.e. multiplied by) an embedding matrix.
Args:
component: Component object in which to look up the fixed features.
state: MasterState object for the live ComputeSession.
channel_id: int id of the fixed feature to look up.
source_tensor: Tensor from which to fetch feature vectors. Expected to have
have shape [steps + 1, stride, D].
source_layer_size: int length of feature vectors before embedding (D). It
would in principle be possible to get this dimension dynamically from
the second dimension of source_tensor. However, having it statically is
more convenient.
Returns:
NamedTensor object containing the embedding vectors.
"""
feature_spec = component.spec.linked_feature[channel_id]
with tf.name_scope('activation_lookup_other_%s' % feature_spec.name):
# Linked features are returned as a pair of tensors, one indexing into
# steps, and one indexing within the stride (beam x batch) of each step.
step_idx, idx = dragnn_ops.extract_link_features(
state.handle, component=component.name, channel_id=channel_id)
# The first element of each tensor array is reserved for an
# initialization variable, so we offset all step indices by +1.
indices = tf.stack([step_idx + 1, idx], axis=1)
act_block = tf.gather_nd(source_tensor, indices)
act_block = tf.reshape(act_block, [-1, source_layer_size])
if feature_spec.embedding_dim != -1:
embedding_matrix = component.get_variable(
linked_embeddings_name(channel_id))
act_block = pass_through_embedding_matrix(act_block, embedding_matrix,
step_idx)
dim = feature_spec.size * feature_spec.embedding_dim
else:
# If embedding_dim is -1, just output concatenation of activations.
dim = feature_spec.size * source_layer_size
return NamedTensor(
tf.reshape(act_block, [-1, dim]), feature_spec.name, dim=dim)
class LayerNorm(object):
"""Utility to add layer normalization to any tensor.
Layer normalization implementation is based on:
https://arxiv.org/abs/1607.06450. "Layer Normalization"
<NAME>, <NAME>, <NAME>
This object will construct additional variables that need to be optimized, and
these variables can be accessed via params().
Attributes:
params: List of additional parameters to be trained.
"""
def __init__(self, component, name, shape, dtype):
"""Construct variables to normalize an input of given shape.
Arguments:
component: ComponentBuilder handle.
name: Human readable name to organize the variables.
shape: Shape of the layer to be normalized.
dtype: Type of the layer to be normalized.
"""
self._name = name
self._shape = shape
self._component = component
beta = tf.get_variable(
'beta_%s' % name,
shape=shape,
dtype=dtype,
initializer=tf.zeros_initializer())
gamma = tf.get_variable(
'gamma_%s' % name,
shape=shape,
dtype=dtype,
initializer=tf.ones_initializer())
self._params = [beta, gamma]
@property
def params(self):
return self._params
def normalize(self, inputs):
"""Apply normalization to input.
The shape must match the declared shape in the constructor.
[This is copied from tf.contrib.rnn.LayerNormBasicLSTMCell.]
Args:
inputs: Input tensor
Returns:
Normalized version of input tensor.
Raises:
ValueError: if inputs has undefined rank.
"""
inputs_shape = inputs.get_shape()
inputs_rank = inputs_shape.ndims
if inputs_rank is None:
raise ValueError('Inputs %s has undefined rank.' % inputs.name)
axis = range(1, inputs_rank)
beta = self._component.get_variable('beta_%s' % self._name)
gamma = self._component.get_variable('gamma_%s' % self._name)
with tf.variable_scope('layer_norm_%s' % self._name):
# Calculate the moments on the last axis (layer activations).
mean, variance = nn.moments(inputs, axis, keep_dims=True)
# Compute layer normalization using the batch_normalization function.
variance_epsilon = 1E-12
outputs = nn.batch_normalization(inputs, mean, variance, beta, gamma,
variance_epsilon)
outputs.set_shape(inputs_shape)
return outputs
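# In effect, normalize() computes the standard layer-norm transform per example over
# the layer axes (a sketch, not additional code):
#   outputs = gamma * (inputs - mean) / sqrt(variance + 1e-12) + beta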
class Layer(object):
"""A layer in a feed-forward network.
Attributes:
component: ComponentBuilderBase that produces this layer.
name: Name of this layer.
dim: Dimension of this layer, or negative if dynamic.
"""
def __init__(self, component, name, dim):
check.NotNone(dim, 'Dimension is required')
self.component = component
self.name = name
self.dim = dim
def __str__(self):
return 'Layer: %s/%s[%d]' % (self.component.name, self.name, self.dim)
def create_array(self, stride):
"""Creates a new tensor array to store this layer's activations.
Arguments:
stride: Possibly dynamic batch * beam size.
total gates.
For example, `populate(base='11:22:33:44:00', count=10, gate_count=2)` would
route addresses 11:22:33:44::(00, 02, 04, 06, 08) out gate 0 and the odd-suffixed
addresses out gate 1.
:param base: The base MAC address
:param count: How many addresses beyond base to populate into the routing table
:param gate_count: How many gates to create in the L2Forward module.
"""
pass
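# Illustrative call, mirroring the docstring's own example (the instance name
# `l2fwd` is an assumption):
#
# l2fwd.populate(base='11:22:33:44:00', count=10, gate_count=2)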
class L4Checksum(Module):
"""
recomputes the TCP/IPv4 and UDP/IPv4 checksums
"""
class MACSwap(Module):
"""
swaps source/destination MAC addresses
The MACSwap module takes no arguments. It swaps the src/destination MAC addresses
within a packet.
__Input Gates__: 1
__Output Gates__: 1
"""
def __init__(self):
"""
The MACSwap module takes no arguments. It swaps the src/destination MAC addresses
within a packet.
__Input Gates__: 1
__Output Gates__: 1
"""
pass
class MPLSPop(Module):
"""
Pop MPLS label
The MPLS pop module removes MPLS labels
__Input Gates__: 1
__Output Gates__: 2
"""
def __init__(self, remove_eth_header=None, next_eth_type=None):
"""
The MPLS pop module removes MPLS labels
__Input Gates__: 1
__Output Gates__: 2
:param remove_eth_header: Remove ETH header with the pop
:param next_eth_type: The next ETH type to set
"""
pass
def set(self, remove_eth_header=None, next_eth_type=None):
"""
The MPLS pop module removes MPLS labels
__Input Gates__: 1
__Output Gates__: 2
:param remove_eth_header: Remove ETH header with the pop
:param next_eth_type: The next ETH type to set
"""
pass
class Measure(Module):
"""
measures packet latency (paired with Timestamp module)
The measure module tracks latencies, packets per second, and other statistics.
It should be paired with a Timestamp module, which attaches a timestamp to packets.
The measure module will log, for each packet it receives, how long (in nanoseconds) it has been since the packet was timestamped.
This module is somewhat experimental and undergoing various changes.
There is a test for the Measure module in [`bessctl/module_tests/timestamp.py`](https://github.com/NetSys/bess/blob/master/bessctl/module_tests/timestamp.py).
__Input Gates__: 1
__Output Gates__: 1
"""
def __init__(self, offset=None, jitter_sample_prob=None, latency_ns_max=None, latency_ns_resolution=None):
"""
The measure module tracks latencies, packets per second, and other statistics.
It should be paired with a Timestamp module, which attaches a timestamp to packets.
The measure module will log, for each packet it receives, how long (in nanoseconds) it has been since the packet was timestamped.
This module is somewhat experimental and undergoing various changes.
There is a test for the Measure module in [`bessctl/module_tests/timestamp.py`](https://github.com/NetSys/bess/blob/master/bessctl/module_tests/timestamp.py).
__Input Gates__: 1
__Output Gates__: 1
:param offset: Where to store the current time within the packet, offset in bytes.
:param jitter_sample_prob: How often the module should sample packets for inter-packet arrival measurements (to measure jitter).
:param latency_ns_max: maximum latency expected, in ns (default 0.1 s)
:param latency_ns_resolution: resolution, in ns (default 100)
"""
pass
def get_summary(self, clear=None, latency_percentiles=None, jitter_percentiles=None):
"""
The Measure module measures and collects latency/jitter data for packets
annotated by a Timestamp module. Note that Timestamp and Measure module must reside
on the server for accurate measurement (as a result, the most typical use case is
measuring roundtrip time).
Optionally, you can also retrieve percentile values by specifying points in
"percentiles". For example, "percentiles" of [50.0, 99.0] will return
[median, 99'th %-ile tail latency] in "percentile_values_ns" in the response.
:param clear: if true, the data will be all cleared after read
:param latency_percentiles: ascending list of real numbers in [0.0, 100.0]
:param jitter_percentiles: ascending list of real numbers in [0.0, 100.0]
:return: The Measure module function `get_summary()` returns the following values.
Note that the resolution value tells you how grainy the samples are,
e.g., 100 means that anything from 0-99 ns counts as "0",
anything from 100-199 counts as "100", and so on. The average
is of samples using this graininess, but (being a result of division)
may not be a multiple of the resolution.
"""
pass
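# Illustrative query (hypothetical instance name `m`): fetch the median and 99th
# percentile latency and clear the accumulated samples afterwards.
#
# stats = m.get_summary(clear=True, latency_percentiles=[50.0, 99.0])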
def clear(self):
pass
class Merge(Module):
"""
All input gates go out of a single output gate
The merge module takes no parameters. It has multiple input gates,
and passes out all packets from a single output gate.
__Input Gates__: many (configurable)
__Output Gates__: 1
"""
def __init__(self):
"""
The merge module takes no parameters. It has multiple input gates,
and passes out all packets from a single output gate.
__Input Gates__: many (configurable)
__Output Gates__: 1
"""
pass
class MetadataTest(Module):
"""
Dynamic metadata test module
The MetadataTest module is used for internal testing purposes.
"""
def __init__(self, read=None, write=None, update=None):
"""
The MetadataTest module is used for internal testing purposes.
"""
pass
class NAT(Module):
"""
Dynamic Network address/port translator
The NAT module implements Dynamic IPv4 address/port translation,
rewriting packet source addresses with external addresses as specified,
and destination addresses for packets on the reverse direction.
L3/L4 checksums are updated correspondingly.
To see an example of NAT in use, see:
[`bess/bessctl/conf/samples/nat.bess`](https://github.com/NetSys/bess/blob/master/bessctl/conf/samples/nat.bess)
Currently only supports TCP/UDP/ICMP.
Note that address/port in packet payload (e.g., FTP) are NOT translated.
__Input Gates__: 2 (0 for internal->external, and 1 for external->internal direction)
__Output Gates__: 2 (same as the input gate)
"""
def __init__(self, ext_addrs=None):
"""
The NAT module implements Dynamic IPv4 address/port translation,
rewriting packet source addresses with external addresses as specified,
and destination addresses for packets on the reverse direction.
L3/L4 checksums are updated correspondingly.
To see an example of NAT in use, see:
[`bess/bessctl/conf/samples/nat.bess`](https://github.com/NetSys/bess/blob/master/bessctl/conf/samples/nat.bess)
Currently only supports TCP/UDP/ICMP.
Note that address/port in packet payload (e.g., FTP) are NOT translated.
__Input Gates__: 2 (0 for internal->external, and 1 for external->internal direction)
__Output Gates__: 2 (same as the input gate)
:param ext_addrs: list of external IP addresses
"""
pass
def get_initial_arg(self):
pass
def get_runtime_config(self):
pass
def set_runtime_config(self):
pass
class NoOP(Module):
"""
creates a task that does nothing
This module is used for testing purposes.
"""
def __init__(self):
"""
This module is used for testing purposes.
"""
pass
class PortInc(Module):
"""
receives packets from a port
The PortInc module connects a physical or virtual port and releases
packets from it. PortInc does not support multiqueueing.
For details on how to configure PortInc using DPDK, virtual ports,
or libpcap, see the sidebar in the wiki.
__Input Gates__: 0
__Output Gates__: 1
"""
def __init__(self, port=None, prefetch=None):
"""
The PortInc module connects a physical or virtual port and releases
packets from it. PortInc does not support multiqueueing.
For details on how to configure PortInc using DPDK, virtual ports,
or libpcap, see the sidebar in the wiki.
__Input Gates__: 0
__Output Gates__: 1
:param port: The portname to connect to.
:param prefetch: Whether or not to prefetch packets from the port.
"""
pass
def set_burst(self, burst=None):
"""
The module PortInc has a function `set_burst(...)` that allows you to specify the
maximum number of packets to be stored in a single PacketBatch released by
the module.
:param burst: The maximum "burst" of packets (ie, the maximum batch size)
"""
pass
class PortOut(Module):
"""
sends packets to a port
The PortOut module connects to a physical or virtual port and pushes
packets to it. For details on how to configure PortOut with DPDK,
virtual ports, libpcap, etc, see the sidebar in the wiki.
__Input Gates__: 1
__Output Gates__: 0
"""
def __init__(self, port=None):
"""
The PortOut module connects to a physical or virtual port and pushes
packets to it. For details on how to configure PortOut with DPDK,
virtual ports, libpcap, etc, see the sidebar in the wiki.
__Input Gates__: 1
__Output Gates__: 0
:param port: The portname to connect to.
"""
pass
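# Minimal pipeline sketch in bessctl script style (port names are assumptions):
#
# PortInc(port='in0') -> PortOut(port='out0')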
class Queue(Module):
"""
terminates the current task and enqueues packets for a new task
The Queue module implements a simple packet queue.
__Input Gates__: 1
__Output Gates__: 1
"""
def __init__(self, size=None, prefetch=None, backpressure=None):
"""
The Queue module implements a simple packet queue.
__Input Gates__: 1
__Output Gates__: 1
:param size: The maximum number of packets to store in the queue.
:param prefetch: When prefetch is enabled, the module will perform CPU prefetch on the first 64B of each packet onto the CPU L1 cache.
# This file is part of the clacks framework.
#
# http://clacks-project.org
#
# Copyright:
# (C) 2010-2012 GONICUS GmbH, Germany, http://www.gonicus.de
#
# License:
# GPL-2: http://www.gnu.org/licenses/gpl-2.0.html
#
# See the LICENSE file in the project's top-level directory for details.
import re
import itertools
from clacks.common import Environment
LINUX = 2 ** 0
ALL = 2 ** 1
class DiskDefinition(object):
"""
The disk definition base class provides the mechanism to
manage disk setups for clients. Implementors of the
interface can concentrate on the conversion to the
desired format.
=========== ==================================
Parameter Description
=========== ==================================
definition Partition scheme definition string
=========== ==================================
"""
#TODO: move to EBNF for parsing and use _repr instead
# of custom strings that look like kickstart defs.
supportedFsTypes = []
supportedRaidLevels = [0, 1, 5]
supportedDeviceTypes = []
supportEncryption = False
def __init__(self, definition=None, uuid=None, user=None):
self._disks = []
self._parts = []
self._raids = []
self._volgroups = []
self._vols = []
self.uuid = uuid
self.user = user
if definition:
self._parseDiskDefinition(definition)
def _optionIterator(self, source):
for pattern in source:
yield(pattern)
def _parseOption(self, source, target, option, key=None, value=None,
default=None, numeric=False):
target[key if key else option] = default
pattern = self._optionIterator(source)
try:
# Look for the specified option in
while True:
if pattern.next() == "--" + option:
val = value if value else pattern.next()
if numeric:
val = int(val)
target[key if key else option] = val
idx = source.index("--" + option)
if not value:
del source[idx + 1]
del source[idx]
break
except StopIteration:
pass
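# Parsing sketch (illustrative): for entry = ['part', '/', '--size', '100', '--grow'],
# _parseOption(entry, part, 'size', numeric=True) stores part['size'] = 100 and strips
# '--size' and '100' from entry, while _parseOption(entry, part, 'grow', 'grow', True)
# stores the fixed value True and removes only the '--grow' token.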
def _parseDiskDefinition(self, data):
entries = data.split(';')
for entry in entries:
# Skip empty entries that might be produced by additional
# ; elements
if not entry:
continue
# Replace multiple spaced by one and split to single tokens
entry = entry.split()
entryType = entry[0]
# Parse disk
if entryType == "disk":
disk = {}
# Handle initlabel
self._parseOption(entry, disk, 'initlabel', 'initlabel', True)
self._parseOption(entry, disk, 'all', 'removeParts', ALL)
self._parseOption(entry, disk, 'none', 'removeParts', None)
self._parseOption(entry, disk, 'linux', 'removeParts', LINUX)
disk['device'] = entry[1]
self._disks.append(disk)
continue
# Parse partition
if entryType == "part":
part = {}
self._parseOption(entry, part, 'size', numeric=True)
self._parseOption(entry, part, 'maxsize', 'maxSize', numeric=True)
self._parseOption(entry, part, 'grow', 'grow', True)
self._parseOption(entry, part, 'encrypted', 'encrypted', True)
self._parseOption(entry, part, 'passphrase')
self._parseOption(entry, part, 'format', 'format', True)
self._parseOption(entry, part, 'bootable', 'bootable', True)
self._parseOption(entry, part, 'asprimary', 'primary', True)
self._parseOption(entry, part, 'fstype', 'fsType')
self._parseOption(entry, part, 'fsoptions', 'fsOptions')
self._parseOption(entry, part, 'ondisk', 'onDisk')
part['target'] = entry[1]
self._parts.append(part)
continue
# Parse raid device
if entryType == "raid":
raid = {}
self._parseOption(entry, raid, 'level', numeric=True)
self._parseOption(entry, raid, 'name')
self._parseOption(entry, raid, 'spares', numeric=True)
self._parseOption(entry, raid, 'format', 'format', True)
self._parseOption(entry, raid, 'useexisting', 'useExisting', True)
self._parseOption(entry, raid, 'ondisk', 'onDisk')
raid['target'] = entry[1]
raid['devices'] = entry[2:]
raid['device'] = raid['name']
self._raids.append(raid)
continue
# Parse volume group
if entryType == "volgroup":
volgroup = {}
self._parseOption(entry, volgroup, 'pesize', 'peSize', numeric=True)
self._parseOption(entry, volgroup, 'format', 'format', True)
self._parseOption(entry, volgroup, 'useexisting', 'useExisting', True)
volgroup['name'] = entry[1]
volgroup['device'] = volgroup['name']
volgroup['partitions'] = entry[2:]
self._volgroups.append(volgroup)
continue
# Parse volume
if entryType == "logvol":
vol = {}
self._parseOption(entry, vol, 'format', 'format', True)
self._parseOption(entry, vol, 'useexisting', 'useExisting', True)
self._parseOption(entry, vol, 'size', numeric=True)
self._parseOption(entry, vol, 'maxsize', 'maxSize', numeric=True)
self._parseOption(entry, vol, 'grow', 'grow', True)
self._parseOption(entry, vol, 'fstype', 'fsType')
self._parseOption(entry, vol, 'fsoptions', 'fsOptions')
self._parseOption(entry, vol, 'name', 'name')
self._parseOption(entry, vol, 'vgname', 'volGroup')
vol['target'] = entry[1]
self._vols.append(vol)
continue
# If we got here, there's something wrong
raise ValueError("unknown descriptor %s" % entryType)
def dump(self):
"""
Dump the current partitioning configuration in the current format.
``Return:`` string
"""
return self._dumpDisk() + self._dumpPartition() + \
self._dumpRaidDevice() + self._dumpVolumeGroup() + \
self._dumpVolume()
def getDisks(self):
"""
List the currently configured disks.
>>> o = DebianDiskDefinition()
>>> o.addDisk("sda")
>>> o.getDisks()
[{'device': 'sda', 'initlabel': True, 'removeParts': None}]
The returned list consists of hashes. For details, please see
:meth:`libinst.interface.disk.DiskDefinition.addDisk`.
``Return:`` list of hashes
"""
return self._disks
def addDisk(self, device, initLabel=True, removeParts=None):
"""
Add a new disk to the partitioning scheme.
============ ====================================================
Parameter Description
============ ====================================================
device Device string without /dev/ (i.e. sda or cciss/c0d0)
initlabel Use labels
removeParts Remove partitions of type LINUX/ALL or None
============ ====================================================
"""
self.checkDevice(device)
if not removeParts in [None, LINUX, ALL]:
raise ValueError("removeParts needs to be None, LINUX or ALL")
if device in [d['device'] for d in self._disks]:
raise ValueError("device already defined")
self._disks.append({'device': device,
'initlabel': bool(initLabel),
'removeParts': removeParts})
def delDisk(self, diskId):
"""
Delete an existing disk.
============ ========================
Parameter Description
============ ========================
diskId Disk index
============ ========================
"""
# Check if it is used
if self._disks[diskId]['device'] in [p['onDisk'] for p in self._parts]:
raise ValueError("disk still in use")
del self._disks[diskId]
def _dumpDisk(self):
res = ""
for disk in self._disks:
options = []
if disk['initlabel']:
options.append("--initlabel")
if disk['removeParts'] == None:
options.append("--none")
if disk['removeParts'] == LINUX:
options.append("--linux")
if disk['removeParts'] == ALL:
options.append("--all")
res += "disk %s %s;" % (disk['device'], " ".join(options))
return res
def getPartitions(self):
"""
List defined partitions.
============ ========================
Parameter Description
============ ========================
diskId Disk index
============ ========================
Example:
>>> o.addPartition('/', 100, onDisk='sda')
>>> o.getPartitions()
[{'onDisk': 'sda', 'format': True, 'encrypted': False, 'primary': False, 'maxSize': None, 'passphrase': None, 'grow': False, 'size': 100, 'target': '/', 'bootable': False, 'fsType': None, 'fsOptions': None}]
For information about the keys, please see :meth:`libinst.instance.disk.DiskDefinition.addPartition`
``Return:`` dict
"""
return self._parts
def addPartition(self, target, size, maxSize=None, grow=False,
formatPartition=True, boot=False, primary=False, fsType=None,
fsOptions=None, encrypted=False, passphrase=None,
onDisk=None):
"""
Add a partition to the partitioning scheme.
================ ==================================================
Parameter Description
================ ==================================================
target Filesystem target i.e. /, raid.01, swap, pv.01
size Size of the partition
maxSize Maximum size of the partition (used with *grow*)
grow Grow partition from *size* to *maxSize*
formatPartition Do format the partition
boot Mark it as boot partition
primary Create a primary partition
fsType Set filesystem type (i.e. ext4)
fsOptions Set filesystem options for the formatting process
encrypted Create an encrypted partition
passphrase <PASSWORD> for encrypted partition
onDisk Place partition on a special disk
================ ==================================================
"""
# Check target
pt = re.compile(r"^(raid.[0-9][0-9]|swap|/.*|pv.[0-9][0-9])$")
if not pt.match(target):
raise ValueError("target %s is invalid" % target)
if target in [part['target'] for part in self._parts]:
raise ValueError("target already in use")
# Check if disk exists
if onDisk and not onDisk in [disk['device'] \
for disk in self._disks + self._raids + self._volgroups]:
raise ValueError("selected disk %s does not exist" % onDisk)
# Size check
if maxSize and maxSize < size:
raise ValueError("maxSize must be greater than size")
# Check for space
info = self.getDeviceUsage()
if onDisk in info['disk'] and info['disk'][onDisk]['size'] - info['disk'][onDisk]['usage'] < size:
raise ValueError("not enough remaining space available")
# Check fs options
if fsType:
self.checkFsType(fsType)
if fsOptions:
self.checkFsOptions(fsOptions)
# Check for encryption
if not self.supportEncryption and encrypted:
raise ValueError("encryption not supported")
# Assign values
self._parts.append({
"target": target,
"size": size,
"maxSize": None if not maxSize else maxSize,
"grow": bool(grow),
"format": bool(formatPartition),
"primary": bool(primary),
"bootable": bool(boot),
"fsType": fsType,
"fsOptions": fsOptions,
"encrypted": bool(encrypted),
"passphrase": <PASSWORD>,
"onDisk": onDisk})
def delPartition(self, partitionId):
"""
Delete a partition with the partitionId - that's the index
delivered by
:meth:`libinst.interface.disk.DiskDefinition.getPartitions`.
============ ========================
Parameter Description
============ ========================
partitionId Partition index
============ ========================
"""
devs = []
vgs = []
if len(self._raids):
for raid in self._raids:
for dev in raid['devices']:
devs.append(dev)
if len(self._volgroups):
for vg in self._volgroups:
for dev in vg['partitions']:
vgs.append(dev)
if self._parts[partitionId]['target'] in vgs or self._parts[partitionId]['target'] in devs:
raise ValueError("disk still in use")
del self._parts[partitionId]
def _dumpPartition(self):
res = ""
for part in self._parts:
options = []
options.append("--size %s" % part['size'])
if part['maxSize']:
options.append("--maxsize %s" % part['maxSize'])
if part['grow']:
options.append("--grow")
if part['format']:
options.append("--format")
if part['bootable']:
options.append("--bootable")
if part['primary']:
options.append("--asprimary")
if part['fsType']:
options.append("--fstype %s" % part['fsType'])
if part['fsOptions']:
options.append("--fsoptions \"%s\"" % part['fsOptions'])
if part['encrypted']:
options.append("--encrypted")
if part['passphrase']:
options.append("--passphrase \"%s\"" % part['passphrase'])
if part['onDisk']:
options.append("--ondisk %s" % part['onDisk'])
res += "part %s %s;" % (part['target'], " ".join(options))
return res
def getRaidDevices(self):
"""
List RAID defined devices.
For information about the keys, please see :meth:`libinst.instance.disk.DiskDefinition.addRaidDevice`
``Return:`` list of dicts
"""
return self._raids
def addRaidDevice(self, target, name, level="0", spares="0", fsType=None,
fsOptions=None, formatDevice=True, useExisting=False, devices=None):
"""
Add a RAID device to the current disk setup.
============ =======================================================
Parameter Description
============ =======================================================
target Filesystem target, i.e. /, swap, pv.01
name Name of the RAID device,
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'SumDialog.ui'
#
# Created by: PyQt5 UI code generator 5.15.0
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
from SumAllTable import DataGridAll
class Sum_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(1214, 675)
Dialog.setStyleSheet("background-color:#555555\n"
"")
self.gridLayout_2 = QtWidgets.QGridLayout(Dialog)
self.gridLayout_2.setObjectName("gridLayout_2")
self.label_20 = QtWidgets.QLabel(Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_20.sizePolicy().hasHeightForWidth())
self.label_20.setSizePolicy(sizePolicy)
self.label_20.setMinimumSize(QtCore.QSize(0, 30))
self.label_20.setMaximumSize(QtCore.QSize(16777215, 30))
self.label_20.setStyleSheet("QLabel{\n"
" border:none;\n"
" font-size:16px;\n"
" font-weight:400;\n"
" background-color: qlineargradient(x1:0, y1:0, x2:0, y2:1,stop:0 #777777,stop:1 #000000);\n"
" color:white;\n"
" }")
self.label_20.setObjectName("label_20")
self.gridLayout_2.addWidget(self.label_20, 0, 0, 1, 2)
self.widget = QtWidgets.QWidget(Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget.sizePolicy().hasHeightForWidth())
self.widget.setSizePolicy(sizePolicy)
self.widget.setMaximumSize(QtCore.QSize(16777215, 740))
self.widget.setStyleSheet("background-color:#444444")
self.widget.setObjectName("widget")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.widget)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.label = QtWidgets.QLabel(self.widget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
self.label.setMinimumSize(QtCore.QSize(0, 30))
self.label.setMaximumSize(QtCore.QSize(16777215, 30))
self.label.setStyleSheet("QLabel {\n"
" border-radius: 3px;\n"
" color:white;\n"
" background:#444444;\n"
"}")
self.label.setObjectName("label")
self.verticalLayout_2.addWidget(self.label)
self.widget_2 = QtWidgets.QWidget(self.widget)
self.widget_2.setMinimumSize(QtCore.QSize(0, 250))
self.widget_2.setStyleSheet("background-color: rgb(85, 85, 85);")
self.widget_2.setObjectName("widget_2")
self.widget_3 = QtWidgets.QWidget(self.widget_2)
self.widget_3.setGeometry(QtCore.QRect(40, 20, 381, 41))
self.widget_3.setObjectName("widget_3")
self.label_2 = QtWidgets.QLabel(self.widget_3)
self.label_2.setGeometry(QtCore.QRect(20, 10, 72, 15))
self.label_2.setStyleSheet("QLabel {\n"
" border-radius: 3px;\n"
" color:white;\n"
"}")
self.label_2.setObjectName("label_2")
self.comboBox = QtWidgets.QComboBox(self.widget_3)
self.comboBox.setGeometry(QtCore.QRect(170, 10, 161, 22))
self.comboBox.setStyleSheet("background-color:#444444;")
self.comboBox.setObjectName("comboBox")
self.widget_4 = QtWidgets.QWidget(self.widget_2)
self.widget_4.setGeometry(QtCore.QRect(40, 70, 381, 41))
self.widget_4.setObjectName("widget_4")
self.label_3 = QtWidgets.QLabel(self.widget_4)
self.label_3.setGeometry(QtCore.QRect(20, 10, 91, 16))
self.label_3.setStyleSheet("QLabel {\n"
" border-radius: 3px;\n"
" color:white;\n"
"}")
self.label_3.setObjectName("label_3")
self.comboBox_2 = QtWidgets.QComboBox(self.widget_4)
self.comboBox_2.setGeometry(QtCore.QRect(170, 10, 161, 22))
self.comboBox_2.setStyleSheet("background-color:#444444;")
self.comboBox_2.setObjectName("comboBox_2")
self.pushButton = QtWidgets.QPushButton(self.widget_2)
self.pushButton.setGeometry(QtCore.QRect(170, 180, 93, 28))
self.pushButton.setStyleSheet("QPushButton{\n"
" border:none;\n"
" background-color: qlineargradient(x1:0, y1:0, x2:0, y2:1,stop:0 rgb(96,157,200),stop:1 rgb(0,94,150));\n"
" font-size:15px;\n"
" color:white;\n"
" width:120px;\n"
" height:40px;\n"
" text-align:center;\n"
" border-radius:5px;\n"
" }\n"
"QPushButton:hover{\n"
" color:#0caaff\n"
"}\n"
"QPushButton:pressed{\n"
"background-color: rgb(50, 88, 138)\n"
"}\n"
"QPushButton:disabled{\n"
"color:rgb(172, 172, 172);\n"
"background-color:rgb(93, 93, 93)\n"
"}")
self.pushButton.setObjectName("pushButton")
self.verticalLayout_2.addWidget(self.widget_2)
self.gridLayout_2.addWidget(self.widget, 1, 0, 1, 1)
self.widget_11 = QtWidgets.QWidget(Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget_11.sizePolicy().hasHeightForWidth())
self.widget_11.setSizePolicy(sizePolicy)
self.widget_11.setMinimumSize(QtCore.QSize(720, 0))
self.widget_11.setMaximumSize(QtCore.QSize(16777215, 740))
self.widget_11.setStyleSheet("background-color:#444444")
self.widget_11.setObjectName("widget_11")
self.gridLayout_8 = QtWidgets.QGridLayout(self.widget_11)
self.gridLayout_8.setObjectName("gridLayout_8")
self.label_63 = QtWidgets.QLabel(self.widget_11)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_63.sizePolicy().hasHeightForWidth())
self.label_63.setSizePolicy(sizePolicy)
self.label_63.setMinimumSize(QtCore.QSize(0, 30))
self.label_63.setMaximumSize(QtCore.QSize(16777215, 30))
self.label_63.setStyleSheet("QLabel {\n"
" border-radius: 3px;\n"
" color:white;\n"
" background:#444444;\n"
"}")
self.label_63.setObjectName("label_63")
self.gridLayout_8.addWidget(self.label_63, 0, 0, 1, 1)
self.widget_15 = QtWidgets.QWidget(self.widget_11)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget_15.sizePolicy().hasHeightForWidth())
self.widget_15.setSizePolicy(sizePolicy)
self.widget_15.setMinimumSize(QtCore.QSize(0, 480))
self.widget_15.setStyleSheet("QWidget{\n"
"background-color:#555555\n"
"}\n"
"QPushButton{\n"
" border:none;\n"
" background-color: qlineargradient(x1:0, y1:0, x2:0, y2:1,stop:0 rgb(96,157,200),stop:1 rgb(0,94,150));\n"
" font-size:15px;\n"
" color:white;\n"
" width:120px;\n"
" height:40px;\n"
" text-align:center;\n"
" border-radius:5px;\n"
" }\n"
"QPushButton:hover{\n"
" color:#0caaff\n"
"}\n"
"QPushButton:pressed{\n"
"background-color: rgb(50, 88, 138)\n"
"}\n"
"QPushButton:disabled{\n"
"color:rgb(172, 172, 172);\n"
"background-color:rgb(93, 93, 93)\n"
"}\n"
"\n"
"QLable{\n"
"color:white;\n"
"background:#222222;\n"
"}")
self.widget_15.setObjectName("widget_15")
self.verticalLayout = QtWidgets.QVBoxLayout(self.widget_15)
self.verticalLayout.setObjectName("verticalLayout")
self.datagridall=DataGridAll()
self.verticalLayout.addWidget(self.datagridall)
self.gridLayout_8.addWidget(self.widget_15, 1, 0, 1, 1)
self.gridLayout_2.addWidget(self.widget_11, 1, 1, 2, 1)
self.widget_5 = QtWidgets.QWidget(Dialog)
self.widget_5.setStyleSheet("background-color:#444444")
self.widget_5.setObjectName("widget_5")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.widget_5)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.tabWidget = QtWidgets.QTabWidget(self.widget_5)
self.tabWidget.setStyleSheet("QTabWidget{\n"
"background-color:#666666;\n"
"}\n"
"QTabWidget::pane{\n"
"\n"
"border-top: 1px solid;\n"
"\n"
"border-color: #333333;\n"
"\n"
"}\n"
"\n"
"QTabBar::tab {\n"
"min-width:80px;\n"
"min-height:30px;\n"
"color: white;\n"
"background:#666666;\n"
"\n"
"border: 0px solid;\n"
"\n"
"}\n"
"\n"
"QTabBar::tab:selected{\n"
"min-width:85px;\n"
"min-height:30px;\n"
"color: white;\n"
"background:#009DE2;\n"
"border: 0px solid;\n"
"border-bottom: 2px solid;\n"
"}\n"
"")
self.tabWidget.setObjectName("tabWidget")
self.tab = QtWidgets.QWidget()
self.tab.setObjectName("tab")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.tab)
self.horizontalLayout.setObjectName("horizontalLayout")
self.widget_6 = QtWidgets.QWidget(self.tab)
self.widget_6.setStyleSheet("background-color: rgb(85, 85, 85);")
self.widget_6.setObjectName("widget_6")
self.gridLayout = QtWidgets.QGridLayout(self.widget_6)
self.gridLayout.setObjectName("gridLayout")
self.label_5 = QtWidgets.QLabel(self.widget_6)
self.label_5.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_5.setObjectName("label_5")
self.gridLayout.addWidget(self.label_5, 1, 0, 1, 1)
self.label_14 = QtWidgets.QLabel(self.widget_6)
self.label_14.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_14.setObjectName("label_14")
self.gridLayout.addWidget(self.label_14, 5, 1, 1, 1)
self.label_12 = QtWidgets.QLabel(self.widget_6)
self.label_12.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_12.setObjectName("label_12")
self.gridLayout.addWidget(self.label_12, 3, 1, 1, 1)
self.label_8 = QtWidgets.QLabel(self.widget_6)
self.label_8.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_8.setObjectName("label_8")
self.gridLayout.addWidget(self.label_8, 4, 0, 1, 1)
self.label_13 = QtWidgets.QLabel(self.widget_6)
self.label_13.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_13.setObjectName("label_13")
self.gridLayout.addWidget(self.label_13, 4, 1, 1, 1)
self.label_11 = QtWidgets.QLabel(self.widget_6)
self.label_11.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_11.setObjectName("label_11")
self.gridLayout.addWidget(self.label_11, 2, 1, 1, 1)
self.label_7 = QtWidgets.QLabel(self.widget_6)
self.label_7.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_7.setObjectName("label_7")
self.gridLayout.addWidget(self.label_7, 3, 0, 1, 1)
self.label_10 = QtWidgets.QLabel(self.widget_6)
self.label_10.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_10.setObjectName("label_10")
self.gridLayout.addWidget(self.label_10, 1, 1, 1, 1)
self.label_4 = QtWidgets.QLabel(self.widget_6)
self.label_4.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_4.setObjectName("label_4")
self.gridLayout.addWidget(self.label_4, 0, 0, 1, 1)
self.label_6 = QtWidgets.QLabel(self.widget_6)
self.label_6.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_6.setObjectName("label_6")
self.gridLayout.addWidget(self.label_6, 2, 0, 1, 1)
self.label_9 = QtWidgets.QLabel(self.widget_6)
self.label_9.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_9.setObjectName("label_9")
self.gridLayout.addWidget(self.label_9, 5, 0, 1, 1)
self.horizontalLayout.addWidget(self.widget_6)
self.tabWidget.addTab(self.tab, "")
self.tab_2 = QtWidgets.QWidget()
self.tab_2.setObjectName("tab_2")
self.widget_7 = QtWidgets.QWidget(self.tab_2)
self.widget_7.setGeometry(QtCore.QRect(10, 10, 431, 171))
self.widget_7.setStyleSheet("background-color: rgb(85, 85, 85);")
self.widget_7.setObjectName("widget_7")
self.gridLayout_3 = QtWidgets.QGridLayout(self.widget_7)
self.gridLayout_3.setContentsMargins(0, 0, 0, 0)
self.gridLayout_3.setObjectName("gridLayout_3")
self.label_15 = QtWidgets.QLabel(self.widget_7)
self.label_15.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_15.setObjectName("label_15")
self.gridLayout_3.addWidget(self.label_15, 0, 0, 1, 1)
self.label_16 = QtWidgets.QLabel(self.widget_7)
self.label_16.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_16.setObjectName("label_16")
self.gridLayout_3.addWidget(self.label_16, 1, 0, 1, 1)
self.label_17 = QtWidgets.QLabel(self.widget_7)
self.label_17.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_17.setObjectName("label_17")
self.gridLayout_3.addWidget(self.label_17, 1, 1, 1, 1)
self.label_18 = QtWidgets.QLabel(self.widget_7)
self.label_18.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_18.setObjectName("label_18")
self.gridLayout_3.addWidget(self.label_18, 2, 0, 1, 1)
self.label_19 = QtWidgets.QLabel(self.widget_7)
self.label_19.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_19.setObjectName("label_19")
self.gridLayout_3.addWidget(self.label_19, 2, 1, 1, 1)
self.label_21 = QtWidgets.QLabel(self.widget_7)
self.label_21.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_21.setObjectName("label_21")
self.gridLayout_3.addWidget(self.label_21, 3, 0, 1, 1)
self.label_22 = QtWidgets.QLabel(self.widget_7)
self.label_22.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_22.setObjectName("label_22")
self.gridLayout_3.addWidget(self.label_22, 3, 1, 1, 1)
self.label_23 = QtWidgets.QLabel(self.widget_7)
self.label_23.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_23.setObjectName("label_23")
self.gridLayout_3.addWidget(self.label_23, 4, 0, 1, 1)
self.label_24 = QtWidgets.QLabel(self.widget_7)
self.label_24.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_24.setObjectName("label_24")
self.gridLayout_3.addWidget(self.label_24, 4, 1, 1, 1)
self.label_25 = QtWidgets.QLabel(self.widget_7)
self.label_25.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_25.setObjectName("label_25")
self.gridLayout_3.addWidget(self.label_25, 5, 0, 1, 1)
self.label_26 = QtWidgets.QLabel(self.widget_7)
self.label_26.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_26.setObjectName("label_26")
self.gridLayout_3.addWidget(self.label_26, 5, 1, 1, 1)
self.tabWidget.addTab(self.tab_2, "")
self.tab_5 = QtWidgets.QWidget()
self.tab_5.setObjectName("tab_5")
self.widget_8 = QtWidgets.QWidget(self.tab_5)
self.widget_8.setGeometry(QtCore.QRect(10, 10, 431, 171))
self.widget_8.setStyleSheet("background-color: rgb(85, 85, 85);")
self.widget_8.setObjectName("widget_8")
self.gridLayout_4 = QtWidgets.QGridLayout(self.widget_8)
self.gridLayout_4.setContentsMargins(0, 0, 0, 0)
self.gridLayout_4.setObjectName("gridLayout_4")
self.label_27 = QtWidgets.QLabel(self.widget_8)
self.label_27.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_27.setObjectName("label_27")
self.gridLayout_4.addWidget(self.label_27, 0, 0, 1, 1)
self.label_28 = QtWidgets.QLabel(self.widget_8)
self.label_28.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_28.setObjectName("label_28")
self.gridLayout_4.addWidget(self.label_28, 1, 0, 1, 1)
self.label_29 = QtWidgets.QLabel(self.widget_8)
self.label_29.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_29.setObjectName("label_29")
self.gridLayout_4.addWidget(self.label_29, 1, 1, 1, 1)
self.label_30 = QtWidgets.QLabel(self.widget_8)
self.label_30.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_30.setObjectName("label_30")
self.gridLayout_4.addWidget(self.label_30, 2, 0, 1, 1)
self.label_31 = QtWidgets.QLabel(self.widget_8)
self.label_31.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_31.setObjectName("label_31")
self.gridLayout_4.addWidget(self.label_31, 2, 1, 1, 1)
self.label_32 = QtWidgets.QLabel(self.widget_8)
self.label_32.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_32.setObjectName("label_32")
self.gridLayout_4.addWidget(self.label_32, 3, 0, 1, 1)
self.label_33 = QtWidgets.QLabel(self.widget_8)
self.label_33.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_33.setObjectName("label_33")
self.gridLayout_4.addWidget(self.label_33, 3, 1, 1, 1)
self.label_34 = QtWidgets.QLabel(self.widget_8)
self.label_34.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_34.setObjectName("label_34")
self.gridLayout_4.addWidget(self.label_34, 4, 0, 1, 1)
self.label_35 = QtWidgets.QLabel(self.widget_8)
self.label_35.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_35.setObjectName("label_35")
self.gridLayout_4.addWidget(self.label_35, 4, 1, 1, 1)
self.label_36 = QtWidgets.QLabel(self.widget_8)
self.label_36.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_36.setObjectName("label_36")
self.gridLayout_4.addWidget(self.label_36, 5, 0, 1, 1)
self.label_37 = QtWidgets.QLabel(self.widget_8)
self.label_37.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_37.setObjectName("label_37")
self.gridLayout_4.addWidget(self.label_37, 5, 1, 1, 1)
self.tabWidget.addTab(self.tab_5, "")
self.tab_6 = QtWidgets.QWidget()
self.tab_6.setObjectName("tab_6")
self.widget_9 = QtWidgets.QWidget(self.tab_6)
self.widget_9.setGeometry(QtCore.QRect(10, 10, 431, 171))
self.widget_9.setStyleSheet("background-color: rgb(85, 85, 85);")
self.widget_9.setObjectName("widget_9")
self.gridLayout_5 = QtWidgets.QGridLayout(self.widget_9)
self.gridLayout_5.setContentsMargins(0, 0, 0, 0)
self.gridLayout_5.setObjectName("gridLayout_5")
self.label_38 = QtWidgets.QLabel(self.widget_9)
self.label_38.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_38.setObjectName("label_38")
self.gridLayout_5.addWidget(self.label_38, 0, 0, 1, 1)
self.label_39 = QtWidgets.QLabel(self.widget_9)
self.label_39.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_39.setObjectName("label_39")
self.gridLayout_5.addWidget(self.label_39, 1, 0, 1, 1)
self.label_40 = QtWidgets.QLabel(self.widget_9)
self.label_40.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_40.setObjectName("label_40")
self.gridLayout_5.addWidget(self.label_40, 1, 1, 1, 1)
self.label_41 = QtWidgets.QLabel(self.widget_9)
self.label_41.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_41.setObjectName("label_41")
self.gridLayout_5.addWidget(self.label_41, 2, 0, 1, 1)
self.label_42 = QtWidgets.QLabel(self.widget_9)
self.label_42.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_42.setObjectName("label_42")
self.gridLayout_5.addWidget(self.label_42, 2, 1, 1, 1)
self.label_43 = QtWidgets.QLabel(self.widget_9)
self.label_43.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_43.setObjectName("label_43")
self.gridLayout_5.addWidget(self.label_43, 3, 0, 1, 1)
self.label_44 = QtWidgets.QLabel(self.widget_9)
self.label_44.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_44.setObjectName("label_44")
self.gridLayout_5.addWidget(self.label_44, 3, 1, 1, 1)
self.label_45 = QtWidgets.QLabel(self.widget_9)
self.label_45.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_45.setObjectName("label_45")
self.gridLayout_5.addWidget(self.label_45, 4, 0, 1, 1)
self.label_46 = QtWidgets.QLabel(self.widget_9)
self.label_46.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_46.setObjectName("label_46")
self.gridLayout_5.addWidget(self.label_46, 4, 1, 1, 1)
self.label_47 = QtWidgets.QLabel(self.widget_9)
self.label_47.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_47.setObjectName("label_47")
self.gridLayout_5.addWidget(self.label_47, 5, 0, 1, 1)
self.label_48 = QtWidgets.QLabel(self.widget_9)
self.label_48.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_48.setObjectName("label_48")
self.gridLayout_5.addWidget(self.label_48, 5, 1, 1, 1)
self.tabWidget.addTab(self.tab_6, "")
self.tab_7 = QtWidgets.QWidget()
self.tab_7.setObjectName("tab_7")
self.widget_10 = QtWidgets.QWidget(self.tab_7)
self.widget_10.setGeometry(QtCore.QRect(10, 10, 431, 171))
self.widget_10.setStyleSheet("background-color: rgb(85, 85, 85);")
self.widget_10.setObjectName("widget_10")
self.gridLayout_6 = QtWidgets.QGridLayout(self.widget_10)
self.gridLayout_6.setContentsMargins(0, 0, 0, 0)
self.gridLayout_6.setObjectName("gridLayout_6")
self.label_49 = QtWidgets.QLabel(self.widget_10)
self.label_49.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_49.setObjectName("label_49")
self.gridLayout_6.addWidget(self.label_49, 0, 0, 1, 1)
self.label_50 = QtWidgets.QLabel(self.widget_10)
self.label_50.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_50.setObjectName("label_50")
self.gridLayout_6.addWidget(self.label_50, 1, 0, 1, 1)
self.label_51 = QtWidgets.QLabel(self.widget_10)
self.label_51.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_51.setObjectName("label_51")
self.gridLayout_6.addWidget(self.label_51, 1, 1, 1, 1)
self.label_52 = QtWidgets.QLabel(self.widget_10)
self.label_52.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_52.setObjectName("label_52")
self.gridLayout_6.addWidget(self.label_52, 2, 0, 1, 1)
self.label_53 = QtWidgets.QLabel(self.widget_10)
self.label_53.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_53.setObjectName("label_53")
self.gridLayout_6.addWidget(self.label_53, 2, 1, 1, 1)
self.label_54 = QtWidgets.QLabel(self.widget_10)
self.label_54.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_54.setObjectName("label_54")
self.gridLayout_6.addWidget(self.label_54, 3, 0, 1, 1)
self.label_55 = QtWidgets.QLabel(self.widget_10)
self.label_55.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_55.setObjectName("label_55")
self.gridLayout_6.addWidget(self.label_55, 3, 1, 1, 1)
self.label_56 = QtWidgets.QLabel(self.widget_10)
self.label_56.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_56.setObjectName("label_56")
self.gridLayout_6.addWidget(self.label_56, 4, 0, 1, 1)
self.label_57 = QtWidgets.QLabel(self.widget_10)
self.label_57.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_57.setObjectName("label_57")
self.gridLayout_6.addWidget(self.label_57, 4, 1, 1, 1)
self.label_58 = QtWidgets.QLabel(self.widget_10)
self.label_58.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_58.setObjectName("label_58")
self.gridLayout_6.addWidget(self.label_58, 5, 0, 1, 1)
self.label_59 = QtWidgets.QLabel(self.widget_10)
self.label_59.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_59.setObjectName("label_59")
self.gridLayout_6.addWidget(self.label_59, 5, 1, 1, 1)
self.tabWidget.addTab(self.tab_7, "")
self.verticalLayout_3.addWidget(self.tabWidget)
self.gridLayout_2.addWidget(self.widget_5, 2, 0, 1, 1)
self.retranslateUi(Dialog)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
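        # English glosses for the Chinese UI strings set below:
        #   分割统计 = "Segmentation statistics"; 统计设置 = "Statistics settings"
        #   摄像头ID = "Camera ID"; 摄像头点位 = "Camera location"; 统计 = "Compute statistics"
        #   平均矿石尺寸占比 = "Average ore size proportion"; (5)平均尺寸 = "(5) Average size"
        #   1分钟 / 5分钟 / 10分钟 = tab labels "1 minute" / "5 minutes" / "10 minutes"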
self.label_20.setText(_translate("Dialog", " 分割统计"))
self.label.setText(_translate("Dialog", "统计设置"))
self.label_2.setText(_translate("Dialog", "摄像头ID:"))
self.label_3.setText(_translate("Dialog", "摄像头点位:"))
self.pushButton.setText(_translate("Dialog", "统计"))
self.label_63.setText(_translate("Dialog", "统计设置"))
self.label_5.setText(_translate("Dialog", "(1)20+:"))
self.label_14.setText(_translate("Dialog", "%"))
self.label_12.setText(_translate("Dialog", "%"))
self.label_8.setText(_translate("Dialog", "(4)2+:"))
self.label_13.setText(_translate("Dialog", "%"))
self.label_11.setText(_translate("Dialog", "%"))
self.label_7.setText(_translate("Dialog", "(3)5+:"))
self.label_10.setText(_translate("Dialog", "%"))
self.label_4.setText(_translate("Dialog", "平均矿石尺寸占比:"))
self.label_6.setText(_translate("Dialog", "(2)10+:"))
self.label_9.setText(_translate("Dialog", "(5)平均尺寸:"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("Dialog", "1分钟"))
self.label_15.setText(_translate("Dialog", "平均矿石尺寸占比:"))
self.label_16.setText(_translate("Dialog", "(1)20+:"))
self.label_17.setText(_translate("Dialog", "%"))
self.label_18.setText(_translate("Dialog", "(2)10+:"))
self.label_19.setText(_translate("Dialog", "%"))
self.label_21.setText(_translate("Dialog", "(3)5+:"))
self.label_22.setText(_translate("Dialog", "%"))
self.label_23.setText(_translate("Dialog", "(4)2+:"))
self.label_24.setText(_translate("Dialog", "%"))
self.label_25.setText(_translate("Dialog", "(5)平均尺寸:"))
self.label_26.setText(_translate("Dialog", "%"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("Dialog", "5分钟"))
self.label_27.setText(_translate("Dialog", "平均矿石尺寸占比:"))
self.label_28.setText(_translate("Dialog", "(1)20+:"))
self.label_29.setText(_translate("Dialog", "%"))
self.label_30.setText(_translate("Dialog", "(2)10+:"))
self.label_31.setText(_translate("Dialog", "%"))
self.label_32.setText(_translate("Dialog", "(3)5+:"))
self.label_33.setText(_translate("Dialog", "%"))
self.label_34.setText(_translate("Dialog", "(4)2+:"))
self.label_35.setText(_translate("Dialog", "%"))
self.label_36.setText(_translate("Dialog", "(5)平均尺寸:"))
self.label_37.setText(_translate("Dialog", "%"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_5), _translate("Dialog", "10分钟"))
self.label_38.setText(_translate("Dialog", "平均矿石尺寸占比:"))
self.label_39.setText(_translate("Dialog", "(1)20+:"))
self.label_40.setText(_translate("Dialog", "%"))
self.label_41.setText(_translate("Dialog", "(2)10+:"))
# Repo: tpulkit/txt2vid
# Script to analyze pilot study data collected using qualtrics on one content
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# params
# data_file = 'Video Quality Survey (640x480)_May 7, 2021_07.55.csv'
data_file = 'Video Quality Survey (640x480)_May 14, 2021_08.10.csv'
num_comps = 35
# Comparisons in order -- add to map to values later
choices = [
['Video A1', 'Video B1'],
['Video A1', 'Video B2'],
['Video A1', 'Video B3'],
['Video A1', 'Video B4'],
['Video A1', 'Video B5'],
['Video A1', 'Video B6'],
['Video A1', 'Video B7'],
['Video A1', 'Video B8'],
['Video A1', 'Video B9'],
['Video A1', 'Video B10'],
['Video A2', 'Video B1'],
['Video A2', 'Video B2'],
['Video A2', 'Video B3'],
['Video A2', 'Video B4'],
['Video A2', 'Video B5'],
['Video A2', 'Video B6'],
['Video A2', 'Video B7'],
['Video A2', 'Video B8'],
['Video A2', 'Video B9'],
['Video A2', 'Video B10'],
['Video B2', 'Video B3'],
['Video A2', 'Video A2'],
['Video A2', 'Video A1'],
['Video A3', 'Video A1'],
['Video A3', 'Video A2'],
['Video A3', 'Video B1'],
['Video A3', 'Video B2'],
['Video A3', 'Video B3'],
['Video A3', 'Video B4'],
['Video A3', 'Video B5'],
['Video A3', 'Video B6'],
['Video A3', 'Video B7'],
['Video A3', 'Video B8'],
['Video A3', 'Video B9'],
['Video A3', 'Video B10'],
]
mapping = [
['Video A1', 'Original_audio_driving_video.mp4'],
['Video A2', 'resemble_audio_driving_video.mp4'],
['Video A3', 'original_audio_driving_video_AAC_br1k_bitrateA_4009.mp4'],
['Video B1', 'AV1_crf63_AAC_br1_bitrateV_14659_bitrateA_4025.mp4'],
['Video B2', 'AV1_crf63_AAC_br1_ds2_us2_bitrateV_8158_bitrateA_4025.mp4'],
['Video B3', 'AV1_crf63_AAC_br50_bitrateV_14659_bitrateA_50216.mp4'],
['Video B4', 'AVC_crf30_AAC_br1_ds2_us2_bitrateV_55835_bitrateA_4025.mp4'],
['Video B5', 'AVC_crf30_AAC_br50_ds2_us2_bitrateV_55835_bitrateA_50216.mp4'],
['Video B6', 'AVC_crf35_AAC_br1_bitrateV_94366_bitrateA_4025.mp4'],
['Video B7', 'AVC_crf35_AAC_br50_bitrateV_94366_bitrateA_50216.mp4'],
['Video B8', 'AVC_crf40_AAC_br1_ds2_us2_bitrateV_12636_bitrateA_4025.mp4'],
['Video B9', 'AVC_crf40_AAC_br50_ds2_us2_bitrateV_12636_bitrateA_50216.mp4'],
['Video B10', 'AV1_crf63_AAC_br50_ds2_us2_bitrateV_8158_bitrateA_50216.mp4'],
]
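# Illustrative sketch (not part of the original survey script): the
# "add to map to values later" note above can be realized by turning `mapping`
# into a lookup so each abstract "Video X" label resolves to its encoded file.
video_to_file = dict(mapping)  # e.g. 'Video B2' -> 'AV1_crf63_AAC_br1_ds2_us2_...mp4'
pair_files = [(video_to_file[a], video_to_file[b]) for a, b in choices]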
# data = np.genfromtxt(data_file, delimiter=',')
df = pd.read_csv(data_file)
num_rows = len(df.index)
# -2 because 2 rows of metadata
num_subjects = num_rows - 2
print(num_rows, num_subjects)
# print(df)
# print(df[["Pair-1"]])
# print(df[["Pair-1"]])
# print(df.at[2, "Pair-1"])
# ser = pd.Series(df["Pair-1"]).head(num_rows)
# print(ser[2:2+num_rows].values)
data = np.zeros((num_comps, num_subjects), dtype='bool')
data_list = []
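# Coding convention for `data`: data[i, j] == 0 means subject j preferred the
# first video of pair i (the driving-video method in the A-vs-B comparisons),
# and 1 means they preferred the second clip (typically the standard codec).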
# plots for comparison:
resemble_audio_vs_AVC = np.zeros(6)
resemble_audio_vs_AV1 = np.zeros(4)
orig_audio_vs_AVC = np.zeros(6)
orig_audio_vs_AV1 = np.zeros(4)
orig_audio_low_quality_vs_AVC = np.zeros(6)
orig_audio_low_quality_vs_AV1 = np.zeros(4)
avc_br_audio = [4025, 50216, 4025, 50216, 4025, 50216]
avc_br_video = [55835, 55835, 94366, 94366, 12636, 12636]
av1_br_audio = [4025, 4025, 50216, 50216]
av1_br_video = [14659, 8158, 14659, 8158]
# avc_br = [55835 + 4025, 55835 + 50216, 94366 + 4025, 94366 + 50216, 12636 + 4025, 12636 + 50216]
# av1_br = [14659 + 4025, 8158 + 4025, 14659 + 50216, 8158 + 50216]
avc_br = [v + a for (v, a) in zip(avc_br_video, avc_br_audio)]
av1_br = [v + a for (v, a) in zip(av1_br_video, av1_br_audio)]
txt_br = 84
orig_audio_br = 50216 # 72000
orig_audio_low_quality_br = 4009 #### CHECK!!!!!!!
cr_resemble_AVC = [x / txt_br for x in avc_br]
cr_resemble_AV1 = [x / txt_br for x in av1_br]
cr_orig_audio_AVC = [x / orig_audio_br for x in avc_br]
cr_orig_audio_AV1 = [x / orig_audio_br for x in av1_br]
cr_orig_audio_low_quality_AVC = [x / orig_audio_low_quality_br for x in avc_br]
cr_orig_audio_low_quality_AV1 = [x / orig_audio_low_quality_br for x in av1_br]
for i in range(num_comps):
curr_comp = f"Pair-{i + 1}"
ser = pd.Series(df[curr_comp]).head(num_rows)
values = ser[2:2 + num_rows].values
data_list.append(values)
curr_choice = choices[i]
for j in range(num_subjects):
if values[j][-8:] == curr_choice[0][-8:]:
data[i, j] = 0
elif values[j][-8:] == curr_choice[1][-8:]:
data[i, j] = 1
else:
raise ValueError(f"Error in comp {i}, subject {j}: {values[j]}, {curr_choice}, {values[j][-8:]}")
# hardcoded to extract data
if 0 <= i < 3:
orig_audio_vs_AV1[i] = num_subjects - np.sum(data[i, :])
elif 3 <= i < 9:
orig_audio_vs_AVC[i - 3] = num_subjects - np.sum(data[i, :])
elif i == 9:
orig_audio_vs_AV1[3] = num_subjects - np.sum(data[i, :])
elif 10 <= i < 13:
resemble_audio_vs_AV1[i - 10] = num_subjects - np.sum(data[i, :])
elif 13 <= i < 19:
resemble_audio_vs_AVC[i - 13] = num_subjects - np.sum(data[i, :])
elif i == 19:
resemble_audio_vs_AV1[3] = num_subjects - np.sum(data[i, :])
elif 25 <= i < 28:
orig_audio_low_quality_vs_AV1[i - 25] = num_subjects - np.sum(data[i, :])
elif 28 <= i < 34:
orig_audio_low_quality_vs_AVC[i - 28] = num_subjects - np.sum(data[i, :])
elif i == 34:
orig_audio_low_quality_vs_AV1[3] = num_subjects - np.sum(data[i, :])
print(np.sum(data, axis=1))
# plt.figure()
# num_second_choice = np.sum(data, axis=1)
# num_first_choice = total_subjects - num_second_choice
# x_axis = np.arange(num_comps)
#
# plt.bar(x_axis, num_first_choice, color='r')
# plt.bar(x_axis, num_second_choice, bottom=num_first_choice, color='b')
# # plt.xticks(x_axis, choices)
# plt.show()
def plot_score_br(plotting_dict, codec):
"""
:param plotting_dict: dictionary with cr and subjective advantage
:param codec: 'av1' or 'avc'
:return: plot with score on y axis and br advantage against std codec on x
"""
codecs = ['av1', 'avc', 'AV1', 'AVC']
if codec not in codecs:
raise ValueError(f'codec specified: {codec} not AV1 or AVC')
cr_resemble_vs_codec = plotting_dict['cr_resemble_vs_codec']
resemble_audio_vs_codec = plotting_dict['resemble_audio_vs_codec']
cr_orig_audio_vs_codec = plotting_dict['cr_orig_audio_vs_codec']
orig_audio_vs_codec = plotting_dict['orig_audio_vs_codec']
cr_orig_audio_low_quality_vs_codec = plotting_dict['cr_orig_audio_low_quality_vs_codec']
orig_audio_low_quality_vs_codec = plotting_dict['orig_audio_low_quality_vs_codec']
audio_br = plotting_dict['audio_br']
video_br = plotting_dict['video_br']
fig, ax = plt.subplots(1, 3, sharey=True, figsize=(15, 5))
plt.setp(ax, yticks=np.arange(0, num_subjects + 1, 1))
ax[0].plot(cr_resemble_vs_codec, resemble_audio_vs_codec, 'o')
br = '\n'
text_label = [f'V:{x};{br}A:{y}' for (x, y) in zip(video_br, audio_br)]
for i in range(len(cr_resemble_vs_codec)):
ax[0].annotate(text_label[i], (cr_resemble_vs_codec[i], resemble_audio_vs_codec[i]),
textcoords='data', size=12)
ax[0].set_title('Resemble')
ax[0].grid()
ax[1].plot(cr_orig_audio_vs_codec, orig_audio_vs_codec, 'o')
for i in range(len(cr_orig_audio_vs_codec)):
ax[1].annotate(text_label[i], (cr_orig_audio_vs_codec[i], orig_audio_vs_codec[i]),
textcoords='data', size=12)
ax[1].set_title('Orig. Audio')
ax[1].grid()
ax[2].plot(cr_orig_audio_low_quality_vs_codec, orig_audio_low_quality_vs_codec, 'o')
for i in range(len(cr_orig_audio_low_quality_vs_codec)):
ax[2].annotate(text_label[i], (cr_orig_audio_low_quality_vs_codec[i],
orig_audio_low_quality_vs_codec[i]),
textcoords='data', size=12)
ax[2].set_title('Orig. Audio Low Quality')
ax[2].grid()
# plt.xlabel('Ratio of Std Codec BR and Our BR')
fig.text(0.5, 0.01, 'Ratio of Std Codec BR and Our BR', ha='center', fontsize=16)
# plt.ylabel('# Users Preferring Our Method')
fig.text(0.04, 0.5, '# Users Preferring Our Method', va='center', rotation='vertical', fontsize=16)
plt.suptitle(f'Comparison against {codec.upper()}', fontsize=20)
fig.subplots_adjust(hspace=.5)
plt.show()
def plot_br_score(plotting_dict, codec):
"""
:param plotting_dict: dictionary with cr and subjective advantage
:param codec: 'av1' or 'avc'
:return: plot with score on x axis and br advantage against std codec on y
"""
codecs = ['av1', 'avc']
if codec not in codecs:
raise ValueError(f'codec specified: {codec} not AV1 or AVC')
cr_resemble_vs_codec = plotting_dict['cr_resemble_vs_codec']
resemble_audio_vs_codec = plotting_dict['resemble_audio_vs_codec']
cr_orig_audio_vs_codec = plotting_dict['cr_orig_audio_vs_codec']
orig_audio_vs_codec = plotting_dict['orig_audio_vs_codec']
cr_orig_audio_low_quality_vs_codec = plotting_dict['cr_orig_audio_low_quality_vs_codec']
orig_audio_low_quality_vs_codec = plotting_dict['orig_audio_low_quality_vs_codec']
audio_br = plotting_dict['audio_br']
video_br = plotting_dict['video_br']
fig, ax = plt.subplots(3, 1, sharex=True, figsize=(5, 15))
plt.setp(ax, xticks=np.arange(0, num_subjects + 1, 1))
ax[0].plot(resemble_audio_vs_codec, cr_resemble_vs_codec, 'o')
# br = '\n'
text_label = [f'V:{x}; A:{y}' for (x, y) in zip(video_br, audio_br)]
for i in range(len(cr_resemble_vs_codec)):
ax[0].annotate(text_label[i], (resemble_audio_vs_codec[i], cr_resemble_vs_codec[i]),
textcoords='data', size=12)
ax[0].set_title('Resemble', pad=5)
ax[0].grid()
ax[1].plot(orig_audio_vs_codec, cr_orig_audio_vs_codec, 'o')
for i in range(len(cr_orig_audio_vs_codec)):
ax[1].annotate(text_label[i], (orig_audio_vs_codec[i], cr_orig_audio_vs_codec[i]),
textcoords='data', size=12)
ax[1].set_title('Orig. Audio')
ax[1].grid()
ax[2].plot(orig_audio_low_quality_vs_codec, cr_orig_audio_low_quality_vs_codec, 'o')
for i in range(len(cr_orig_audio_low_quality_vs_codec)):
ax[2].annotate(text_label[i], (orig_audio_low_quality_vs_codec[i],
cr_orig_audio_low_quality_vs_codec[i]),
textcoords='data', size=12)
ax[2].set_title('Orig. Audio Low Quality', pad=5)
ax[2].grid()
# plt.xlabel('Ratio of Std Codec BR and Our BR')
fig.text(0.01, 0.5, 'Ratio of Std Codec BR and Our BR', va='center',
rotation='vertical',
fontsize=16)
# plt.ylabel('# Users Preferring Our Method')
fig.text(0.5, 0.01, '# Users Preferring Our Method', ha='center', fontsize=16)
plt.suptitle(f'Comparison against {codec.upper()}', fontsize=20)
fig.subplots_adjust(hspace=.5)
plt.show()
def plot_br_score_bar(plotting_dict, codec):
"""
:param plotting_dict: dictionary with cr and subjective advantage
:param codec: 'av1' or 'avc'
:return: plot with score on x axis and br advantage against std codec on y
"""
codecs = ['av1', 'avc']
if codec not in codecs:
raise ValueError(f'codec specified: {codec} not AV1 or AVC')
cr_resemble_vs_codec = plotting_dict['cr_resemble_vs_codec']
resemble_audio_vs_codec = plotting_dict['resemble_audio_vs_codec']
cr_orig_audio_vs_codec = plotting_dict['cr_orig_audio_vs_codec']
orig_audio_vs_codec = plotting_dict['orig_audio_vs_codec']
cr_orig_audio_low_quality_vs_codec = plotting_dict['cr_orig_audio_low_quality_vs_codec']
orig_audio_low_quality_vs_codec = plotting_dict['orig_audio_low_quality_vs_codec']
audio_br = plotting_dict['audio_br']
video_br = plotting_dict['video_br']
first_bar_ratio = np.asarray([x/(x+y) for (x, y) in zip(video_br, audio_br)])
fig, ax = plt.subplots(3, 1, sharex=True, figsize=(5, 15))
plt.setp(ax, xticks=np.arange(0, num_subjects + 1, 1))
ax[0].bar(resemble_audio_vs_codec, cr_resemble_vs_codec * first_bar_ratio, color='r', alpha=0.3, label='Video Frac')
ax[0].bar(resemble_audio_vs_codec, cr_resemble_vs_codec * (1 - first_bar_ratio),
color='b', bottom=cr_resemble_vs_codec * first_bar_ratio, alpha=0.3, label='Audio Frac')
# text_label = [f'V:{x}; A:{y}' for (x, y) in zip(video_br, audio_br)]
# for i in range(len(cr_resemble_vs_codec)):
# ax[0].annotate(text_label[i], (resemble_audio_vs_codec[i], cr_resemble_vs_codec[i]),
# textcoords='data', size=12)
ax[0].set_title('Resemble', pad=5)
ax[0].grid()
ax[1].bar(orig_audio_vs_codec, cr_orig_audio_vs_codec * first_bar_ratio, color='r', alpha=0.3, label='Video Frac')
ax[1].bar(orig_audio_vs_codec, cr_orig_audio_vs_codec * (1 - first_bar_ratio),
color='b', bottom=cr_orig_audio_vs_codec * first_bar_ratio, alpha=0.3, label='Audio Frac')
# for i in range(len(cr_orig_audio_vs_codec)):
# ax[1].annotate(text_label[i], (orig_audio_vs_codec[i], cr_orig_audio_vs_codec[i]),
# textcoords='data', size=12)
ax[1].set_title('Orig. Audio')
ax[1].grid()
ax[2].bar(orig_audio_low_quality_vs_codec, cr_orig_audio_low_quality_vs_codec * first_bar_ratio,
color='r', alpha=0.3, label='Video Frac')
ax[2].bar(orig_audio_low_quality_vs_codec, cr_orig_audio_low_quality_vs_codec * (1 - first_bar_ratio),
color='b', bottom=cr_orig_audio_low_quality_vs_codec * first_bar_ratio, alpha=0.3, label='Audio Frac')
# for i in range(len(cr_orig_audio_low_quality_vs_codec)):
# ax[2].annotate(text_label[i], (orig_audio_low_quality_vs_codec[i],
# cr_orig_audio_low_quality_vs_codec[i]),
# textcoords='data', size=12)
ax[2].set_title('Orig. Audio Low Quality', pad=5)
ax[2].grid()
# plt.xlabel('Ratio of Std Codec BR and Our BR')
fig.text(0.01, 0.5, 'Ratio of Std Codec BR and Our BR', va='center',
rotation='vertical',
fontsize=16)
# plt.ylabel('# Users Preferring Our Method')
fig.text(0.5, 0.01, '# Users Preferring Our Method', ha='center', fontsize=16)
plt.suptitle(f'Comparison against {codec.upper()}', fontsize=20)
fig.subplots_adjust(hspace=.5)
plt.legend()
plt.show()
plotting_data = {'cr_resemble_vs_codec': cr_resemble_AVC,
'resemble_audio_vs_codec': resemble_audio_vs_AVC,
'cr_orig_audio_vs_codec': cr_orig_audio_AVC,
'orig_audio_vs_codec': orig_audio_vs_AVC,
'cr_orig_audio_low_quality_vs_codec': cr_orig_audio_low_quality_AVC,
'orig_audio_low_quality_vs_codec': orig_audio_low_quality_vs_AVC,
'audio_br': avc_br_audio,
'video_br': avc_br_video
}
plot_score_br(plotting_data, 'avc')
plot_br_score_bar(plotting_data, 'avc')
plotting_data = {'cr_resemble_vs_codec': cr_resemble_AV1,
                 'resemble_audio_vs_codec': resemble_audio_vs_AV1,
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
_assert_num_columns(data, 5)
_assert_value_range(data, score_range)
return pd.DataFrame(data.sum(axis=1), columns=[score_name])
def idq_post_scan(data: pd.DataFrame, columns: Optional[Union[Sequence[str], pd.Index]] = None) -> pd.DataFrame:
"""Compute the **Post-Scan Imaging Distress Questionnaire** (IDQ_POST).
.. note::
        This implementation assumes items 1-6 are coded in the range [1, 3] and item 7 in the range [1, 2].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
Returns
-------
:class:`~pandas.DataFrame`
IDQ_POST score
Raises
------
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns does not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
References
----------
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., ... & <NAME>. (1997).
A study of the effects of patient anxiety, perceptions and equipment on motion artifacts in magnetic resonance
imaging. *Magnetic resonance imaging*, 15(3), 301-306.
"""
score_name = "IDQ_POST"
score_range = [1, 3]
# create copy of data
data = data.copy()
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
_assert_num_columns(data, 7)
# columns 1-6 are expected to be in the range [1, 3]
_assert_value_range(data.iloc[:, 0:6], score_range)
# column 7 is expected to be in the range [1, 2]
    _assert_value_range(data.iloc[:, [6]], [1, 2])
return pd.DataFrame(data.sum(axis=1), columns=[score_name])
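# Illustrative usage (not part of the original module; the column names are
# made up): item scores are simply summed into a single IDQ_POST column.
#
#   >>> import pandas as pd
#   >>> df = pd.DataFrame([[1, 2, 3, 1, 2, 3, 2]], columns=[f"IDQ_Post_{i}" for i in range(1, 8)])
#   >>> idq_post_scan(df)
#      IDQ_POST
#   0        14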
# def strategies_questionnaire(
# data: pd.DataFrame, columns: Optional[Union[Sequence[str], pd.Index]] = None
# ) -> pd.DataFrame:
# """Compute the **Strategies Questionnaire**.
#
# The Strategies Questionnaire measures the examination of strategies employed during an MRI scan.
#
# .. note::
# This implementation assumes a score range of [1, 9].
# Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
# beforehand.
#
# Parameters
# ----------
# data : :class:`~pandas.DataFrame`
# dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
# a complete dataframe if ``columns`` parameter is supplied.
# columns : list of str or :class:`pandas.Index`, optional
# list with column names in correct order.
# This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
# passed as ``data``.
#
# Returns
# -------
# :class:`~pandas.DataFrame`
# Strategies Questionnaire score
#
#
# Raises
# ------
# :exc:`~biopsykit.utils.exceptions.ValidationError`
# if number of columns does not match
# :exc:`~biopsykit.utils.exceptions.ValueRangeError`
# if values are not within the required score range
#
#
# References
# ----------
# Original version: <NAME>., <NAME>., & <NAME>. (2008).
#
# Claustrophobia in MRI: the role of cognitions. *Magnetic resonance imaging*, 26(8), 1081-1088.
#
# """
# # score_name = "StratQuest"
# score_range = [1, 9]
#
# # create copy of data
# data = data.copy()
#
# if columns is not None:
# # if columns parameter is supplied: slice columns from dataframe
# _assert_has_columns(data, [columns])
# data = data.loc[:, columns]
#
# _assert_value_range(data, score_range)
# n_cols = 8
# try:
# _assert_num_columns(data, 8)
# except ValueError:
# n_cols = 9
# _assert_num_columns(data, 9)
#
# if n_cols == 9:
# pass
# else:
# pass
#
# raise NotImplementedError("No computational infos found.")
def clq(
data: pd.DataFrame,
columns: Optional[Union[Sequence[str], pd.Index]] = None,
subscales: Optional[Dict[str, Sequence[int]]] = None,
) -> pd.DataFrame:
"""Compute the **Claustrophobia Questionnaire (CLQ)**.
It consists of the subscales with the item indices (count-by-one, i.e., the first question has the index 1!):
* Suffocation Subscale (``SS``): [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
* Restriction Subscale (``RS``): [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26]
.. note::
This implementation assumes a score range of [1, 5].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
.. warning::
Column indices in ``subscales`` are assumed to start at 1 (instead of 0) to avoid confusion with
questionnaire item columns, which typically also start with index 1!
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
subscales : dict, optional
A dictionary with subscale names (keys) and column names or column indices (count-by-1) (values)
if only specific subscales should be computed.
Returns
-------
:class:`~pandas.DataFrame`
CLQ score
Raises
------
ValueError
if ``subscales`` is supplied and dict values are something else than a list of strings or a list of ints
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns does not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
References
----------
<NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2001).
The claustrophobia questionnaire. *Journal of anxiety disorders*, 15(4), 287-297.
"""
score_name = "CLQ"
score_range = [1, 5]
# create copy of data
data = data.copy()
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
if subscales is None:
_assert_num_columns(data, 26)
subscales = {
"SS": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14],
"RS": [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26],
}
_assert_value_range(data, score_range)
clq_data = _compute_questionnaire_subscales(data, score_name, subscales)
if len(data.columns) == 26:
# compute total score if all columns are present
clq_data[score_name] = data.sum(axis=1)
return pd.DataFrame(clq_data, index=data.index)
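# Illustrative usage (not from the original module; the input column names and
# the exact output column label are assumptions): computing only the
# suffocation subscale by passing 14 columns and a custom 1-based `subscales`
# mapping, which skips the 26-column total score.
#
#   >>> import pandas as pd
#   >>> df = pd.DataFrame([[3] * 14], columns=[f"CLQ_{i}" for i in range(1, 15)])
#   >>> clq(df, subscales={"SS": list(range(1, 15))})
#      CLQ_SS
#   0      42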
def mkhai(data: pd.DataFrame, columns: Optional[Union[Sequence[str], pd.Index]] = None) -> pd.DataFrame:
"""Compute the **Modified Health Anxiety Inventory (MKHAI)**.
The MKHAI is a measure of health anxiety.
.. note::
This implementation assumes a score range of [0, 4].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
Returns
-------
:class:`~pandas.DataFrame`
MKHAI score
Raises
------
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns does not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
References
----------
German version: <NAME>. & <NAME>. (2014). Deutsches modifiziertes Health Anxiety Inventory (MK-HAI).
*Zusammenstellung sozialwissenschaftlicher Items und Skalen (ZIS)*. https://doi.org/10.6102/zis71
Original version: <NAME>., <NAME>., <NAME>., & <NAME>. (2002).
The Health Anxiety Inventory: development and validation of scales for the measurement of health anxiety and
hypochondriasis. *Psychological medicine*,32(5), 843-853.
"""
score_name = "MKHAI"
score_range = [0, 4]
# create copy of data
data = data.copy()
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
_assert_num_columns(data, 14)
_assert_value_range(data, score_range)
return pd.DataFrame(data.sum(axis=1), columns=[score_name])
def abi_ms(
data: pd.DataFrame,
columns: Optional[Union[Sequence[str], pd.Index]] = None,
subscales: Optional[Dict[str, Sequence[int]]] = None,
) -> pd.DataFrame:
"""Compute the **Angstbewältigungsinventar für medizinische Situationen (ABI-MS)**.
The ABI-MS is a situation-response inventory designed to measure habitual preferences for the use of cognitive
avoidant and vigilant coping strategies in potentially threatening medical contexts.
It consists of the subscales with the item indices (count-by-one, i.e., the first question has the index 1!):
* Cognitive Avoidance - Specific Factor 1 (``KOG_1``): [1, 4, 5, 7],
* Cognitive Avoidance - Specific Factor 2 (``KOG_2``): [9, 11, 14, 16]
* Cognitive Avoidance - Specific Factor 3 (``KOG_3``): [17, 19, 21, 23]
* Cognitive Avoidance - Specific Factor 4 (``KOG_4``): [25, 27, 28, 31]
* Vigilance - Specific Factor 1 (``VIG_1``): [2, 3, 6, 8]
    * Vigilance - Specific Factor 2 (``VIG_2``): [10, 12, 13, 15]
* Vigilance - Specific Factor 3 (``VIG_3``): [18, 20, 22, 24]
* Vigilance - Specific Factor 4 (``VIG_4``): [26, 29, 30, 32]
.. note::
This implementation assumes a score range of [1, 5].
        Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
        beforehand.
0)), ((0, 1), (0, 1))),
]
def test_point_requirements(placement1, placement1owncol, placement1ownrow):
assert placement1._point_requirements((2, 3)) == [
[GriddedPerm(Perm((0,)), ((3, 4),))]
]
assert placement1ownrow._point_requirements((2, 3)) == [
[GriddedPerm(Perm((0,)), ((2, 4),))]
]
assert placement1owncol._point_requirements((2, 3)) == [
[GriddedPerm(Perm((0,)), ((3, 3),))]
]
def test_stretch_gridded_perm(gp1, placement1, placement1owncol, placement1ownrow):
assert set(placement1._stretch_gridded_perm(gp1, (0, 0))) == set(
[
GriddedPerm(
Perm((3, 1, 2, 0, 4)), ((2, 3), (2, 2), (3, 3), (3, 2), (3, 3))
),
GriddedPerm(
Perm((3, 1, 2, 0, 4)), ((2, 3), (2, 2), (3, 3), (3, 0), (3, 3))
),
GriddedPerm(
Perm((3, 1, 2, 0, 4)), ((2, 3), (2, 0), (3, 3), (3, 0), (3, 3))
),
GriddedPerm(
Perm((3, 1, 2, 0, 4)), ((0, 3), (2, 2), (3, 3), (3, 2), (3, 3))
),
GriddedPerm(
Perm((3, 1, 2, 0, 4)), ((0, 3), (2, 2), (3, 3), (3, 0), (3, 3))
),
GriddedPerm(
Perm((3, 1, 2, 0, 4)), ((0, 3), (2, 0), (3, 3), (3, 0), (3, 3))
),
GriddedPerm(
Perm((3, 1, 2, 0, 4)), ((0, 3), (0, 2), (3, 3), (3, 2), (3, 3))
),
GriddedPerm(
Perm((3, 1, 2, 0, 4)), ((0, 3), (0, 2), (3, 3), (3, 0), (3, 3))
),
GriddedPerm(
Perm((3, 1, 2, 0, 4)), ((0, 3), (0, 0), (3, 3), (3, 0), (3, 3))
),
GriddedPerm(
Perm((3, 1, 2, 0, 4)), ((0, 3), (1, 1), (3, 3), (3, 0), (3, 3))
),
]
)
assert set(placement1owncol._stretch_gridded_perm(gp1, (1, 0))) == set(
[
GriddedPerm(
Perm((3, 1, 2, 0, 4)), ((0, 1), (0, 0), (3, 1), (3, 0), (3, 1))
),
GriddedPerm(
Perm((3, 1, 2, 0, 4)), ((0, 1), (0, 0), (1, 1), (3, 0), (3, 1))
),
GriddedPerm(
Perm((3, 1, 2, 0, 4)), ((0, 1), (0, 0), (1, 1), (1, 0), (3, 1))
),
GriddedPerm(
Perm((3, 1, 2, 0, 4)), ((0, 1), (0, 0), (1, 1), (1, 0), (1, 1))
),
GriddedPerm(
Perm((3, 1, 2, 0, 4)), ((0, 1), (0, 0), (1, 1), (2, 0), (3, 1))
),
]
)
assert set(placement1ownrow._stretch_gridded_perm(gp1, (1, 1))) == set(
[
GriddedPerm(
Perm((3, 1, 2, 0, 4)), ((0, 3), (0, 0), (1, 3), (1, 0), (1, 3))
),
GriddedPerm(
Perm((3, 1, 2, 0, 4)), ((0, 3), (0, 0), (1, 1), (1, 0), (1, 3))
),
GriddedPerm(
Perm((3, 1, 2, 0, 4)), ((0, 1), (0, 0), (1, 1), (1, 0), (1, 3))
),
GriddedPerm(
Perm((3, 1, 2, 0, 4)), ((0, 1), (0, 0), (1, 1), (1, 0), (1, 1))
),
GriddedPerm(
Perm((3, 1, 2, 0, 4)), ((0, 3), (0, 0), (1, 2), (1, 0), (1, 3))
),
GriddedPerm(
Perm((3, 1, 2, 0, 4)), ((0, 1), (0, 0), (1, 1), (1, 0), (1, 2))
),
]
)
def test_stretch_gridded_perms(placement1, placement1owncol, placement1ownrow):
gps = [
GriddedPerm(Perm((0, 1)), [(0, 0), (1, 1)]),
GriddedPerm(Perm((0, 1)), [(1, 1), (2, 2)]),
]
for p in (placement1, placement1ownrow, placement1owncol):
assert set(p._stretch_gridded_perms(gps, (1, 1))) == set(
chain.from_iterable(p._stretch_gridded_perm(gp, (1, 1)) for gp in gps)
)
def test_stretched_obstructions(placement1, placement1owncol, placement1ownrow):
orig_obs = placement1._tiling.obstructions
assert sorted(placement1.stretched_obstructions((1, 1))) == sorted(
placement1._stretch_gridded_perms(orig_obs, (1, 1))
)
assert sorted(placement1owncol.stretched_obstructions((1, 1))) == sorted(
placement1owncol._stretch_gridded_perms(orig_obs, (1, 1))
)
assert sorted(placement1ownrow.stretched_obstructions((1, 1))) == sorted(
placement1ownrow._stretch_gridded_perms(orig_obs, (1, 1))
)
def test_stretched_requirements(placement1, placement1owncol, placement1ownrow):
orig_reqs = placement1._tiling.requirements
assert sorted(placement1.stretched_requirements((1, 1))) == sorted(
placement1._stretch_gridded_perms(orig_reqs, (1, 1))
)
orig_reqs = placement1owncol._tiling.requirements
assert sorted(placement1owncol.stretched_requirements((1, 1))) == sorted(
placement1owncol._stretch_gridded_perms(orig_reqs, (1, 1))
)
orig_reqs = placement1ownrow._tiling.requirements
assert sorted(placement1ownrow.stretched_requirements((1, 1))) == sorted(
placement1ownrow._stretch_gridded_perms(orig_reqs, (1, 1))
)
def test_stretched_obstructions_and_assumptions(
placement1, placement1owncol, placement1ownrow
):
obs, reqs, _ = placement1._stretched_obstructions_requirements_and_assumptions(
(1, 1)
)
assert set(obs) == set(
placement1.stretched_obstructions((1, 1))
+ [
GriddedPerm.single_cell(Perm((0, 1)), (2, 2)),
GriddedPerm.single_cell(Perm((1, 0)), (2, 2)),
]
)
assert sorted(reqs) == sorted(
placement1.stretched_requirements((1, 1))
+ [[GriddedPerm(Perm((0,)), ((2, 2),))]]
)
(
obs,
reqs,
_,
) = placement1ownrow._stretched_obstructions_requirements_and_assumptions((1, 1))
assert set(obs) == set(
placement1ownrow.stretched_obstructions((1, 1))
+ [
GriddedPerm.single_cell(Perm((0, 1)), (1, 2)),
GriddedPerm.single_cell(Perm((1, 0)), (1, 2)),
]
)
assert sorted(reqs) == sorted(
placement1ownrow.stretched_requirements((1, 1))
+ [[GriddedPerm(Perm((0,)), ((1, 2),))]]
)
(
obs,
reqs,
_,
) = placement1owncol._stretched_obstructions_requirements_and_assumptions((1, 1))
assert set(obs) == set(
placement1owncol.stretched_obstructions((1, 1))
+ [
GriddedPerm.single_cell(Perm((0, 1)), (2, 1)),
GriddedPerm.single_cell(Perm((1, 0)), (2, 1)),
]
)
assert sorted(reqs) == sorted(
placement1owncol.stretched_requirements((1, 1))
+ [[GriddedPerm(Perm((0,)), ((2, 1),))]]
)
def test_farther(placement1):
assert placement1._farther((0, 0), (2, 0), DIR_EAST) is False
assert placement1._farther((0, 0), (2, 0), DIR_NORTH) is False
assert placement1._farther((0, 0), (2, 0), DIR_WEST) is True
assert placement1._farther((0, 0), (2, 0), DIR_SOUTH) is False
assert placement1._farther((2, 3), (2, 0), DIR_EAST) is False
assert placement1._farther((2, 3), (2, 0), DIR_NORTH) is True
assert placement1._farther((2, 3), (2, 0), DIR_WEST) is False
assert placement1._farther((2, 3), (2, 0), DIR_SOUTH) is False
assert placement1._farther((1, 1), (3, 4), DIR_EAST) is False
assert placement1._farther((1, 1), (3, 4), DIR_NORTH) is False
assert placement1._farther((1, 1), (3, 4), DIR_WEST) is True
assert placement1._farther((1, 1), (3, 4), DIR_SOUTH) is True
assert placement1._farther((1, 5), (3, 4), DIR_EAST) is False
assert placement1._farther((1, 5), (3, 4), DIR_NORTH) is True
assert placement1._farther((1, 5), (3, 4), DIR_WEST) is True
assert placement1._farther((1, 5), (3, 4), DIR_SOUTH) is False
assert placement1._farther((2, 2), (1, 1), DIR_EAST) is True
assert placement1._farther((2, 2), (1, 1), DIR_NORTH) is True
assert placement1._farther((2, 2), (1, 1), DIR_WEST) is False
assert placement1._farther((2, 2), (1, 1), DIR_SOUTH) is False
def test_forced_obstructions_from_patt(
gp1, placement1, placement1owncol, placement1ownrow
):
assert set(
placement1.forced_obstructions_from_requirement(
(gp1,), (2,), gp1.pos[2], DIR_NORTH
)
) == set(
[
GriddedPerm(
Perm((3, 1, 2, 0, 4)), ((0, 3), (0, 0), (3, 3), (3, 0), (3, 3))
),
GriddedPerm(
Perm((3, 1, 2, 0, 4)), ((0, 3), (0, 0), (1, 3), (3, 0), (3, 3))
),
GriddedPerm(
Perm((3, 1, 2, 0, 4)), ((0, 3), (0, 0), (1, 3), (1, 0), (3, 3))
),
GriddedPerm(
Perm((3, 1, 2, 0, 4)), ((0, 3), (0, 0), (1, 3), (1, 0), (1, 3))
),
]
)
assert set(
placement1owncol.forced_obstructions_from_requirement(
(gp1,), (1,), gp1.pos[1], DIR_EAST
)
) == set(
[
GriddedPerm(
Perm((3, 1, 2, 0, 4)), ((2, 1), (2, 0), (3, 1), (3, 0), (3, 1))
),
GriddedPerm(
Perm((3, 1, 2, 0, 4)), ((0, 1), (2, 0), (3, 1), (3, 0), (3, 1))
),
]
)
assert set(
placement1ownrow.forced_obstructions_from_requirement(
(gp1,), (3,), gp1.pos[3], DIR_SOUTH
)
) == set(
[
GriddedPerm(
Perm((3, 1, 2, 0, 4)), ((0, 3), (0, 2), (1, 3), (1, 0), (1, 3))
),
GriddedPerm(
Perm((3, 1, 2, 0, 4)), ((0, 3), (0, 0), (1, 3), (1, 0), (1, 3))
),
]
)
def test_forced_obstructions_from_list(
gp1, placement1, placement1owncol, placement1ownrow
):
req_list_row = [
GriddedPerm(Perm((0,)), ((0, 0),)),
GriddedPerm(Perm((0,)), ((1, 0),)),
]
assert set(
placement1.forced_obstructions_from_requirement(
req_list_row, (0, 0), (0, 0), DIR_NORTH
)
) == set(
[
GriddedPerm(Perm((0,)), ((0, 2),)),
GriddedPerm(Perm((0,)), ((2, 2),)),
GriddedPerm(Perm((0,)), ((3, 2),)),
]
)
assert set(
placement1.forced_obstructions_from_requirement(
req_list_row, (0, 0), (0, 0), DIR_SOUTH
)
) == set(
[
GriddedPerm(Perm((0,)), ((0, 0),)),
GriddedPerm(Perm((0,)), ((2, 0),)),
GriddedPerm(Perm((0,)), ((3, 0),)),
]
)
assert set(
placement1.forced_obstructions_from_requirement(
req_list_row, (0, 0), (1, 0), DIR_NORTH
)
) == set(
[
GriddedPerm(Perm((0,)), ((0, 2),)),
GriddedPerm(Perm((0,)), ((1, 2),)),
GriddedPerm(Perm((0,)), ((3, 2),)),
]
)
assert set(
placement1.forced_obstructions_from_requirement(
req_list_row, (0, 0), (1, 0), DIR_SOUTH
)
) == set(
[
GriddedPerm(Perm((0,)), ((0, 0),)),
GriddedPerm(Perm((0,)), ((1, 0),)),
GriddedPerm(Perm((0,)), ((3, 0),)),
]
)
assert set(
placement1ownrow.forced_obstructions_from_requirement(
req_list_row, (0, 0), (0, 0), DIR_NORTH
)
) == set([GriddedPerm(Perm((0,)), ((0, 2),)), GriddedPerm(Perm((0,)), ((1, 2),))])
assert set(
placement1ownrow.forced_obstructions_from_requirement(
req_list_row, (0, 0), (0, 0), DIR_SOUTH
)
) == set([GriddedPerm(Perm((0,)), ((0, 0),)), GriddedPerm(Perm((0,)), ((1, 0),))])
assert set(
placement1ownrow.forced_obstructions_from_requirement(
req_list_row, (0, 0), (1, 0), DIR_NORTH
)
) == set([GriddedPerm(Perm((0,)), ((0, 2),)), GriddedPerm(Perm((0,)), ((1, 2),))])
assert set(
placement1ownrow.forced_obstructions_from_requirement(
req_list_row, (0, 0), (1, 0), DIR_SOUTH
)
) == set([GriddedPerm(Perm((0,)), ((0, 0),)), GriddedPerm(Perm((0,)), ((1, 0),))])
req_list_col = [
GriddedPerm(Perm((0,)), ((0, 0),)),
GriddedPerm(Perm((0,)), ((0, 1),)),
]
assert set(
placement1.forced_obstructions_from_requirement(
req_list_col, (0, 0), (0, 0), DIR_EAST
)
) == set(
[
GriddedPerm(Perm((0,)), ((2, 0),)),
GriddedPerm(Perm((0,)), ((2, 2),)),
GriddedPerm(Perm((0,)), ((2, 3),)),
]
)
assert set(
placement1.forced_obstructions_from_requirement(
req_list_col, (0, 0), (0, 0), DIR_WEST
)
) == set(
[
GriddedPerm(Perm((0,)), ((0, 0),)),
GriddedPerm(Perm((0,)), ((0, 2),)),
GriddedPerm(Perm((0,)), ((0, 3),)),
]
)
assert set(
placement1.forced_obstructions_from_requirement(
req_list_col, (0, 0), (0, 1), DIR_EAST
)
) == set(
[
GriddedPerm(Perm((0,)), ((2, 0),)),
GriddedPerm(Perm((0,)), ((2, 1),)),
GriddedPerm(Perm((0,)), ((2, 3),)),
]
)
assert set(
placement1.forced_obstructions_from_requirement(
req_list_col, (0, 0), (0, 1), DIR_WEST
)
) == set(
[
| |
date = date.Date ('2014-05-10')
, weekend_allowed = 0
, required_overtime = 0
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2014-05-11')
, weekend_allowed = 0
, required_overtime = 0
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2014-05-12')
, weekend_allowed = 0
, required_overtime = 0
)
db.time_record.create \
( daily_record = dr
, start = '13:30'
, end = '16:30'
, work_location = '1'
, wp = '22'
)
db.time_record.create \
( daily_record = dr
, start = '11:00'
, end = '13:00'
, work_location = '1'
, wp = '22'
)
db.time_record.create \
( daily_record = dr
, start = '08:00'
, end = '09:30'
, work_location = '1'
, wp = '11'
)
db.time_record.create \
( daily_record = dr
, start = '09:30'
, end = '10:00'
, work_location = '1'
, wp = '11'
)
db.time_record.create \
( daily_record = dr
, start = '10:00'
, end = '11:00'
, work_location = '1'
, wp = '5'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2014-05-13')
, weekend_allowed = 0
, required_overtime = 0
)
db.time_record.create \
( daily_record = dr
, start = '14:30'
, end = '17:00'
, work_location = '1'
, wp = '22'
)
db.time_record.create \
( daily_record = dr
, start = '08:00'
, end = '13:00'
, work_location = '1'
, wp = '22'
)
db.time_record.create \
( daily_record = dr
, start = '13:30'
, end = '14:30'
, work_location = '1'
, wp = '36'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2014-05-14')
, weekend_allowed = 0
, required_overtime = 0
)
db.time_record.create \
( daily_record = dr
, start = '20:00'
, end = '21:00'
, work_location = '1'
, wp = '36'
)
db.time_record.create \
( daily_record = dr
, start = '08:00'
, end = '13:00'
, work_location = '1'
, wp = '5'
)
db.time_record.create \
( daily_record = dr
, start = '13:30'
, end = '19:30'
, work_location = '1'
, wp = '5'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2014-05-15')
, weekend_allowed = 0
, required_overtime = 0
)
db.time_record.create \
( daily_record = dr
, start = '16:00'
, end = '19:00'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '08:00'
, end = '13:00'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '13:30'
, end = '15:00'
, work_location = '1'
, wp = '19'
)
db.time_record.create \
( daily_record = dr
, start = '15:00'
, end = '16:00'
, work_location = '1'
, wp = '36'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2014-05-16')
, weekend_allowed = 0
, required_overtime = 0
)
db.time_record.create \
( daily_record = dr
, start = '14:30'
, end = '18:45'
, work_location = '1'
, wp = '40'
)
db.time_record.create \
( daily_record = dr
, start = '08:30'
, end = '13:00'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '13:30'
, end = '14:30'
, work_location = '1'
, wp = '36'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2014-05-17')
, weekend_allowed = 0
, required_overtime = 0
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2014-05-18')
, weekend_allowed = 0
, required_overtime = 0
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2014-05-19')
, weekend_allowed = 0
, required_overtime = 0
)
db.time_record.create \
( daily_record = dr
, start = '11:00'
, end = '13:00'
, work_location = '1'
, wp = '40'
)
db.time_record.create \
( daily_record = dr
, start = '08:00'
, end = '10:00'
, work_location = '1'
, wp = '11'
)
db.time_record.create \
( daily_record = dr
, start = '13:30'
, end = '15:30'
, work_location = '1'
, wp = '36'
)
db.time_record.create \
( daily_record = dr
, start = '10:00'
, end = '11:00'
, work_location = '1'
, wp = '5'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2014-05-20')
, weekend_allowed = 0
, required_overtime = 0
)
db.time_record.create \
( daily_record = dr
, start = '14:30'
, end = '15:45'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '08:00'
, end = '13:00'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '13:30'
, end = '14:30'
, work_location = '1'
, wp = '36'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2014-05-21')
, weekend_allowed = 0
, required_overtime = 0
)
db.time_record.create \
( daily_record = dr
, start = '16:30'
, end = '18:00'
, work_location = '1'
, wp = '34'
)
db.time_record.create \
( daily_record = dr
, start = '09:15'
, end = '13:00'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '15:00'
, end = '15:30'
, work_location = '1'
, wp = '16'
)
db.time_record.create \
( daily_record = dr
, start = '13:30'
, end = '15:00'
, work_location = '1'
, wp = '16'
)
db.time_record.create \
( daily_record = dr
, start = '15:30'
, end = '16:30'
, work_location = '1'
, wp = '40'
)
db.time_record.create \
( daily_record = dr
, start = '08:15'
, end = '09:15'
, work_location = '1'
, wp = '36'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2014-05-22')
, weekend_allowed = 0
, required_overtime = 0
)
db.time_record.create \
( daily_record = dr
, start = '14:30'
, end = '18:00'
, work_location = '1'
, wp = '40'
)
db.time_record.create \
( daily_record = dr
, start = '12:00'
, end = '13:00'
, work_location = '1'
, wp = '40'
)
db.time_record.create \
( daily_record = dr
, start = '09:00'
, end = '10:00'
, work_location = '5'
, wp = '2'
)
db.time_record.create \
( daily_record = dr
, start = '10:00'
, end = '12:00'
, work_location = '1'
, wp = '26'
)
db.time_record.create \
( daily_record = dr
, start = '19:30'
, end = '21:00'
, time_activity = '7'
, work_location = '2'
, wp = '36'
)
db.time_record.create \
( daily_record = dr
, start = '13:30'
, end = '14:30'
, work_location = '1'
, wp = '36'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2014-05-23')
, weekend_allowed = 0
, required_overtime = 0
)
db.time_record.create \
( daily_record = dr
, start = '15:30'
, end = '18:30'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '09:15'
, end = '13:00'
, work_location = '1'
, wp = '6'
)
db.time_record.create \
( daily_record = dr
, start = '08:15'
, end = '09:15'
, work_location = '1'
, wp = '11'
)
db.time_record.create \
( daily_record = dr
, start = '13:30'
, end = '15:30'
, work_location = '1'
, wp = '36'
)
dr = db.daily_record.create \
( user = user
, date = date.Date ('2014-05-24')
, weekend_allowed = 0
, required_overtime = 0
)
dr = db.daily_record.create \
( user = user
    , date = date.Date
# %% [markdown]
# # FOI-based hospital/ICU beds data analysis
import pandas
import altair
altair.data_transformers.disable_max_rows()
# %% [markdown]
# ## BHSCT FOI data
#
# * weekly totals, beds data is summed (i.e. bed days)
bhsct_beds = pandas.read_excel('../data/BHSCT/10-11330 Available_Occupied Beds & ED Atts 2010 - 2020.xlsx', engine='openpyxl', header=[9,10,11], index_col=0, sheet_name='BEDS')
bhsct_beds = bhsct_beds.stack([0,2]).reset_index()
bhsct_beds.rename(columns={'level_0':'Dates','level_1':'Hospital','Dates':'Care'},inplace=True)
bhsct_beds['start'] = pandas.to_datetime(bhsct_beds['Dates'].str.split(' - ', expand=True)[0], format='%d/%m/%Y')
bhsct_beds = bhsct_beds.groupby(['start','Care','Hospital'])[['Available', 'Occupied']].sum().reset_index()
bhsct_beds = bhsct_beds.melt(id_vars=['start','Care','Hospital'])
bhsct_beds['col'] = bhsct_beds['Care'] + '-' + bhsct_beds['variable']
bhsct_beds = bhsct_beds.pivot(index=['start','Hospital'], columns='col', values='value')
bhsct_beds.rename(columns={'ICU/Critical Care-Available': 'Critical Care Available', 'NON ICU/Critical Care-Available': 'General Available', 'ICU/Critical Care-Occupied': 'Critical Care Occupied', 'NON ICU/Critical Care-Occupied': 'General Occupied'}, inplace=True)
bhsct_ae = pandas.read_excel('../data/BHSCT/10-11330 Available_Occupied Beds & ED Atts 2010 - 2020.xlsx', engine='openpyxl', header=6, sheet_name='AE')
bhsct_ae['start'] = pandas.to_datetime(bhsct_ae['Dates'].str.split(' - ', expand=True)[0], format='%d/%m/%Y')
bhsct_ae.drop(columns=['Dates'],inplace=True)
bhsct_ae = bhsct_ae.melt(id_vars=['start']).groupby(['start','variable'])['value'].sum().reset_index()
bhsct_ae.rename(columns={'variable': 'Hospital', 'value': 'ED Attendances'}, inplace=True)
bhsct_ae.set_index(['start', 'Hospital'], inplace=True)
bhsct_weekly = bhsct_beds.merge(bhsct_ae, how='left', left_index=True, right_index=True)
bhsct_weekly.fillna(0, inplace=True)
bhsct_weekly = bhsct_weekly.astype(int)
bhsct_weekly = bhsct_weekly.reset_index().replace({
'MIH': 'Mater Infirmorum Hospital',
'RBHSC': 'Royal Belfast Hospital for Sick Children',
'RVH': 'Royal Victoria Hospital',
'BCH': 'Belfast City Hospital',
'MPH': 'Musgrave Park Hospital'
}).set_index(['start','Hospital'])
# %% [markdown]
# ## NHSCT FOI data
#
# * daily data
nhsct_ae = pandas.read_excel('../data/NHSCT/20210208_PB080121_Response_Attachment_IJ.xlsx', engine='openpyxl', header=6, sheet_name='ED Attendances')
nhsct_ae.dropna(axis='columns', how='all', inplace=True)
nhsct_ae.dropna(axis='index', subset=['Arrival Date'], inplace=True)
nhsct_ae['date'] = pandas.to_datetime(nhsct_ae['Arrival Date'], format='%Y-%m-%d')
nhsct_ae.rename(columns={'Site': 'Hospital'}, inplace=True)
nhsct_ae_daily = nhsct_ae.groupby(['date','Hospital'])['Attendances'].sum()
nhsct_ae_daily.name = 'ED Attendances'
nhsct_icu = pandas.read_excel('../data/NHSCT/20210208_PB080121_Response_Attachment_IJ.xlsx', engine='openpyxl', header=5, sheet_name='ICU Wards')
nhsct_icu['date'] = pandas.to_datetime(nhsct_icu['DATE'], format='%Y-%m-%d')
nhsct_icu.rename(columns={'HOSPITAL': 'Hospital'}, inplace=True)
nhsct_icu_daily = nhsct_icu.groupby(['date','Hospital'])[['AVAILABLE BEDS', 'OCCUPIED BEDS']].sum()
nhsct_icu_daily.rename(columns={'AVAILABLE BEDS': 'Critical Care Available', 'OCCUPIED BEDS': 'Critical Care Occupied'}, inplace=True)
nhsct_daily = nhsct_icu_daily.merge(nhsct_ae_daily, how='left', left_index=True, right_index=True)
nhsct_nonicu = pandas.read_excel('../data/NHSCT/20210208_PB080121_Response_Attachment_IJ.xlsx', engine='openpyxl', header=6, sheet_name='Non ICU Wards')
nhsct_nonicu['date'] = pandas.to_datetime(nhsct_nonicu['DATE'], format='%Y-%m-%d')
nhsct_nonicu.rename(columns={'HOSPITAL': 'Hospital'}, inplace=True)
nhsct_nonicu_daily = nhsct_nonicu.groupby(['date','Hospital'])[['AVAILABLE BEDS', 'OCCUPIED BEDS']].sum()
nhsct_nonicu_daily.rename(columns={'AVAILABLE BEDS': 'General Available', 'OCCUPIED BEDS': 'General Occupied'}, inplace=True)
nhsct_daily = nhsct_daily.merge(nhsct_nonicu_daily, how='left', left_index=True, right_index=True)
nhsct_daily = nhsct_daily.astype(int)
nhsct_daily.reset_index(inplace=True)
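# (dt.dayofweek + 3) % 7 is the number of days since the previous Friday
# (Mon=0 ... Fri=4), so the next line tags each daily record with the Friday
# that opens its FOI reporting week before summing to weekly totals; the SHSCT
# block further down uses the same trick.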
nhsct_daily['start'] = nhsct_daily['date'] - pandas.to_timedelta((nhsct_daily['date'].dt.dayofweek+3)%7, unit='d')
nhsct_weekly = nhsct_daily.groupby(['start','Hospital']).sum()
nhsct_weekly = nhsct_weekly.reset_index().replace({'ANTRIM':'Antrim Area Hospital','CAUSEWAY':'Causeway Hospital'}).set_index(['start', 'Hospital'])
# %% [markdown]
# ## SEHSCT FOI data
#
# * weekly data, beds data is summed (i.e. bed days)
sehsct_beds = pandas.read_excel('../data/SEHSCT/Attachment 1 - Occupied & Available Beds and A&E Attendances.xlsx', engine='openpyxl', header=[9,10,11], sheet_name='Beds')
sehsct_beds.dropna(axis='columns', how='all', inplace=True)
sehsct_beds[sehsct_beds.columns[0]] = sehsct_beds[sehsct_beds.columns[0]].replace(to_replace ='27/03/2020 - 31/03/20', value = '2020-03-27 00:00:00')
sehsct_beds['start'] = pandas.to_datetime(sehsct_beds[sehsct_beds.columns[0]], format='%Y-%m-%d 00:00:00')
sehsct_beds.drop(columns=sehsct_beds.columns[0], inplace=True)
sehsct_beds = sehsct_beds.melt(id_vars=[('start','','')])
sehsct_beds.rename(columns={('start','',''): 'start', 'variable_0': 'hospital', 'variable_1': 'state', 'variable_2': 'ward'}, inplace=True)
sehsct_beds['col'] = sehsct_beds['ward'] + '-' + sehsct_beds['state']
sehsct_beds = sehsct_beds.pivot(index=['start', 'hospital'], columns='col', values='value').reset_index(1)
sehsct_beds.rename(columns={'ICU/Critical Care-Available': 'Critical Care Available', 'Non Critical Care-Available': 'General Available', 'ICU/Critical Care-Occupied': 'Critical Care Occupied', 'Non Critical Care-Occupied': 'General Occupied'}, inplace=True)
sehsct_beds.fillna(0, inplace=True)
sehsct_beds.rename(columns={'hospital': 'Hospital'}, inplace=True)
sehsct_weekly = sehsct_beds.groupby(['start','Hospital']).sum()
sehsct_ae = pandas.read_excel('../data/SEHSCT/Attachment 1 - Occupied & Available Beds and A&E Attendances.xlsx', engine='openpyxl', header=7, sheet_name='ED')
sehsct_ae['Week'] = sehsct_ae['Week'].replace(to_replace ='27/03/2020 - 31/03/20', value = '2020-03-27 00:00:00')
sehsct_ae['start'] = pandas.to_datetime(sehsct_ae['Week'], format='%Y-%m-%d 00:00:00')
sehsct_ae.drop(columns='Week', inplace=True)
sehsct_ae = sehsct_ae.melt(id_vars='start', var_name='Hospital').set_index(['start','Hospital'])
sehsct_ae['value'] = sehsct_ae['value'].fillna('0').replace(' ', '0').astype('int')
sehsct_ae = sehsct_ae.groupby(['start','Hospital'])['value'].sum()
sehsct_ae.name = 'ED Attendances'
sehsct_weekly = sehsct_weekly.merge(sehsct_ae, how='left', left_index=True, right_index=True)
sehsct_weekly.fillna(0, inplace=True)
sehsct_weekly = sehsct_weekly.astype(int)
sehsct_weekly = sehsct_weekly.reset_index().replace({
'Ards': 'Ards Hospital',
'Bangor': 'Bangor Hospital',
'Downe': 'Downe Hospital',
'Lagan Valley': 'Lagan Valley Hospital',
'Ulster': 'Ulster Hospital'
}).set_index(['start', 'Hospital'])
# %% [markdown]
# ## SHSCT FOI data
#
# * daily data
shsct_ae = pandas.read_excel('../data/SHSCT/FOI 350 EC MIU ATTENDANCES.xlsx', engine='openpyxl', header=10, sheet_name='DATA')
shsct_ae['date'] = pandas.to_datetime(shsct_ae['Arrival Date'], format='%Y-%m-%d')
shsct_ae.rename(columns={'HOSPITAL':'Hospital'}, inplace=True)
shsct_ae.replace({'CRAIGAVON AREA HOSPITAL': 'Craigavon Area Hospital', 'DAISY HILL ': 'Daisy Hill Hospital', 'SOUTH TYRONE HOSPITAL': 'South Tyrone Hospital'}, inplace=True)
shsct_ae_daily = shsct_ae.groupby(['date','Hospital'])['No. of Attendances - Original'].sum()
shsct_ae_daily.name = 'ED Attendances'
shsct_icu = pandas.read_excel('../data/SHSCT/FOI 350 PARTS 1 AND 2 ALLOCATED AND OCCUPIED BEDS.xlsx', engine='openpyxl', header=11, sheet_name='INTENSIVE CARE BEDS')
shsct_icu.dropna(axis='index', subset=['Hospital Code'], inplace=True)
shsct_icu['date'] = pandas.to_datetime(shsct_icu['Date'], format='%Y-%m-%d 00:00:00')
shsct_icu.rename(columns={'Hospital Code':'Hospital'}, inplace=True)
shsct_icu_daily = shsct_icu.groupby(['date','Hospital'])[['ALLOCATED','OCCUPIED']].sum()
shsct_icu_daily.rename(columns={'ALLOCATED': 'Critical Care Available', 'OCCUPIED': 'Critical Care Occupied'}, inplace=True)
shsct_nonicu = pandas.read_excel('../data/SHSCT/FOI 350 PARTS 1 AND 2 ALLOCATED AND OCCUPIED BEDS.xlsx', engine='openpyxl', header=11, sheet_name='OTHER BEDS')
shsct_nonicu.dropna(axis='index', subset=['Hospital Code'], inplace=True)
shsct_nonicu['date'] = pandas.to_datetime(shsct_nonicu['Date'], format='%Y-%m-%d 00:00:00')
shsct_nonicu.rename(columns={'Hospital Code':'Hospital'}, inplace=True)
shsct_nonicu_daily = shsct_nonicu.groupby(['date','Hospital'])[['Allocated','Occupied']].sum()
shsct_nonicu_daily.rename(columns={'Allocated': 'General Available', 'Occupied': 'General Occupied'}, inplace=True)
shsct_daily = shsct_nonicu_daily.merge(shsct_icu_daily, how='left', left_index=True, right_index=True)
shsct_daily = shsct_daily.merge(shsct_ae_daily, how='left', left_index=True, right_index=True)
shsct_daily.fillna(0, inplace=True)
shsct_daily = shsct_daily.astype(int)
shsct_daily.reset_index(inplace=True)
shsct_daily['start'] = shsct_daily['date'] - pandas.to_timedelta((shsct_daily['date'].dt.dayofweek+3)%7, unit='d')
shsct_daily = shsct_daily[shsct_daily['date'] < '2020-04-01']
shsct_weekly = shsct_daily.groupby(['start','Hospital']).sum()
# %% [markdown]
# ## WHSCT FOI data
#
# * weekly data, beds data is summed (i.e. bed days)
whsct_ae = pandas.read_excel('../data/WHSCT/FOI.21.017 A&E Attendances and Bedday Activity ALT & SWA 2010 to 2020.xlsx', engine='openpyxl', header=5, sheet_name='AE')
whsct_ae['start'] = pandas.to_datetime(whsct_ae['Weekly Time Periods (7 Days)'].str.split(' - ', expand=True)[0], format='%d/%m/%Y')
whsct_ae.dropna(axis='index', how='all', inplace=True)
whsct_ae.drop(columns=['Weekly Time Periods (7 Days)'], inplace=True)
whsct_ae.rename(columns={'ALT': 'Altnagelvin Hospital', 'SWAH': 'South West Acute Hospital'}, inplace=True)
whsct_ae_weekly = whsct_ae.melt(id_vars='start', var_name='Hospital').groupby(['start','Hospital'])['value'].sum()
whsct_ae_weekly.name = 'ED Attendances'
whsct_beds = pandas.read_excel('../data/WHSCT/FOI.21.017 A&E Attendances and Bedday Activity ALT & SWA 2010 to 2020.xlsx', engine='openpyxl', header=[5,6,7], sheet_name='BEDS')
whsct_beds['start'] = pandas.to_datetime(whsct_beds[whsct_beds.columns[0]].str.split(' - ', expand=True)[0], format='%d/%m/%Y')
whsct_beds.drop(columns=whsct_beds.columns[0], inplace=True)
whsct_beds = whsct_beds.melt(id_vars=[('start','','')])
whsct_beds.rename(columns={('start','',''): 'start', 'variable_0': 'Hospital', 'variable_1': 'state', 'variable_2': 'ward'}, inplace=True)
whsct_beds['col'] = whsct_beds['ward'] + '-' + whsct_beds['state']
whsct_beds = whsct_beds.pivot(index=['start', 'Hospital'], columns='col', values='value').reset_index()
whsct_beds.replace({'ALTNAGELVIN HOSPITAL': 'Altnagelvin Hospital', 'ERNE / SOUTH WEST ACUTE HOSPITAL': 'South West Acute Hospital'}, inplace=True)
whsct_beds.rename(columns={'ICU/Critical Care-Available': 'Critical Care Available', 'NON ICU/Critical Care-Available': 'General Available', 'ICU/Critical Care-Occupied': 'Critical Care Occupied', 'NON ICU/Critical Care-Occupied': 'General Occupied'}, inplace=True)
whsct_beds.fillna(0, inplace=True)
whsct_weekly = whsct_beds.groupby(['start','Hospital']).sum()
whsct_weekly = whsct_weekly.merge(whsct_ae_weekly, how='left', left_index=True, right_index=True)
whsct_weekly.fillna(0, inplace=True)
whsct_weekly = whsct_weekly.astype(int)
# %% Combine all weekly into single dataframe
all_foi_weekly = pandas.concat([bhsct_weekly, nhsct_weekly, sehsct_weekly, shsct_weekly, whsct_weekly], keys=['BHSCT', 'NHSCT', 'SEHSCT', 'SHSCT', 'WHSCT'], names=['Trust', 'Week beginning', 'Hospital']).reset_index()
all_foi_weekly.to_csv('../data/all-foi-weekly.csv', index=False)
# %% Plot available ICU beds on a pre-COVID day
altair.Chart(
(all_foi_weekly[all_foi_weekly['Week beginning']=='2020-01-03'].groupby(['Trust','Hospital']).sum() / 7).reset_index()
).mark_bar().encode(
x=altair.X(field='Critical Care Available', type='quantitative', axis=altair.Axis(title='Critical Care Beds Available')),
y=altair.Y(field='Hospital', type='ordinal'),
color='Trust'
)
# %% Plot available ICU beds by Trust on a pre-COVID day
altair.Chart(
(all_foi_weekly[all_foi_weekly['Week beginning']=='2020-01-03'].groupby(['Trust','Hospital']).sum() / 7).reset_index()
).mark_bar().encode(
x=altair.X(field='Critical Care Available', type='quantitative', aggregate='sum', axis=altair.Axis(title='Critical Care Beds Available')),
y=altair.Y(field='Trust', type='ordinal'),
color='Trust'
)
# %% Plot available non-ICU beds on a pre-COVID day
altair.Chart(
(all_foi_weekly[all_foi_weekly['Week beginning']=='2020-01-03'].groupby(['Trust','Hospital']).sum() / 7).reset_index()
).mark_bar().encode(
x=altair.X(field='General Available', type='quantitative', axis=altair.Axis(title='General Beds Available')),
y=altair.Y(field='Hospital', type='ordinal'),
color='Trust'
)
# %%
altair.Chart(
all_foi_weekly.groupby(['Week beginning', 'Trust']).sum().reset_index()
).mark_area().encode(
x=altair.X(field='Week beginning', type='temporal'),
y=altair.Y(field='Critical Care Occupied', type='quantitative', aggregate='sum', axis=altair.Axis(format=',d', title='Critical Care Occupied Bed Days')),
color='Trust'
)
# %%
altair.Chart(
all_foi_weekly.groupby(['Week beginning', 'Trust']).sum().reset_index()
).mark_area().encode(
x=altair.X(field='Week beginning', type='temporal'),
y=altair.Y(field='Critical Care Available', type='quantitative', aggregate='sum', axis=altair.Axis(format=',d', title='Critical Care Available Bed Days')),
color='Trust'
)
# %% Trim the data to when data from all 5 trusts is available
all_foi_weekly_trimmed = all_foi_weekly[
(all_foi_weekly['Week beginning'] >= all_foi_weekly[all_foi_weekly['Trust']=='SHSCT']['Week beginning'].min())
& ((all_foi_weekly['Week beginning'] <= all_foi_weekly[all_foi_weekly['Trust']=='BHSCT']['Week beginning'].max()))
]
# %%
altair.Chart(
all_foi_weekly_trimmed.groupby(['Week beginning', 'Trust']).sum().reset_index()
).mark_area().encode(
x=altair.X(field='Week beginning', type='temporal'),
y=altair.Y(field='Critical Care Occupied', type='quantitative', aggregate='sum', axis=altair.Axis(format=',d', title='Critical Care Occupied Bed Days')),
color='Trust'
)
# %%
cc_hospitals = ['Altnagelvin Hospital','Antrim Area Hospital', 'Belfast City Hospital', 'Causeway Hospital', 'Craigavon Area Hospital', 'Mater Infirmorum Hospital', 'Royal Belfast Hospital for Sick Children', 'Royal Victoria Hospital', 'South West Acute Hospital', 'Ulster Hospital']
# %%
p = altair.Chart(
all_foi_weekly_trimmed.groupby(['Week beginning', 'Trust']).sum().reset_index()
).mark_area().encode(
x=altair.X(field='Week beginning', type='temporal'),
y=altair.Y(field='Critical Care Available', type='quantitative', aggregate='sum', axis=altair.Axis(format=',d', title='Critical Care Available Bed Days')),
color='Trust'
)
# %%
foi_icu_hospital = (all_foi_weekly_trimmed[all_foi_weekly_trimmed['Hospital'].isin(cc_hospitals)].groupby(['Week beginning', 'Hospital']).sum() / 7)[['Critical Care Occupied','Critical Care Available']].reset_index().melt(id_vars=['Hospital','Week beginning'],var_name=['Status'])
foi_icu_hospital['Status'] = foi_icu_hospital['Status'].replace({'Critical Care Available': 'Available', 'Critical Care Occupied': 'Occupied'})
p = altair.Chart(
foi_icu_hospital[(foi_icu_hospital['Week beginning'] < foi_icu_hospital['Week beginning'].max()) & (foi_icu_hospital['Hospital'].isin(['Altnagelvin Hospital','Antrim Area Hospital']))]
).mark_line().encode(
x=altair.X(field='Week beginning', type='temporal'),
y=altair.Y(field='value', type='quantitative', aggregate='sum', axis=altair.Axis(format=',d', title='Critical Care Beds')),
color='Status',
row=altair.Row('Hospital',type='ordinal')
).properties(
height=200
).resolve_scale(
y='independent'
)
p.save('cc-alt-ant-%s.png'%datetime.datetime.now().date().strftime('%Y-%m-%d'))
p
# %%
foi_icu_trust = (all_foi_weekly_trimmed.groupby(['Week beginning', 'Trust']).sum() / 7)[['Critical Care Occupied','Critical Care Available']].reset_index().melt(id_vars=['Trust','Week beginning'],var_name=['Status'])
foi_icu_trust['Status'] = foi_icu_trust['Status'].replace({'Critical Care Available': 'Available', 'Critical Care Occupied': 'Occupied'})
p = altair.Chart(
foi_icu_trust[(foi_icu_trust['Week beginning'] < foi_icu_trust['Week beginning'].max()) & (foi_icu_trust['Week beginning'] > foi_icu_trust['Week beginning'].min())]
).mark_line().encode(
x=altair.X(field='Week beginning', type='temporal'),
y=altair.Y(field='value', type='quantitative', aggregate='sum', axis=altair.Axis(format=',d', title='Critical Care Beds')),
color=altair.Color('Status',title=None),
row=altair.Row('Trust',type='ordinal')
).properties(
height=90
).resolve_scale(
y='independent'
)
p.save('cc-trusts-%s.png'%datetime.datetime.now().date().strftime('%Y-%m-%d'))
p
# %% [markdown]
# ## Load the COVID-era ICU/General beds data from the DoH sheet
#
# * Data needs to be interpolated to account for missing days (Christmas/New Year)
# * General beds data needs to take into account the change in reporting on 18/10/2021
# * General beds is also interpolated to include the reporting change day
covid_icu = pandas.read_excel('../data/doh-dd-160621.xlsx', engine='openpyxl', sheet_name='ICU')
covid_icu.interpolate(inplace=True)
covid_icu['Critical Care Occupied'] = covid_icu['Confirmed COVID Occupied'] + covid_icu['Non COVID Occupied']
covid_icu['Critical Care Available'] = covid_icu['Confirmed COVID Occupied'] + covid_icu['Non COVID Occupied'] + covid_icu['Unoccupied Beds']
covid_icu['date'] = pandas.to_datetime(covid_icu['Date'], format='%Y-%m-%d')
covid_icu = covid_icu[['date','Critical Care Occupied','Critical Care Available']].set_index('date')
newind = pandas.date_range(start=covid_icu.index.min(), end=covid_icu.index.max())
covid_icu = covid_icu.reindex(newind).interpolate()
covid_nonicu = pandas.read_excel('../data/doh-dd-160621.xlsx', engine='openpyxl', sheet_name='General Beds')
covid_nonicu.dropna(axis='columns', how='all', inplace=True)
covid_nonicu['Confirmed COVID Occupied'] = covid_nonicu['Confirmed COVID Occupied'].replace('Break in Series - See Notes on Dashboard', 'NaN').astype('float')
covid_nonicu.interpolate(inplace=True)
covid_nonicu['date'] = pandas.to_datetime(covid_nonicu['Date'], format='%Y-%m-%d')
covid_nonicu.fillna(0, inplace=True)
covid_nonicu['General Occupied'] = covid_nonicu['Confirmed COVID Occupied'] + covid_nonicu['Non-COVID Occupied'] + covid_nonicu['Awaiting Admission']
covid_nonicu['General Available'] = covid_nonicu['Confirmed COVID Occupied'] + covid_nonicu['Non-COVID Occupied'] + covid_nonicu['Awaiting Admission'] + covid_nonicu['Unoccupied Beds']
covid_nonicu = covid_nonicu[['date','General Occupied','General Available']].set_index('date')
newind = pandas.date_range(start=covid_nonicu.index.min(), end=covid_nonicu.index.max())
covid_nonicu = covid_nonicu.reindex(newind).interpolate()
covid_daily = covid_icu.merge(covid_nonicu, how='left', left_index=True, right_index=True).reset_index()
covid_daily['Week beginning'] = covid_daily['index'] - pandas.to_timedelta((covid_daily['index'].dt.dayofweek+3)%7, unit='d')
covid_weekly = covid_daily.groupby('Week beginning').sum().reset_index()
covid_weekly['Trust'] = 'DoH'
# %% [markdown]
#
# ## Final tidy up
#
# Divide everything by 7
#
# Sort out the transition between the two datasets
#
# * BHSCT: 5 days in old data
# * NHSCT: 5 days in old data
# * SEHSCT: 5 days in old data
# * SHSCT: 5 days in old data
# * WHSCT: 5 days in old data
# * DoH: 2 or 3 days in first week
#
# Also strip off the final, incomplete week
all_weekly = pandas.concat([all_foi_weekly_trimmed.groupby('Week beginning').sum().reset_index(), covid_weekly])
all_weekly = (all_weekly.set_index(['Trust','Week beginning']) / 7.0).reset_index()
all_weekly['Year'] = all_weekly['Week beginning'].dt.year
all_weekly['Week']
2, 3], 'val': ['a', 'b', 'c']})
t2 = XFrame({'id': [1, 2, 4], 'doubled': ['aa', 'bb', 'dd']})
res = t1.join(t2, how='right').sort('id').head()
assert len(res) == 3
assert res.column_names() == ['id', 'val', 'doubled']
assert res.column_types() == [int, str, str]
assert res[0] == {'id': 1, 'val': 'a', 'doubled': 'aa'}
assert res[1] == {'id': 2, 'val': 'b', 'doubled': 'bb'}
assert res[2] == {'id': 4, 'val': None, 'doubled': 'dd'}
def test_join_full(self):
t1 = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c']})
t2 = XFrame({'id': [1, 2, 4], 'doubled': ['aa', 'bb', 'dd']})
res = t1.join(t2, how='full').sort('id').head()
assert len(res) == 4
assert res.column_names() == ['id', 'val', 'doubled']
assert res.column_types() == [int, str, str]
assert res[0] == {'id': 1, 'val': 'a', 'doubled': 'aa'}
assert res[1] == {'id': 2, 'val': 'b', 'doubled': 'bb'}
assert res[2] == {'id': 3, 'val': 'c', 'doubled': None}
assert res[3] == {'id': 4, 'val': None, 'doubled': 'dd'}
def test_join_cartesian(self):
t1 = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c']})
t2 = XFrame({'id': [10, 20, 30], 'doubled': ['aa', 'bb', 'cc']})
res = t1.join(t2, how='cartesian').sort(['id', 'id.1']).head()
assert len(res) == 9
assert res.column_names() == ['id', 'val', 'doubled', 'id.1']
assert res.column_types() == [int, str, str, int]
assert res[0] == {'id': 1, 'val': 'a', 'doubled': 'aa', 'id.1': 10}
assert res[1] == {'id': 1, 'val': 'a', 'doubled': 'bb', 'id.1': 20}
assert res[3] == {'id': 2, 'val': 'b', 'doubled': 'aa', 'id.1': 10}
assert res[8] == {'id': 3, 'val': 'c', 'doubled': 'cc', 'id.1': 30}
def test_join_bad_how(self):
t1 = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c']})
t2 = XFrame({'id': [1, 2, 3], 'doubled': ['aa', 'bb', 'cc']})
with pytest.raises(ValueError) as exception_info:
t1.join(t2, how='xx')
exception_message = exception_info.value.args[0]
assert exception_message == 'Invalid join type.'
# noinspection PyTypeChecker
def test_join_bad_right(self):
t1 = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c']})
with pytest.raises(TypeError) as exception_info:
t1.join([1, 2, 3])
exception_message = exception_info.value.args[0]
assert exception_message == 'Can only join two XFrames.'
def test_join_bad_on_list(self):
t1 = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c']})
t2 = XFrame({'id': [1, 2, 3], 'doubled': ['aa', 'bb', 'cc']})
with pytest.raises(TypeError) as exception_info:
t1.join(t2, on=['id', 1])
exception_message = exception_info.value.args[0]
assert exception_message == 'Join keys must each be a str.'
# noinspection PyTypeChecker
def test_join_bad_on_type(self):
t1 = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c']})
t2 = XFrame({'id': [1, 2, 3], 'doubled': ['aa', 'bb', 'cc']})
with pytest.raises(TypeError) as exception_info:
t1.join(t2, on=1)
exception_message = exception_info.value.args[0]
assert exception_message == "Must pass a 'str', 'list', or 'dict' of join keys."
def test_join_bad_on_col_name(self):
t1 = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c']})
t2 = XFrame({'id': [1, 2, 3], 'doubled': ['aa', 'bb', 'cc']})
with pytest.raises(ValueError) as exception_info:
t1.join(t2, on='xx')
exception_message = exception_info.value.args[0]
assert exception_message == "Key 'xx' is not a column name."
# noinspection PyClassHasNoInit
class TestXFrameSplitDatetime:
"""
Tests XFrame split_datetime
"""
def test_split_datetime(self):
t = XFrame({'id': [1, 2, 3], 'val': [datetime(2011, 1, 1),
datetime(2012, 2, 2),
datetime(2013, 3, 3)]})
res = t.split_datetime('val')
assert len(res) == 3
assert res.column_names() == ['id',
'val.year', 'val.month', 'val.day',
'val.hour', 'val.minute', 'val.second']
assert res.column_types() == [int, int, int, int, int, int, int]
assert list(res['id']) == [1, 2, 3]
assert list(res['val.year']) == [2011, 2012, 2013]
assert list(res['val.month']) == [1, 2, 3]
assert list(res['val.day']) == [1, 2, 3]
assert list(res['val.hour']) == [0, 0, 0]
assert list(res['val.minute']) == [0, 0, 0]
assert list(res['val.second']) == [0, 0, 0]
# noinspection PyTypeChecker
def test_split_datetime_col_conflict(self):
t = XFrame({'id': [1, 2, 3],
'val.year': ['x', 'y', 'z'],
'val': [datetime(2011, 1, 1),
datetime(2012, 2, 2),
datetime(2013, 3, 3)]})
res = t.split_datetime('val', limit='year')
assert len(res) == 3
assert res.column_names() == ['id', 'val.year', 'val.year.1']
assert res.column_types() == [int, str, int]
assert list(res['id']) == [1, 2, 3]
assert list(res['val.year']) == ['x', 'y', 'z']
assert list(res['val.year.1']) == [2011, 2012, 2013]
def test_split_datetime_bad_col(self):
t = XFrame({'id': [1, 2, 3], 'val': [datetime(2011, 1, 1),
datetime(2011, 2, 2),
datetime(2011, 3, 3)]})
with pytest.raises(KeyError) as exception_info:
t.split_datetime('xx')
exception_message = exception_info.value.args[0]
assert exception_message == "Column 'xx' does not exist in current XFrame."
# noinspection PyClassHasNoInit
class TestXFrameFilterby:
"""
Tests XFrame filterby
"""
def test_filterby_int_id(self):
t = XFrame({'id': [1, 2, 3, 4], 'val': ['a', 'b', 'c', 'd']})
res = t.filterby(1, 'id').sort('id')
assert len(res) == 1
assert res[0] == {'id': 1, 'val': 'a'}
def test_filterby_str_id(self):
t = XFrame({'id': ['qaz', 'wsx', 'edc', 'rfv'], 'val': ['a', 'b', 'c', 'd']})
res = t.filterby('qaz', 'id').sort('id')
assert len(res) == 1
assert res[0] == {'id': 'qaz', 'val': 'a'}
def test_filterby_object_id(self):
t = XFrame({'id': [datetime(2016, 2, 1, 0, 0),
datetime(2016, 2, 2, 0, 0),
datetime(2016, 2, 3, 0, 0),
datetime(2016, 2, 4, 0, 0)],
'val': ['a', 'b', 'c', 'd']})
res = t.filterby(datetime(2016, 2, 1, 0, 0), 'id').sort('id')
assert len(res) == 1
assert res[0] == {'id': datetime(2016, 2, 1, 0, 0), 'val': 'a'}
def test_filterby_list_id(self):
t = XFrame({'id': [1, 2, 3, 4], 'val': ['a', 'b', 'c', 'd']})
res = t.filterby([1, 3], 'id').sort('id')
assert len(res) == 2
assert res[0] == {'id': 1, 'val': 'a'}
assert res[1] == {'id': 3, 'val': 'c'}
def test_filterby_tuple_id(self):
t = XFrame({'id': [1, 2, 3, 4], 'val': ['a', 'b', 'c', 'd']})
res = t.filterby((1, 3), 'id').sort('id')
assert len(res) == 2
assert res[0] == {'id': 1, 'val': 'a'}
assert res[1] == {'id': 3, 'val': 'c'}
def test_filterby_iterable_id(self):
t = XFrame({'id': [1, 2, 3, 4], 'val': ['a', 'b', 'c', 'd']})
res = t.filterby(range(3), 'id').sort('id')
assert len(res) == 2
assert res[0] == {'id': 1, 'val': 'a'}
assert res[1] == {'id': 2, 'val': 'b'}
def test_filterby_set_id(self):
t = XFrame({'id': [1, 2, 3, 4], 'val': ['a', 'b', 'c', 'd']})
res = t.filterby({1, 3}, 'id').sort('id')
assert len(res) == 2
assert res[0] == {'id': 1, 'val': 'a'}
assert res[1] == {'id': 3, 'val': 'c'}
def test_filterby_list_val(self):
t = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c']})
res = t.filterby(['a', 'b'], 'val').sort('id')
assert len(res) == 2
assert res[0] == {'id': 1, 'val': 'a'}
assert res[1] == {'id': 2, 'val': 'b'}
assert list(res['id']) == [1, 2]
def test_filterby_xarray(self):
t = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c']})
a = XArray([1, 3])
res = t.filterby(a, 'id').sort('id')
assert len(res) == 2
assert res[0] == {'id': 1, 'val': 'a'}
assert res[1] == {'id': 3, 'val': 'c'}
assert list(res['id']) == [1, 3]
def test_filterby_function(self):
t = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c']})
res = t.filterby(lambda x: x != 2, 'id').sort('id')
assert len(res) == 2
assert res[0] == {'id': 1, 'val': 'a'}
assert res[1] == {'id': 3, 'val': 'c'}
assert list(res['id']) == [1, 3]
def test_filterby_function_exclude(self):
t = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c']})
res = t.filterby(lambda x: x == 2, 'id', exclude=True).sort('id')
assert len(res) == 2
assert res[0] == {'id': 1, 'val': 'a'}
assert res[1] == {'id': 3, 'val': 'c'}
assert list(res['id']) == [1, 3]
def test_filterby_function_row(self):
t = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c']})
res = t.filterby(lambda row: row['id'] != 2, None).sort('id')
assert len(res) == 2
assert res[0] == {'id': 1, 'val': 'a'}
assert res[1] == {'id': 3, 'val': 'c'}
assert list(res['id']) == [1, 3]
def test_filterby_list_exclude(self):
t = XFrame({'id': [1, 2, 3, 4], 'val': ['a', 'b', 'c', 'd']})
res = t.filterby([1, 3], 'id', exclude=True).sort('id')
assert len(res) == 2
assert res[0] == {'id': 2, 'val': 'b'}
assert res[1] == {'id': 4, 'val': 'd'}
assert list(res['id']) == [2, 4]
def test_filterby_bad_column_type_list(self):
t = XFrame({'id': [1, 2, 3, 4], 'val': ['a', 'b', 'c', 'd']})
with pytest.raises(TypeError) as exception_info:
t.filterby([1, 3], 'val')
exception_message = exception_info.value.args[0]
assert exception_message == 'Value type (int) does not match column type (str).'
def test_filterby_xarray_exclude(self):
t = XFrame({'id': [1, 2, 3, 4], 'val': ['a', 'b', 'c', 'd']})
a = XArray([1, 3])
res = t.filterby(a, 'id', exclude=True).sort('id')
assert len(res) == 2
assert res[0] == {'id': 2, 'val': 'b'}
assert res[1] == {'id': 4, 'val': 'd'}
assert list(res['id']) == [2, 4]
# noinspection PyTypeChecker
def test_filterby_bad_column_name_type(self):
t = XFrame({'id': [1, 2, 3, 4], 'val': ['a', 'b', 'c', 'd']})
with pytest.raises(TypeError) as exception_info:
t.filterby([1, 3], 1)
exception_message = exception_info.value.args[0]
assert exception_message == 'Column_name must be a string.'
def test_filterby_bad_column_name(self):
t = XFrame({'id': [1, 2, 3, 4], 'val': ['a', 'b', 'c', 'd']})
        with pytest.raises(KeyError)
"""
simple, elegant templating
(part of web.py)
Template design:
Template string is split into tokens and the tokens are combined into nodes.
Parse tree is a nodelist. TextNode and ExpressionNode are simple nodes and
for-loop, if-loop etc are block nodes, which contain multiple child nodes.
Each node can emit some python string. The python string emitted by the
root node is validated for safeeval and executed using python in the given environment.
Enough care is taken to make sure the generated code and the template have a line-to-line match,
so that error messages can point to the exact line number in the template. (This still doesn't work in some cases.)
Grammar:
template -> defwith sections
defwith -> '$def with (' arguments ')' | ''
sections -> section*
section -> block | assignment | line
assignment -> '$ ' <assignment expression>
line -> (text|expr)*
text -> <any characters other than $>
expr -> '$' pyexpr | '$(' pyexpr ')' | '${' pyexpr '}'
pyexpr -> <python expression>
"""
__all__ = [
"Template",
"Render", "render", "frender",
"ParseError", "SecurityError",
"test"
]
import tokenize
import os
import sys
import glob
import re
from UserDict import DictMixin
from utils import storage, safeunicode, safestr, re_compile
from webapi import config
from net import websafe
def splitline(text):
r"""
Splits the given text at newline.
>>> splitline('foo\nbar')
('foo\n', 'bar')
>>> splitline('foo')
('foo', '')
>>> splitline('')
('', '')
"""
index = text.find('\n') + 1
if index:
return text[:index], text[index:]
else:
return text, ''
class Parser:
"""Parser Base.
"""
def __init__(self):
self.statement_nodes = STATEMENT_NODES
self.keywords = KEYWORDS
def parse(self, text, name="<template>"):
self.text = text
self.name = name
defwith, text = self.read_defwith(text)
suite = self.read_suite(text)
return DefwithNode(defwith, suite)
def read_defwith(self, text):
if text.startswith('$def with'):
defwith, text = splitline(text)
defwith = defwith[1:].strip() # strip $ and spaces
return defwith, text
else:
return '', text
def read_section(self, text):
r"""Reads one section from the given text.
section -> block | assignment | line
>>> read_section = Parser().read_section
>>> read_section('foo\nbar\n')
(<line: [t'foo\n']>, 'bar\n')
>>> read_section('$ a = b + 1\nfoo\n')
(<assignment: 'a = b + 1'>, 'foo\n')
        read_section('$for i in range(10):\n    hello $i\nfoo')
"""
if text.lstrip(' ').startswith('$'):
index = text.index('$')
begin_indent, text2 = text[:index], text[index+1:]
ahead = self.python_lookahead(text2)
if ahead == 'var':
return self.read_var(text2)
elif ahead in self.statement_nodes:
return self.read_block_section(text2, begin_indent)
elif ahead in self.keywords:
return self.read_keyword(text2)
elif ahead.strip() == '':
                # assignments start with a space after $
# ex: $ a = b + 2
return self.read_assignment(text2)
return self.readline(text)
def read_var(self, text):
r"""Reads a var statement.
>>> read_var = Parser().read_var
>>> read_var('var x=10\nfoo')
(<var: x = 10>, 'foo')
>>> read_var('var x: hello $name\nfoo')
(<var: x = join_(u'hello ', escape_(name, True))>, 'foo')
"""
line, text = splitline(text)
tokens = self.python_tokens(line)
if len(tokens) < 4:
raise SyntaxError('Invalid var statement')
name = tokens[1]
sep = tokens[2]
value = line.split(sep, 1)[1].strip()
if sep == '=':
pass # no need to process value
elif sep == ':':
            #@@ Hack for backward-compatibility
if tokens[3] == '\n': # multi-line var statement
block, text = self.read_indented_block(text, ' ')
lines = [self.readline(x)[0] for x in block.splitlines()]
nodes = []
for x in lines:
nodes.extend(x.nodes)
nodes.append(TextNode('\n'))
else: # single-line var statement
linenode, _ = self.readline(value)
nodes = linenode.nodes
parts = [node.emit('') for node in nodes]
value = "join_(%s)" % ", ".join(parts)
else:
raise SyntaxError('Invalid var statement')
return VarNode(name, value), text
def read_suite(self, text):
r"""Reads section by section till end of text.
>>> read_suite = Parser().read_suite
>>> read_suite('hello $name\nfoo\n')
[<line: [t'hello ', $name, t'\n']>, <line: [t'foo\n']>]
"""
sections = []
while text:
section, text = self.read_section(text)
sections.append(section)
return SuiteNode(sections)
def readline(self, text):
r"""Reads one line from the text. Newline is supressed if the line ends with \.
>>> readline = Parser().readline
>>> readline('hello $name!\nbye!')
(<line: [t'hello ', $name, t'!\n']>, 'bye!')
>>> readline('hello $name!\\\nbye!')
(<line: [t'hello ', $name, t'!']>, 'bye!')
>>> readline('$f()\n\n')
(<line: [$f(), t'\n']>, '\n')
"""
line, text = splitline(text)
        # suppress newline if the line ends with \
if line.endswith('\\\n'):
line = line[:-2]
nodes = []
while line:
node, line = self.read_node(line)
nodes.append(node)
return LineNode(nodes), text
def read_node(self, text):
r"""Reads a node from the given text and returns the node and remaining text.
>>> read_node = Parser().read_node
>>> read_node('hello $name')
(t'hello ', '$name')
>>> read_node('$name')
($name, '')
"""
if text.startswith('$$'):
return TextNode('$'), text[2:]
elif text.startswith('$#'): # comment
line, text = splitline(text)
return TextNode('\n'), text
elif text.startswith('$'):
text = text[1:] # strip $
if text.startswith(':'):
escape = False
text = text[1:] # strip :
else:
escape = True
return self.read_expr(text, escape=escape)
else:
return self.read_text(text)
def read_text(self, text):
r"""Reads a text node from the given text.
>>> read_text = Parser().read_text
>>> read_text('hello $name')
(t'hello ', '$name')
"""
index = text.find('$')
if index < 0:
return TextNode(text), ''
else:
return TextNode(text[:index]), text[index:]
def read_keyword(self, text):
line, text = splitline(text)
return StatementNode(line.strip() + "\n"), text
def read_expr(self, text, escape=True):
"""Reads a python expression from the text and returns the expression and remaining text.
expr -> simple_expr | paren_expr
simple_expr -> id extended_expr
extended_expr -> attr_access | paren_expr extended_expr | ''
attr_access -> dot id extended_expr
paren_expr -> [ tokens ] | ( tokens ) | { tokens }
>>> read_expr = Parser().read_expr
>>> read_expr("name")
($name, '')
>>> read_expr("a.b and c")
($a.b, ' and c')
>>> read_expr("a. b")
($a, '. b')
>>> read_expr("name</h1>")
($name, '</h1>')
>>> read_expr("(limit)ing")
($(limit), 'ing')
>>> read_expr('a[1, 2][:3].f(1+2, "weird string[).", 3 + 4) done.')
($a[1, 2][:3].f(1+2, "weird string[).", 3 + 4), ' done.')
"""
def simple_expr():
identifier()
extended_expr()
def identifier():
tokens.next()
def extended_expr():
lookahead = tokens.lookahead()
if lookahead is None:
return
elif lookahead.value == '.':
attr_access()
elif lookahead.value in parens:
paren_expr()
extended_expr()
else:
return
def attr_access():
from token import NAME # python token constants
dot = tokens.lookahead()
if tokens.lookahead2().type == NAME:
tokens.next() # consume dot
identifier()
extended_expr()
def paren_expr():
begin = tokens.next().value
end = parens[begin]
while True:
if tokens.lookahead().value in parens:
paren_expr()
else:
t = tokens.next()
if t.value == end:
break
return
parens = {
"(": ")",
"[": "]",
"{": "}"
}
def get_tokens(text):
"""tokenize text using python tokenizer.
Python tokenizer ignores spaces, but they might be important in some cases.
This function introduces dummy space tokens when it identifies any ignored space.
Each token is a storage object containing type, value, begin and end.
"""
readline = iter([text]).next
end = None
for t in tokenize.generate_tokens(readline):
t = storage(type=t[0], value=t[1], begin=t[2], end=t[3])
if end is not None and end != t.begin:
_, x1 = end
_, x2 = t.begin
yield storage(type=-1, value=text[x1:x2], begin=end, end=t.begin)
end = t.end
yield t
class BetterIter:
"""Iterator like object with 2 support for 2 look aheads."""
def __init__(self, items):
self.iteritems = iter(items)
self.items = []
self.position = 0
self.current_item = None
def lookahead(self):
if len(self.items) <= self.position:
self.items.append(self._next())
return self.items[self.position]
def _next(self):
try:
return self.iteritems.next()
except StopIteration:
return None
def lookahead2(self):
if len(self.items) <= self.position+1:
self.items.append(self._next())
return self.items[self.position+1]
def next(self):
self.current_item = self.lookahead()
self.position += 1
return self.current_item
tokens = BetterIter(get_tokens(text))
if tokens.lookahead().value in parens:
paren_expr()
else:
simple_expr()
row, col = tokens.current_item.end
return ExpressionNode(text[:col], escape=escape), text[col:]
def read_assignment(self, text):
r"""Reads assignment statement from text.
>>> read_assignment = Parser().read_assignment
>>> read_assignment('a = b + 1\nfoo')
(<assignment: 'a = b + 1'>, 'foo')
"""
line, text = splitline(text)
return AssignmentNode(line.strip()), text
def python_lookahead(self, text):
"""Returns the first python token from the given text.
>>> python_lookahead = Parser().python_lookahead
>>> python_lookahead('for i in range(10):')
'for'
>>> python_lookahead('else:')
'else'
>>> python_lookahead(' x = 1')
' '
"""
readline = iter([text]).next
tokens = tokenize.generate_tokens(readline)
return tokens.next()[1]
def python_tokens(self, text):
readline = iter([text]).next
tokens = tokenize.generate_tokens(readline)
return [t[1] for t in tokens]
def read_indented_block(self, text, indent):
r"""Read a block of text. A block is what typically follows a for or it statement.
        It can be in the
# Builder.py v1.4
# Basic template-based HTML compiler.
# Requires Python 3.x
# THE FORMAT
# To avoid conflicting with standard HTML, metadata is stored in standard HTML comments:
# <!-- $command -->
# There can be a maximum of ONE command per line; later commands will be ignored.
# Commands should be included on their own line, because the line containing the command
# will be removed from the output.
# There are two kinds of files: templates and pages; each has its own set of commands.
# Essentially, templates include sections from the pages by inserting the contents
# of the section wherever the include was.
# Page commands:
# Use these before any section tags; don't put them twice:
# $template <file> -- Use a template out of the templates directory
# $output <directory> -- Copy this file into a different directory (instead of '../');
# the directory must exist or an error will occur! Please note
# that the path is relative to the location of this file.
#
# $begin -- Ends the 'header', begins the content. If not included,
# you will probably experience errors!
# Available after the header:
# $section <name> -- Use the content between this tag and an $endsection to
# replace a section placeholder in the layout. Nesting is
# not allowed!
# $include-file <file> -- Include the entire contents of an external file
# $endsection -- End a section. There must be one of these for every
# $section tag.
#
# If a template is not used, the file will be copied as-is. Otherwise, if there is
# any text outside a section, IT WILL BE REMOVED FROM THE OUTPUT! Unused
# sections are not added to the output either.
# Template commands:
# $include <name> -- Replace this tag with the contents of a section defined in
# the page.
# $req-include <name> -- Same as $include, but raises a ParseException if the
# section is not defined.
# $include-file <file> -- Include the entire contents of an external file
# Templates are also run through the page parser, so that you can apply super-templates
# to them. This also means that they require the $begin command before the content!
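# A worked example of the format described above (file and section names are
# hypothetical; this is an illustrative sketch, not taken from the real site):
#
#   templates/base.html:
#     <!-- $begin -->
#     <html><body>
#     <!-- $req-include content -->
#     </body></html>
#
#   pages/index.html:
#     <!-- $template base.html -->
#     <!-- $begin -->
#     <!-- $section content -->
#     <h1>Hello</h1>
#     <!-- $endsection -->
#
# Building would emit index.html into the output directory with the <h1> line
# substituted for the $req-include placeholder in the template.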
import os
import os.path
import glob
import time
import sys
import shutil
def print_help():
print("Usage: " + sys.argv[0] + " [options]")
print("Options:")
print(" -v|--verbose Show build progress (useful if any errors occur)")
print(" -h|--help Show this help")
verbose = False
# Check flags
if len(sys.argv) > 1:
for i in range(1, len(sys.argv)):
if sys.argv[i] == '-v' or sys.argv[i] == '--verbose':
verbose = True
elif sys.argv[i] == '-h' or sys.argv[i] == '--help':
print_help()
sys.exit(0)
# Print but only if the verbose flag is set
def printv(*args, **kwargs):
if verbose:
print(*args, **kwargs)
template_dir = "templates/"
output_header = "<!DOCTYPE html>\n" \
+ "<!-- This file is auto-generated. Please edit the appropriate file in pages/\n" \
+ " and re-generate the website using builder.py instead of editing this file. -->\n"
# TODO require file/line/token information
class ParseException(Exception):
pass
# base template class
class Parser:
def parse(self, command):
pass
def parseLine(self, line):
pass
def end(self):
pass
# PageParser: parse page commands
class PageParser(Parser):
def __init__(self, default_outdir):
self.header = True
self.section = None
self.template = None
self.sections = {}
self.outdir = default_outdir
self.base_outdir = default_outdir
def parse(self, command):
cmd = command.split(" ")
if cmd[0] == "template":
# template: set the template for the page (this can only be done once)
# reads templates from template_dir
# HEADER command: cannot be invoked after begin
if not self.header:
raise ParseException("Template command outside header!")
if self.template:
raise ParseException("Template already set!")
if len(cmd) < 2:
raise ParseException("Not enough arguments for template");
printv("Using template file " + cmd[1])
self.template = template_dir + cmd[1]
elif cmd[0] == "output":
# output: set the output directory for the file
# HEADER command: cannot be invoked after begin
if not self.header:
raise ParseException("Output command outside header!")
if len(cmd) < 2:
raise ParseException("Not enough arguments for output")
printv("Using output directory " + cmd[1])
self.outdir = self.base_outdir + "/" + cmd[1]
elif cmd[0] == "begin":
# begin: end the header and start the content
if not self.header:
raise ParseException("Multiple begin commands!")
printv("Begin content")
self.header = False
elif cmd[0] == "section":
# section: begin a section
if self.header:
raise ParseException("Cannot start section in header!")
if self.section:
raise ParseException("Cannot nest sections!")
if len(cmd) < 2:
raise ParseException("Not enough arguments for section")
printv("Start section " + cmd[1])
section_name = cmd[1]
if section_name in self.sections:
raise ParseException("Duplicate section name " + section_name)
self.section = section_name
self.sections[section_name] = []
elif cmd[0] == "include-file":
if self.header:
return 1
if not self.section:
return 1
if len(cmd) < 2:
raise ParseException("Not enough arguments for include-file")
if not os.path.exists(cmd[1]):
raise ParseException("File " + cmd[1] + " does not exist")
printv("Including " + cmd[1])
copier = PageCopier(self.sections[self.section])
parse(cmd[1], copier)
elif cmd[0] == "endsection":
if self.header:
raise ParseException("Cannot end section in header!")
if not self.section:
raise ParseException("No section to end!")
printv("End section " + self.section)
self.section = None
else:
return 1
return 0
def parseLine(self, line):
if self.section:
self.sections[self.section].append(line);
def end(self):
if self.header:
raise ParseException("Reached end of file; no begin command found!")
if self.section:
raise ParseException("Reached end of file; could not find end of section " + self.section)
class TemplateParser(Parser):
def __init__(self, sections, outfile, keep_includes=True):
self.sections = sections
self.outfile = outfile
self.keep_includes = keep_includes
def addsection(self, section):
for line in section:
self.outfile.write(line)
def parse(self, command):
cmd = command.split(" ")
if cmd[0] == "include":
if len(cmd) < 2:
raise ParseException("Not enough arguments for include")
if cmd[1] in self.sections:
printv("Inserting section " + cmd[1])
self.addsection(self.sections[cmd[1]])
elif self.keep_includes:
printv("Keeping include " + cmd[1])
return 1 # Keep missing includes
elif cmd[0] == "req-include":
if len(cmd) < 2:
raise ParseException("Not enough arguments for req-include")
if cmd[1] in self.sections:
printv("Inserting section " + cmd[1])
self.addsection(self.sections[cmd[1]])
else:
raise ParseException("Undefined section: '" + cmd[1] + "'")
elif cmd[0] == "include-file":
if len(cmd) < 2:
raise ParseException("Not enough arguments for include-file")
if not os.path.exists(cmd[1]):
raise ParseException("File " + cmd[1] + " does not exist")
printv("Including " + cmd[1])
copier = Copier(self.outfile)
parse(cmd[1], copier)
else:
return 1
return 0
def parseLine(self, line):
self.outfile.write(line)
class Copier(Parser):
def __init__(self, outfile, keep=["include","req-include","include-file"]):
self.outfile = outfile
self.keep = keep
def parse(self, command):
cmd = command.split(" ")
if cmd[0] in self.keep:
return 1
return 0
def parseLine(self, line):
self.outfile.write(line)
class PageCopier(Copier):
def parseLine(self, line):
# Append to an array instead of writing to a file
self.outfile.append(line)
def parse(filename, parser):
with open(filename, "r", encoding="utf8") as f:
for line in f:
if '<!--' in line and '-->' in line:
start = line.find('<!--') + 4
end = line.find("-->") - 1
content = line[start:end].strip()
if content.startswith('$'):
content = content[1:]
# print("Parsing '" + content + "'")
missingno = parser.parse(content)
if missingno:
parser.parseLine(line) # Don't remove unknown commands
else:
parser.parseLine(line)
else:
parser.parseLine(line)
parser.end()
def parse_page(filename, default_outdir=".."):
printv("Parsing page " + filename)
page_parser = PageParser(default_outdir)
parse(filename, page_parser) # Fill it up with data
return page_parser
def write_page(filename, page_parser, keep_includes=True, write_header=False):
# Open up an output file for the template parser (write a tmp file in case of errors)
os.makedirs(os.path.dirname(page_parser.outdir + '/' + filename), exist_ok=True)
with open(page_parser.outdir + "/" + filename + ".partial", "w", encoding="utf8") as outfile:
printv("Parsing template and writing output")
if not page_parser.template:
printv("No template; copying file as-is")
copier = Copier(outfile)
if write_header:
copier.parseLine(output_header)
parse(filename, copier)
else:
template_parser = TemplateParser(page_parser.sections, outfile, keep_includes)
if write_header:
template_parser.parseLine(output_header)
parse("generated/" + page_parser.template, template_parser)
targetname = page_parser.outdir + "/" + filename
printv("Updating " + targetname)
if (os.path.exists(targetname)):
os.remove(targetname)
os.rename(targetname + ".partial", targetname)
def parse_template(filename):
printv("Parsing template requirement " + filename)
pp = parse_page(filename, "generated")
if pp.template:
if os.path.realpath(pp.template) == os.path.realpath(filename):
raise ParseException("Template cannot include itself")
else:
parse_template(pp.template)
write_page(filename, pp)
def parse_dir(dirname, outdir):
printv("Scanning directory %s for HTML files" % dirname)
for file in glob.glob(dirname + "/*.html"):
page_parser = parse_page(file, outdir)
if page_parser.template:
# Generate required templates
parse_template(page_parser.template)
write_page(file, page_parser, keep_includes=False, write_header=True)
def read_config(filename):
printv("Reading " + filename)
out = {}
out["dirs"] = ["."]
out["outdir"] = ".."
out["deps"] = []
with open(filename) as config:
for line in config:
printv(line)
if line.startswith('#'):
continue
if not ':' in line:
printv("Ignoring bad configuration line")
continue
property = line[0:line.index(':')].strip()
            value =
the position of the specified vehicle (relative to the
current edge).
Parameters
----------
veh_id : str or list<str>
vehicle id, or list of vehicle ids
error : any, optional
value that is returned if the vehicle is not found
Returns
-------
str
"""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_edge(vehID, error) for vehID in veh_id]
return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_ROAD_ID, error)
def get_lane(self, veh_id, error=-1001):
"""Returns the lane index of the specified vehicle.
Parameters
----------
veh_id : str or list<str>
vehicle id, or list of vehicle ids
error : any, optional
value that is returned if the vehicle is not found
Returns
-------
int
"""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_lane(vehID, error) for vehID in veh_id]
return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_LANE_INDEX, error)
def set_length(self, veh_id, length):
self.__vehicles[veh_id]["length"] = length
def get_length(self, veh_id, error=-1001):
"""Returns the length of the specified vehicle.
Parameters
----------
veh_id : str or list<str>
vehicle id, or list of vehicle ids
error : any, optional
value that is returned if the vehicle is not found
Returns
-------
float
"""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_length(vehID, error) for vehID in veh_id]
return self.__vehicles.get(veh_id, {}).get("length", error)
def get_acc_controller(self, veh_id, error=None):
"""Returns the acceleration controller of the specified vehicle.
Parameters
----------
veh_id : str or list<str>
vehicle id, or list of vehicle ids
error : any, optional
value that is returned if the vehicle is not found
Returns
-------
object
"""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_acc_controller(vehID, error) for vehID in veh_id]
return self.__vehicles.get(veh_id, {}).get("acc_controller", error)
def get_lane_changing_controller(self, veh_id, error=None):
"""Returns the lane changing controller of the specified vehicle.
Parameters
----------
veh_id : str or list<str>
vehicle id, or list of vehicle ids
error : any, optional
value that is returned if the vehicle is not found
Returns
-------
object
"""
if isinstance(veh_id, (list, np.ndarray)):
return [
self.get_lane_changing_controller(vehID, error)
for vehID in veh_id
]
return self.__vehicles.get(veh_id, {}).get("lane_changer", error)
def get_routing_controller(self, veh_id, error=None):
"""Returns the routing controller of the specified vehicle.
Parameters
----------
veh_id : str or list<str>
vehicle id, or list of vehicle ids
error : any, optional
value that is returned if the vehicle is not found
Returns
-------
object
"""
if isinstance(veh_id, (list, np.ndarray)):
return [
self.get_routing_controller(vehID, error) for vehID in veh_id
]
return self.__vehicles.get(veh_id, {}).get("router", error)
def get_route(self, veh_id, error=list()):
"""Returns the route of the specified vehicle.
Parameters
----------
veh_id : str or list<str>
vehicle id, or list of vehicle ids
error : any, optional
value that is returned if the vehicle is not found
Returns
-------
list<str>
"""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_route(vehID, error) for vehID in veh_id]
return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_EDGES, error)
def get_leader(self, veh_id, error=""):
"""Returns the leader of the specified vehicle.
Parameters
----------
veh_id : str or list<str>
vehicle id, or list of vehicle ids
error : any, optional
value that is returned if the vehicle is not found
Returns
-------
str
"""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_leader(vehID, error) for vehID in veh_id]
return self.__vehicles.get(veh_id, {}).get("leader", error)
def get_follower(self, veh_id, error=""):
"""Returns the follower of the specified vehicle.
Parameters
----------
veh_id : str or list<str>
vehicle id, or list of vehicle ids
error : any, optional
value that is returned if the vehicle is not found
Returns
-------
str
"""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_follower(vehID, error) for vehID in veh_id]
return self.__vehicles.get(veh_id, {}).get("follower", error)
def get_headway(self, veh_id, error=-1001):
"""Returns the headway of the specified vehicle(s).
Parameters
----------
veh_id : str or list<str>
vehicle id, or list of vehicle ids
error : any, optional
value that is returned if the vehicle is not found
Returns
-------
float
"""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_headway(vehID, error) for vehID in veh_id]
return self.__vehicles.get(veh_id, {}).get("headway", error)
def set_lane_headways(self, veh_id, lane_headways):
self.__vehicles[veh_id]["lane_headways"] = lane_headways
def get_lane_headways(self, veh_id, error=list()):
"""Returns the headways between the specified vehicle and the vehicle
ahead of it in all lanes.
Parameters
----------
veh_id : str or list<str>
vehicle id, or list of vehicle ids
error : any, optional
value that is returned if the vehicle is not found
Returns
-------
list<float>
"""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_lane_headways(vehID, error) for vehID in veh_id]
return self.__vehicles.get(veh_id, {}).get("lane_headways", error)
def set_lane_leaders(self, veh_id, lane_leaders):
self.__vehicles[veh_id]["lane_leaders"] = lane_leaders
def get_lane_leaders(self, veh_id, error=list()):
"""Returns the leaders for the specified vehicle in all lanes.
Parameters
----------
veh_id : str or list<str>
vehicle id, or list of vehicle ids
error : any, optional
value that is returned if the vehicle is not found
Returns
-------
list<float>
"""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_lane_leaders(vehID, error) for vehID in veh_id]
return self.__vehicles.get(veh_id, {}).get("lane_leaders", error)
def set_lane_tailways(self, veh_id, lane_tailways):
self.__vehicles[veh_id]["lane_tailways"] = lane_tailways
def get_lane_tailways(self, veh_id, error=list()):
"""Returns the tailways between the specified vehicle and the vehicle
behind it in all lanes.
Parameters
----------
veh_id : str or list<str>
vehicle id, or list of vehicle ids
error : any, optional
value that is returned if the vehicle is not found
Returns
-------
list<float>
"""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_lane_tailways(vehID, error) for vehID in veh_id]
return self.__vehicles.get(veh_id, {}).get("lane_tailways", error)
def set_lane_followers(self, veh_id, lane_followers):
self.__vehicles[veh_id]["lane_followers"] = lane_followers
def get_lane_followers(self, veh_id, error=list()):
"""Returns the followers for the specified vehicle in all lanes.
Parameters
----------
veh_id : str or list<str>
vehicle id, or list of vehicle ids
error : list, optional
value that is returned if the vehicle is not found
Returns
-------
list<str>
"""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_lane_followers(vehID, error) for vehID in veh_id]
return self.__vehicles.get(veh_id, {}).get("lane_followers", error)
# TODO(ak): setting sumo observations?
def set_state(self, veh_id, state_name, state):
"""Generic set function.
Updates the state *state_name* of the vehicle with id *veh_id* with the
value *state*.
"""
self.__vehicles[veh_id][state_name] = state
# TODO(ak): getting sumo observations?
def get_state(self, veh_id, state_name, error=None):
"""Generic get function. Returns the value of *state_name* of the
specified vehicles at the current time step.
"""
if isinstance(veh_id, list):
return [
self.get_state(vehID, state_name, error) for vehID in veh_id
]
return self.__vehicles.get(veh_id, {}).get(state_name, error)
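    # Illustrative sketch (vehicle id and key below are hypothetical): these two
    # generic accessors let controllers keep arbitrary per-vehicle bookkeeping
    # alongside the dedicated getters above, e.g.
    #     vehicles.set_state('rl_0', 'last_accel', 0.5)
    #     vehicles.get_state('rl_0', 'last_accel')  # -> 0.5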
def _multi_lane_headways(self, env):
"""Computes the lane leaders/followers/headways/tailways for all
vehicles in the network."""
edge_list = env.scenario.get_edge_list()
junction_list = env.scenario.get_junction_list()
tot_list = edge_list + junction_list
num_edges = (len(env.scenario.get_edge_list()) + len(
env.scenario.get_junction_list()))
# maximum number of lanes in the network
max_lanes = max(
[env.scenario.num_lanes(edge_id) for edge_id in tot_list])
# Key = edge id
# Element = list, with the ith element containing tuples with the name
# and position of all vehicles in lane i
edge_dict = dict.fromkeys(tot_list)
# add the vehicles to the edge_dict element
for veh_id in self.get_ids():
edge = self.get_edge(veh_id)
lane = self.get_lane(veh_id)
pos = self.get_position(veh_id)
if edge:
if edge_dict[edge] is None:
edge_dict[edge] = [[] for _ in range(max_lanes)]
edge_dict[edge][lane].append((veh_id, pos))
# sort all lanes in each edge by position
for edge in tot_list:
if edge_dict[edge] is None:
del edge_dict[edge]
else:
for lane in range(max_lanes):
edge_dict[edge][lane].sort(key=lambda x: x[1])
for veh_id in self.get_rl_ids():
# collect the lane leaders, followers, headways, and tailways for
# each vehicle
edge = self.get_edge(veh_id)
if edge:
headways, tailways, leaders, followers = \
self._multi_lane_headways_util(veh_id, edge_dict,
num_edges, env)
# add the above values to the vehicles class
self.set_lane_headways(veh_id, headways)
self.set_lane_tailways(veh_id, tailways)
self.set_lane_leaders(veh_id, leaders)
self.set_lane_followers(veh_id, followers)
self._ids_by_edge = dict().fromkeys(edge_list)
for edge_id in edge_dict:
edges = list(itertools.chain.from_iterable(edge_dict[edge_id]))
# check for edges with no vehicles
if len(edges) > 0:
edges, _ = zip(*edges)
self._ids_by_edge[edge_id] = list(edges)
else:
self._ids_by_edge[edge_id] = []
def _multi_lane_headways_util(self, veh_id, edge_dict, num_edges, env):
"""Utility function for _multi_lane_headways()
Computes the lane headways, tailways, leaders, and followers for the
specified vehicle.
Parameters
----------
veh_id : str
name of the vehicle
edge_dict : dict < list<tuple> >
Key = Edge name
Index = lane index
Element = sorted list of (vehicle id, position)
Returns
-------
headway : list<float>
Index = lane index
Element = headway at this lane
tailway : list<float>
Index = lane index
Element = tailway at this lane
leader : list<str>
Index = lane index
Element = leader at this lane
follower : list<str>
Index = lane index
            Element = follower at this lane
commit of a repo.
After a scan, record what is the most recent commit scanned, such that
another (future) scan will not process the same commits twice.
Parameters
----------
url: str
The url of the repository scanned
last_commit: str
The most recent commit scanned
Returns
-------
bool
`True` if the update is successful, `False` otherwise
"""
query = 'UPDATE repos SET last_commit=%s WHERE url=%s RETURNING true'
cursor = self.db.cursor()
try:
cursor.execute(query, (last_commit, url))
self.db.commit()
return bool(cursor.fetchone()[0])
except (TypeError, IndexError):
""" A TypeError is raised if any of the required arguments is
missing. """
self.db.rollback()
return False
except Error:
self.db.rollback()
return False
def update_discovery(self, discovery_id, new_state):
""" Change the state of a discovery.
Parameters
----------
discovery_id: int
The id of the discovery to be updated
new_state: str
The new state of this discovery
Returns
-------
bool
`True` if the update is successful, `False` otherwise
"""
if new_state not in ('new', 'false_positive', 'addressing',
'not_relevant', 'fixed'):
return False
query = 'UPDATE discoveries SET state=%s WHERE id=%s RETURNING true'
cursor = self.db.cursor()
try:
cursor.execute(query, (new_state, discovery_id))
self.db.commit()
return bool(cursor.fetchone()[0])
except (TypeError, IndexError):
""" A TypeError is raised if any of the required arguments is
missing. """
self.db.rollback()
return False
except Error:
self.db.rollback()
return False
def update_discovery_group(self, repo_url, file_name, snippet, new_state):
""" Change the state of a group of discoveries.
A group of discoveries is identified by the url of their repository,
        their file name, and their snippet.
Parameters
----------
repo_url: str
The url of the repository
file_name: str
The name of the file
snippet: str
The snippet
new_state: str
The new state of this discovery
Returns
-------
bool
`True` if the update is successful, `False` otherwise
"""
if new_state not in ('new', 'false_positive', 'addressing',
'not_relevant', 'fixed'):
return False
query = 'UPDATE discoveries SET state=%s WHERE repo_url=%s and \
file_name=%s and snippet=%s RETURNING true'
cursor = self.db.cursor()
try:
cursor.execute(query, (new_state, repo_url, file_name, snippet))
self.db.commit()
return bool(cursor.fetchone()[0])
except (TypeError, IndexError):
""" A TypeError is raised if any of the required arguments is
missing. """
self.db.rollback()
return False
except Error:
self.db.rollback()
return False
def scan(self, repo_url, category=None, scanner=GitScanner,
models=[], exclude=[], force=False, verbose=False,
generate_snippet_extractor=False):
""" Launch the scan of a repository.
Parameters
----------
repo_url: str
The url of the repo to scan
category: str, optional
If specified, scan the repo using all the rules of this category,
otherwise use all the rules in the db
scanner: class, default: `GitScanner`
The class of the scanner, a subclass of `scanners.BaseScanner`
models: list, optional
A list of models for the ML false positives detection
exclude: list, optional
A list of rules to exclude
force: bool, default `False`
Force a complete re-scan of the repository, in case it has already
been scanned previously
verbose: bool, default `False`
Flag used to decide whether to visualize the progressbars during
the scan (e.g., during the insertion of the detections in the db)
generate_snippet_extractor: bool, default `False`
Generate the extractor model to be used in the SnippetModel. The
extractor is generated using the ExtractorGenerator. If `False`,
use the pre-trained extractor model
Returns
-------
list
            The ids of the discoveries detected by the scanner (excluding the
            ones classified as false positives).
"""
def analyze_discoveries(model_manager, discoveries, verbose):
""" Use a model to analyze a list of discoveries. """
false_positives = set()
# Analyze all the discoveries ids with the current model
if verbose:
print('Analyzing discoveries with model %s' %
model_manager.model)
for i in tqdm(range(len(discoveries))):
did = discoveries[i]
if model_manager.launch_model(self.get_discovery(did)):
false_positives.add(did)
else:
for did in discoveries:
if model_manager.launch_model(self.get_discovery(did)):
false_positives.add(did)
# For each false positive, update the db
if verbose:
print('Model %s classified %s discoveries' % (
model_manager.model.__class__.__name__,
len(false_positives)))
print('Change state to these discoveries')
fp_id = iter(false_positives)
for i in tqdm(range(len(false_positives))):
self.update_discovery(next(fp_id), 'false_positive')
else:
for fp_id in false_positives:
self.update_discovery(fp_id, 'false_positive')
# Update the discovery ids (remove false positives)
discoveries = list(set(discoveries) - false_positives)
# Return discovery ids of non-false positives
return discoveries
# Try to add the repository to the db
if self.add_repo(repo_url):
# The repository is new, scan from the first commit
from_commit = None
else:
# Get the latest commit recorded on the db
from_commit = self.get_repo(repo_url)['last_commit']
# Force complete scan
if force:
if verbose:
print('Force complete scan')
from_commit = None
# Prepare rules
rules = self.get_rules(category)
if exclude:
rules = list(filter(lambda x: x['id'] not in exclude, rules))
if not rules:
raise ValueError('No rules found')
# Call scanner
s = scanner(rules)
if verbose:
print('Scanning commits...')
latest_commit, these_discoveries = s.scan(repo_url,
since_commit=from_commit)
if verbose:
print('Detected %s discoveries' % len(these_discoveries))
# Update latest commit of the repo
self.update_repo(repo_url, latest_commit)
# Insert the discoveries into the db
discoveries_ids = list()
if verbose:
for i in tqdm(range(len(these_discoveries))):
curr_d = these_discoveries[i]
new_id = self.add_discovery(curr_d['file_name'],
curr_d['commit_id'],
curr_d['snippet'],
repo_url,
curr_d['rule_id'])
if new_id != -1:
discoveries_ids.append(new_id)
else:
# IDs of the discoveries added to the db (needed in the ML)
discoveries_ids = map(lambda d: self.add_discovery(d['file_name'],
d['commit_id'],
d['snippet'],
repo_url,
d['rule_id']),
these_discoveries)
discoveries_ids = list(filter(lambda i: i != -1,
discoveries_ids))
if not discoveries_ids:
return []
# Verify if the SnippetModel is needed, and, in this case, check
# whether the pre-trained or the generated extractor is wanted
snippet_with_generator = False
if 'SnippetModel' in models:
if generate_snippet_extractor:
# Here, the scan is being run with the SnippetModel and its
# generator.
# Remove the snippet model from the list of models to be run:
# we will launch it manually at the end, as last model.
# In fact, the SnippetModel may take some time, and in case we
# need to generate its extractor this delay will be even higher
snippet_with_generator = True
models.remove('SnippetModel')
else:
# If the SnippetModel is not chosen, but the generator flag is set
# to True, do not generate the model (to save time and resources)
if generate_snippet_extractor and verbose:
print('generate_snippet_extractor=True but SnippetModel is',
'not in the chosen models.',
'Do not generate the extractor.')
# For each of the new discovery ids, select it from the db and analyze
# it. If it is classified as false positive, update the corresponding
# entry on the db
for model in models:
# Try to instantiate the model
try:
mm = ModelManager(model)
except ModuleNotFoundError:
print('Model %s not found. Skip it.' % model)
# Continue with another model (if any)
continue
# Analyze discoveries with this model, and filter out false
# positives
discoveries_ids = analyze_discoveries(mm,
discoveries_ids,
verbose)
# Check if we have to run the snippet model, and, in this case, if it
# will use the pre-trained extractor or the generated one
        # Also, since the SnippetModel may be slow, run it only if we still have
# discoveries to check
if snippet_with_generator and len(discoveries_ids) == 0:
if verbose:
print('No more discoveries to filter. Skip SnippetModel.')
return list(discoveries_ids)
if snippet_with_generator:
# Generate extractor and run the model
print('Generate snippet model (it may take some time...)')
extractor_folder, extractor_name = \
self._generate_snippet_extractor(repo_url)
try:
# Load SnippetModel with the generated extractor, instead
# of the default one (i.e., the pre-trained one)
mm = ModelManager('SnippetModel',
model_extractor=extractor_folder,
binary_extractor=extractor_name)
discoveries_ids = analyze_discoveries(mm,
discoveries_ids,
verbose)
except ModuleNotFoundError:
print('SnippetModel not found. Skip it.')
return list(discoveries_ids)
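    # Example (illustrative): scan a repository and filter the results with the
    # snippet model, assuming `client` is an instance of this class and the url
    # is a placeholder.
    #
    #   new_ids = client.scan('https://github.com/user/repo',
    #                         models=['SnippetModel'],
    #                         verbose=True)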
def scan_user(self, username, category=None, models=[], exclude=[],
verbose=False, generate_snippet_extractor=False):
""" Scan all the repositories of a user on github.com.
        Find all the repositories of a user, and scan them. Please note that
        the GitHub API limits the list of repositories to a maximum of 100
        (due to pagination).
Parameters
----------
username: str
The username as on github.com
category: str, optional
If specified, scan the repo using all the rules of this category,
otherwise use all the rules in the db
models: list, optional
A list of models for the ML false positives detection
exclude: list, optional
A list of rules to exclude
verbose: bool, default `False`
Flag used to decide whether to visualize the progressbars during
the scan (e.g., during the insertion of the detections in the db)
generate_snippet_extractor: bool, default `False`
            Generate the extractor
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@pulumi.input_type
class _VappContainerState:
def __init__(__self__, *,
cpu_expandable: Optional[pulumi.Input[bool]] = None,
cpu_limit: Optional[pulumi.Input[int]] = None,
cpu_reservation: Optional[pulumi.Input[int]] = None,
cpu_share_level: Optional[pulumi.Input[str]] = None,
cpu_shares: Optional[pulumi.Input[int]] = None,
custom_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
memory_expandable: Optional[pulumi.Input[bool]] = None,
memory_limit: Optional[pulumi.Input[int]] = None,
memory_reservation: Optional[pulumi.Input[int]] = None,
memory_share_level: Optional[pulumi.Input[str]] = None,
memory_shares: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
parent_folder_id: Optional[pulumi.Input[str]] = None,
parent_resource_pool_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
Input properties used for looking up and filtering VappContainer resources.
:param pulumi.Input[bool] cpu_expandable: Determines if the reservation on a vApp
container can grow beyond the specified value if the parent resource pool has
unreserved resources. Default: `true`
:param pulumi.Input[int] cpu_limit: The CPU utilization of a vApp container will not
exceed this limit, even if there are available resources. Set to `-1` for
unlimited.
Default: `-1`
:param pulumi.Input[int] cpu_reservation: Amount of CPU (MHz) that is guaranteed
available to the vApp container. Default: `0`
:param pulumi.Input[str] cpu_share_level: The CPU allocation level. The level is a
simplified view of shares. Levels map to a pre-determined set of numeric
values for shares. Can be one of `low`, `normal`, `high`, or `custom`. When
`low`, `normal`, or `high` are specified values in `cpu_shares` will be
ignored. Default: `normal`
:param pulumi.Input[int] cpu_shares: The number of shares allocated for CPU. Used to
determine resource allocation in case of resource contention. If this is set,
`cpu_share_level` must be `custom`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] custom_attributes: A list of custom attributes to set on this resource.
:param pulumi.Input[bool] memory_expandable: Determines if the reservation on a vApp
container can grow beyond the specified value if the parent resource pool has
unreserved resources. Default: `true`
        :param pulumi.Input[int] memory_limit: The memory utilization of a vApp container will not
            exceed this limit, even if there are available resources. Set to `-1` for
            unlimited.
            Default: `-1`
        :param pulumi.Input[int] memory_reservation: Amount of memory (MB) that is guaranteed
            available to the vApp container. Default: `0`
        :param pulumi.Input[str] memory_share_level: The memory allocation level. The level is a
            simplified view of shares. Levels map to a pre-determined set of numeric
            values for shares. Can be one of `low`, `normal`, `high`, or `custom`. When
            `low`, `normal`, or `high` are specified values in `memory_shares` will be
            ignored. Default: `normal`
        :param pulumi.Input[int] memory_shares: The number of shares allocated for memory. Used to
            determine resource allocation in case of resource contention. If this is set,
            `memory_share_level` must be `custom`.
:param pulumi.Input[str] name: The name of the vApp container.
:param pulumi.Input[str] parent_folder_id: The managed object ID of
the vApp container's parent folder.
:param pulumi.Input[str] parent_resource_pool_id: The managed object ID
of the parent resource pool. This can be the root resource pool for a cluster
or standalone host, or a resource pool itself. When moving a vApp container
from one parent resource pool to another, both must share a common root
resource pool or the move will fail.
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The IDs of any tags to attach to this resource.
"""
if cpu_expandable is not None:
pulumi.set(__self__, "cpu_expandable", cpu_expandable)
if cpu_limit is not None:
pulumi.set(__self__, "cpu_limit", cpu_limit)
if cpu_reservation is not None:
pulumi.set(__self__, "cpu_reservation", cpu_reservation)
if cpu_share_level is not None:
pulumi.set(__self__, "cpu_share_level", cpu_share_level)
if cpu_shares is not None:
pulumi.set(__self__, "cpu_shares", cpu_shares)
if custom_attributes is not None:
pulumi.set(__self__, "custom_attributes", custom_attributes)
if memory_expandable is not None:
pulumi.set(__self__, "memory_expandable", memory_expandable)
if memory_limit is not None:
pulumi.set(__self__, "memory_limit", memory_limit)
if memory_reservation is not None:
pulumi.set(__self__, "memory_reservation", memory_reservation)
if memory_share_level is not None:
pulumi.set(__self__, "memory_share_level", memory_share_level)
if memory_shares is not None:
pulumi.set(__self__, "memory_shares", memory_shares)
if name is not None:
pulumi.set(__self__, "name", name)
if parent_folder_id is not None:
pulumi.set(__self__, "parent_folder_id", parent_folder_id)
if parent_resource_pool_id is not None:
pulumi.set(__self__, "parent_resource_pool_id", parent_resource_pool_id)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="cpuExpandable")
def cpu_expandable(self) -> Optional[pulumi.Input[bool]]:
"""
Determines if the reservation on a vApp
container can grow beyond the specified value if the parent resource pool has
unreserved resources. Default: `true`
"""
return pulumi.get(self, "cpu_expandable")
@cpu_expandable.setter
def cpu_expandable(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "cpu_expandable", value)
@property
@pulumi.getter(name="cpuLimit")
def cpu_limit(self) -> Optional[pulumi.Input[int]]:
"""
The CPU utilization of a vApp container will not
exceed this limit, even if there are available resources. Set to `-1` for
unlimited.
Default: `-1`
"""
return pulumi.get(self, "cpu_limit")
@cpu_limit.setter
def cpu_limit(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "cpu_limit", value)
@property
@pulumi.getter(name="cpuReservation")
def cpu_reservation(self) -> Optional[pulumi.Input[int]]:
"""
Amount of CPU (MHz) that is guaranteed
available to the vApp container. Default: `0`
"""
return pulumi.get(self, "cpu_reservation")
@cpu_reservation.setter
def cpu_reservation(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "cpu_reservation", value)
@property
@pulumi.getter(name="cpuShareLevel")
def cpu_share_level(self) -> Optional[pulumi.Input[str]]:
"""
The CPU allocation level. The level is a
simplified view of shares. Levels map to a pre-determined set of numeric
values for shares. Can be one of `low`, `normal`, `high`, or `custom`. When
`low`, `normal`, or `high` are specified values in `cpu_shares` will be
ignored. Default: `normal`
"""
return pulumi.get(self, "cpu_share_level")
@cpu_share_level.setter
def cpu_share_level(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cpu_share_level", value)
@property
@pulumi.getter(name="cpuShares")
def cpu_shares(self) -> Optional[pulumi.Input[int]]:
"""
The number of shares allocated for CPU. Used to
determine resource allocation in case of resource contention. If this is set,
`cpu_share_level` must be `custom`.
"""
return pulumi.get(self, "cpu_shares")
@cpu_shares.setter
def cpu_shares(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "cpu_shares", value)
@property
@pulumi.getter(name="customAttributes")
def custom_attributes(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A list of custom attributes to set on this resource.
"""
return pulumi.get(self, "custom_attributes")
@custom_attributes.setter
def custom_attributes(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "custom_attributes", value)
@property
@pulumi.getter(name="memoryExpandable")
def memory_expandable(self) -> Optional[pulumi.Input[bool]]:
"""
Determines if the reservation on a vApp
container can grow beyond the specified value if the parent resource pool has
unreserved resources. Default: `true`
"""
return pulumi.get(self, "memory_expandable")
@memory_expandable.setter
def memory_expandable(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "memory_expandable", value)
@property
@pulumi.getter(name="memoryLimit")
def memory_limit(self) -> Optional[pulumi.Input[int]]:
"""
        The memory utilization of a vApp container will not
exceed this limit, even if there are available resources. Set to `-1` for
unlimited.
Default: `-1`
"""
return pulumi.get(self, "memory_limit")
@memory_limit.setter
def memory_limit(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "memory_limit", value)
@property
@pulumi.getter(name="memoryReservation")
def memory_reservation(self) -> Optional[pulumi.Input[int]]:
"""
        Amount of memory (MB) that is guaranteed
available to the vApp container. Default: `0`
"""
return pulumi.get(self, "memory_reservation")
@memory_reservation.setter
def memory_reservation(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "memory_reservation", value)
@property
@pulumi.getter(name="memoryShareLevel")
def memory_share_level(self) -> Optional[pulumi.Input[str]]:
"""
        The memory allocation level. The level is a
simplified view of shares. Levels map to a pre-determined set of numeric
values for shares. Can be one of `low`, `normal`, `high`, or `custom`. When
`low`, `normal`, or `high` are specified values in `memory_shares` will be
ignored. Default: `normal`
"""
return pulumi.get(self, "memory_share_level")
@memory_share_level.setter
def memory_share_level(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "memory_share_level", value)
@property
@pulumi.getter(name="memoryShares")
def memory_shares(self) -> Optional[pulumi.Input[int]]:
"""
        The number of shares allocated for memory. Used to
determine resource allocation in case of resource contention. If this is set,
`memory_share_level` must be `custom`.
"""
return pulumi.get(self, "memory_shares")
@memory_shares.setter
def memory_shares(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "memory_shares", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the vApp container.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="parentFolderId")
def parent_folder_id(self) -> Optional[pulumi.Input[str]]:
"""
The managed object ID of
the vApp container's parent folder.
"""
return pulumi.get(self, "parent_folder_id")
@parent_folder_id.setter
def parent_folder_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "parent_folder_id", value)
@property
@pulumi.getter(name="parentResourcePoolId")
def parent_resource_pool_id(self) -> Optional[pulumi.Input[str]]:
"""
The managed object ID
of the parent resource pool. This can be the root resource pool for a cluster
or standalone host, or a resource pool itself. When moving a vApp container
from one parent resource pool to another, both must share a common root
resource pool or the move will fail.
"""
return pulumi.get(self, "parent_resource_pool_id")
@parent_resource_pool_id.setter
def parent_resource_pool_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "parent_resource_pool_id", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The IDs of any tags to attach to this resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class VappContainer(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
cpu_expandable: Optional[pulumi.Input[bool]] = None,
cpu_limit: Optional[pulumi.Input[int]] = None,
cpu_reservation: Optional[pulumi.Input[int]] = None,
cpu_share_level: Optional[pulumi.Input[str]] = None,
                 cpu_shares:
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name,too-many-instance-attributes, too-many-arguments
"""
Copyright 2019 <NAME>
Copyright 2015 <NAME>.
FilterPy library.
http://github.com/rlabbe/filterpy
Documentation at:
https://filterpy.readthedocs.org
Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the readme.MD file
for more information.
"""
from copy import deepcopy
from math import log, exp, sqrt
import sys
import numpy as np
from numpy import dot, outer, eye, zeros, ones, diag
import scipy.linalg as linalg
from filterpy.stats import logpdf
from filterpy.common import pretty_str, reshape_z
'''
UDU decomposition:
P = U * diag(D) * U^T
'''
def udu(p):
if 2 != len(p.shape):
return None
if p.shape[0] != p.shape[1]:
return None
n = p.shape[0]
u = zeros((n, n))
d = zeros((n))
d[n-1] = p[n-1,n-1]
u[:,n-1] = p[:,n-1] / d[n-1]
for j in range(n-2, -1, -1):
dd = d[j+1:]
c = dd * u[j,j+1:] #dd is meant to be diag(d[j+1:])
d[j] = p[j,j] - np.dot(u[j,j+1:].T, c)
if d[j] == 0:
return None
for i in range(j, -1, -1):
c = dd * u[j,j+1:]
u[i,j] = (p[i,j] - np.dot(u[i,j+1:].T, c))/d[j]
return u, d
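# Illustrative sketch (not part of the original module): quick self-check that
# udu() factors a small symmetric positive-definite matrix back into
# P = U * diag(D) * U^T.
def _udu_demo():
    p = np.array([[4.0, 2.0],
                  [2.0, 3.0]])
    u, d = udu(p)
    # u is unit upper triangular, d is the diagonal, and the product
    # reconstructs the original matrix
    assert np.allclose(p, u @ np.diag(d) @ u.T)
    return u, d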
'''
MWGS update:
U * diag(D) * U^T = w * diag(d) * w^T
Params:
w - n*k float array, full rank
d - length-k float array
where k > n
Note: w is modified in place.
Returns:
u - n*n float unit upper triangular array
D - length-n float array
'''
def mwgs(w,d):
if 1 != len(d.shape):
return None
if 2 != len(w.shape):
return None
if w.shape[1] != d.shape[0]:
return None
if w.shape[0] >= d.shape[0]:
return None
n = w.shape[0]
u = np.eye(n)
D = np.zeros((n))
for i in range(n-1, -1, -1):
c = w[i,:] * d
D[i] = np.dot(w[i,:], c)
if D[i] <= 0:
            # How about partial reset heuristics here?
return None
dd = c/D[i]
for j in range(0, i):
u[j,i] = np.dot(dd, w[j,:])
w[j,:] -= u[j,i] * w[i,:]
return u, D
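# Illustrative sketch (not part of the original module): mwgs() re-factors
# w * diag(d) * w^T (with k > n columns) into a unit upper triangular u and a
# diagonal D. Note that mwgs modifies w in place, hence the copy below.
def _mwgs_demo():
    w = np.array([[1.0, 2.0, 0.0],
                  [0.0, 1.0, 1.0]])
    d = np.array([1.0, 2.0, 3.0])
    p_ref = w @ np.diag(d) @ w.T          # reference covariance
    u, D = mwgs(w.copy(), d)
    assert np.allclose(p_ref, u @ np.diag(D) @ u.T)
    return u, D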
class UDExtendedKalmanFilter(object):
""" Implements an UD modification of extended Kalman filter (EKF).
You are responsible for setting the various state variables to
reasonable values; the defaults will not give you a functional filter.
You will have to set the following attributes after constructing this
object for the filter to perform properly. Please note that there are
various checks in place to ensure that you have made everything the
'correct' size. However, it is possible to provide incorrectly sized
arrays such that the linear algebra can not perform an operation.
It can also fail silently - you can end up with matrices of a size that
allows the linear algebra to work, but are the wrong shape for the problem
you are trying to solve.
Parameters
----------
dim_x : int
Number of state variables for the Kalman filter. For example, if
you are tracking the position and velocity of an object in two
dimensions, dim_x would be 4.
This is used to set the default size of P, Q, and u
dim_z : int
        Number of measurement inputs. For example, if the sensor
provides you with position in (x,y), dim_z would be 2.
Attributes
----------
x : numpy.array(dim_x, 1)
State estimate vector
P : numpy.array(dim_x, dim_x)
Covariance matrix
x_prior : numpy.array(dim_x, 1)
Prior (predicted) state estimate. The *_prior and *_post attributes
        are for convenience; they store the prior and posterior of the
current epoch. Read Only.
P_prior : numpy.array(dim_x, dim_x)
Prior (predicted) state covariance matrix. Read Only.
x_post : numpy.array(dim_x, 1)
Posterior (updated) state estimate. Read Only.
P_post : numpy.array(dim_x, dim_x)
Posterior (updated) state covariance matrix. Read Only.
R : numpy.array(dim_z, dim_z)
Measurement noise matrix
Q : numpy.array(dim_x, dim_x)
Process noise matrix
F : numpy.array()
State Transition matrix
H : numpy.array(dim_x, dim_x)
Measurement function
y : numpy.array
Residual of the update step. Read only.
K : numpy.array(dim_x, dim_z)
Kalman gain of the update step. Read only.
S : numpy.array
        System uncertainty projected to measurement space. Read only.
z : ndarray
Last measurement used in update(). Read only.
log_likelihood : float
log-likelihood of the last measurement. Read only.
likelihood : float
        likelihood of the last measurement. Read only.
Computed from the log-likelihood. The log-likelihood can be very
small, meaning a large negative value such as -28000. Taking the
exp() of that results in 0.0, which can break typical algorithms
which multiply by this value, so by default we always return a
number >= sys.float_info.min.
mahalanobis : float
mahalanobis distance of the innovation. E.g. 3 means measurement
was 3 standard deviations away from the predicted value.
Read only.
"""
def __init__(self, dim_x, dim_z, dim_u=0):
self.dim_x = dim_x
self.dim_z = dim_z
self.dim_u = dim_u
self.x = zeros((dim_x, 1)) # state
# uncertainty covariance
self.U = eye(dim_x)
self.D = ones((dim_x))
self.B = 0 # control transition matrix
self.F = np.eye(dim_x) # state transition matrix
        # measurement uncertainty (R = Ur * diag(Dr) * Ur^T)
        self.Dm = eye(dim_z)  # decorrelation matrix (inverse of Ur)
        self.Ur = eye(dim_z)  # unit upper triangular factor of R
        self.Dr = ones((dim_z))  # diagonal factor of R
# process uncertainty
self.Uq = eye(dim_x)
self.Dq = ones((dim_x))
z = np.array([None]*self.dim_z)
self.z = reshape_z(z, self.dim_z, self.x.ndim)
# residual is computed during the innovation step. We
# save them so that in case you want to inspect it for various
# purposes
self.y = zeros((dim_z, 1)) # residual
self.S = np.zeros((dim_z, dim_z)) # system uncertainty
self.SI = np.zeros((dim_z, dim_z)) # inverse system uncertainty
self._log_likelihood = log(sys.float_info.min)
self._likelihood = sys.float_info.min
self._mahalanobis = None
# these will always be a copy of x,P after predict() is called
self.x_prior = self.x.copy()
self.U_prior = self.U.copy()
self.D_prior = self.D.copy()
# these will always be a copy of x,P after update() is called
self.x_post = self.x.copy()
self.U_post = self.U.copy()
self.D_post = self.D.copy()
@property
def Q(self):
""" Process uncertainty"""
return dot(self.Uq, dot(diag(self.Dq), self.Uq.T))
@Q.setter
def Q(self, value):
""" Process uncertainty"""
self.Uq, self.Dq = udu(value)
@property
def P(self):
""" covariance matrix"""
return dot(self.U, dot(diag(self.D), self.U.T))
@property
def P_prior(self):
""" covariance matrix of the prior"""
return dot(self.U_prior, dot(diag(self.D_prior), self.U_prior.T))
@property
def P_post(self):
""" covariance matrix of the posterior"""
return dot(self.U_post, dot(diag(self.D_post), self.U_post.T))
@P.setter
def P(self, value):
""" covariance matrix"""
        self.U, self.D = udu(value)
@property
def R(self):
""" measurement uncertainty"""
return dot(self.Ur, dot(diag(self.Dr), self.Ur.T))
@R.setter
def R(self, value):
""" measurement uncertainty"""
self.Ur, self.Dr = udu(value)
self.Dm = linalg.inv(self.Ur)
def predict_x(self, u=0):
"""
Predicts the next state of X. If you need to
compute the next state yourself, override this function. You would
need to do this, for example, if the usual Taylor expansion to
generate F is not providing accurate results for you.
"""
self.x = dot(self.F, self.x) + dot(self.B, u)
def predict(self, u=0):
"""
Predict next state (prior) using the Kalman filter state propagation
equations.
Parameters
----------
u : np.array
Optional control vector. If non-zero, it is multiplied by B
to create the control input into the system.
"""
self.predict_x(u)
W = np.concatenate((dot(self.F, self.U), self.Uq), axis=1)
D = np.concatenate((self.D, self.Dq))
self.U, self.D = mwgs(W, D)
# save prior
self.x_prior = np.copy(self.x)
self.U_prior = np.copy(self.U)
self.D_prior = np.copy(self.D)
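        # Illustrative note: the concatenation + mwgs step above is the UD
        # analogue of P_prior = F P F^T + Q, since
        #   [F U, Uq] diag([D, Dq]) [F U, Uq]^T
        #       = F (U diag(D) U^T) F^T + Uq diag(Dq) Uq^T = F P F^T + Q,
        # and mwgs re-factors that sum into a unit upper triangular U and a
        # diagonal D without ever forming P explicitly.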
def _scalar_update(self, axis_residual, axis_hjacobian, r):
"""Joseph scalar update
Parameters
----------
axis_residual : function which returns current axis residual
returns scalar, float.
axis_hjacobian : function which returns current axis HJacobian row
returns np.array, float.
        r : scalar, float, current axis measurement dispersion (noise variance)
"""
u, d, n = self.U, self.D, self.dim_x
nu = axis_residual(self.x)
h = axis_hjacobian(self.x)
f = h.dot(u)
v = d * f
a = r + f.dot(v)
K = u.dot(v / a).reshape((n, 1))
WW = np.concatenate((outer(K, f) - u, K), axis = 1)
DD = np.concatenate((d, np.array([r])))
self.U, self.D = mwgs(WW, DD)
self.x += (K*nu).reshape(self.x.shape)
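        # Illustrative note: in conventional covariance form the gain above is
        #   K = P h^T / (h P h^T + r),
        # since f = h U and v = D f give f.dot(v) = h U D U^T h^T = h P h^T and
        # K = U (v / a) = P h^T / a. The mwgs call then re-factors the updated
        # covariance into U and D without ever forming P explicitly.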
def update(self, z, HJacobian, Hx, R=None, args=(), hx_args=(),
residual=np.subtract):
""" Performs the update innovation of the extended Kalman filter.
Parameters
----------
z : np.array
measurement for this step.
If `None`, posterior is not computed
HJacobian : function
function which computes the Jacobian of the H matrix (measurement
function). Takes state variable (self.x) as input, returns H.
Hx : function
            function which takes as input the state variable
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0xC0,
0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x01, 0x00, 0x00, 0x00, 0x00,
0x08, 0x02, 0x04, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01, 0x88, 0x60, 0x0F,
0xE3, 0x83, 0xF9, 0xC3, 0xF9, 0xE0, 0x0F, 0xFC, 0x00, 0xFF, 0xC0, 0x07,
0xF8, 0x06, 0x3E, 0x0F, 0x03, 0xF8, 0x60, 0x7E, 0x00, 0xE0, 0x7E, 0x78,
0x07, 0xF7, 0x01, 0xFF, 0xE0, 0x03, 0xFF, 0x00, 0xFF, 0xE0, 0x01, 0xFE,
0x00, 0x1F, 0xFF, 0x00, 0xFF, 0xC0, 0x3F, 0xFC, 0x03, 0x87, 0x40, 0x0F,
0xFE, 0x00, 0xFF, 0x00, 0x03, 0xFB, 0x00, 0xFF, 0x00, 0x07, 0x97, 0xC0,
0x0F, 0xF0, 0x0D, 0xFF, 0x00, 0x0F, 0xB8, 0x01, 0xFE, 0x00, 0x3F, 0xB8,
0x03, 0x3E, 0xC0, 0x3F, 0xF8, 0x06, 0x7F, 0x00, 0xCF, 0x80, 0x03, 0xFE,
0x00, 0x83, 0xE0, 0x00, 0x7C, 0x0F, 0xFF, 0x01, 0xDF, 0xC0, 0x0F, 0xC7,
0xE0, 0x10, 0x1E, 0x07, 0x87, 0xC0, 0xF1, 0xF8, 0x63, 0x1C, 0xE0, 0x00,
0xFC, 0x00, 0x1F, 0x07, 0x07, 0x81, 0xE7, 0xE0, 0xDE, 0x3C, 0x0F, 0x8F,
0x01, 0xFE, 0x7C, 0x3F, 0xBD, 0x83, 0xF1, 0xC0, 0x3D, 0xC4, 0x78, 0x3E,
0x0E, 0x7F, 0x80, 0xFF, 0xE0, 0x03, 0xDC, 0x43, 0x07, 0xFC, 0x03, 0xC0,
0x70, 0xF8, 0x7E, 0x1F, 0x0F, 0x07, 0xC1, 0xF0, 0x78, 0x3E, 0x3F, 0x07,
0xEF, 0x81, 0xC1, 0xF0, 0xF8, 0x0F, 0xFC, 0x07, 0xFE, 0x03, 0xE0, 0x00,
0x38, 0x1F, 0x8F, 0x87, 0xC0, 0xF0, 0x1F, 0x07, 0xC7, 0x81, 0xE0, 0xFE,
0x18, 0x0F, 0xC0, 0x07, 0xFF, 0x80, 0xFF, 0x80, 0x0F, 0xFE, 0x07, 0xCF,
0xC1, 0x00, 0xFC, 0x04, 0x0F, 0x87, 0x80, 0xFF, 0x78, 0x1F, 0xFE, 0x07,
0x7F, 0x03, 0xEF, 0xE0, 0x00, 0xFF, 0x00, 0x1F, 0xE0, 0x1F, 0x8F, 0x0F,
0x03, 0xC1, 0xE0, 0x7C, 0xF8, 0x38, 0x7F, 0x07, 0x1F, 0x8F, 0x86, 0x31,
0xFE, 0x00, 0x0F, 0xF0, 0x01, 0xFC, 0xC0, 0x1F, 0xCF, 0x00, 0x0F, 0xE0,
0x30, 0x7E, 0x3C, 0x1F, 0xFC, 0x01, 0xFF, 0xC0, 0x3F, 0xF8, 0x01, 0xFF,
0x00, 0x03, 0xFC, 0x00, 0x07, 0xC7, 0x80, 0x3F, 0xC0, 0xF0, 0x7C, 0x00,
0x1F, 0xE0, 0xF0, 0x70, 0xF8, 0x3F, 0x8C, 0x07, 0x83, 0xE1, 0xF0, 0xFC,
0x00, 0x1F, 0x80, 0x07, 0xC3, 0x03, 0x73, 0x80, 0x76, 0x78, 0x3C, 0x00,
0xF0, 0x38, 0x38, 0x1F, 0x0E, 0x1C, 0xF0, 0x00, 0xF0, 0xC0, 0x1C, 0x38,
0x01, 0xCE, 0x38, 0xEE, 0x7F, 0xFC, 0xE0, 0x7F, 0x80, 0x0F, 0xF0, 0x00,
0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x7F, 0xFF, 0xFC, 0x07, 0xFF, 0x1E, 0x7F,
0xCF, 0x9F, 0x80, 0x00, 0x00, 0x30, 0x0E, 0x00, 0xFF, 0x80, 0xFF, 0xF0,
0x18, 0x00, 0x00, 0x00, 0x01, 0x07, 0x38, 0xFF, 0x00, 0x3F, 0xF0, 0x1B,
0x3F, 0xFE, 0x00, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFC, 0x7F, 0xFE, 0x07,
0xFF, 0xC3, 0xDF, 0xF8, 0x1C, 0x3F, 0xF8, 0x00, 0xEF, 0x00, 0x1F, 0xF7,
0x0F, 0xFF, 0x01, 0xC0, 0xFE, 0xC0, 0x1F, 0xF8, 0x00, 0x07, 0x80, 0x00,
0x00, 0x00, 0x0F, 0x00, 0x00, 0xFF, 0xFC, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8,
0x1C, 0x0F, 0x03, 0x8F, 0xC0, 0x00, 0xFE, 0x3E, 0x7F, 0xFF, 0xFF, 0xDF,
0xFF, 0xFF, 0xF8, 0x00, 0x7E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x07,
0xFF, 0xC0, 0xE7, 0xFF, 0xF0, 0x3F, 0xF0, 0x04, 0x1F, 0xC0, 0x07, 0xFE,
0x00, 0x10, 0xFC, 0x00, 0x00, 0x30, 0x01, 0xDF, 0x00, 0x1F, 0x01
),
"W": (
0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3F, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0x9C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFF,
0xFF, 0xFC, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00,
0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00,
0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x00, 0x00,
0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00,
0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x7F,
0xFF, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x1F, 0xFF, 0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xFF,
0xFF, 0xC0, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x00, 0x0F,
0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFE, 0x00, 0x00,
0x0F, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0x00,
0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00,
0x00, 0x7F, 0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFF, 0xC0,
0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0x00, 0x00,
0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x3F, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFF,
0x00, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00,
0x3F, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0x80, 0x00,
0x00, 0x07, 0x80, 0xF0, 0x08, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00,
0xC7, 0xFF, 0xFF, 0xC3, 0xC0, 0x00, 0x00, 0x40, 0x00, 0x00, 0x3F, 0xFF,
0xFF, 0xFC, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x00,
0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x00, 0x0F, 0x8F, 0xFF,
0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xF8, 0x00,
0x00, 0x00, 0x00, 0x00, 0x33, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x07,
0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x03, 0xFF, 0xFE, 0x0F, 0xFF, 0xFF, 0xFF,
0xE0, 0x00, 0x03, 0x01, 0xFF, 0xFF, 0x9F, 0x80, 0x00, 0x0C, 0x03, 0xE0,
0x01, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFE, 0x00,
0x00, 0x70, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x07,
0xFF, 0xFF, 0xC0, 0x0C, 0x7F, 0xFF, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF,
0xC0, 0x00, 0x00, 0x01, 0xF8, 0x30, 0x00, 0x0F, 0xFF, 0xF8, 0x00, 0x00,
0x01, 0xFF, 0xFE, 0x00, 0x00, 0x0F, 0xFF, 0xF0, 0x00, 0x00, 0x03, 0xFF,
0xFF, 0xFE, 0x00, 0x00, 0x03, 0xFF, 0xE0, 0x00, 0x1F, 0xFF, 0xFF, 0x00,
0x00, 0x00, 0x7F, 0xFF, 0xFC, 0x00, 0x00, 0xFF, 0xE0, 0x00, 0x00, 0x00,
0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x03, 0xFF, 0xE0, 0x00, 0x00, 0x7F, 0xFF,
0x00, 0x00, 0x00, 0x3F, 0xFC, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xC0
),
"Y": (
0x00, 0x00, 0x30, 0x00, 0x04, 0x00, 0x06, 0x00, 0xC7, 0x80, 0x00, 0x00,
0x00, 0x00, 0xF0, 0x03, 0x00, 0xC0, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x60, 0x03, 0xC7, 0xFC, 0x7F, 0x00, 0xFC, 0x07, 0x83, 0xC0,
0x7F, 0xF1, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0xFC, 0x02, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x3E, 0x07, 0xF7, 0xFF, 0xFF, 0xF9, 0xFF, 0xFF,
0x87, 0xF0, 0x0C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x78, 0x3F,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC3, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x01, 0xF0, 0x1F, 0xEF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F,
0xC0, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE1, 0xF8, 0xFF, 0xFF,
0xFF, 0xF8, 0x7F, 0x00, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1F,
0x01, 0xFC, 0xFF, 0x9F, 0xFF, 0xFF, 0xFF, 0xE1, 0xFE, 0x00, 0xF0, 0x00,
0x00, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE0, 0xFE,
0x03, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x03, 0xF0, 0xFC,
0x3F, 0xFF, 0xFF, 0xFF, 0xC3, 0xF8, 0x01, 0xF8, 0x00, 0x80, 0x00, 0x00,
0x00, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xF0, 0xFF, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x1F, 0x03, 0xFF, 0xFE, 0x1F, 0xFF, 0xFF,
0xFF, 0xE0, 0xFC, 0x03, 0xE0, 0x00, 0x00, 0x00, 0xF0, 0x1F, 0xF3, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0xFC, 0x00, 0xC0, 0x00, 0x00, 0x00, 0x1C,
        0x00, 0x80, 0xFF, 0xFF, 0xFF,
import re
import fasttext
# To download the fasttext model:
# wget -O /tmp/lid.176.bin https://dl.fbaipublicfiles.com/fasttext/supervised-models/lid.176.bin
import sentencepiece
import kenlm
import pathlib
from languages_id import langs_id
from parameters_filtering import parameters_filtering
from normalization import normalization
from stopwords import stopwords
from badwords import badwords
class LoadParameters:
@staticmethod
def load_parameters(lang_oscar_id):
if lang_oscar_id in parameters_filtering:
param = parameters_filtering[lang_oscar_id]
else:
param = parameters_filtering["default"]
return param
@staticmethod
def load_stopwords(lang_oscar_id):
stopwords_lang_id = langs_id.loc[
langs_id["oscar_id"] == lang_oscar_id, "stopwords_id"
].iloc[0]
if stopwords_lang_id:
stopwords_lang = set(stopwords[stopwords_lang_id])
else:
stopwords_lang = None
return stopwords_lang
@staticmethod
def load_badwords(lang_oscar_id):
badwords_lang_id = langs_id.loc[
langs_id["oscar_id"] == lang_oscar_id, "badwords_id"
].iloc[0]
if badwords_lang_id:
badwords_lang = set(badwords[badwords_lang_id])
else:
badwords_lang = None
return badwords_lang
@staticmethod
def load_model_lang_id(lang_oscar_id, path_fasttext_model):
fasttext_lang_id = langs_id.loc[
langs_id["oscar_id"] == lang_oscar_id, "fasttext_id"
].iloc[0]
if fasttext_lang_id:
model_lang_id = fasttext.load_model(path_fasttext_model)
else:
model_lang_id = None
return model_lang_id
@staticmethod
def load_sentencepiece_model(lang_oscar_id, path_sentencepiece_model):
sentencepiece_lang_id = langs_id.loc[
langs_id["oscar_id"] == lang_oscar_id, "sentencepiece_id"
].iloc[0]
if sentencepiece_lang_id:
sentencepiece_model = sentencepiece.SentencePieceProcessor()
sentencepiece_model.load(path_sentencepiece_model)
else:
sentencepiece_model = None
return sentencepiece_model
@staticmethod
def load_kenlm_model(lang_oscar_id, path_kenlm_model):
kenlm_lang_id = langs_id.loc[
langs_id["oscar_id"] == lang_oscar_id, "kenlm_id"
].iloc[0]
if kenlm_lang_id:
kenlm_model = kenlm.Model(path_kenlm_model)
else:
kenlm_model = None
return kenlm_model
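# Example (illustrative): loading the per-language resources used below.
# "en" is an assumed OSCAR language id; the fasttext path matches the download
# hint at the top of this file, while the kenlm path is a placeholder.
#
#   param = LoadParameters.load_parameters("en")
#   stopwords_en = LoadParameters.load_stopwords("en")
#   lang_id_model = LoadParameters.load_model_lang_id("en", "/tmp/lid.176.bin")
#   kenlm_en = LoadParameters.load_kenlm_model("en", "/path/to/en.arpa.bin")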
class ModifyingSentences:
@staticmethod
def remove_empty_el_from_list(list_):
return [el for el in list_ if el]
@staticmethod
def remove_non_printing_characters(sentence, non_printing_characters_re):
return non_printing_characters_re.sub("", sentence)
@staticmethod
def uniform_whitespace(
sentence,
whitespace=[
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
"",
"",
],
):
"""There are different whitespace characters."""
whitespace = set(whitespace)
sentence = "".join(
[char if char not in whitespace else " " for char in sentence]
)
return sentence
@staticmethod
def replace_digits_with_zeros(sentence, digits_re):
return digits_re.sub("0", sentence)
@staticmethod
def replace_unicode_punctuation(sentence, unicode_punctuation):
return "".join(unicode_punctuation.get(c, c) for c in sentence)
@staticmethod
def normalization(
sentence,
remove_non_printing_characters,
strip,
lower_case,
uniform_whitespace,
replace_digits_with_zeros,
replace_unicode_punctuation,
non_printing_characters_re=normalization["non_printing_characters_re"],
digits_re=normalization["digits_re"],
unicode_punctuation=normalization["unicode_punctuation"],
):
if remove_non_printing_characters:
sentence = ModifyingSentences.remove_non_printing_characters(
sentence, non_printing_characters_re
)
if strip:
sentence = sentence.strip()
if not sentence:
return sentence
if lower_case:
sentence = sentence.lower()
if uniform_whitespace:
sentence = ModifyingSentences.uniform_whitespace(sentence)
if replace_digits_with_zeros:
sentence = ModifyingSentences.replace_digits_with_zeros(sentence, digits_re)
if replace_unicode_punctuation:
sentence = ModifyingSentences.replace_unicode_punctuation(
sentence, unicode_punctuation
)
return sentence
@staticmethod
def tokenization(sentence, sentencepiece_model, join_on_whitespace):
sentence_tokenized = sentencepiece_model.encode_as_pieces(sentence)
if join_on_whitespace:
sentence_tokenized = " ".join(sentence_tokenized)
return sentence_tokenized
@staticmethod
def split_on_whitespace(
sentence,
new_line=False,
tab=False,
):
"""This method also removes concatenated spaces."""
sep = [" "] + new_line * ["\n"] + tab * ["\t"]
sep = "|".join(sep)
split_sentence = re.split(sep, sentence)
split_sentence = ModifyingSentences.remove_empty_el_from_list(split_sentence)
return split_sentence
@staticmethod
def strip(sentence, strip_characters):
"""Way faster than sentence.strip(strip_characters)
since strip_characters is now a set instead of a str,
and it contains a lot of elements (all the emojis)."""
if not sentence:
return sentence
beg_ind = 0
end_ind = len(sentence)
for i in range(len(sentence)):
if sentence[i] in strip_characters:
beg_ind += 1
else:
break
for i in range(1, len(sentence) + 1):
if sentence[-i] in strip_characters:
end_ind -= 1
else:
break
sentence_stripped = sentence[beg_ind:end_ind]
return sentence_stripped
@staticmethod
def get_words_from_sentence(
sentence, sentencepiece_model_tok, lower_case, strip_characters
):
"""Get words from a sentence. Non reversible since the sentence
is split on multiple characters, words are stripped of
special characters and characters are converted to lower case.
Useful to compute ratios, like the stopwords ratio."""
if sentencepiece_model_tok:
sentence_normalized = ModifyingSentences.normalization(
sentence=sentence,
remove_non_printing_characters=True,
strip=True,
lower_case=True,
uniform_whitespace=True,
replace_digits_with_zeros=True,
replace_unicode_punctuation=True,
)
words = ModifyingSentences.tokenization(
sentence_normalized, sentencepiece_model_tok, join_on_whitespace=False
)
else:
words = ModifyingSentences.split_on_whitespace(
sentence, new_line=True, tab=True
)
if lower_case:
words = [word.lower() for word in words]
if strip_characters:
words = [ModifyingSentences.strip(word, strip_characters) for word in words]
words = ModifyingSentences.remove_empty_el_from_list(words)
return words
@staticmethod
def split_on_newline_tab_whitespace(sentence):
"""First split on "\n", then on "\t", then on " "."""
sentences = sentence.split("\n")
sentences = [sentence.split("\t") for sentence in sentences]
sentences = [
[
ModifyingSentences.split_on_whitespace(subsentence)
for subsentence in sentence
]
for sentence in sentences
]
return sentences
@staticmethod
def merge_on_whitespace_tab_newline(sentences):
"""Invert the method split_on_newline_tab_whitespace.
Removes concatenated separators."""
sentences = [
[" ".join(subsentence) for subsentence in sentence if subsentence]
for sentence in sentences
]
sentences = ["\t".join(sentence) for sentence in sentences if sentence]
if not sentences:
return ""
sentence = "\n".join(sentences)
return sentence
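    # Example (illustrative): the two helpers above are inverses of each other
    # (up to repeated separators):
    #
    #   parts = ModifyingSentences.split_on_newline_tab_whitespace("a b\tc\nd")
    #   # parts == [[["a", "b"], ["c"]], [["d"]]]
    #   ModifyingSentences.merge_on_whitespace_tab_newline(parts)  # "a b\tc\nd"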
@staticmethod
def should_keep_word_with_incorrect_substrings(
word, strip_characters, incorrect_word_substrings
):
word = ModifyingSentences.strip(word, strip_characters)
should_keep = all(
[(i_substr not in word) for i_substr in incorrect_word_substrings]
)
return should_keep
@staticmethod
def remove_words_with_incorrect_substrings(
sentence,
strip_characters,
incorrect_word_substrings,
):
sentences = ModifyingSentences.split_on_newline_tab_whitespace(sentence)
sentences = [
[
[
word
for word in subsentence
if ModifyingSentences.should_keep_word_with_incorrect_substrings(
word, strip_characters, incorrect_word_substrings
)
]
for subsentence in sentence
]
for sentence in sentences
]
sentence = ModifyingSentences.merge_on_whitespace_tab_newline(sentences)
return sentence
@staticmethod
def should_keep_long_word(
word, strip_characters, special_characters, length_word_max_cutoff
):
"""If the word is too long but it contains only one
special character, it might be a concatenation of one word,
a punctuation, and another word, with no space between them.
In this case, we give the word a pass."""
if len(word) <= length_word_max_cutoff:
return True
word = ModifyingSentences.strip(word, strip_characters)
if not word: # The word consisted only of strip characters
return False
if len(word) <= length_word_max_cutoff:
return True
num_special_char = len([char for char in word if char in special_characters])
if num_special_char == 1:
return True
return False
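    # Example (illustrative): with length_word_max_cutoff=10 and "," as a
    # special character, the 11-character token "hello,world" is kept because
    # it contains exactly one special character (likely two words joined by a
    # punctuation mark), while an 11-character token with no special character
    # would be dropped:
    #
    #   ModifyingSentences.should_keep_long_word(
    #       "hello,world", strip_characters=set(),
    #       special_characters={","}, length_word_max_cutoff=10)  # True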
    @staticmethod
    def remove_long_words(
sentence,
strip_characters,
special_characters,
length_word_max_cutoff,
):
sentences = ModifyingSentences.split_on_newline_tab_whitespace(sentence)
sentences = [
[
[
word
for word in subsentence
if ModifyingSentences.should_keep_long_word(
word,
strip_characters,
special_characters,
length_word_max_cutoff,
)
]
for subsentence in sentence
]
for sentence in sentences
]
sentence = ModifyingSentences.merge_on_whitespace_tab_newline(sentences)
return sentence
@staticmethod
def modifying_sentences(
sentence,
cond_uniform_whitespace,
cond_replace_unicode_punctuation,
cond_remove_words_with_incorrect_substrings,
strip_characters,
incorrect_word_substrings,
cond_remove_long_words,
special_characters,
length_word_max_cutoff,
):
sentence = ModifyingSentences.normalization(
sentence=sentence,
remove_non_printing_characters=False,
strip=True,
lower_case=False,
uniform_whitespace=cond_uniform_whitespace,
replace_digits_with_zeros=False,
replace_unicode_punctuation=cond_replace_unicode_punctuation,
)
if cond_remove_words_with_incorrect_substrings:
sentence = ModifyingSentences.remove_words_with_incorrect_substrings(
sentence,
strip_characters,
incorrect_word_substrings,
)
if cond_remove_long_words:
sentence = ModifyingSentences.remove_long_words(
sentence,
strip_characters,
special_characters,
length_word_max_cutoff,
)
return sentence
class OscarModifyingSentences:
def __init__(self, lang_oscar_id):
self.lang_oscar_id = lang_oscar_id
self.param = LoadParameters.load_parameters(lang_oscar_id)
def __call__(self, example):
example["text"] = ModifyingSentences.modifying_sentences(
sentence=example["text"],
cond_uniform_whitespace=self.param["cond_uniform_whitespace"],
cond_replace_unicode_punctuation=self.param[
"cond_replace_unicode_punctuation"
],
cond_remove_words_with_incorrect_substrings=self.param[
"cond_remove_words_with_incorrect_substrings"
],
strip_characters=self.param["strip_characters"],
incorrect_word_substrings=self.param["incorrect_word_substrings"],
cond_remove_long_words=self.param["cond_remove_long_words"],
special_characters=self.param["special_characters"],
length_word_max_cutoff=self.param["length_word_max_cutoff"],
)
return example
def __reduce__(self):
return (self.__class__, (self.lang_oscar_id,))
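# Example (illustrative): OscarModifyingSentences is a picklable callable meant
# to be mapped over records shaped like {"text": ...}, e.g. with
# datasets.Dataset.map. "en" is an assumed OSCAR language id.
#
#   cleaner = OscarModifyingSentences("en")
#   cleaned = cleaner({"text": "Some   raw   OSCAR text"})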
class Filtering:
@staticmethod
def check_number_words(
sentence,
sentencepiece_model_tok,
strip_characters,
number_words_min_cutoff,
number_words_max_cutoff,
):
words = ModifyingSentences.get_words_from_sentence(
sentence,
sentencepiece_model_tok,
lower_case=False,
strip_characters=strip_characters,
)
cond = (len(words) >= number_words_min_cutoff) and (
len(words) <= number_words_max_cutoff
)
return cond
@staticmethod
    def compute_special_characters_ratio(sentence, special_characters):
        # Guard against empty documents to avoid a division by zero
        if not sentence:
            return 0
        special_characters_ratio = len(
            [char for char in sentence if char in special_characters]
        ) / len(sentence)
        return special_characters_ratio
@staticmethod
def check_special_characters(
sentence,
special_characters,
special_characters_max_cutoff,
):
special_characters_ratio = Filtering.compute_special_characters_ratio(
sentence, special_characters
)
cond = special_characters_ratio <= special_characters_max_cutoff
return cond
@staticmethod
def compute_stopwords_ratio(
sentence, sentencepiece_model_tok, strip_characters, stopwords
):
words = ModifyingSentences.get_words_from_sentence(
sentence,
sentencepiece_model_tok,
lower_case=True,
strip_characters=strip_characters,
)
if not words:
return 0
stopwords_ratio = len([word for word in words if word in stopwords]) / len(
words
)
return stopwords_ratio
@staticmethod
def check_stopwords(
sentence,
sentencepiece_model_tok,
strip_characters,
stopwords,
stopwords_min_cutoff,
):
cond = True
if stopwords:
stopwords_ratio = Filtering.compute_stopwords_ratio(
sentence, sentencepiece_model_tok, strip_characters, stopwords
)
cond = stopwords_ratio >= stopwords_min_cutoff
return cond
@staticmethod
def compute_badwords_ratio(
sentence, sentencepiece_model_tok, strip_characters, badwords
):
words = ModifyingSentences.get_words_from_sentence(
sentence,
sentencepiece_model_tok,
lower_case=True,
strip_characters=strip_characters,
)
if not words:
return 0
badwords_ratio = len([word for word in words if word in badwords]) / len(words)
return badwords_ratio
@staticmethod
def check_badwords(
sentence,
sentencepiece_model_tok,
strip_characters,
badwords,
badwords_max_cutoff,
):
cond = True
if badwords:
badwords_ratio = Filtering.compute_badwords_ratio(
sentence, sentencepiece_model_tok, strip_characters, badwords
)
cond = badwords_ratio <= badwords_max_cutoff
return cond
@staticmethod
def compute_lang_id_pred_score(sentence, model_lang_id):
sentence = sentence.lower().replace("\n", " ")
pred = model_lang_id.predict(sentence)
lang_pred_fasttext_id = pred[0][0].replace("__label__", "")
score_pred = pred[1][0]
lang_pred_oscar_id = langs_id.loc[
langs_id["fasttext_id"] == lang_pred_fasttext_id, "oscar_id"
]
if len(lang_pred_oscar_id) > 0:
lang_pred_oscar_id = lang_pred_oscar_id.iloc[0]
else:
lang_pred_oscar_id = "unknown"
return lang_pred_oscar_id, score_pred
@staticmethod
def check_lang_id(
sentence,
lang_oscar_id,
model_lang_id,
lang_id_min_cutoff,
):
cond = True
if model_lang_id:
lang_pred_oscar_id, score_pred = Filtering.compute_lang_id_pred_score(
sentence, model_lang_id
)
cond = (lang_pred_oscar_id == lang_oscar_id) and (
score_pred >= lang_id_min_cutoff
)
return cond
@staticmethod
def compute_perplexity_score(doc, sentencepiece_model, kenlm_model):
doc = ModifyingSentences.normalization(
sentence=doc,
remove_non_printing_characters=True,
strip=True,
lower_case=True,
uniform_whitespace=True,
replace_digits_with_zeros=True,
replace_unicode_punctuation=True,
)
doc = ModifyingSentences.tokenization(
doc, sentencepiece_model, join_on_whitespace=True
)
doc_log_score, doc_length = 0, 0
for line in doc.split("\n"):
log_score = kenlm_model.score(line)
length = len(line.split()) + 1
doc_log_score += log_score
doc_length += length
pp_score = 10.0 ** (-doc_log_score / doc_length)
pp_score = round(pp_score, 1)
return pp_score
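    # Example (illustrative): kenlm scores are log10 probabilities, so if the
    # tokenized document gets doc_log_score = -200 over doc_length = 100
    # tokens, the perplexity is 10 ** (200 / 100) = 100.0.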
@staticmethod
def check_perplexity(
sentence,
sentencepiece_model,
kenlm_model,
perplexity_max_cutoff,
):
cond = True
if kenlm_model:
score = Filtering.compute_perplexity_score(
sentence, sentencepiece_model, kenlm_model
)
cond = score <= perplexity_max_cutoff
return cond
@staticmethod
def filtering(
sentence,
cond_check_number_words,
sentencepiece_model_tok,
strip_characters,
number_words_min_cutoff,
number_words_max_cutoff,
cond_check_special_characters,
special_characters,
special_characters_max_cutoff,
cond_check_stopwords,
stopwords,
stopwords_min_cutoff,
cond_check_badwords,
badwords,
c:
c.argument('planner_bucket_id', type=str, help='key: id of plannerBucket')
c.argument('planner_task_id', type=str, help='key: id of plannerTask')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('planner planner-bucket-task show-detail') as c:
c.argument('planner_bucket_id', type=str, help='key: id of plannerBucket')
c.argument('planner_task_id', type=str, help='key: id of plannerTask')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('planner planner-bucket-task show-progress-task-board-format') as c:
c.argument('planner_bucket_id', type=str, help='key: id of plannerBucket')
c.argument('planner_task_id', type=str, help='key: id of plannerTask')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('planner planner-bucket-task update-assigned-to-task-board-format') as c:
c.argument('planner_bucket_id', type=str, help='key: id of plannerBucket')
c.argument('planner_task_id', type=str, help='key: id of plannerTask')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('order_hints_by_assignee', type=validate_file_or_dict, help='plannerOrderHintsByAssignee Expected '
'value: json-string/@json-file.')
c.argument('unassigned_order_hint', type=str, help='Hint value used to order the task on the AssignedTo view '
'of the Task Board when the task is not assigned to anyone, or if the orderHintsByAssignee '
'dictionary does not provide an order hint for the user the task is assigned to. The format is '
'defined as outlined here.')
with self.argument_context('planner planner-bucket-task update-bucket-task-board-format') as c:
c.argument('planner_bucket_id', type=str, help='key: id of plannerBucket')
c.argument('planner_task_id', type=str, help='key: id of plannerTask')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('order_hint', type=str, help='Hint used to order tasks in the Bucket view of the Task Board. The '
'format is defined as outlined here.')
with self.argument_context('planner planner-bucket-task update-detail') as c:
c.argument('planner_bucket_id', type=str, help='key: id of plannerBucket')
c.argument('planner_task_id', type=str, help='key: id of plannerTask')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('checklist', type=validate_file_or_dict, help='plannerChecklistItems Expected value: '
'json-string/@json-file.')
c.argument('description', type=str, help='Description of the task')
c.argument('preview_type', arg_type=get_enum_type(['automatic', 'noPreview', 'checklist', 'description',
'reference']), help='')
c.argument('references', type=validate_file_or_dict, help='plannerExternalReferences Expected value: '
'json-string/@json-file.')
with self.argument_context('planner planner-bucket-task update-progress-task-board-format') as c:
c.argument('planner_bucket_id', type=str, help='key: id of plannerBucket')
c.argument('planner_task_id', type=str, help='key: id of plannerTask')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('order_hint', type=str, help='Hint value used to order the task on the Progress view of the Task '
'Board. The format is defined as outlined here.')
with self.argument_context('planner planner-plan create-bucket') as c:
c.argument('planner_plan_id', type=str, help='key: id of plannerPlan')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('name', type=str, help='Name of the bucket.')
c.argument('order_hint', type=str, help='Hint used to order items of this type in a list view. The format is '
'defined as outlined here.')
c.argument('plan_id', type=str, help='Plan ID to which the bucket belongs.')
c.argument('tasks', type=validate_file_or_dict, help='Read-only. Nullable. The collection of tasks in the '
'bucket. Expected value: json-string/@json-file.')
with self.argument_context('planner planner-plan create-task') as c:
c.argument('planner_plan_id', type=str, help='key: id of plannerPlan')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('active_checklist_item_count', type=int, help='Number of checklist items with value set to false, '
'representing incomplete items.')
c.argument('applied_categories', type=validate_file_or_dict, help='plannerAppliedCategories Expected value: '
'json-string/@json-file.')
c.argument('assignee_priority', type=str, help='Hint used to order items of this type in a list view. The '
'format is defined as outlined here.')
c.argument('assignments', type=validate_file_or_dict, help='plannerAssignments Expected value: '
'json-string/@json-file.')
c.argument('bucket_id', type=str, help='Bucket ID to which the task belongs. The bucket needs to be in the '
'plan that the task is in. It is 28 characters long and case-sensitive. Format validation is done '
'on the service.')
c.argument('checklist_item_count', type=int, help='Number of checklist items that are present on the task.')
c.argument('completed_date_time', help='Read-only. Date and time at which the \'percentComplete\' of the task '
'is set to \'100\'. The Timestamp type represents date and time information using ISO 8601 format '
'and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
'\'2014-01-01T00:00:00Z\'')
c.argument('conversation_thread_id', type=str, help='Thread ID of the conversation on the task. This is the ID '
'of the conversation thread object created in the group.')
c.argument('created_date_time', help='Read-only. Date and time at which the task is created. The Timestamp '
'type represents date and time information using ISO 8601 format and is always in UTC time. For '
'example, midnight UTC on Jan 1, 2014 would look like this: \'2014-01-01T00:00:00Z\'')
c.argument('due_date_time', help='Date and time at which the task is due. The Timestamp type represents date '
'and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on '
'Jan 1, 2014 would look like this: \'2014-01-01T00:00:00Z\'')
c.argument('has_description', arg_type=get_three_state_flag(), help='Read-only. Value is true if the details '
'object of the task has a non-empty description and false otherwise.')
c.argument('order_hint', type=str, help='Hint used to order items of this type in a list view. The format is '
'defined as outlined here.')
c.argument('percent_complete', type=int, help='Percentage of task completion. When set to 100, the task is '
'considered completed.')
c.argument('plan_id', type=str, help='Plan ID to which the task belongs.')
c.argument('preview_type', arg_type=get_enum_type(['automatic', 'noPreview', 'checklist', 'description',
'reference']), help='')
c.argument('reference_count', type=int, help='Number of external references that exist on the task.')
c.argument('start_date_time', help='Date and time at which the task starts. The Timestamp type represents date '
'and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on '
'Jan 1, 2014 would look like this: \'2014-01-01T00:00:00Z\'')
c.argument('title', type=str, help='Title of the task.')
c.argument('bucket_task_board_format', action=AddBucketTaskBoardFormat, nargs='+',
help='plannerBucketTaskBoardTaskFormat')
c.argument('progress_task_board_format', action=AddProgressTaskBoardFormat, nargs='+',
help='plannerProgressTaskBoardTaskFormat')
c.argument('microsoft_graph_entity_id', type=str, help='Read-only.', arg_group='Details')
c.argument('checklist', type=validate_file_or_dict, help='plannerChecklistItems Expected value: '
'json-string/@json-file.', arg_group='Details')
c.argument('description', type=str, help='Description of the task', arg_group='Details')
c.argument('microsoft_graph_planner_preview_type', arg_type=get_enum_type(['automatic', 'noPreview',
'checklist', 'description',
'reference']), help='',
arg_group='Details')
c.argument('references', type=validate_file_or_dict, help='plannerExternalReferences Expected value: '
'json-string/@json-file.', arg_group='Details')
c.argument('id1', type=str, help='Read-only.', arg_group='Assigned To Task Board Format')
c.argument('order_hints_by_assignee', type=validate_file_or_dict, help='plannerOrderHintsByAssignee Expected '
'value: json-string/@json-file.', arg_group='Assigned To Task Board Format')
c.argument('unassigned_order_hint', type=str, help='Hint value used to order the task on the AssignedTo view '
'of the Task Board when the task is not assigned to anyone, or if the orderHintsByAssignee '
'dictionary does not provide an order hint for the user the task is assigned to. The format is '
'defined as outlined here.', arg_group='Assigned To Task Board Format')
c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='Created By')
c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='Created By')
c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='Created By')
c.argument('microsoft_graph_identity_application', action=AddApplication, nargs='+', help='identity',
arg_group='Completed By')
c.argument('microsoft_graph_identity_device', action=AddApplication, nargs='+', help='identity',
arg_group='Completed By')
c.argument('microsoft_graph_identity_user', action=AddApplication, nargs='+', help='identity',
arg_group='Completed By')
with self.argument_context('planner planner-plan delete-bucket') as c:
c.argument('planner_plan_id', type=str, help='key: id of plannerPlan')
c.argument('planner_bucket_id', type=str, help='key: id of plannerBucket')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('planner planner-plan delete-detail') as c:
c.argument('planner_plan_id', type=str, help='key: id of plannerPlan')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('planner planner-plan delete-task') as c:
c.argument('planner_plan_id', type=str, help='key: id of plannerPlan')
c.argument('planner_task_id', type=str, help='key: id of plannerTask')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('planner planner-plan list-bucket') as c:
c.argument('planner_plan_id', type=str, help='key: id of plannerPlan')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('planner planner-plan list-task') as c:
c.argument('planner_plan_id', type=str, help='key: id of plannerPlan')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('planner planner-plan show-bucket') as c:
c.argument('planner_plan_id', type=str, help='key: id of plannerPlan')
c.argument('planner_bucket_id', type=str, help='key: id of plannerBucket')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('planner planner-plan show-detail') as c:
c.argument('planner_plan_id', type=str, help='key: id of plannerPlan')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('planner planner-plan show-task') as c:
c.argument('planner_plan_id', type=str, help='key: id of plannerPlan')
c.argument('planner_task_id', type=str, help='key: id of plannerTask')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('planner planner-plan update-bucket') as c:
c.argument('planner_plan_id', type=str, help='key: id of plannerPlan')
c.argument('planner_bucket_id', type=str, help='key: id of plannerBucket')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('name', type=str, help='Name of the bucket.')
c.argument('order_hint', type=str, help='Hint used to order items of this type in a list view. The format is '
'defined as outlined here.')
c.argument('plan_id', type=str, help='Plan ID to which the bucket belongs.')
c.argument('tasks', type=validate_file_or_dict, help='Read-only. Nullable. The collection of tasks in the '
'bucket. Expected value: json-string/@json-file.')
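        # Illustrative invocation for the context above (placeholder values; option
        # names follow the usual underscore-to-dash conversion, and the `az` entry
        # point is assumed):
        #   az planner planner-plan update-bucket \
        #       --planner-plan-id <plan-id> --planner-bucket-id <bucket-id> \
        #       --name "Backlog" --order-hint " !"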
with self.argument_context('planner planner-plan update-detail') as c:
c.argument('planner_plan_id', type=str, help='key: id of plannerPlan')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('category_descriptions', action=AddCategoryDescriptions, nargs='+',
help='plannerCategoryDescriptions')
c.argument('shared_with', type=validate_file_or_dict, help='plannerUserIds Expected value: '
'json-string/@json-file.')
with self.argument_context('planner planner-plan update-task') as c:
c.argument('planner_plan_id', type=str, help='key: id of plannerPlan')
c.argument('planner_task_id', type=str, help='key: id of plannerTask')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
        c.argument('active_checklist_item_count', type=int,
                   help='Number of checklist items with value set to false, representing incomplete items.')
# -*- coding: utf-8 -*-
"""Tools for hydrology.
hydrotoolbox baseflow_sep --area 59.1 --area_units 'mile**2' linear < daily.csv
hydrotoolbox baseflow_sep sliding < daily.csv
hydrotoolbox baseflow_sep eckhardt,sliding < daily.csv
...
hydrotoolbox recession """
from __future__ import absolute_import, division, print_function
import warnings
import numpy as np
import pandas as pd
from tstoolbox import tsutils
from .baseflow.comparison import strict_baseflow
from .baseflow.separation import separation
warnings.filterwarnings("ignore")
def bfsep(Q, method, print_input, bfi=False, date=None, area=None, ice_period=None):
ntsd = pd.DataFrame()
if print_input is True:
ntsd = Q.copy()
Qb = pd.DataFrame()
for col in Q.columns:
ndf = pd.DataFrame(
separation(
Q.loc[:, col].astype("float64").values,
date=date,
area=area,
ice_period=ice_period,
method=method,
)[bfi],
index=Q.index,
)
ndf.columns = [col]
Qb = Qb.join(ndf, how="outer")
return tsutils.return_input(print_input, ntsd, Qb, suffix=method.lower())
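# Example (sketch only, made-up values): `bfsep` can also be called directly with
# a daily streamflow DataFrame, bypassing the CLI wrappers below.
#
#   q = pd.DataFrame(
#       {"Q": [10.0, 12.0, 30.0, 18.0, 14.0, 12.0, 11.0]},
#       index=pd.date_range("2000-01-01", periods=7, freq="D"),
#   )
#   qb = bfsep(q, "LH", print_input=False)
#   # `qb` holds the estimated baseflow, with the lower-cased method name appended
#   # to the column label as a suffix by tsutils.return_input.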
@tsutils.doc(tsutils.docstrings)
def boughton(
input_ts="-",
columns=None,
source_units=None,
start_date=None,
end_date=None,
dropna="no",
clean=False,
round_index=None,
skiprows=None,
index_type="datetime",
names=None,
target_units=None,
print_input=False,
):
"""Boughton double-parameter filter (Boughton, 2004)
! C k
! Qb = ----- Q + ----- Qb
! i 1 + C i 1 + C (i-1)
Parameters
----------
input_ts
Streamflow
${columns}
${source_units}
${start_date}
${end_date}
${dropna}
${clean}
${round_index}
${skiprows}
${index_type}
${names}
${target_units}
${print_input}
${tablefmt}
"""
Q = tsutils.common_kwds(
tsutils.read_iso_ts(
input_ts,
skiprows=skiprows,
names=names,
index_type=index_type,
),
start_date=start_date,
end_date=end_date,
pick=columns,
round_index=round_index,
dropna=dropna,
clean=clean,
source_units=source_units,
target_units=target_units,
)
return bfsep(Q, "Boughton", print_input)
@tsutils.doc(tsutils.docstrings)
def chapman(
input_ts="-",
columns=None,
source_units=None,
start_date=None,
end_date=None,
dropna="no",
clean=False,
round_index=None,
skiprows=None,
index_type="datetime",
names=None,
target_units=None,
print_input=False,
):
"""Chapman filter (Chapman, 1991)
Parameters
----------
input_ts
Streamflow
${columns}
${source_units}
${start_date}
${end_date}
${dropna}
${clean}
${round_index}
${skiprows}
${index_type}
${names}
${target_units}
${print_input}
${tablefmt}
"""
Q = tsutils.common_kwds(
tsutils.read_iso_ts(
input_ts,
skiprows=skiprows,
names=names,
index_type=index_type,
),
start_date=start_date,
end_date=end_date,
pick=columns,
round_index=round_index,
dropna=dropna,
clean=clean,
source_units=source_units,
target_units=target_units,
)
return bfsep(Q, "Chapman", print_input)
@tsutils.doc(tsutils.docstrings)
def cm(
input_ts="-",
columns=None,
source_units=None,
start_date=None,
end_date=None,
dropna="no",
clean=False,
round_index=None,
skiprows=None,
index_type="datetime",
names=None,
target_units=None,
print_input=False,
):
"""CM filter (Chapman and Maxwell, 1996)
! 1 - k k
! Qb = ----- Q + ----- Qb
! i 2 - k i 2 - k (i-1)
Parameters
----------
input_ts
Streamflow
${columns}
${source_units}
${start_date}
${end_date}
${dropna}
${clean}
${round_index}
${skiprows}
${index_type}
${names}
${target_units}
${print_input}
${tablefmt}
"""
Q = tsutils.common_kwds(
tsutils.read_iso_ts(
input_ts,
skiprows=skiprows,
names=names,
index_type=index_type,
),
start_date=start_date,
end_date=end_date,
pick=columns,
round_index=round_index,
dropna=dropna,
clean=clean,
source_units=source_units,
target_units=target_units,
)
return bfsep(Q, "CM", print_input)
@tsutils.doc(tsutils.docstrings)
def eckhardt(
input_ts="-",
columns=None,
source_units=None,
start_date=None,
end_date=None,
dropna="no",
clean=False,
round_index=None,
skiprows=None,
index_type="datetime",
names=None,
target_units=None,
print_input=False,
):
"""Eckhardt filter (Eckhardt, 2005)
Parameters
----------
input_ts
Streamflow
${columns}
${source_units}
${start_date}
${end_date}
${dropna}
${clean}
${round_index}
${skiprows}
${index_type}
${names}
${target_units}
${print_input}
${tablefmt}
"""
Q = tsutils.common_kwds(
tsutils.read_iso_ts(
input_ts,
skiprows=skiprows,
names=names,
index_type=index_type,
),
start_date=start_date,
end_date=end_date,
pick=columns,
round_index=round_index,
dropna=dropna,
clean=clean,
source_units=source_units,
target_units=target_units,
)
return bfsep(Q, "Eckhardt", print_input)
@tsutils.doc(tsutils.docstrings)
def ewma(
input_ts="-",
columns=None,
source_units=None,
start_date=None,
end_date=None,
dropna="no",
clean=False,
round_index=None,
skiprows=None,
index_type="datetime",
names=None,
target_units=None,
print_input=False,
):
"""Exponential Weighted Moving Average (EWMA) filter (Tularam and Ilahee, 2008)
Parameters
----------
input_ts
Streamflow
${columns}
${source_units}
${start_date}
${end_date}
${dropna}
${clean}
${round_index}
${skiprows}
${index_type}
${names}
${target_units}
${print_input}
${tablefmt}
"""
Q = tsutils.common_kwds(
tsutils.read_iso_ts(
input_ts,
skiprows=skiprows,
names=names,
index_type=index_type,
),
start_date=start_date,
end_date=end_date,
pick=columns,
round_index=round_index,
dropna=dropna,
clean=clean,
source_units=source_units,
target_units=target_units,
)
return bfsep(Q, "EWMA", print_input)
@tsutils.doc(tsutils.docstrings)
def usgs_hysep_fixed(
area=None,
input_ts="-",
columns=None,
source_units=None,
start_date=None,
end_date=None,
dropna="no",
clean=False,
round_index=None,
skiprows=None,
index_type="datetime",
names=None,
target_units=None,
print_input=False,
):
"""USGS HYSEP Fixed interval method.
    Sloto, R.A., and Crouse, M.Y. “HYSEP: A Computer Program for
Streamflow Hydrograph Separation and Analysis.” USGS Numbered Series.
Water-Resources Investigations Report. Geological Survey (U.S.), 1996.
http://pubs.er.usgs.gov/publication/wri964040
Parameters
----------
area: float
basin area in mile^2
input_ts
Streamflow
${columns}
${source_units}
${start_date}
${end_date}
${dropna}
${clean}
${round_index}
${skiprows}
${index_type}
${names}
${target_units}
${print_input}
${tablefmt}
"""
Q = tsutils.common_kwds(
tsutils.read_iso_ts(
input_ts,
skiprows=skiprows,
names=names,
index_type=index_type,
),
start_date=start_date,
end_date=end_date,
pick=columns,
round_index=round_index,
dropna=dropna,
clean=clean,
source_units=source_units,
target_units=target_units,
)
return bfsep(Q, "Fixed", print_input, area=area)
@tsutils.doc(tsutils.docstrings)
def furey(
input_ts="-",
columns=None,
source_units=None,
start_date=None,
end_date=None,
dropna="no",
clean=False,
round_index=None,
skiprows=None,
index_type="datetime",
names=None,
target_units=None,
print_input=False,
):
"""Furey digital filter (<NAME>, 2001, 2003)
Parameters
----------
input_ts
Streamflow
${columns}
${source_units}
${start_date}
${end_date}
${dropna}
${clean}
${round_index}
${skiprows}
${index_type}
${names}
${target_units}
${print_input}
${tablefmt}
"""
Q = tsutils.common_kwds(
tsutils.read_iso_ts(
input_ts,
skiprows=skiprows,
names=names,
index_type=index_type,
),
start_date=start_date,
end_date=end_date,
pick=columns,
round_index=round_index,
dropna=dropna,
clean=clean,
source_units=source_units,
target_units=target_units,
)
return bfsep(Q, "Furey", print_input)
@tsutils.doc(tsutils.docstrings)
def lh(
input_ts="-",
columns=None,
source_units=None,
start_date=None,
end_date=None,
dropna="no",
clean=False,
round_index=None,
skiprows=None,
index_type="datetime",
names=None,
target_units=None,
print_input=False,
):
"""LH digital filter (<NAME>, 1979)
Parameters
----------
input_ts
Streamflow
${columns}
${source_units}
${start_date}
${end_date}
${dropna}
${clean}
${round_index}
${skiprows}
${index_type}
${names}
${target_units}
${print_input}
${tablefmt}
"""
Q = tsutils.common_kwds(
tsutils.read_iso_ts(
input_ts,
skiprows=skiprows,
names=names,
index_type=index_type,
),
start_date=start_date,
end_date=end_date,
pick=columns,
round_index=round_index,
dropna=dropna,
clean=clean,
source_units=source_units,
target_units=target_units,
)
return bfsep(Q, "LH", print_input)
@tsutils.doc(tsutils.docstrings)
def usgs_hysep_local(
area=None,
input_ts="-",
columns=None,
source_units=None,
start_date=None,
end_date=None,
dropna="no",
clean=False,
round_index=None,
skiprows=None,
index_type="datetime",
names=None,
target_units=None,
print_input=False,
):
"""USGS HYSEP Local minimum graphical method (Sloto and Crouse, 1996)
Parameters
----------
area: float
basin area in mile^2
input_ts
Streamflow
${columns}
${source_units}
${start_date}
${end_date}
${dropna}
${clean}
${round_index}
${skiprows}
${index_type}
${names}
${target_units}
${print_input}
${tablefmt}
"""
Q = tsutils.common_kwds(
tsutils.read_iso_ts(
input_ts,
skiprows=skiprows,
names=names,
index_type=index_type,
),
start_date=start_date,
end_date=end_date,
pick=columns,
round_index=round_index,
dropna=dropna,
clean=clean,
source_units=source_units,
target_units=target_units,
)
return bfsep(Q, "Local", print_input, area=area)
@tsutils.doc(tsutils.docstrings)
def ihacres(
k,
C,
a,
input_ts="-",
columns=None,
source_units=None,
start_date=None,
end_date=None,
dropna="no",
clean=False,
round_index=None,
skiprows=None,
index_type="datetime",
names=None,
target_units=None,
print_input=False,
):
"""IHACRES
Parameters
----------
k: float
k
coefficient
C: float
C
coefficient
a: float
a
coefficient
input_ts
Streamflow
${columns}
${source_units}
${start_date}
${end_date}
${dropna}
${clean}
${round_index}
${skiprows}
${index_type}
${names}
${target_units}
${print_input}
${tablefmt}
"""
Q = tsutils.common_kwds(
tsutils.read_iso_ts(
input_ts,
skiprows=skiprows,
names=names,
index_type=index_type,
),
start_date=start_date,
end_date=end_date,
pick=columns,
round_index=round_index,
dropna=dropna,
clean=clean,
source_units=source_units,
target_units=target_units,
)
ntsd = pd.DataFrame()
if print_input is True:
ntsd = Q.copy()
Qb = Q.copy()
for col in range(len(Q.columns)):
for row in range(1, len(Q.index)):
Qb.iloc[row, col] = k / (1 + C) * Qb.iloc[row - 1, col] + C / (1 + C) * (
Q.iloc[row, col] + a * Q.iloc[row - 1, col]
)
    Qb.mask(Qb > Q, other=Q, inplace=True)  # baseflow cannot exceed total streamflow
return tsutils.return_input(print_input, ntsd, Qb, suffix="ihacres")
@tsutils.doc(tsutils.docstrings)
def usgs_hysep_slide(
area=None,
input_ts="-",
columns=None,
source_units=None,
start_date=None,
end_date=None,
dropna="no",
clean=False,
round_index=None,
skiprows=None,
index_type="datetime",
names=None,
target_units=None,
print_input=False,
):
"""USGS HYSEP sliding interval method
The USGS HYSEP sliding interval method described in
`Sloto and Crouse, 1996`
    The flow series is filtered with scipy.ndimage.generic_filter1d using the
    numpy.nanmin function over a window of size `size`.
    Sloto, R.A., and Crouse, M.Y. “HYSEP: A Computer Program for
Streamflow Hydrograph Separation and Analysis.” USGS Numbered Series.
Water-Resources Investigations Report. Geological Survey (U.S.), 1996.
http://pubs.er.usgs.gov/publication/wri964040.
Parameters
----------
area: float
Area of watershed in miles**2
input_ts
Streamflow
${columns}
${source_units}
${start_date}
${end_date}
${dropna}
${clean}
${round_index}
${skiprows}
${index_type}
${names}
${target_units}
${print_input}
${tablefmt}
"""
Q = tsutils.common_kwds(
tsutils.read_iso_ts(
input_ts,
skiprows=skiprows,
names=names,
index_type=index_type,
),
start_date=start_date,
end_date=end_date,
pick=columns,
round_index=round_index,
dropna=dropna,
clean=clean,
source_units=source_units,
target_units=target_units,
)
return bfsep(Q, "Slide", print_input, area=area)
@tsutils.doc(tsutils.docstrings)
def ukih(
input_ts="-",
columns=None,
source_units=None,
start_date=None,
end_date=None,
dropna="no",
clean=False,
round_index=None,
skiprows=None,
index_type="datetime",
names=None,
target_units=None,
print_input=False,
):
"""Graphical method developed by UK Institute of Hydrology (UKIH, 1980)
Parameters
----------
input_ts
Streamflow
${columns}
${source_units}
${start_date}
${end_date}
${dropna}
${clean}
${round_index}
${skiprows}
${index_type}
${names}
${target_units}
${print_input}
${tablefmt}
"""
Q = tsutils.common_kwds(
tsutils.read_iso_ts(
input_ts,
skiprows=skiprows,
names=names,
index_type=index_type,
),
start_date=start_date,
end_date=end_date,
pick=columns,
round_index=round_index,
dropna=dropna,
clean=clean,
source_units=source_units,
target_units=target_units,
)
return bfsep(Q, "UKIH", print_input)
@tsutils.doc(tsutils.docstrings)
def willems(
input_ts="-",
columns=None,
source_units=None,
start_date=None,
end_date=None,
dropna="no",
clean=False,
round_index=None,
skiprows=None,
index_type="datetime",
names=None,
target_units=None,
print_input=False,
):
"""Digital filter (Willems, 2009)
Parameters
----------
input_ts
Streamflow
${columns}
${source_units}
${start_date}
${end_date}
${dropna}
${clean}
${round_index}
${skiprows}
${index_type}
${names}
${target_units}
${print_input}
${tablefmt}
"""
Q = tsutils.common_kwds(
tsutils.read_iso_ts(
input_ts,
skiprows=skiprows,
names=names,
index_type=index_type,
),
start_date=start_date,
end_date=end_date,
pick=columns,
round_index=round_index,
dropna=dropna,
clean=clean,
source_units=source_units,
target_units=target_units,
)
return bfsep(Q, "Willems", print_input)
@tsutils.doc(tsutils.docstrings)
def five_day(
input_ts="-",
columns=None,
source_units=None,
start_date=None,
end_date=None,
dropna="no",
clean=False,
round_index=None,
skiprows=None,
index_type="datetime",
names=None,
target_units=None,
print_input=False,
):
"""Value kept if less than 90 percent of adjacent 5-day blocks.
Parameters
----------
input_ts
Streamflow
${columns}
${source_units}
${start_date}
${end_date}
${dropna}
${clean}
${round_index}
${skiprows}
${index_type}
${names}
${target_units}
${print_input}
${tablefmt}
"""
Q = tsutils.common_kwds(
tsutils.read_iso_ts(
input_ts,
skiprows=skiprows,
names=names,
index_type=index_type,
),
start_date=start_date,
end_date=end_date,
pick=columns,
round_index=round_index,
dropna=dropna,
clean=clean,
source_units=source_units,
target_units=target_units,
)
ntsd = pd.DataFrame()
if print_input is True:
ntsd = Q.copy()
ndf = pd.DataFrame()
for col in Q:
vals = Q[col].groupby(pd.Grouper(freq="5D")).min().astype("float64")
srccol = 0.9 * vals
prevrow = vals.shift(-1)
nextrow = vals.shift(1)
mask = (srccol > prevrow) | (srccol > nextrow)
        nanmask = Q[col].isna()  # `== np.nan` is always False; use isna() instead
        vals.loc[mask] = None
        vals = vals.interpolate(method="linear")
        vals = vals.reindex(Q.index).ffill()
        vals.loc[nanmask] = None  # re-mask values that were missing in the input
ltmask = Q[col] < vals
vals.loc[ltmask] = Q.loc[ltmask, col]
ndf = ndf.join(vals, how="outer")
return tsutils.return_input(print_input, ntsd, ndf, suffix="five_day")
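# Worked micro-example of the 90 percent test above (made-up 5-day block minima):
#   minima = [8.0, 5.0, 9.0]
#   0.9 * 5.0 = 4.5 does not exceed either neighbour (8.0 or 9.0), so 5.0 is kept
#   as a baseflow turning point; a minimum failing the test is dropped and later
#   filled in by linear interpolation.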
@tsutils.doc(tsutils.docstrings)
def strict(
input_ts="-",
columns=None,
source_units=None,
start_date=None,
end_date=None,
dropna="no",
clean=False,
round_index=None,
skiprows=None,
index_type="datetime",
names=None,
target_units=None,
print_input=False,
):
"""Return "strict" baseflow.
Parameters
----------
input_ts
Streamflow
${columns}
${source_units}
${start_date}
${end_date}
${dropna}
${clean}
${round_index}
${skiprows}
${index_type}
${names}
${target_units}
${tablefmt}
"""
Q = tsutils.common_kwds(
tsutils.read_iso_ts(
input_ts,
skiprows=skiprows,
names=names,
index_type=index_type,
),
start_date=start_date,
end_date=end_date,
pick=columns,
round_index=round_index,
dropna=dropna,
clean=clean,
source_units=source_units,
target_units=target_units,
)
    ntsd = pd.DataFrame()
except:
child_comments = resp.json
except:
# WE need logging in the web app!
try:
import sys
sys.stderr.write('Error: got a {c} from {u}\n'.format(c=resp.status_code,
u=get_children_url))
except:
pass # well that sucks, we failed to even write to stderr
metadata = parse_comment_metadata(comment['body'])
##print(metadata)
# Examine the comment metadata (if any) to get the best display name
# and URL for its author. Guests should appear here as the name and
# email address they entered when creating a comment, rather than the
# GitHub app (bot).
#
# Default values are what we can fetch from the issues API
author_display_name = comment['user']['login']
author_link = comment['user']['html_url']
# Now let's try for something more friendly...
if metadata:
meta_author_info = metadata.get('Author', None)
if meta_author_info:
                # Try to parse this from a Markdown hyperlink. Typical values include:
# u'opentreeapi'
# u'11'
# u'[<NAME>](https://github.com/jimallman)'
# u'[<NAME>](mailto:<EMAIL>)'
regex = re.compile(r'\[(.*)\]\((.*)\)')
markdown_fields = regex.findall(meta_author_info)
if len(markdown_fields) > 0:
# look for parts of a markdown link
author_display_name, author_link = markdown_fields[0]
else:
# it's not a markdown link, just a bare name or numeric userid
if meta_author_info.isdigit():
# ignore ugly userid (login is better)
pass
else:
author_display_name = meta_author_info
# Is this node for an issue (thread starter) or a comment (reply)?
issue_node = 'number' in comment
# Is the current user logged in? If so, what is their GitHub ID (login)?
current_user_id = auth.user and auth.user.github_login or None
# Cook up some reasonably strong regular expressions to detect bare
# URLs and wrap them in hyperlinks. Adapted from
# http://stackoverflow.com/questions/1071191/detect-urls-in-a-string-and-wrap-with-a-href-tag
link_regex = re.compile( r'''
(?x)( # verbose identify URLs within text
(http|https) # make sure we find a resource type
:// # ...needs to be followed by colon-slash-slash
(\w+[:.]?){2,} # at least two domain groups, e.g. (gnosis.)(cx)
(/?| # could be just the domain name (maybe w/ slash)
[^ \n\r"]+ # or stuff then space, newline, tab, quote
[\w/]) # resource name ends in alphanumeric or slash
(?=([\s\.,>)'"\]]|$)) # assert: followed by white or clause ending OR end of line
) # end of match group
''')
# link_replace = r'<a href="\1" />\1</a>'
# let's try this do-nothing version
link_replace = r'\1'
# NOTE the funky constructor required to use this below
# Define a consistent cleaner to sanitize user input. We need a few
# elements that are common in our markdown but missing from the Bleach
# whitelist.
# N.B. HTML comments are stripped by default. Non-allowed tags will appear
# "naked" in output, so we can identify any bad actors.
common_feedback_tags = [u'p', u'br',
u'h1', u'h2', u'h3', u'h4', u'h5', u'h6',
]
ot_markdown_tags = list(set( bleach.sanitizer.ALLOWED_TAGS + common_feedback_tags))
ot_cleaner = Cleaner(tags=ot_markdown_tags)
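        # For example (illustrative), cleaning '<h2>Title</h2><script>x()</script>'
        # keeps the allowed <h2> element but escapes the disallowed <script> tag, so
        # it shows up as visible text instead of executing.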
try: # TODO: if not comment.deleted:
# N.B. some missing information (e.g. supporting URL) will appear here as a string like "None"
supporting_reference_url = metadata.get('Supporting reference', None)
has_supporting_reference_url = supporting_reference_url and (supporting_reference_url != u'None')
# Prepare a sanitized rendering of this user-submitted markup
rendered_comment_markdown = markdown(
get_visible_comment_body(comment['body'] or ''),
extras={'link-patterns':None},
link_patterns=[(link_regex, link_replace)]).encode('utf-8')
safe_comment_markup = XML(
ot_cleaner.clean(rendered_comment_markdown),
sanitize=False) # gluon's sanitize will break on Unicode!
markup = LI(
DIV(##T('posted by %(first_name)s %(last_name)s',comment.created_by),
# not sure why this doesn't work... db.auth record is not a mapping!?
('title' in comment) and DIV( comment['title'], A(T('on GitHub'), _href=comment['html_url'], _target='_blank'), _class='topic-title') or '',
DIV( safe_comment_markup, _class=(issue_node and 'body issue-body' or 'body comment-body')),
DIV( A(T('Supporting reference (opens in a new window)'), _href=supporting_reference_url, _target='_blank'), _class='body issue-supporting-reference' ) if has_supporting_reference_url else '',
DIV(
A(T(author_display_name), _href=author_link, _target='_blank'),
# SPAN(' [local expertise]',_class='badge') if comment.claimed_expertise else '',
SPAN(' ',metadata.get('Feedback type'),' ',_class='badge') if metadata.get('Feedback type') else '',
T(' - %s',prettydate(utc_to_local(datetime.strptime(comment['created_at'], GH_DATETIME_FORMAT)),T)),
SPAN(
issue_node and A(T(child_comments and 'Hide comments' or 'Show/add comments'),_class='toggle',_href='#') or '',
issue_node and comment['user']['login'] == current_user_id and SPAN(' | ') or '',
A(T('Delete'),_class='delete',_href='#') if comment['user']['login'] == current_user_id else '',
_class='controls'),
_class='byline'),
_id='r%s' % comment.get('number', comment['id']),
_class='msg-wrapper'),
# child messages (toggle hides/shows these)
issue_node and SUL(*[node(comment) for comment in child_comments], _style=("" if child_comments else "display: none;")) or '',
issue_node and DIV(_class='reply', _style=("" if child_comments else "display: none;")) or '',
_class=(issue_node and 'issue' or 'comment'))
return markup
except:
import sys
print "Unexpected error:", sys.exc_info()[0]
raise
if thread_parent_id == 'delete':
# delete the specified comment or close an issue...
try:
if issue_or_comment == 'issue':
print("CLOSING ISSUE {0}".format(comment_id))
close_issue(comment_id)
clear_local_comments()
return 'closed'
else:
print("DELETING COMMENT {0}".format(comment_id))
delete_comment(comment_id)
clear_local_comments()
return 'deleted'
except:
clear_local_comments() # hopefully a cleaner result
return error()
elif thread_parent_id:
# add a new comment using the submitted vars
if not request.vars.body:
print('MISSING BODY:')
print(request.vars.body)
return error()
if not (visitor_name or auth.user):
print('MISSING USER-ID:')
print(' visitor_name:')
print(visitor_name)
print(' auth.user:')
print(auth.user)
return error()
# build useful links for some footer fields
if auth.user:
author_link = '[{0}]({1})'.format(auth.user.name, auth.user.github_url)
elif visitor_name and visitor_email:
author_link = '[{0}](mailto:{1})'.format(visitor_name, visitor_email)
elif visitor_name:
# no email provided
author_link = visitor_name
elif visitor_email:
# no name provided
author_link = '[{0}](mailto:{1})'.format(visitor_email, visitor_email)
else:
# no identifying information provided
author_link = 'Anonymous'
if (thread_parent_id == '0'):
# create a new issue (thread starter)
msg_body = request.vars.body
if len(re.compile('\s+').sub('',msg_body))<1:
return ''
# more useful links for some footer fields
if url.startswith('http'):
# repeat full (absolute) URL as link text
url_link = '[{0}]({1})'.format(url, url)
else:
# expand hidden link for root-relative URL
url_link = '[{0}]({1}{2})'.format(url, request.get('env').get('http_origin'), url)
# add full metadata for an issue
footer = build_comment_metadata_footer(comment_type='starter', metadata={
"Author": author_link,
"Upvotes": 0,
"URL": url_link,
"Target node label": target_node_label,
"Synthetic tree id": synthtree_id,
"Synthetic tree node id": synthtree_node_id,
"Source tree id": sourcetree_id,
"Open Tree Taxonomy id": ottol_id,
"Supporting reference": reference_url or 'None'
})
msg_data = {
"title": issue_title,
"body": "{0}\n{1}".format(msg_body, footer),
"labels": [ ]
}
if feedback_type:
# omit an empty value here!
msg_data['labels'].append(feedback_type)
new_msg = add_or_update_issue(msg_data)
else:
# attach this comment to an existing issue
##print("ADD A COMMENT")
msg_body = request.vars.body
if len(re.compile('\s+').sub('',msg_body))<1:
return ''
# add abbreviated metadata for a comment
footer = build_comment_metadata_footer(comment_type='reply', metadata={
"Author" : author_link,
"Upvotes" : 0,
})
print(footer)
msg_data = {
"body": "{0}\n{1}".format(msg_body, footer)
}
new_msg = add_or_update_comment(msg_data, parent_issue_id=thread_parent_id)
clear_local_comments()
return node(new_msg)
# retrieve related comments, based on the chosen filter
if filter == 'skip_comments':
# sometimes we just want the markup/UI (eg, an empty page that's quickly updated by JS)
comments = [ ]
elif filter == 'synthtree_id,synthtree_node_id':
comments = get_local_comments({
"Synthetic tree id": synthtree_id,
"Synthetic tree node id": synthtree_node_id})
elif filter == 'sourcetree_id':
comments = get_local_comments({"Source tree id(s)": sourcetree_id})
elif filter == 'ottol_id':
comments = get_local_comments({"Open Tree Taxonomy id": ottol_id})
else: # fall back to url
if 'parentWindowURL=' in url:
#pprint("=== EXTRACTING parentWindowURL...")
try:
from urllib import unquote_plus
except ImportError:
from urllib.parse import unquote_plus
# capture the absolute URL of a parent window (i.e. from OneZoom or the study-curation app)
raw_qs_value = url.split('parentWindowURL=')[1];
#pprint("=== raw_qs_value: %s" % raw_qs_value)
url = unquote_plus(raw_qs_value) # decode to a proper URL
#pprint("=== NEW url: %s" % url)
comments = get_local_comments({"URL": url})
#pprint(comments)
for comment in comments:
#thread[comment.thread_parent_id] = thread.get(comment.thread_parent_id,[])+[comment]
threads.append(comment)
return DIV(script,
DIV(FORM(# anonymous users should see be encouraged to login or add a name-or-email to their comments
'' if auth.user_id else A(T('Login'),_href=URL(r=request,c='default',f='user',args=['login']),_class='login-logout reply'),
'' if auth.user_id else T(' or '),
'' if auth.user_id else INPUT(_type='text',_id='visitor_name',_name='visitor_name',_value=session.get('visitor_name',''),_placeholder="Enter your name"),
'' if auth.user_id else T(' '),
'' if auth.user_id else INPUT(_type='text',_id='visitor_email',_name='visitor_email',_value=session.get('visitor_email',''),_placeholder="Your email (visible on GitHub)"),
'' if auth.user_id else BR(),
SELECT(
OPTION('What kind of feedback is this?', _value=''),
OPTION('General feedback'),
OPTION('Correction to relationships in the synthetic tree'),
OPTION('Suggest a phylogeny to incorporate'),
OPTION('Correction to names (taxonomy)'),
OPTION('Extinct/extant issue (taxonomy)'),
OPTION('Bug report (website behavior)'),
OPTION('New feature request'),
_name='feedback_type',value='',_style='width: 100%; margin-right: -4px;'),
LABEL(INPUT(_type='checkbox',_name=T('claimed_expertise')), T(' I claim expertise in this area'),_style='float: right;',_class='expertise-option'),
INPUT(_type='text',_id='issue_title',_name='issue_title',_value='',_placeholder="Give this topic a title"), # should appear for proper issues only
TEXTAREA(_name='body',_placeholder="Add more to this topic, using Markdown (click 'Markdown help' below to learn more)."),
INPUT(_type='text',_id='reference_url',_name='reference_url',_value='',_placeholder="..."), # visibility (and placeholder) depends on feedback type
INPUT(_type='hidden',_name='synthtree_id',_value=synthtree_id),
INPUT(_type='hidden',_name='synthtree_node_id',_value=synthtree_node_id),
INPUT(_type='hidden',_name='sourcetree_id',_value=sourcetree_id),
INPUT(_type='hidden',_name='ottol_id',_value=ottol_id),
INPUT(_type='hidden',_name='target_node_label',_value=target_node_label),
INPUT(_type='hidden',_name='url',_value=url),
# INPUT(_type='text',_name='thread_parent_id',_value=0), # we'll get this from a nearby id, eg 'r8'
DIV(A(T('Close'),_class='msg-close',_href='#',_style='margin-right: 6px'),
| |
##############################################################################
# Copyright by The HDF Group. #
# All rights reserved. #
# #
# This file is part of HSDS (HDF5 Scalable Data Service), Libraries and #
# Utilities. The full HSDS copyright notice, including #
# terms governing use, modification, and redistribution, is contained in #
# the file COPYING, which can be found at the root of the source code #
# distribution tree. If you do not have access to this file, you may #
# request a copy from <EMAIL>. #
##############################################################################
#
# data node of hsds cluster
#
import asyncio
import time
import numpy as np
from aiohttp.web_exceptions import HTTPGone, HTTPInternalServerError, HTTPBadRequest, HTTPNotFound, HTTPForbidden, HTTPServiceUnavailable
from .util.idUtil import validateInPartition, getS3Key, isValidUuid, isValidChunkId, getDataNodeUrl, isSchema2Id, getRootObjId, isRootObjId
from .util.storUtil import getStorJSONObj, putStorJSONObj, putStorBytes, getStorBytes, isStorObj, deleteStorObj
from .util.domainUtil import isValidDomain, getBucketForDomain
from .util.attrUtil import getRequestCollectionName
from .util.httpUtil import http_post
from .util.dsetUtil import getChunkLayout, getDeflateLevel, isShuffle, getFillValue
from .util.chunkUtil import getDatasetId
from .util.arrayUtil import arrayToBytes, bytesToArray
from .util.hdf5dtype import createDataType
from . import config
from . import hsds_logger as log
def get_obj_id(request, body=None):
""" Get object id from request
Raise HTTPException on errors.
"""
obj_id = None
collection = None
app = request.app
if body and "id" in body:
obj_id = body["id"]
else:
collection = getRequestCollectionName(request) # returns datasets|groups|datatypes
obj_id = request.match_info.get('id')
if not obj_id:
msg = "Missing object id"
log.error(msg)
raise HTTPInternalServerError()
if not isValidUuid(obj_id, obj_class=collection):
msg = f"Invalid obj id: {obj_id}"
log.error(msg)
raise HTTPInternalServerError()
try:
validateInPartition(app, obj_id)
except KeyError:
log.error("Domain not in partition")
raise HTTPInternalServerError()
return obj_id
async def notify_root(app, root_id, bucket=None):
# flag to write to S3
log.info(f"notify_root: {root_id}")
if not isValidUuid(root_id) or not isSchema2Id(root_id):
log.error(f"unexpected call to notify with invalid id: {root_id}")
return
notify_req = getDataNodeUrl(app, root_id) + "/roots/" + root_id
log.info(f"Notify: {notify_req} [{bucket}]")
params = {}
if bucket:
params["bucket"] = bucket
await http_post(app, notify_req, data={}, params=params)
async def check_metadata_obj(app, obj_id, bucket=None):
""" Return False is obj does not exist
"""
if isValidDomain(obj_id):
bucket = getBucketForDomain(obj_id)
try:
validateInPartition(app, obj_id)
except KeyError:
log.error("Domain not in partition")
raise HTTPInternalServerError()
deleted_ids = app['deleted_ids']
if obj_id in deleted_ids:
msg = f"{obj_id} has been deleted"
log.info(msg)
return False
meta_cache = app['meta_cache']
if obj_id in meta_cache:
found = True
else:
        # Not in cache, check if the S3 object exists
s3_key = getS3Key(obj_id)
log.debug(f"check_metadata_obj({s3_key})")
# does key exist?
found = await isStorObj(app, s3_key, bucket=bucket)
return found
async def get_metadata_obj(app, obj_id, bucket=None):
""" Get object from metadata cache (if present).
Otherwise fetch from S3 and add to cache
"""
log.info(f"get_metadata_obj: {obj_id} bucket: {bucket}")
if isValidDomain(obj_id):
bucket = getBucketForDomain(obj_id)
# don't call validateInPartition since this is used to pull in
# immutable data from other nodes
deleted_ids = app['deleted_ids']
if obj_id in deleted_ids:
msg = f"{obj_id} has been deleted"
log.warn(msg)
raise HTTPGone()
meta_cache = app['meta_cache']
obj_json = None
if obj_id in meta_cache:
log.debug(f"{obj_id} found in meta cache")
obj_json = meta_cache[obj_id]
else:
s3_key = getS3Key(obj_id)
pending_s3_read = app["pending_s3_read"]
if obj_id in pending_s3_read:
# already a read in progress, wait for it to complete
read_start_time = pending_s3_read[obj_id]
log.info(f"s3 read request for {s3_key} was requested at: {read_start_time}")
while time.time() - read_start_time < 2.0:
log.debug("waiting for pending s3 read, sleeping")
await asyncio.sleep(1) # sleep for sub-second?
if obj_id in meta_cache:
log.info(f"object {obj_id} has arrived!")
obj_json = meta_cache[obj_id]
break
if not obj_json:
log.warn(f"s3 read for object {s3_key} timed-out, initiaiting a new read")
# invoke S3 read unless the object has just come in from pending read
if not obj_json:
log.debug(f"getS3JSONObj({s3_key}, bucket={bucket})")
if obj_id not in pending_s3_read:
pending_s3_read[obj_id] = time.time()
# read S3 object as JSON
try:
obj_json = await getStorJSONObj(app, s3_key, bucket=bucket)
except HTTPNotFound:
log.warn(f"HTTPpNotFound error for {s3_key} bucket:{bucket}")
if obj_id in pending_s3_read:
del pending_s3_read[obj_id]
raise
except HTTPForbidden:
log.warn(f"HTTPForbidden error for {s3_key} bucket:{bucket}")
if obj_id in pending_s3_read:
del pending_s3_read[obj_id]
raise
except HTTPInternalServerError:
log.warn(f"HTTPInternalServerError error for {s3_key} bucket:{bucket}")
if obj_id in pending_s3_read:
del pending_s3_read[obj_id]
raise
if obj_id in pending_s3_read:
# read complete - remove from pending map
elapsed_time = time.time() - pending_s3_read[obj_id]
log.info(f"s3 read for {s3_key} took {elapsed_time}")
del pending_s3_read[obj_id]
meta_cache[obj_id] = obj_json # add to cache
return obj_json
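# Typical caller pattern (sketch only; the handler name and query parsing below
# are illustrative, not part of this module):
#
#   async def GET_Group(request):
#       app = request.app
#       group_id = get_obj_id(request)
#       bucket = request.rel_url.query.get("bucket")
#       try:
#           group_json = await get_metadata_obj(app, group_id, bucket=bucket)
#       except HTTPGone:
#           ...  # the object was deleted on this node
#       except HTTPNotFound:
#           ...  # no such key in the bucket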
async def save_metadata_obj(app, obj_id, obj_json, bucket=None, notify=False, flush=False):
""" Persist the given object """
log.info(f"save_metadata_obj {obj_id} bucket={bucket} notify={notify} flush={flush}")
if notify and not flush:
log.error("notify not valid when flush is false")
raise HTTPInternalServerError()
if not isinstance(obj_json, dict):
log.error("Passed non-dict obj to save_metadata_obj")
raise HTTPInternalServerError()
try:
validateInPartition(app, obj_id)
except KeyError:
log.error("Domain not in partition")
raise HTTPInternalServerError()
dirty_ids = app["dirty_ids"]
deleted_ids = app['deleted_ids']
if obj_id in deleted_ids:
if isValidUuid(obj_id):
# domain objects may be re-created, but shouldn't see repeats of
# deleted uuids
log.warn(f"{obj_id} has been deleted")
raise HTTPInternalServerError()
elif obj_id in deleted_ids:
deleted_ids.remove(obj_id) # un-gone the domain id
# update meta cache
meta_cache = app['meta_cache']
log.debug(f"save: {obj_id} to cache")
meta_cache[obj_id] = obj_json
meta_cache.setDirty(obj_id)
now = int(time.time())
if flush:
# write to S3 immediately
if isValidChunkId(obj_id):
log.warn("flush not supported for save_metadata_obj with chunks")
raise HTTPBadRequest()
try:
await write_s3_obj(app, obj_id, bucket=bucket)
except KeyError as ke:
log.error(f"s3 sync got key error: {ke}")
raise HTTPInternalServerError()
except HTTPInternalServerError:
log.warn(f" failed to write {obj_id}")
raise # re-throw
if obj_id in dirty_ids:
log.warn(f"save_metadata_obj flush - object {obj_id} is still dirty")
# message AN immediately if notify flag is set
# otherwise AN will be notified at next S3 sync
if notify:
if isValidUuid(obj_id) and isSchema2Id(obj_id):
root_id = getRootObjId(obj_id)
await notify_root(app, root_id, bucket=bucket)
else:
log.debug(f"setting dirty_ids[{obj_id}] = ({now}, {bucket})")
if isValidUuid(obj_id) and not bucket:
log.warn(f"bucket is not defined for save_metadata_obj: {obj_id}")
dirty_ids[obj_id] = (now, bucket)
async def delete_metadata_obj(app, obj_id, notify=True, root_id=None, bucket=None):
""" Delete the given object """
meta_cache = app['meta_cache']
dirty_ids = app["dirty_ids"]
log.info(f"delete_meta_data_obj: {obj_id} notify: {notify}")
if isValidDomain(obj_id):
bucket = getBucketForDomain(obj_id)
try:
validateInPartition(app, obj_id)
except KeyError:
log.error(f"obj: {obj_id} not in partition")
raise HTTPInternalServerError()
deleted_ids = app['deleted_ids']
if obj_id in deleted_ids:
log.warn(f"{obj_id} has already been deleted")
else:
log.debug(f"adding {obj_id} to deleted ids")
deleted_ids.add(obj_id)
if obj_id in meta_cache:
log.debug(f"removing {obj_id} from meta_cache")
del meta_cache[obj_id]
if obj_id in dirty_ids:
log.debug(f"removing dirty_ids for: {obj_id}")
del dirty_ids[obj_id]
# remove from S3 (if present)
s3key = getS3Key(obj_id)
if await isStorObj(app, s3key, bucket=bucket):
await deleteStorObj(app, s3key, bucket=bucket)
else:
log.info(f"delete_metadata_obj - key {s3key} not found (never written)?")
if isValidUuid(obj_id) and isSchema2Id(obj_id):
if isRootObjId(obj_id):
# add to gc ids so sub-objects will be deleted
gc_ids = app["gc_ids"]
log.info(f"adding root id: {obj_id} for GC cleanup")
gc_ids.add(obj_id)
elif notify:
root_id = getRootObjId(obj_id)
await notify_root(app, root_id, bucket=bucket)
# no notify for domain deletes since the root group is being deleted
log.debug(f"delete_metadata_obj for {obj_id} done")
"""
Utility method for GET_Chunk, PUT_Chunk, and POST_Chunk
Get a numpy array for the chunk (possibly initializing a new chunk if requested)
"""
async def get_chunk(app, chunk_id, dset_json, bucket=None, s3path=None, s3offset=0, s3size=0, chunk_init=False):
# if the chunk cache has too many dirty items, wait till items get flushed to S3
MAX_WAIT_TIME = 10.0 # TBD - make this a config
chunk_cache = app['chunk_cache']
if chunk_init and s3offset > 0:
log.error(f"unable to initiale chunk {chunk_id} for reference layouts ")
raise HTTPInternalServerError()
log.debug(f"getChunk cache utilization: {chunk_cache.cacheUtilizationPercent} per, dirty_count: {chunk_cache.dirtyCount}, mem_dirty: {chunk_cache.memDirty}")
chunk_arr = None
dims = getChunkLayout(dset_json)
type_json = dset_json["type"]
dt = createDataType(type_json)
# note - officially we should follow the order in which the filters are defined in the filter_list,
# but since we currently have just deflate and shuffle we will always apply deflate then shuffle on read,
# and shuffle then deflate on write
# also note - get deflate and shuffle will update the deflate and shuffle map so that the s3sync will do the right thing
deflate_level = getDeflateLevel(dset_json)
shuffle = isShuffle(dset_json)
s3key = None
if s3path:
if s3path.startswith("s3://"):
# trim off the s3:// if found
path = s3path[5:]
else:
path = s3path
index = path.find('/') # split bucket and key
if index < 1:
log.error(f"s3path is invalid: {s3path}")
raise HTTPInternalServerError()
bucket = path[:index]
s3key = path[(index+1):]
log.debug(f"Using s3path bucket: {bucket} and s3key: {s3key}")
else:
s3key = getS3Key(chunk_id)
log.debug(f"getChunk chunkid: {chunk_id} bucket: {bucket}")
if chunk_id in chunk_cache:
chunk_arr = chunk_cache[chunk_id]
else:
if s3path and s3size == 0:
obj_exists = False
else:
obj_exists = await isStorObj(app, s3key, bucket=bucket)
# TBD - potential race condition?
if obj_exists:
pending_s3_read = app["pending_s3_read"]
| |
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 12 13:37:37 2019 by <NAME> - <EMAIL>
Training and prediction script for the EfficientSeg model on the MiniCity
dataset (VIPriors segmentation challenge baseline).
"""
import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torchvision.transforms.functional as TF
from torchvision import transforms
import time
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
import argparse
import warnings
import random
from PIL import Image
from Minicity_train import MiniCity_train
from helpers.model import UNet
from helpers.minicity import MiniCity
from helpers.helpers import AverageMeter, ProgressMeter, iouCalc
from model import enc_config
from model import EfficientSeg
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
warnings.filterwarnings("ignore")
parser = argparse.ArgumentParser(description='VIPriors Segmentation baseline training script')
parser.add_argument('--dataset_path', metavar='path/to/minicity/root', default='./minicity',
type=str, help='path to dataset (ends with /minicity)')
parser.add_argument('--colorjitter_factor', metavar='0.3', default=0.3,
type=float, help='data augmentation: color jitter factor')
parser.add_argument('--scale_factor', metavar='0.3', default=0.3,
type=float, help='data augmentation: random scale factor')
parser.add_argument('--hflip', metavar='[True,False]', default=True,
type=float, help='data augmentation: random horizontal flip')
parser.add_argument('--crop_size', metavar='384 768', default=[384,768], nargs="+",
type=int, help='data augmentation: random crop size, height width, space separated')
parser.add_argument('--train_size', metavar='512 1024', default=[384,768], nargs="+",
type=int, help='image size during training, height width, space separated')
parser.add_argument('--test_size', metavar='512 1024', default=[512,1024], nargs="+",
type=int, help='image size during validation and testing, height width, space separated')
parser.add_argument('--batch_size', metavar='5', default=4, type=int, help='batch size')
parser.add_argument('--test_batch_size', metavar='2', default=2, type=int, help='test batch size')
parser.add_argument('--pin_memory', metavar='[True,False]', default=True,
type=bool, help='pin memory on GPU')
parser.add_argument('--num_workers', metavar='8', default=4, type=int,
help='number of dataloader workers')
parser.add_argument('--lr_init', metavar='1e-2', default=1e-3, type=float,
help='initial learning rate')
parser.add_argument('--lr_min', metavar='1e-5', default=1e-4, type=float,
help='lower bound on learning rate')
parser.add_argument('--lr_patience', metavar='5', default=5, type=int,
help='patience for reduce learning rate on plateau')
parser.add_argument('--lr_momentum', metavar='0.9', default=0.9, type=float,
help='momentum for SGD optimizer')
parser.add_argument('--lr_weight_decay', metavar='1e-4', default=1.e-6, type=float,
help='weight decay for SGD optimizer')
parser.add_argument('--weights', metavar='path/to/checkpoint', default=None,
type=str, help='resume training from checkpoint')
parser.add_argument('--epochs', metavar='200', default=300, type=int,
help='number of training epochs')
parser.add_argument('--seed', metavar='42', default=42, type=int,
help='random seed to use')
parser.add_argument('--dataset_mean', metavar='[0.485, 0.456, 0.406]',
default=[0.2870, 0.3257, 0.2854], type=float,
help='mean for normalization', nargs=3)
parser.add_argument('--dataset_std', metavar='[0.229, 0.224, 0.225]',
default=[0.1879, 0.1908, 0.1880], type=float,
help='std for normalization', nargs=3)
parser.add_argument('--predict', metavar='path/to/weights',
default=None, type=str,
help='provide path to model weights to predict on validation set')
parser.add_argument('--depth_coeff', metavar='1.0', default=1.6, type=float, help='depth coefficient')
parser.add_argument('--width_coeff', metavar='1.0', default=6.0, type=float, help='width coefficient')
"""
def adjust_learning_rate(optimizer, epoch):
lr = 0.5 * args.lr_init * (1 + np.cos(np.pi * (epoch)/ args.epochs ))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
"""
def adjust_learning_rate(optimizer, epoch):
lr = args.lr_init * ( (1 - epoch / args.epochs) ** 0.9 )
for param_group in optimizer.param_groups:
param_group['lr'] = lr
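# Worked example of the polynomial decay above, using the parser defaults
# lr_init=1e-3 and epochs=300:
#   epoch 0   -> 1.0e-3
#   epoch 150 -> 1e-3 * (0.5 ** 0.9)      ~ 5.4e-4
#   epoch 299 -> 1e-3 * ((1/300) ** 0.9)  ~ 5.9e-6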
"""
===========
Main method
===========
"""
def weight_init(m):
if isinstance(m, torch.nn.Linear) or isinstance(m, torch.nn.Conv2d):
        torch.nn.init.kaiming_uniform_(m.weight)
def main():
global args
args = parser.parse_args()
args.train_size = tuple(args.train_size)
args.test_size = tuple(args.test_size)
# Fix seed
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
# Create directory to store run files
if not os.path.isdir('baseline_run'):
os.makedirs('baseline_run/images')
os.makedirs('baseline_run/results_color')
# Load dataset
trainset = MiniCity_train(args.dataset_path, split='train', class_additions=40)
valset = MiniCity(args.dataset_path, split='val', transforms=test_trans)
testset = MiniCity(args.dataset_path, split='val', transforms=test_trans)
dataloaders = {}
dataloaders['val'] = torch.utils.data.DataLoader(valset,
batch_size=args.test_batch_size, shuffle=False,
pin_memory=args.pin_memory, num_workers=args.num_workers)
dataloaders['test'] = torch.utils.data.DataLoader(testset,
batch_size=args.test_batch_size, shuffle=False,
pin_memory=args.pin_memory, num_workers=args.num_workers)
# Load model
model = EfficientSeg(enc_config=enc_config, dec_config=None, num_classes=len(MiniCity.validClasses),
width_coeff=args.width_coeff)
print(sum(p.numel() for p in model.parameters()))
model.apply(weight_init)
# Define loss, optimizer and scheduler
criterion = nn.CrossEntropyLoss(ignore_index=MiniCity.voidClass,weight=torch.from_numpy(np.array([1.0, #road
1.0, #sidewalk
1.0, #building
2.0, #wall
2.0, #fence
2.0, #pole
1.0, #traffic light
1.0, #traffic sign
1.0, #vegetation
1.0, #terrain
1.0, #sky
1.0, #person
2.0, #rider
1.0, #car
3.0, #truck
3.0, #bus
3.0, #train
2.0, #motorcycle
2.0, #bicycle
2.0] #void
)).float().cuda())
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr_init,
# momentum=args.lr_momentum,
weight_decay=args.lr_weight_decay
)
# Initialize metrics
best_miou = 0.0
metrics = {'train_loss' : [],
'train_acc' : [],
'val_acc' : [],
'val_loss' : [],
'miou' : []}
start_epoch = 0
# Push model to GPU
if torch.cuda.is_available():
model = model.cuda()
print('Model pushed to {} GPU(s), type {}.'.format(torch.cuda.device_count(), torch.cuda.get_device_name(0)))
# Resume training from checkpoint
if args.weights:
print('Resuming training from {}.'.format(args.weights))
checkpoint = torch.load(args.weights)
model.load_state_dict(checkpoint['model_state_dict'], strict=True)
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
metrics = checkpoint['metrics']
best_miou = checkpoint['best_miou']
start_epoch = checkpoint['epoch']+1
# No training, only running prediction on test set
if args.predict:
checkpoint = torch.load(args.predict)
model.load_state_dict(checkpoint['model_state_dict'], strict=True)
print('Loaded model weights from {}'.format(args.predict))
# Create results directory
if not os.path.isdir('results'):
os.makedirs('results')
predict(dataloaders['test'], model, MiniCity.mask_colors)
return
# Generate log file
with open('baseline_run/log_epoch.csv', 'a') as epoch_log:
epoch_log.write('epoch, train loss, val loss, train acc, val acc, miou\n')
since = time.time()
for epoch in range(start_epoch,args.epochs):
# Train
print('--- Training ---')
train_loss, train_acc = train_epoch(trainset, model,
criterion, optimizer, None,
epoch, void=MiniCity.voidClass)
metrics['train_loss'].append(train_loss)
metrics['train_acc'].append(train_acc)
print('Epoch {} train loss: {:.4f}, acc: {:.4f}'.format(epoch,train_loss,train_acc))
# Validate
print('--- Validation ---')
val_acc, val_loss, miou = validate_epoch(dataloaders['val'],
model,
criterion, epoch,
MiniCity.classLabels,
MiniCity.validClasses,
void=MiniCity.voidClass,
maskColors=MiniCity.mask_colors)
metrics['val_acc'].append(val_acc)
metrics['val_loss'].append(val_loss)
metrics['miou'].append(miou)
#scheduler.step(val_loss)
# Write logs
with open('baseline_run/log_epoch.csv', 'a') as epoch_log:
epoch_log.write('{}, {:.5f}, {:.5f}, {:.5f}, {:.5f}, {:.5f}\n'.format(
epoch, train_loss, val_loss, train_acc, val_acc, miou))
# Save checkpoint
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'best_miou': best_miou,
'metrics': metrics,
}, 'baseline_run/checkpoint.pth.tar')
# Save best model to file
if miou > best_miou:
print('mIoU improved from {:.4f} to {:.4f}.'.format(best_miou, miou))
# best_miou = miou
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict(),
}, 'baseline_run/best_weights.pth.tar')
if miou > 0.40:
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict(),
}, 'baseline_run/best_weights'+ str(miou)+'.pth.tar')
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
# Plot learning curves
x = np.arange(args.epochs)
fig, ax1 = plt.subplots()
ax1.set_xlabel('epochs')
ax1.set_ylabel('loss')
ln1 = ax1.plot(x, metrics['train_loss'], color='tab:red')
ln2 = ax1.plot(x, metrics['val_loss'], color='tab:red', linestyle='dashed')
ax1.grid()
ax2 = ax1.twinx()
ax2.set_ylabel('accuracy')
ln3 = ax2.plot(x, metrics['train_acc'], color='tab:blue')
ln4 = ax2.plot(x, metrics['val_acc'], color='tab:blue', linestyle='dashed')
ln5 = ax2.plot(x, metrics['miou'], color='tab:green')
lns = ln1+ln2+ln3+ln4+ln5
plt.legend(lns, ['Train loss','Validation loss','Train accuracy','Validation accuracy','mIoU'])
plt.tight_layout()
plt.savefig('baseline_run/learning_curve.pdf', bbox_inches='tight')
# Load best model
checkpoint = torch.load('baseline_run/best_weights.pth.tar')
model.load_state_dict(checkpoint['model_state_dict'], strict=True)
print('Loaded best model weights (epoch {}) from baseline_run/best_weights.pth.tar'.format(checkpoint['epoch']))
# Create results directory
if not os.path.isdir('results'):
os.makedirs('results')
# Run prediction on validation set
# For predicting on test set, simple replace 'val' by 'test'
predict(dataloaders['val'], model, MiniCity.mask_colors)
"""
=================
Routine functions
=================
"""
def train_epoch(trainset, model, criterion, optimizer, lr_scheduler, epoch, void=-1):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
loss_running = AverageMeter('Loss', ':.4e')
acc_running = AverageMeter('Accuracy', ':.3f')
trainset.create_an_epoch()
dataloader = torch.utils.data.DataLoader(trainset,
batch_size=args.batch_size, shuffle=False,
pin_memory=args.pin_memory, num_workers=args.num_workers)
progress = ProgressMeter(
len(dataloader),
[batch_time, data_time, loss_running, acc_running],
prefix="Train, epoch: [{}]".format(epoch))
# input resolution
res = args.crop_size[0] * args.crop_size[1]
if epoch in [200, 400]:
for param_group in optimizer.param_groups:
param_group['lr'] = param_group['lr']/10
#adjust_learning_rate(optimizer, epoch)
# Set model in training mode
model.train()
end = time.time()
with torch.set_grad_enabled(True):
# Iterate over data.
for epoch_step, (inputs, labels) in enumerate(dataloader):
data_time.update(time.time()-end)
inputs = inputs.float().cuda()
labels = labels.long().cuda()
_, _, h, w = inputs.shape
# zero the parameter gradients
optimizer.zero_grad()
# forward pass
outputs = model(inputs)
preds = torch.argmax(outputs, 1)
loss = criterion(outputs, labels)
# backward pass
loss.backward()
optimizer.step()
# Statistics
bs = inputs.size(0) # current batch size
loss = loss.item()
loss_running.update(loss, bs)
corrects = torch.sum(preds == labels.data)
nvoid = int((labels==void).sum())
acc = corrects.double()/(bs*res-nvoid) # correct/(batch_size*resolution-voids)
acc_running.update(acc, bs)
# output training info
progress.display(epoch_step)
# Measure time
batch_time.update(time.time() - end)
end = time.time()
# Reduce learning rate
return loss_running.avg, acc_running.avg
def validate_epoch(dataloader, model, criterion, epoch, classLabels, validClasses, void=-1, maskColors=None):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
loss_running = AverageMeter('Loss', ':.4e')
acc_running = AverageMeter('Accuracy', ':.4e')
iou = iouCalc(classLabels, validClasses, voidClass = void)
progress = ProgressMeter(
len(dataloader),
[batch_time, data_time, loss_running, acc_running],
prefix="Test, epoch: [{}]".format(epoch))
# input resolution
res = args.test_size[0]*args.test_size[1]
# Set model in evaluation mode
    model.eval()  # TODO: add ReduceLROnPlateau scheduler and inspect losses
with torch.no_grad():
end = time.time()
for epoch_step, (inputs, labels, filepath) in enumerate(dataloader):
data_time.update(time.time()-end)
inputs = inputs.float().cuda()
labels = labels.long().cuda()
# forward
outputs = model(inputs)
preds = torch.argmax(outputs, 1)
loss = criterion(outputs, labels)
# Statistics
bs = inputs.size(0) # current batch size
loss = loss.item()
loss_running.update(loss, bs)
corrects = torch.sum(preds == labels.data)
nvoid = int((labels==void).sum())
acc = corrects.double()/(bs*res-nvoid) # correct/(batch_size*resolution-voids)
acc_running.update(acc, bs)
# Calculate IoU scores of current batch
iou.evaluateBatch(preds, labels)
# Save visualizations of first batch
if epoch_step == 0 and maskColors is not None:
for i in range(inputs.size(0)):
filename = os.path.splitext(os.path.basename(filepath[i]))[0]
# Only save inputs and labels once
if epoch == 0:
img = visim(inputs[i,:,:,:])
label = vislbl(labels[i,:,:], maskColors)
if len(img.shape) == 3:
cv2.imwrite('baseline_run/images/{}.png'.format(filename),img[:,:,::-1])
else:
cv2.imwrite('baseline_run/images/{}.png'.format(filename),img)
cv2.imwrite('baseline_run/images/{}_gt.png'.format(filename),label[:,:,::-1])
# Save predictions
pred = vislbl(preds[i,:,:], maskColors)
cv2.imwrite('baseline_run/images/{}_epoch_{}.png'.format(filename,epoch),pred[:,:,::-1])
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# print progress info
progress.display(epoch_step)
miou = iou.outputScores()
print('Accuracy : {:5.3f}'.format(acc_running.avg))
print('---------------------')
return acc_running.avg, loss_running.avg, miou
def predict(dataloader, model, maskColors):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
progress = ProgressMeter(
len(dataloader),
[batch_time, data_time],
prefix='Predict: ')
# Set model in evaluation mode
model.eval()
with torch.no_grad():
end = time.time()
for epoch_step, batch in enumerate(dataloader):
if len(batch) == 2:
inputs, filepath = batch
else:
inputs, _, filepath = batch
data_time.update(time.time()-end)
inputs = inputs.float().cuda()
# forward
outputs = model(inputs)
preds = torch.argmax(outputs, 1)
# Save visualizations of first batch
for i in range(inputs.size(0)):
filename = os.path.splitext(os.path.basename(filepath[i]))[0]
# Save input
img = visim(inputs[i,:,:,:])
img = Image.fromarray(img, 'RGB')
img.save('baseline_run/results_color/{}_input.png'.format(filename))
# Save prediction with color labels
pred = preds[i,:,:].cpu()
pred_color = vislbl(pred, maskColors)
pred_color = Image.fromarray(pred_color.astype('uint8'))
pred_color.save('baseline_run/results_color/{}_prediction.png'.format(filename))
# Save class id prediction (used for evaluation)
pred_id = MiniCity.trainid2id[pred]
pred_id = Image.fromarray(pred_id)
pred_id = pred_id.resize((2048,1024), resample=Image.NEAREST)
pred_id.save('results/{}.png'.format(filename))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# print progress info
progress.display(epoch_step)
"""
====================
Data transformations
====================
"""
def test_trans(image, mask=None):
# Resize, 1 for Image.LANCZOS
image = TF.resize(image, args.test_size, interpolation=1)
# From PIL to Tensor
image = TF.to_tensor(image)
# Normalize
image = TF.normalize(image, args.dataset_mean, args.dataset_std)
if mask:
# Resize, 0 for Image.NEAREST
mask = TF.resize(mask, args.test_size, interpolation=0)
mask = np.array(mask, np.uint8) # PIL Image to numpy array
mask = torch.from_numpy(mask) # Numpy array to tensor
return image, mask
else:
return image
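# Hedged usage sketch (not in the original excerpt): these transform callables
# are typically handed to the dataset so each (image, mask) pair is processed
# on the fly. `MiniCity`, `args.dataset_path` and the `transforms=` keyword are
# assumptions about code outside this excerpt; only the call signatures of
# train_trans/test_trans come from this file.
# trainset = MiniCity(args.dataset_path, split='train', transforms=train_trans)
# valset = MiniCity(args.dataset_path, split='val', transforms=test_trans)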
def train_trans(image, mask):
# Generate random parameters for augmentation
bf = np.random.uniform(1-args.colorjitter_factor,1+args.colorjitter_factor)
cf = np.random.uniform(1-args.colorjitter_factor,1+args.colorjitter_factor)
sf = np.random.uniform(1-args.colorjitter_factor,1+args.colorjitter_factor)
hf = np.random.uniform(-args.colorjitter_factor,+args.colorjitter_factor)
scale_factor = np.random.uniform(1-args.scale_factor,1+args.scale_factor)
pflip = np.random.random() > 0.5 # 50% chance; np.random.randint(0,1) always returns 0, so the flip never triggered
# Resize, 1 for Image.LANCZOS
image = TF.resize(image, args.train_size, interpolation=1)
# Resize, 0 for Image.NEAREST
mask = TF.resize(mask, args.train_size, interpolation=0)
# Random scaling
image = TF.affine(image, 0, [0,0], scale_factor, [0,0])
mask = TF.affine(mask, 0, [0,0], scale_factor, [0,0])
# Random cropping
if args.train_size != args.crop_size:
# From PIL to Tensor
image = TF.to_tensor(image)
mask = TF.to_tensor(mask)
h, w = args.train_size
th, tw = args.crop_size
i = np.random.randint(0, h - th)
j = np.random.randint(0, w - tw)
image = image[:,i:i+th,j:j+tw]
mask = mask[:,i:i+th,j:j+tw]
image = TF.to_pil_image(image)
mask = TF.to_pil_image(mask[0,:,:])
# H-flip
if pflip and args.hflip:
image = TF.hflip(image)
mask = TF.hflip(mask)
# Color jitter
image = TF.adjust_brightness(image, bf)
image = TF.adjust_contrast(image, cf)
image = TF.adjust_saturation(image, sf)
image = TF.adjust_hue(image, hf)
# From PIL to Tensor
image = TF.to_tensor(image)
# Normalize
image = TF.normalize(image, args.dataset_mean, args.dataset_std)
# Convert ids to train_ids
mask = np.array(mask, | |
0, 0, 0, 0],
[1503, 22.309103, 0, 9999, -9999, 1.0, 100, 1, 45.972187, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1504, 78.164168, 0, 9999, -9999, 1.0, 100, 1, 188.822836, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1505, 2.662713, 0, 9999, -9999, 1.0, 100, 1, 26.765913, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1506, 2.109963, 0, 9999, -9999, 1.0, 100, 1, 56.406717, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1507, 0.357627, 0, 9999, -9999, 1.0, 100, 1, 15.438042, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1508, 0.00852, 0, 9999, -9999, 1.0, 100, 1, 0.065259, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1510, 55.850177, 0, 9999, -9999, 1.0, 100, 1, 107.008141, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1511, 40.830885, 0, 9999, -9999, 1.0, 100, 1, 155.22192, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1512, 23.774776, 0, 9999, -9999, 1.0, 100, 1, 64.130052, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1513, 3.183401, 0, 9999, -9999, 1.0, 100, 1, 23.051786, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1514, 5.1e-05, 0, 9999, -9999, 1.0, 100, 1, 0.027711, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1516, 0.010219, 0, 9999, -9999, 1.0, 100, 1, 0.02881, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1517, 0.869419, 0, 9999, -9999, 1.0, 100, 1, 1.286804, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1518, 0.102142, 0, 9999, -9999, 1.0, 100, 1, 0.670542, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1519, 0.007089, 0, 9999, -9999, 1.0, 100, 1, 0.04654, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1520, 9.128906, 0, 9999, -9999, 1.0, 100, 1, 79.674256, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1521, 0.656346, 0, 9999, -9999, 1.0, 100, 1, 31.179116, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1522, 10.12943, 0, 9999, -9999, 1.0, 100, 1, 40.212666, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1523, 0.674948, 0, 9999, -9999, 1.0, 100, 1, 20.304521, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1524, 0.447167, 0, 9999, -9999, 1.0, 100, 1, 26.159251, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1525, 22.128093, 0, 9999, -9999, 1.0, 100, 1, 68.425403, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1526, 14.750253, 0, 9999, -9999, 1.0, 100, 1, 44.478558, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1527, 1.118768, 0, 9999, -9999, 1.0, 100, 1, 103.998682, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1528, 25.796628, 0, 9999, -9999, 1.0, 100, 1, 41.386726, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1529, 1.960262, 0, 9999, -9999, 1.0, 100, 1, 84.378012, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1530, 1.547219, 0, 9999, -9999, 1.0, 100, 1, 79.055155, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1531, 111.015399, 0, 9999, -9999, 1.0, 100, 1, 183.821409, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1532, 13.359955, 0, 9999, -9999, 1.0, 100, 1, 37.379033, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1534, 13.514949, 0, 9999, -9999, 1.0, 100, 1, 29.516607, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1535, 3.414378, 0, 9999, -9999, 1.0, 100, 1, 8.931779, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1536, 15.519597, 0, 9999, -9999, 1.0, 100, 1, 39.26145, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1537, 10.928206, 0, 9999, -9999, 1.0, 100, 1, 99.740166, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1538, 0.612969, 0, 9999, -9999, 1.0, 100, 1, 130.774402, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1539, 3.426117, 0, 9999, -9999, 1.0, 100, 1, 201.766963, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1540, 1.926911, 0, 9999, -9999, 1.0, 100, 1, 4.160189, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1541, 1.615996, 0, 9999, -9999, 1.0, 100, 1, 3.429917, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1543, 0.139795, 0, 9999, -9999, 1.0, 100, 1, 14.788669, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1544, 1.28417, 0, 9999, -9999, 1.0, 100, 1, 121.437126, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1545, 1.432746, 0, 9999, -9999, 1.0, 100, 1, 185.545128, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1546, 3.690747, 0, 9999, -9999, 1.0, 100, 1, 255.44343, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1547, 2.279037, 0, 9999, -9999, 1.0, 100, 1, 362.597919, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1548, 0.555758, 0, 9999, -9999, 1.0, 100, 1, 21.273779, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1549, 1.627576, 0, 9999, -9999, 1.0, 100, 1, 77.017486, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1550, 0.091885, 0, 9999, -9999, 1.0, 100, 1, 5.214715, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1551, 0.217717, 0, 9999, -9999, 1.0, 100, 1, 9.576491, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1552, 0.655794, 0, 9999, -9999, 1.0, 100, 1, 54.035471, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1553, 0.723558, 0, 9999, -9999, 1.0, 100, 1, 92.480282, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1554, 1.00565, 0, 9999, -9999, 1.0, 100, 1, 155.333413, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1555, 1.598065, 0, 9999, -9999, 1.0, 100, 1, 103.865774, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1556, 1.870929, 0, 9999, -9999, 1.0, 100, 1, 40.376346, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1557, 1.04975, 0, 9999, -9999, 1.0, 100, 1, 25.990242, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1558, 0.849571, 0, 9999, -9999, 1.0, 100, 1, 24.622373, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1559, 4.121265, 0, 9999, -9999, 1.0, 100, 1, 112.609207, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1560, 33.269215, 0, 9999, -9999, 1.0, 100, 1, 86.395942, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1561, 8.193726, 0, 9999, -9999, 1.0, 100, 1, 19.127379, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1562, 23.77193, 0, 9999, -9999, 1.0, 100, 1, 61.888351, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1563, 2.793766, 0, 9999, -9999, 1.0, 100, 1, 106.233907, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1564, 36.059515, 0, 9999, -9999, 1.0, 100, 1, 58.27282, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1565, 6.882348, 0, 9999, -9999, 1.0, 100, 1, 12.83938, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1566, 211.840386, 0, 9999, -9999, 1.0, 100, 1, 358.676351, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1567, 11.397636, 0, 9999, -9999, 1.0, 100, 1, 29.531771, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1568, 0.89042, 0, 9999, -9999, 1.0, 100, 1, 89.300597, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1569, 10.185536, 0, 9999, -9999, 1.0, 100, 1, 328.718571, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1570, 52.808462, 0, 9999, -9999, 1.0, 100, 1, 243.241909, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1571, 5.611297, 0, 9999, -9999, 1.0, 100, 1, 203.443403, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1572, 4.641055, 0, 9999, -9999, 1.0, 100, 1, 232.127956, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1573, 1.703575, 0, 9999, -9999, 1.0, 100, 1, 80.403772, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1574, 1.520859, 0, 9999, -9999, 1.0, 100, 1, 144.715972, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1575, 57.895109, 0, 9999, -9999, 1.0, 100, 1, 153.606376, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1576, 13.886165, 0, 9999, -9999, 1.0, 100, 1, 34.262017, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1577, 63.707824, 0, 9999, -9999, 1.0, 100, 1, 217.054488, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1578, 7.567151, 0, 9999, -9999, 1.0, 100, 1, 16.348222, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1579, 13.765382, 0, 9999, -9999, 1.0, 100, 1, 35.164333, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1580, 8.801431, 0, 9999, -9999, 1.0, 100, 1, 21.892492, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1581, 58.647584, 0, 9999, -9999, 1.0, 100, 1, 156.277964, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1582, 2.598832, 0, 9999, -9999, 1.0, 100, 1, 8.151092, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1583, 0.482686, 0, 9999, -9999, 1.0, 100, 1, 1.791968, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1584, 27.657158, 0, 9999, -9999, 1.0, 100, 1, 81.24993, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1585, 1.419616, 0, 9999, -9999, 1.0, 100, 1, 3.685182, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1586, 25.888722, 0, 9999, -9999, 1.0, 100, 1, 61.31549, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1587, 81.028923, 0, 9999, -9999, 1.0, 100, 1, 191.635296, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1588, 40.36151, 0, 9999, -9999, 1.0, 100, 1, 59.424343, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1589, 0.896612, 0, 9999, -9999, 1.0, 100, 1, 48.538268, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1590, 3.96406, 0, 9999, -9999, 1.0, 100, 1, 119.077525, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1591, 10.729508, 0, 9999, -9999, 1.0, 100, 1, 142.8447, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1592, 2.42519, 0, 9999, -9999, 1.0, 100, 1, 9.842361, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1593, 1.858626, 0, 9999, -9999, 1.0, 100, 1, 7.183183, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1594, 0.448825, 0, 9999, -9999, 1.0, 100, 1, 9.56089, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1595, 1.626777, 0, 9999, -9999, 1.0, 100, 1, 54.79001, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1596, 2.415129, 0, 9999, -9999, 1.0, 100, 1, 138.730049, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1597, 0.23924, 0, 9999, -9999, 1.0, 100, 1, 2.858987, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1598, 0.966807, 0, 9999, -9999, 1.0, 100, 1, 4.795494, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1599, 1.735807, 0, 9999, -9999, 1.0, 100, 1, 86.703571, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1600, 1.275921, 0, 9999, -9999, 1.0, 100, 1, 25.356501, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1601, 0.457121, 0, 9999, -9999, 1.0, 100, 1, 7.643653, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1602, 0.834043, 0, 9999, -9999, 1.0, 100, 1, 45.658169, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1603, 2.192456, 0, 9999, -9999, 1.0, 100, 1, 26.209248, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1604, 1.662929, 0, 9999, -9999, 1.0, 100, 1, 16.363032, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1605, 5.468088, 0, 9999, -9999, 1.0, 100, 1, 43.477178, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1606, 1.350159, 0, 9999, -9999, 1.0, 100, 1, 42.024907, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1607, 0.901549, 0, 9999, -9999, 1.0, 100, 1, 19.395236, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1608, 0.58357, 0, 9999, -9999, 1.0, 100, 1, 19.491249, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1609, 0.168831, 0, 9999, -9999, 1.0, 100, 1, 6.052272, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1610, 1.999426, 0, 9999, -9999, 1.0, 100, 1, 18.571656, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1611, 0.158266, 0, 9999, -9999, 1.0, 100, 1, 6.420554, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1612, 0.300828, 0, 9999, -9999, 1.0, 100, 1, 10.811203, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1613, 1.139692, 0, 9999, -9999, 1.0, 100, 1, 27.976217, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1614, 1.200623, 0, 9999, -9999, 1.0, 100, 1, 28.183827, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1615, 75.095413, 0, 9999, -9999, 1.0, 100, 1, 193.234776, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1616, 2.078735, 0, 9999, -9999, 1.0, 100, 1, 6.865586, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1617, 3.173248, 0, 9999, -9999, 1.0, 100, 1, 10.63107, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1618, 0.345718, 0, 9999, -9999, 1.0, 100, 1, 4.920368, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1619, 1.915216, 0, 9999, -9999, 1.0, 100, 1, 6.689637, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1620, 0.09673, 0, 9999, -9999, 1.0, 100, 1, 1.912024, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1621, 0.224736, 0, 9999, -9999, 1.0, 100, 1, 8.056388, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1622, 0.159123, 0, 9999, -9999, 1.0, 100, 1, 5.693597, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1623, 0.716592, 0, 9999, -9999, 1.0, 100, 1, 20.717111, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1624, 1.43755, 0, 9999, -9999, 1.0, 100, 1, 8.938454, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1625, 1.280389, 0, 9999, -9999, 1.0, 100, 1, 65.182465, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1626, 0.282991, 0, 9999, -9999, 1.0, 100, 1, 11.878862, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1627, 0.404063, 0, 9999, -9999, 1.0, 100, 1, 10.196496, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1628, 1.248624, 0, 9999, -9999, 1.0, 100, 1, 66.613993, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1629, 38.534428, 0, 9999, -9999, 1.0, 100, 1, 121.671047, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1630, 0.310685, 0, 9999, -9999, 1.0, 100, 1, 12.452584, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1631, 0.946487, 0, 9999, -9999, 1.0, 100, 1, 32.486249, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1632, 0.983205, 0, 9999, -9999, 1.0, 100, 1, 25.874893, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1634, 0.268997, 0, 9999, -9999, 1.0, 100, 1, 9.643044, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1635, 0.791147, 0, 9999, -9999, 1.0, 100, 1, 19.166135, 0.0, 0, 0, 0, 0, 0, 0, 0, | |
<gh_stars>1-10
# Copyright (c) 2021 <NAME>
#
# Rehabilitation project for lower extremity stroke patients.
#
# The project consists of several scripts running concurrently in a multi-threaded fashion.
# All programs run on a Raspberry Pi 4.
# mainProg.py: Rehab-bot system execution.
# livePlotter.py: plotting important states (Leg pushing-pulling force, position of slider, knee angle, etc.)
# saveDatabase.py: sending the data to a cloud to be accessed by essential parties (e.g. doctors)
#
# MIT license, all rights reserved.
#!/usr/bin/env python3
import subProgramFunctions as spf
from subProgramFunctions import admittance_type
import RPi.GPIO as GPIO # import GPIO
from hx711 import HX711 # import the class HX711
#from gpiozero import DistanceSensor
import serial
import time
import numpy as np
import pandas as pd
# =================================================================
# ==================1. MAIN SYSTEM FUNCTIONS=======================
# =================================================================
# 1. selection of rehabilitation mode
def run_rehab_program(activationCode, force_sensor):
if activationCode[0] == '1' or activationCode[0] == '0':
passive_mode(activationCode)
elif activationCode[0] == '2':
semi_active_mode(activationCode, force_sensor)
elif activationCode[0] == '3':
full_active_mode(activationCode, force_sensor)
def passive_mode(activationCode):
return 0
#----------------------------
# 2. For SEMI-ACTIVE program
#----------------------------
# b. main semi-active mode/semi-assistive sub-program
def semi_active_mode(activationCode, force_sensor):
'''
Semi-active strength exercise.
Constructing Admittance haptic system difference equation for
ASSISTED resistance training.
Args:
activationCode [str]: activation string to determine which damper_spring
system to be used in the admittance system.
'''
stopCondition = False
assist_level = assistive_constants(activationCode[1]) # assign assistive level of machine
damper_spring = admittance1_constants(activationCode[2]) # assign which damper-spring system
sysModel = admittance_type(damper_spring, freqSample, force_sensor) # initialize dynamic system model
start_loop = time.time()
print("ACTIVATION CODE: ", activationCode)
print("training mode: Semi-assistive")
print("Assistive constant: ", assistive_constants(activationCode[1]))
print("Spring damper constant: ", admittance1_constants(activationCode[2]))
print("System coef denumerator: ", sysModel.a_i)
print("System coef numerator: ", sysModel.b_i)
print(" ")
print("waiting command")
print(" ")
sysModel.set_force_window(weightMeanWindow)
time_count = 0
while not stopCondition:
start_loop = time.time()
# this time library attempts to make the system sampling frequency
# consistent at about "freqSample"
#time.sleep(abs(sample_period - ((time.time()-start_loop)%sample_period)))
pos_setpoint = sysModel.haptic_rendering_1()
actual_pos = spf.command_actuator(pos_setpoint, ser_act) # pass the actuator serial port, matching the other call sites (assumed fix)
sysModel.haptic_rendering_2(actual_pos)
command = spf.serial_routine(ser_command)
capture_time = time.time()
if (time_count>under_sample_time):
print("Input Force: ", round(sysModel.force_in0,2),
" N. Target position: ", round(sysModel.pos_now,2), " mm.")
time_count = 0
if command == "-s":
stopCondition = True
time_count = time_count + sample_period
time.sleep(abs(sample_period - ((time.time()-start_loop)%sample_period)))
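# --- Hedged sketch: admittance difference equation ----------------------------
# admittance_type itself lives in subProgramFunctions and is not shown here.
# The sketch below is only an illustration (an assumption, not the project's
# actual implementation) of what "constructing the difference equation" can
# look like for the damper-spring pairs used above, i.e. X(s)/F(s) = 1/(b*s + k)
# discretized with the bilinear (Tustin) rule at sample rate `freq`.
def example_admittance_step(force_now, force_prev, pos_prev, damper_spring, freq):
    b, k = damper_spring # [N.s/mm, N/mm]
    T = 1.0 / freq # sample period [s]
    a0 = 2.0 * b + k * T # denominator coefficients after Tustin
    a1 = k * T - 2.0 * b
    b0 = b1 = T # numerator coefficients
    # Difference equation: a0*x[n] + a1*x[n-1] = b0*f[n] + b1*f[n-1]
    return (b0 * force_now + b1 * force_prev - a1 * pos_prev) / a0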
def assistive_constants(assistiveConstCode):
'''
Assistive value constants
values are still placeholders
'''
assist_const = {
'0': 50, # [ ] unit not decided
'1': 200,
'2': 100
}
return assist_const.get(assistiveConstCode)
def admittance1_constants(admittanceCode):
'''
Spring, mass, damper constants selection (Three options)
'''
damper_spring_pair = {
'0': den_semi_1,
'1': den_semi_2,
'2': den_semi_3,
}
return damper_spring_pair.get(admittanceCode)
#----------------------------
# 3. For FULL-ACTIVE program
#----------------------------
# c. main full-active mode sub-program
def full_active_mode(activationCode, force_sensor):
'''
Sub-program 3: Patient full-active treatment.
The patient's strength has increased to a level where they can start
actively training their muscle strength. Full-active mode acts like an
extremely light "work-out in a gym" for the stroke patient.
The resistance of this rehabilitation training strategy uses
haptic rendering of an admittance environment (inputs force,
outputs position). Other resistance strategies could be used such
as a friction model.
ADMITTANCE-type device algorithm (mass-spring-damper)
1. read force of the user
2. calculate the resulting position
3. send corresponding position to low level controller
4. CHANGE virtual environment STATE
For analogy with IMPEDANCE-type algorithm (see stanford hapkit)
1. read position of user (compute position in counts --> to meters)
2. calculate the resulting force
3. send corresponding force to motor
4. change virtual environment of state
'''
if activationCode[1] == "0":
isotonic_training(activationCode, force_sensor)
elif activationCode[1] =="1":
isometric_training(activationCode, force_sensor)
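# --- Hedged sketch: impedance-type counterpart --------------------------------
# For contrast with the admittance loop described above (force in, position
# out), an impedance-type device reads position/velocity and outputs force,
# e.g. F = k*x + b*v for a virtual spring-damper. Purely illustrative; the
# rehab-bot code in this file only implements the admittance side.
def example_impedance_step(pos, vel, spring_k, damper_b):
    # Render a virtual spring-damper as a force command for the motor.
    return spring_k * pos + damper_b * vel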
def isotonic_training(activationCode, force_sensor):
'''
Full-active strength exercise: isotonic training.
Constructing Admittance haptic system difference equation for
resistance training.
Args:
activationCode [str]: activation string to determine which damper_spring
system to be used in the admittance system.
'''
stopCondition = False
damper_spring = admittance2_constants(activationCode[2]) # assign which damper-spring system
sysModel = admittance_type(damper_spring, freqSample, force_sensor) # initialize dynamic system model
# sysModel.set_initial_position(round(distance_sensor.distance*1000, 0))
print("ACTIVATION CODE: ", activationCode)
print("training mode: full-active")
print("training mode: isotonic")
print("Spring damper constant: ", admittance2_constants(activationCode[2]))
print("System coef denumerator: ", sysModel.a_i)
print("System coef numerator: ", sysModel.b_i)
print(" ")
print("waiting command")
print(" ")
sysModel.set_force_window(weightMeanWindow)
start_code = time.time()
time_count = 0.0
start_us = time.time()
print("Force position time")
while not stopCondition:
start_loop = time.time()
# this time library attempts to make the system sampling frequency
# consistent at about "freqSample"
#time.sleep(abs(sample_period - ((time.time()-start_loop)%sample_period)))
pos_setpoint = sysModel.haptic_rendering_1()
actual_pos = spf.command_actuator(pos_setpoint, ser_act)
sysModel.haptic_rendering_2(actual_pos)
command = spf.serial_routine(ser_command)
capture_time = time.time()
if ((capture_time-start_us)>under_sample_time):
print(round(sysModel.force_in0,2),
" ", round(sysModel.pos_now,2),
" ", round(capture_time-start_code,2))
'''("Input Force: ", round(sysModel.force_in0,2),
" N. Target position: ", round(sysModel.pos_now,2),
" mm. Time: ", capture_time-start_code,
" s. One loop execution: ", round(time.time()-start_loop, 5),
" s. Sample time: ", sample_period, " s.") #time.time()-start_loop, 5'''
start_us = time.time()
if command == "-s":
stopCondition = True
trainData = {'force': sysModel.force_data, 'position': sysModel.position_data}
rehab_data = pd.DataFrame(trainData)
save_path, csvname = spf.csv_name_address(activationCode)
rehab_data.to_csv(save_path+csvname)
time.sleep(abs(sample_period - (time.time()-start_loop)))
def isometric_training(activationCode, force_sensor): # Position Control
stopCondition = False
damper_spring = admittance2_constants(activationCode[2]) # assign which damper-spring system
sysModel = admittance_type(damper_spring, freqSample, force_sensor) # initialize dynamic system model
#sysModel.set_initial_position(round(distance_sensor.distance*1000, 0))
#sysModel.set_force_window(weightMeanWindow)
print("ACTIVATION CODE: ", activationCode)
print("training mode: full-active")
print("training mode: isometric")
print("Spring damper constant: ", admittance2_constants(activationCode[2]))
print("System coef denumerator: ", sysModel.a_i)
print("System coef numerator: ", sysModel.b_i)
print(" ")
print("waiting command")
print(" ")
sysModel.set_force_window(weightMeanWindow)
time_count = 0
start_code = time.time()
while not stopCondition:
start_loop = time.time()
# this time library attempts to make the system sampling frequency
# consistent at about "freqSample"
#time.sleep(abs(sample_period - ((time.time()-start_loop)%sample_period)))
pos_setpoint = sysModel.haptic_rendering_1()
actual_pos = spf.command_actuator(pos_setpoint, ser_act)
sysModel.haptic_rendering_2(actual_pos)
command = spf.serial_routine(ser_command)
capture_time = time.time()
if (time_count > under_sample_time): # accumulate like semi_active_mode; the old check against start_loop never triggered
print("Input Force: ", round(sysModel.force_in0,2), " N. Target position: ", round(sysModel.pos_now,2), " mm.")
time_count = 0
if command == "-s":
stopCondition = True
time_count = time_count + sample_period
#print(sample_period - ((time.time()-start_loop)%sample_period))
print(time.time()-start_loop)
time.sleep(abs(sample_period - ((time.time()-start_loop)%sample_period)))
def admittance2_constants(admittanceCode):
# Spring, mass, damper constants selection (Three options)
damper_spring_pair = {
'0': den_full_1,
'1': den_full_2,
'2': den_full_3,
}
return damper_spring_pair.get(admittanceCode)
# =================================================================
# ====================4. RUNNING MAIN PROGRAM =====================
# =================================================================
# Running main program
if __name__=="__main__":
try:
# =================================================================
# ==============0. CONFIGURING GLOBAL CONSTANTS====================
# =================================================================
# 1. pin assignments GPIO
# - load sensor
#doutPin = 20
#pdSCKPin = 21
weightMeanWindow = 1
pre_SetRatio = 220693/1000#231052/1000 # based on raw data--> 231052, 222489 ~= 1000 gram
# - distance sensor
trigger = 18
echo = 24
# - potentiometer.
potAngleAnalogIn = 17 # check again
# 2. Configuring sensors
# a. Force sensor
GPIO.setmode(GPIO.BCM)
force_sensor = HX711(dout_pin=20, pd_sck_pin=21)
force_sensor.set_scale_ratio(pre_SetRatio) # set ratio for current channel
# b. Distance sensor
#distance_sensor = DistanceSensor(trigger, echo)
distance_sensor = 1 # let's just leave this out for the mean time
# c. knee angle sensor
# 3. Other global variables
deviceLocation = '/dev/ttyACM0' # port in raspi
deviceLocation2 = '/dev/ttyACM1'
freqSample = 10.0 #15.0#200.0 # [Hz] system operating frequency, planned to eventually run at 500 Hz
sample_period = 1/freqSample
ser_command = serial.Serial(deviceLocation, 9600, timeout=0.5) # initialize serial
ser_act = serial.Serial(deviceLocation2, 115200, timeout=0.5)
under_sample_time = 2.0
'''ser_command.flushInput()
ser_command.flush()
ser_command.flushOutput()
'''
#----------------------------
# A. SEMI-ASSISTIVE ADMITTANCE SYSTEM OPTIONS
#----------------------------
# CAUTION: Transfer function is X[mm]/F[N/mm]!!
# Option 1, 2, 3
den_semi_1 = [1, 0.5] # [N.s/mm, N/mm]
den_semi_2 = [1, 0.2] # [N.s/mm, N/mm]
den_semi_3 = [10, 0.7] # [N.s/mm, N/mm]
#----------------------------
# B. FULL-ACTIVE ADMITTANCE SYSTEM OPTIONS
#----------------------------
# Option 1, 2, 3
den_full_1 = [1, 5] # [N.s/mm, N/mm]
den_full_2 = [0.2, 0.05] # [N.s/mm, N/mm]
den_full_3 = [0.2, 0.03] # [N.s/mm, N/mm]
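# Hedged worked example (assuming den = [damper b, spring k] with X/F = 1/(b*s + k)):
# with den_full_1 = [1, 5] (N.s/mm, N/mm) a constant 10 N push settles at
# x = F/k = 10/5 = 2 mm, while den_semi_1 = [1, 0.5] settles at 10/0.5 = 20 mm,
# i.e. the semi-assistive pairs render a much softer virtual spring.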
# trial variables
force_data_so_far = []
position_data_so_far = []
# main loop of program
# main_prog()
# ====== STEP 1. INITIATING SYSTEM DIAGNOSTICS =======
# run once
print("====main program====\n ==== Rehab-Bot ====\n")
print("Step 1. Initiating system diagnostics")
spf.initial_diagnostics(force_sensor, distance_sensor, weightMeanWindow)
while | |
will be created as required during the repository initialization.
:param url: git-clone compatible URL, see git-clone reference for more information
If None, the repository is assumed to exist, and the url of the first
remote is taken instead. This is useful if you want to make an existing
repository a submodule of another one.
:param branch: name of branch at which the submodule should (later) be checked out.
The given branch must exist in the remote repository, and will be checked
out locally as a tracking branch.
It will only be written into the configuration if it is not None; otherwise
the checked-out branch will be the one the remote HEAD pointed to.
The result you get in that situation is somewhat fuzzy, and it is recommended
to specify at least 'master' here.
Examples are 'master' or 'feature/new'
:param no_checkout: if True, and if the repository has to be cloned manually,
no checkout will be performed
:return: The newly created submodule instance
:note: works atomically, such that no change will be done if the repository
update fails for instance"""
if repo.bare:
raise InvalidGitRepositoryError("Cannot add submodules to bare repositories")
# END handle bare repos
path = cls._to_relative_path(repo, path)
# assure we never put backslashes into the url, as some operating systems
# like it ...
if url is not None:
url = to_native_path_linux(url)
# END assure url correctness
# INSTANTIATE INTERMEDIATE SM
sm = cls(repo, cls.NULL_BIN_SHA, cls.k_default_mode, path, name, url='invalid-temporary')
if sm.exists():
# reretrieve submodule from tree
try:
return repo.head.commit.tree[path]
except KeyError:
# could only be in index
index = repo.index
entry = index.entries[index.entry_key(path, 0)]
sm.binsha = entry.binsha
return sm
# END handle exceptions
# END handle existing
# fake-repo - we only need the functionality on the branch instance
br = git.Head(repo, git.Head.to_full_path(str(branch) or cls.k_head_default))
has_module = sm.module_exists()
branch_is_default = branch is None
if has_module and url is not None:
if url not in [r.url for r in sm.module().remotes]:
raise ValueError(
"Specified URL '%s' does not match any remote url of the repository at '%s'" % (url, sm.abspath))
# END check url
# END verify urls match
mrepo = None
if url is None:
if not has_module:
raise ValueError("A URL was not given and existing repository did not exsit at %s" % path)
# END check url
mrepo = sm.module()
urls = [r.url for r in mrepo.remotes]
if not urls:
raise ValueError("Didn't find any remote url in repository at %s" % sm.abspath)
# END verify we have url
url = urls[0]
else:
# clone new repo
kwargs = {'n': no_checkout}
if not branch_is_default:
kwargs['b'] = br.name
# END setup checkout-branch
# _clone_repo(cls, repo, url, path, name, **kwargs):
mrepo = cls._clone_repo(repo, url, path, name, **kwargs)
# END verify url
# It's important to add the URL to the parent config, to let `git submodule` know.
# otherwise there is a '-' character in front of the submodule listing
# a38efa84daef914e4de58d1905a500d8d14aaf45 mymodule (v0.9.0-1-ga38efa8)
# -a38efa84daef914e4de58d1905a500d8d14aaf45 submodules/intermediate/one
writer = sm.repo.config_writer()
writer.set_value(sm_section(name), 'url', url)
writer.release()
# update configuration and index
index = sm.repo.index
writer = sm.config_writer(index=index, write=False)
writer.set_value('url', url)
writer.set_value('path', path)
sm._url = url
if not branch_is_default:
# store full path
writer.set_value(cls.k_head_option, br.path)
sm._branch_path = br.path
# END handle path
writer.release()
del(writer)
# we deliberately assume that our head matches our index!
sm.binsha = mrepo.head.commit.binsha
index.add([sm], write=True)
return sm
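# Hedged usage sketch (not from the original source): adding a submodule and
# committing the resulting .gitmodules/index change might look like this; the
# repository path and URL below are placeholders.
# repo = git.Repo('/path/to/superproject')
# sm = Submodule.add(repo, name='lib', path='lib', url='https://example.com/lib.git', branch='master')
# repo.index.commit("add submodule 'lib'")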
def update(self, recursive=False, init=True, to_latest_revision=False, progress=None, dry_run=False,
force=False, keep_going=False):
"""Update the repository of this submodule to point to the checkout
we point at with the binsha of this instance.
:param recursive: if True, we will operate recursively and update child-
modules as well.
:param init: if True, the module repository will be cloned into place if necessary
:param to_latest_revision: if True, the submodule's sha will be ignored during checkout.
Instead, the remote will be fetched, and the local tracking branch updated.
This only works if we have a local tracking branch, which is the case
if the remote repository had a master branch, or of the 'branch' option
was specified for this submodule and the branch existed remotely
:param progress: UpdateProgress instance or None of no progress should be shown
:param dry_run: if True, the operation will only be simulated, but not performed.
All performed operations are read-only
:param force:
If True, we may reset heads even if the repository in question is dirty. Additionally we will be allowed
to set a tracking branch which is ahead of its remote branch back into the past or the location of the
remote branch. This will essentially 'forget' commits.
If False, local tracking branches that are in the future of their respective remote branches will simply
not be moved.
:param keep_going: if True, we will ignore but log all errors, and keep going recursively.
Unless dry_run is set as well, keep_going could cause subsequent/inherited errors you wouldn't see
otherwise.
In conjunction with dry_run, it can be useful to anticipate all errors when updating submodules
:note: does nothing in bare repositories
:note: method is definitely not atomic if recursive is True
:return: self"""
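# Hedged usage sketch (illustrative only): update every registered submodule,
# cloning missing ones and moving tracking branches to the newest remote state.
# for sm in repo.submodules:
#     sm.update(recursive=True, init=True, to_latest_revision=True)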
if self.repo.bare:
return self
# END pass in bare mode
if progress is None:
progress = UpdateProgress()
# END handle progress
prefix = ''
if dry_run:
prefix = "DRY-RUN: "
# END handle prefix
# to keep things plausible in dry-run mode
if dry_run:
mrepo = None
# END init mrepo
try:
# ASSURE REPO IS PRESENT AND UPTODATE
#####################################
try:
mrepo = self.module()
rmts = mrepo.remotes
len_rmts = len(rmts)
for i, remote in enumerate(rmts):
op = FETCH
if i == 0:
op |= BEGIN
# END handle start
progress.update(op, i, len_rmts, prefix + "Fetching remote %s of submodule %r"
% (remote, self.name))
#===============================
if not dry_run:
remote.fetch(progress=progress)
# END handle dry-run
#===============================
if i == len_rmts - 1:
op |= END
# END handle end
progress.update(op, i, len_rmts, prefix + "Done fetching remote of submodule %r" % self.name)
# END fetch new data
except InvalidGitRepositoryError:
if not init:
return self
# END early abort if init is not allowed
# there is no git-repository yet - but delete empty paths
checkout_module_abspath = self.abspath
if not dry_run and os.path.isdir(checkout_module_abspath):
try:
os.rmdir(checkout_module_abspath)
except OSError:
raise OSError("Module directory at %r does already exist and is non-empty"
% checkout_module_abspath)
# END handle OSError
# END handle directory removal
# don't check it out at first - nonetheless it will create a local
# branch according to the remote-HEAD if possible
progress.update(BEGIN | CLONE, 0, 1, prefix + "Cloning url '%s' to '%s' in submodule %r" %
(self.url, checkout_module_abspath, self.name))
if not dry_run:
mrepo = self._clone_repo(self.repo, self.url, self.path, self.name, n=True)
# END handle dry-run
progress.update(END | CLONE, 0, 1, prefix + "Done cloning to %s" % checkout_module_abspath)
if not dry_run:
# see whether we have a valid branch to checkout
try:
# find a remote which has our branch - we try to be flexible
remote_branch = find_first_remote_branch(mrepo.remotes, self.branch_name)
local_branch = mkhead(mrepo, self.branch_path)
# have a valid branch, but no checkout - make sure we can figure
# that out by marking the commit with a null_sha
local_branch.set_object(util.Object(mrepo, self.NULL_BIN_SHA))
# END initial checkout + branch creation
# make sure HEAD is not detached
mrepo.head.set_reference(local_branch, logmsg="submodule: attaching head to %s" % local_branch)
mrepo.head.ref.set_tracking_branch(remote_branch)
except IndexError:
log.warn("Failed to checkout tracking branch %s", self.branch_path)
# END handle tracking branch
# NOTE: Have to write the repo config file as well, otherwise
# the default implementation will be offended and not update the repository
# Maybe this is a good way to assure it doesn't get into our way, but
# we want to stay backwards compatible too ... It's so redundant!
writer = self.repo.config_writer()
writer.set_value(sm_section(self.name), 'url', self.url)
writer.release()
# END handle dry_run
# END handle initalization
# DETERMINE SHAS TO CHECKOUT
############################
binsha = self.binsha
hexsha = self.hexsha
| |
# OO OO OO
0x3C, 0x03, 0x00, # OOOO OO
0x6E, 0x03, 0x00, # OO OOO OO
0x66, 0x03, 0x00, # OO OO OO
0xC3, 0x03, 0x00, # OO OO OO
0xC1, 0x86, 0x00, # OO OO OO
0xC0, 0xC6, 0x00, # OO OO OO
0xE0, 0x64, 0x00, # OOO OO O
0x60, 0x30, 0x00, # OO OO
0x78, 0x0F, 0xE0, # OOOO OOOOOOO
0x3F, 0xFF, 0xF0, # OOOOOOOOOOOOOOOOOO
0x0F, 0xE3, 0xF0, # OOOOOOO OOOOOO
0x00, 0x00, 0xE0, # OOO
0x00, 0x00, 0x00, #
# @9177 'Ð' (20 pixels wide)
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x60, # OO
0x00, 0x00, 0xF0, # OOOO
0x00, 0x00, 0xF0, # OOOO
0x00, 0x00, 0xF0, # OOOO
0x03, 0xF0, 0xE0, # OOOOOO OOO
0x07, 0xFC, 0xE0, # OOOOOOOOO OOO
0x0E, 0x0E, 0xC0, # OOO OOO OO
0x0C, 0x00, 0xC0, # OO OO
0x0C, 0x01, 0x40, # OO O O
0x0C, 0x03, 0x60, # OO OO OO
0x0E, 0x06, 0x60, # OOO OO OO
0x07, 0x0C, 0x60, # OOO OO OO
0x03, 0x18, 0x60, # OO OO OO
0x00, 0x70, 0x60, # OOO OO
0x00, 0xE0, 0x60, # OOO OO
0x03, 0xC0, 0xE0, # OOOO OOO
0x0F, 0x80, 0xC0, # OOOOO OO
0x3E, 0xE3, 0xC0, # OOOOO OOO OOOO
0xFC, 0x7F, 0x80, # OOOOOO OOOOOOOO
0x30, 0x1E, 0x00, # OO OOOO
# @9246 'Ñ' (20 pixels wide)
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x60, 0x00, 0x00, # OO
0xF0, 0x00, 0x00, # OOOO
0xF0, 0x00, 0x00, # OOOO
0xF0, 0x00, 0x00, # OOOO
0x70, 0xFC, 0x00, # OOO OOOOOO
0x73, 0xFE, 0x00, # OOO OOOOOOOOO
0x37, 0x07, 0x00, # OO OOO OOO
0x30, 0x03, 0x00, # OO OO
0x28, 0x03, 0x00, # O O OO
0x6C, 0x03, 0x00, # OO OO OO
0x66, 0x07, 0x00, # OO OO OOO
0x63, 0x0E, 0x00, # OO OO OOO
0x61, 0x8C, 0x00, # OO OO OO
0x60, 0xE0, 0x00, # OO OOO
0x60, 0x70, 0x00, # OO OOO
0x70, 0x3C, 0x00, # OOO OOOO
0x30, 0x1F, 0x00, # OO OOOOO
0x3C, 0x77, 0xC0, # OOOO OOO OOOOO
0x1F, 0xE3, 0xF0, # OOOOOOOO OOOOOO
0x07, 0x80, 0xC0, # OOOO OO
# @9315 'Ò' (20 pixels wide)
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x20, # O
0x00, 0x00, 0x20, # O
0x00, 0x00, 0x70, # OOO
0x00, 0x00, 0x70, # OOO
0x03, 0xE0, 0xE0, # OOOOO OOO
0x07, 0xF0, 0xE0, # OOOOOOO OOO
0x0E, 0x39, 0xC0, # OOO OOO OOO
0x0C, 0x19, 0x80, # OO OO OO
0x0C, 0x03, 0xC0, # OO OOOO
0x0C, 0x07, 0x60, # OO OOO OO
0x0C, 0x06, 0x60, # OO OO OO
0x0C, 0x0C, 0x30, # OO OO OO
0x06, 0x18, 0x30, # OO OO OO
0x06, 0x30, 0x30, # OO OO OO
0x02, 0x60, 0x70, # O OO OOO
0x00, 0xC0, 0x60, # OO OO
0x7F, 0x01, 0xE0, # OOOOOOO OOOO
0xFF, 0xFF, 0xC0, # OOOOOOOOOOOOOOOOOO
0xFC, 0x7F, 0x00, # OOOOOO OOOOOOO
0x70, 0x00, 0x00, # OOO
0x00, 0x00, 0x00, #
# @9384 'Ó' (20 pixels wide)
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0xE0, # OOO
0x0F, 0xE3, 0xF0, # OOOOOOO OOOOOO
0x3F, 0xFF, 0xF0, # OOOOOOOOOOOOOOOOOO
0x78, 0x0F, 0xE0, # OOOO OOOOOOO
0x60, 0x30, 0x00, # OO OO
0xE0, 0x64, 0x00, # OOO OO O
0xC0, 0xC6, 0x00, # OO OO OO
0xC1, 0x86, 0x00, # OO OO OO
0xC3, 0x03, 0x00, # OO OO OO
0x66, 0x03, 0x00, # OO OO OO
0x6E, 0x03, 0x00, # OO OOO OO
0x3C, 0x03, 0x00, # OOOO OO
0x19, 0x83, 0x00, # OO OO OO
0x39, 0xC7, 0x00, # OOO OOO OOO
0x70, 0xFE, 0x00, # OOO OOOOOOO
0x70, 0x7C, 0x00, # OOO OOOOO
0xE0, 0x00, 0x00, # OOO
0xE0, 0x00, 0x00, # OOO
0x40, 0x00, 0x00, # O
0x40, 0x00, 0x00, # O
# @9453 'Ô' (20 pixels wide)
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x30, 0x1E, 0x00, # OO OOOO
0xFC, 0x7F, 0x80, # OOOOOO OOOOOOOO
0x3E, 0xE3, 0xC0, # OOOOO OOO OOOO
0x0F, 0x80, 0xC0, # OOOOO OO
0x03, 0xC0, 0xE0, # OOOO OOO
0x00, 0xE0, 0x60, # OOO OO
0x00, 0x70, 0x60, # OOO OO
0x03, 0x18, 0x60, # OO OO OO
0x07, 0x0C, 0x60, # OOO OO OO
0x0E, 0x06, 0x60, # OOO OO OO
0x0C, 0x03, 0x60, # OO OO OO
0x0C, 0x01, 0x40, # OO O O
0x0C, 0x00, 0xC0, # OO OO
0x0E, 0x0E, 0xC0, # OOO OOO OO
0x07, 0xFC, 0xE0, # OOOOOOOOO OOO
0x03, 0xF0, 0xE0, # OOOOOO OOO
0x00, 0x00, 0xF0, # OOOO
0x00, 0x00, 0xF0, # OOOO
0x00, 0x00, 0xF0, # OOOO
0x00, 0x00, 0x60, # OO
# @9522 'Õ' (23 pixels wide)
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x01, 0xFF, 0xFE, # OOOOOOOOOOOOOOOO
0x03, 0xFF, 0xFE, # OOOOOOOOOOOOOOOOO
0x07, 0x00, 0x06, # OOO OO
0x0C, 0x20, 0x86, # OO O O OO
0x18, 0x71, 0xC6, # OO OOO OOO OO
0x30, 0x3B, 0x86, # OO OOO OOO OO
0x60, 0x1F, 0x06, # OO OOOOO OO
0xE0, 0x0E, 0x06, # OOO OOO OO
0x70, 0x1F, 0x06, # OOO OOOOO OO
0x38, 0x3B, 0x86, # OOO OOO OOO OO
0x1C, 0x71, 0xC6, # OOO OOO OOO OO
0x0E, 0x20, 0x86, # OOO O O OO
0x07, 0x00, 0x06, # OOO OO
0x03, 0xFF, 0xFE, # OOOOOOOOOOOOOOOOO
0x01, 0xFF, 0xFE, # OOOOOOOOOOOOOOOO
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
# @9591 'Ö' (23 pixels wide)
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0xFF, 0xFF, 0x00, # OOOOOOOOOOOOOOOO
0xFF, 0xFF, 0x80, # OOOOOOOOOOOOOOOOO
0xC0, 0x01, 0xC0, # OO OOO
0xC2, 0x08, 0x60, # OO O O OO
0xC7, 0x1C, 0x30, # OO OOO OOO OO
0xC3, 0xB8, 0x18, # OO OOO OOO OO
0xC1, 0xF0, 0x0C, # OO OOOOO OO
0xC0, 0xE0, 0x0E, # OO OOO OOO
0xC1, 0xF0, 0x1C, # OO OOOOO OOO
0xC3, 0xB8, 0x38, # OO OOO OOO OOO
0xC7, 0x1C, 0x70, # OO OOO OOO OOO
0xC2, 0x08, 0xE0, # OO O O OOO
0xC0, 0x01, 0xC0, # OO OOO
0xFF, 0xFF, 0x80, # OOOOOOOOOOOOOOOOO
0xFF, 0xFF, 0x00, # OOOOOOOOOOOOOOOO
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
# @9660 '×' (13 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x08, # O
0x00, 0x30, # OO
0x00, 0x50, # O O
0x01, 0xA0, # OO O
0x06, 0x40, # OO O
0x18, 0x40, # OO O
0x20, 0x80, # O O
0xFF, 0x80, # OOOOOOOOO
0x3F, 0x80, # OOOOOOO
0x1F, 0xC0, | |
return imgProj, imgMask, dstXY0
def bbox(xys):
min_xy, max_xy = xys.min(axis=0), xys.max(axis=0)
#print("min_xy:%s, max_xy:%s"%(min_xy, max_xy))
min_xy, max_xy = np.floor(min_xy).astype(np.int16), np.ceil(max_xy).astype(np.int16)
# print("min_xy:%s, max_xy:%s"%(min_xy, max_xy))
return min_xy[0], max_xy[0], min_xy[1], max_xy[1] #bbox
def imgReducedBy2powN(img, n=1):
for t in range(n):
h, w = img.shape[0], img.shape[1]
h = (h//2)*2 # even
w = (w//2)*2 # even
imgu16 = img.astype(np.uint16)
imgu16 = (imgu16[0:h:2, 0:w:2, ...] + imgu16[1:h:2, 0:w:2, ...] + imgu16[0:h:2, 1:w:2, ...] + imgu16[1:h:2, 1:w:2, ...])/4
img = imgu16.astype(np.uint8)
return img
def format_axes(ax, w=4, h=3, title="no title"):
#ax.margins(0.2)
#ax.set_axis_off()
ax.set_aspect(1.0)
#ax.set_xlim(0, w)
#ax.set_ylim(0, h)
ax.invert_yaxis()
#ax.set_autoscale_on(False)
ax.set_title(title)
def gridOfPoints(w=8, h=6, step=1, centred=False):
#w, h = 2*dw + 1, 2*dh + 1
dw, dh = (w//2, h//2) if centred else (0, 0)
#nPts = h * w
#grid = np.zeros((nPts, 3))
ys = tuple(range(0, h, step))
xs = tuple(range(0, w, step))
gridH, gridW = len(ys), len(xs)
print("gridH: %d, gridW: %d"%(gridH, gridW))
xys = list()
for y in ys:
i0 = y * w
y0 = y-dh
row = list()
for x in xs:
xys.append((x-dw, y0, 1))
grid = np.array(xys, dtype=np.int16)
#print("grid: shape:%s\n%s"%(grid.shape, grid[:50]))
return grid
#------------------------------------------------
def closestPtIdxDist(xy, line):
x, y = xy
xs = line.get_xdata()
ys = line.get_ydata()
iOfMin = -1
for i in range(len(xs)):
dx, dy = xs[i] - x, ys[i] - y
d2 = dx**2 + dy**2
if iOfMin < 0 or d2 < d2min:
d2min = d2
iOfMin = i
dxyMin = dx, dy
return iOfMin, dxyMin, sqrt(d2min)
import matplotlib.pylab as plt
#-----------------------------------------------------------------------
class ImageView:
def __init__(self, ax, img=None, nColors=1, title="", frame=True):
self.ax = ax
self.img = img
self.nColors = nColors
self.imgM = UnitMat
self.M = UnitMat
self.title = title
self.frame = frame
self.Mstr = ""
self.imgMstr = ""
#print("self.centerCorners: shape:%s:\n%s"%(str(self.centerCorners.shape), self.centerCorners))
self.initialPlot()
def initialPlot(self):
margeHW = margeH, margeW = 20, 25
if self.img is None: self.img = gridImg(h=401, w=501, margeHW=margeHW)
self.imgH, self.imgW = self.img.shape[:2]
self.imDiag = np.sqrt(self.imgH * self.imgW) # size of square with same area
self.imgFrame = np.array([(0, 0, 1), (self.imgW, 0, 1), (self.imgW, self.imgH, 1), (0, self.imgH, 1)], dtype=float)
dw, dh, x0, x1, y0, y1 = self.imgW/2, self.imgH/2, margeW, self.imgW-margeW, margeH, self.imgH-margeH
self.centerCorners = np.array([(dw, dh, 1.0), (x0, y0, 1), (x1, y0, 1), (x1, y1, 1), (x0, y1, 1), (x0, y0, 1)], dtype=float)
format_axes(self.ax, self.imgW, self.imgH, self.title + " Image")
self.initialCenterCorners = self.centerCorners.copy()
self.initialCenter = self.centerCorners[0,:].copy()
self.gridLin_xys = np.array(gridLine(h=self.imgH, w=self.imgW, margeHW=margeHW))
self.gridLin_xyrs = self.gridLin_xys.copy()
self.imgShow = self.ax.imshow(self.img, cmap='gray', interpolation='none')
if self.frame:
self.gridLin , = self.ax.plot(self.gridLin_xyrs [ : ,0], self.gridLin_xyrs [ : ,1],'--b')
self.quadrilatere, = self.ax.plot(self.centerCorners[1:6,0], self.centerCorners[1:6,1], '-r') # close
self.handles , = self.ax.plot(self.centerCorners[0:5,0], self.centerCorners[0:5,1], 'go', picker=10) # frame
self.centerPt , = self.ax.plot(self.centerCorners[0 ,0], self.centerCorners[0 ,1], 'r.') # center
def updateGraphicOverlay(self):
if self.frame:
self.gridLin .set_data(self.gridLin_xyrs [ : ,0], self.gridLin_xyrs [ : ,1])
self.quadrilatere.set_data(self.centerCorners[1:6,0], self.centerCorners[1:6,1])
self.handles .set_data(self.centerCorners[0:5,0], self.centerCorners[0:5,1])
self.centerPt .set_data(self.center_xyr[0], self.center_xyr[1])
def updateTransfMat(self, M, noCornerUpdate=False, debug=0):
#print("updateTransfMat: noCornerUpdate:%s"%noCornerUpdate)
if M is None : M = UnitMat
if (M == self.M).all() :
if debug >= 2: print("update %s : no change"%self.title)
return
#print("update %s.imgM:\nfrom:\n%s\nto:\n%s"%(self.title, self.imgM, M))
self.Mstr = mat2str(M, self.imDiag)
self.ax.set_title("%s:imgM:%s\ngridM:%s"%(self.title, self.imgMstr, self.Mstr))
if debug >= 1: print("update %s.M:\nfrom:%s\nto :%s"%(self.title, mat2str(self.M, self.imDiag), self.Mstr))
self.M = M.copy()
if not noCornerUpdate:
self.centerCorners = projPts(self.initialCenterCorners, self.M)
assert self.centerCorners.shape == (6, 3), "centerCorners.shape:%s != (6, 3)"%str(self.centerCorners.shape)
self.gridLin_xyrs = projPts(self.gridLin_xys, self.M)
self.center_xyr = projPt(self.initialCenter, self.M)
#print("center_xyr: %s"%self.center_xyr)
#print("centerCornersUpdated => newM:\n%s\ngridLin_xyrs:\n%s"%(self.M.round(3), self.gridLin_xyrs))
self.updateGraphicOverlay()
def centerCornersUpdated(self): # and overlay matrix
#print("centerCornersUpdated:\ninitialCenterCorners:\n%s\npresentCorners:\n%s"%(self.initialCenterCorners, self.centerCorners[1:5, :]))
M = getHomogenTransformMatrix(self.initialCenterCorners[1:5, :], self.centerCorners[1:5, :], debug=0)
self.updateTransfMat(M, noCornerUpdate=True)
def updateImage(self, M, debug=0):
if M is None : M = UnitMat
if 0 and (M == self.imgM).all() :
if debug >= 2: print("update %s.imgM : no change"%self.title)
return
self.imgMstr = mat2str(self.imgM, self.imDiag)
#print("update %s.imgM:\nfrom:\n%s\nto:\n%s"%(self.title, self.imgM, M))
if debug >= 1: print("update %s.imgM:\nfrom:%s\nto :%s"%(self.title, mat2str(self.imgM, self.imDiag),mat2str(M, self.imDiag)))
imgProjBbox = bbox(self.imgFrame)
if debug >= 1: print("%s imgProjBbox:%s"%(self.title, imgProjBbox))
self.imgProj, imgMask, dstXY0 = projectImage(self.img, M, dstBbox=imgProjBbox)
#format_axes(ax, self.imgW, self.imgH, title)
self.ax.set_xlim(0, self.imgW)
self.ax.set_ylim(self.imgH, 0)
self.ax.set_aspect(1.0)
if dstXY0 == (0, 0):
extent = None
else:
left , top = dstXY0
if debug >= 2: print("plotImage at left=x0:%d, top=y0:%d"%(left , top))
dstH , dstW = self.imgProj.shape
right, bottom = left + dstW, top + dstH
extent = left, right, bottom, top
if debug >= 1: print("%s img.extent: left, right, bottom, top: %s"%(self.title, extent))
if 1:
self.imgShow.set_data(self.imgProj)# segmentedimg
#self.ax.draw()
else:
self.ax.imshow(self.imgProj, cmap='gray', interpolation='none', extent=extent)
self.imgM = M
if debug >= 2: print("updateImage done")
def resetGrid(self):
self.centerCorners = self.initialCenterCorners.copy()
self.centerCornersUpdated()
def updateClickedPos(self, handleIdx, xy):
dxy = xy - self.centerCorners[handleIdx, :2]
if handleIdx == 0: # central handle => move all handles
self.centerCorners[:, :2] += dxy
else: # move one handle
self.centerCorners[handleIdx, :2] += dxy
if handleIdx == 1: # last point(idx:-1) of closed polygone = first point(idx:1)
self.centerCorners[5, :] = self.centerCorners[1, :] # skip Center
#print("updateClickedPos => self.centerCorners:\n%s"%self.centerCorners)
assert self.centerCorners.shape == (6, 3), "centerCorners.shape:%s != (6, 3)"%str(self.centerCorners.shape)
self.centerCornersUpdated()
#-----------------------------------------------------------------------------------------
from matplotlib.widgets import Button, CheckButtons
class GUI:
def __init__(self, gray= False, reduceFactor=None):
self.imgFile = "castel" # "../../../Images/ParlementDeHambourg0.jpg" # "Numpy.png"
self.ext = ".jpg"
img = io.imread(self.imgFile+self.ext)
self.nColors = 1 if len(img.shape) < 3 else img.shape[2]
if self.nColors and gray:
img = (np.sum(img, axis=2) / 3).round().astype(np.uint8)
print("img.shape:%s"%str(img.shape))
if reduceFactor is not None:
assert 0 <= reduceFactor <= 4, "reduceFactor 1,2,3,4 => img size reduction by :4, 16, 64, 256 "
if reduceFactor > 0 :img = imgReducedBy2powN(img, reduceFactor)
self.M = UnitMat
self.fig = plt.figure(figsize=(9, 10))
ax0 = plt.subplot(2,1,1)
self.originView = ImageView(ax0, img, self.nColors, "Original")
ax1 = plt.subplot(2,1,2)
self.projView = ImageView(ax1, img, self.nColors, "Tr")
h, w = self.originView.img.shape[:2]
M = projMatrix(yHoriz=1.0*w, zoom=1.2, xC=w/2, yC=h*0.8) # , yHoriz=None, zoom=1.0, rotDeg=0.0, xC=0.0, yC=0.0) # hambourg
self.ind = -1
self.pickedView = None
self.callbackRunning = None # for stop error in callbacks
self.fig.canvas.mpl_connect('pick_event', self.onpick)
self.fig.canvas.mpl_connect('button_press_event' , self.onButPress )
self.fig.canvas.mpl_connect('button_release_event', self.onButRelease)
self.fig.canvas.mpl_connect('motion_notify_event' , self.onMotion )
if 1: #create a Widget
x, y, w, h = 0.02, 0.97, 0.10, 0.02
if 1: #create a button
self.axWidgets = list()
self.buttons = list()
for i, txt in enumerate(("reset grid", "Save Image")):
axWidget = plt.axes([x, y-0.03*i, w, h])
#print("axWidget:", axWidget)
button = Button(axWidget, txt)
button.on_clicked(self.onWidget)
self.axWidgets.append(axWidget)
self.buttons.append(button)
else: #create a check box
self.checkBox = CheckButtons(axWidget, ['On',], [False,])
plt.show()
def adjustImgToGrid(self):
invM = np.linalg.inv(self.originView.M)
imgM = normalizeHomogenMatrix(self.projView.M @ invM)
self.projView.updateImage(imgM)
#def accrocheCurseur(self):
def decrocheCurseur(self):
self.ind = -1
self.pickedTraj = None
self.pickedLine = None
self.fig.canvas.draw_idle()
def onpick(self, pickEvt):
if self.callbackRunning != None:
print("onpick reenter callbackRunning %s ! => quit"%self.callbackRunning)
quit()
self.callbackRunning = "onpick"
event = pickEvt.mouseevent
xy = x, y = event.xdata, event.ydata
#print("\nonpick: '%s'"%pickEvt.name, ": xy:({0}, {1}) -> xydata:({2:5.3f}, {3:5.3f})".format(event.x,event.y, x, y))
line = pickEvt.artist
view = None
if line == self.originView.handles: view = self.originView
elif line == self.projView .handles: view = self.projView
else:
print("not clicked on other => do nothing")
if view is not None:
ind, dxy, dist = closestPtIdxDist(xy, line)
assert 0 <= ind < 5, "0 <= ind: %d < 5 : False !"%ind # 5 handles
if dist <= 10:
self.pickedView = view
self.ind = ind
self.dxyClick = dxy
#print("onpick handles: xy:(%d, %d)"%(event.x, event.y))
self.callbackRunning = None # no exception occurs
def onWidget(self, event):
print("onWidget: xy:(%d, %d))"%(event.x, event.y), event)
idx = None
for i, ax in enumerate(self.axWidgets):
if event.inaxes == ax:
idx = i
break
if idx == 0:
print("resetGrid")
self.projView.resetGrid()
self.adjustImgToGrid()
self.fig.canvas.draw_idle()
elif idx == 1:
imgFile = self.imgFile + "_proj" + self.ext
print("writing %s ..."%imgFile)
io.imwrite(imgFile, self.projView.imgProj)
print("done")
def onButPress(self, event):
#if event.inaxes==None: return
#if event.button != 1: return
if self.callbackRunning != None:
print("onButPress reenter callbackRunning %s ! => quit"%self.callbackRunning)
#quit()
self.callbackRunning = "onButPress"
print("onButPress: xy:(%d, %d))"%(event.x, event.y))
self.callbackRunning = None # no exception occurs
def onMotion(self, event):
if self.callbackRunning != None:
print("onMotion reenter callbackRunning %s ! => quit"%self.callbackRunning)
quit()
#print("onMotion: event:", event)
if event.inaxes is None: return # --------------------------->
self.callbackRunning = "onMotion"
#print('onMotion: ind:%d, xy:(%d, %d) -> xydata:(%5.3f, %5.3f)'%(self.ind, event.x,event.y, event.xdata,event.ydata))
if event.button == 1 and self.ind >= 0: # move point
x,y = event.xdata, event.ydata
if self.pickedView is not None:
#print("onMotion handles: xy:(%d, %d) found idx: %d"%(x, y, self.ind))
dx, dy = self.dxyClick
x += dx
y += dy
self.pickedView.updateClickedPos(self.ind, (x, | |
for step 3')
print('Finished Exporting ')
return allist
# In[ ]:
def getperson():
allist=[]
allfaith=[]
faithpic={}
traitpic={}
dyncoapic={}
titlecoapic={}
dynprespic={}
titlerankpic={}
#-------------------------------------------LIVING-------------------------------------------------------
for i in setlist:
if i in live:
# LIVE NAME
itername=live[i]['first_name']
if re.search(fr'(?<=\s){itername}\b',locnames) is not None:
itername=re.findall(fr'(?<=\s){itername}\b:.*"(.*)"',locnames)
itername=itername[0]
accented='\n'+'Name: '+itername
itername= unidecode.unidecode(itername)
itername=itername.title()
# LIVE DYNASTY
if 'dynasty_house' in live[i]:
iterhouse1=live[i]['dynasty_house']
if 'key' in dyn[str(iterhouse1)]:
iterhouse2=dyn[str(iterhouse1)]['key']
iterhouse3=re.findall('(?<=_)[^_]+$',iterhouse2)
iterhouse3=iterhouse3[0]
iterhouse3=iterhouse3.title()
accented=accented+' '+iterhouse3
iterhouse3= unidecode.unidecode(iterhouse3)
iterhouse3=iterhouse3.title()
elif 'name' in dyn[str(iterhouse1)]:
iterhouse2=dyn[str(iterhouse1)]['name']
iterhouse3=re.findall(fr'\b{iterhouse2}\b:.*"(.*)"',dynloc)
iterhouse3=iterhouse3[0]
accented=accented+' '+iterhouse3
iterhouse3= unidecode.unidecode(iterhouse3)
iterhouse3=iterhouse3.title()
elif 'localized_name' in dyn[str(iterhouse1)]:
iterhouse3=dyn[str(iterhouse1)]['localized_name']
accented=accented+' '+iterhouse3
iterhouse3= unidecode.unidecode(iterhouse3)
iterhouse3=iterhouse3.title()
#LIVE COA
if 'coat_of_arms_id' in dyn[f'{iterhouse1}']:
coa_id=dyn[f'{iterhouse1}']['coat_of_arms_id']
dyncoapic[i.zfill(maxlen)]=str(coa_id)
dynpres=dyn[f'{iterhouse1}']['dynasty']
#LIVE DYN PRES
if str(coa_id) not in dynprespic:
dynacc=coafind[f'{dynpres}']['prestige']['accumulated']
dynprespic[str(coa_id)]=dynacc
else:
nestdyn=dyn[f'{iterhouse1}']['dynasty']
if 'coat_of_arms_id' in coafind[f'{nestdyn}']:
coa_id=coafind[f'{nestdyn}']['coat_of_arms_id']
dyncoapic[i.zfill(maxlen)]=str(coa_id)
if str(coa_id) not in dynprespic:
dynacc=coafind[f'{nestdyn}']['prestige']['accumulated']
dynprespic[str(coa_id)]=dynacc
else:
iterhouse3=''
# LIVE GENDER
if 'sexuality' in live[i]:
sexo=live[i]['sexuality']
sexo=re.sub('as','Asexual',sexo)
sexo=re.sub('bi','Bisexual',sexo)
sexo=re.sub('ho','Homosexual',sexo)
else:
sexo='Heterosexual'
if 'female' in live[i]:
sex='Female'
else:
sex='Male'
gendernote='\n'+'Sex: '+sex+'\n'+'Sexual Orientation: '+sexo
#LIVE TITLE
if 'landed_data' in live[i] and 'domain' in live[i]['landed_data']:
itertitle=live[i]['landed_data']['domain'][0]
iterrank=title['landed_titles'][str(itertitle)]['key']
#LIVE TITLE COA
titlecoa=title['landed_titles'][str(itertitle)]['coat_of_arms_id']
titlecoapic[i.zfill(maxlen)]=str(titlecoa)
iterrank=re.sub('(.)_.*',r'\1',iterrank)
if str(coa_id) not in titlerankpic:
titlerankpic[str(titlecoa)]=iterrank
itertitle=title['landed_titles'][str(itertitle)]['name']
if 'b' in iterrank and sex!='Female':
iterrank="Baron of "
elif 'b' in iterrank and sex=='Female':
iterrank="Baroness of "
elif 'c' in iterrank and sex!='Female':
iterrank="Count of "
elif 'c' in iterrank and sex=='Female':
iterrank="Countess of "
elif 'd' in iterrank and sex!='Female':
iterrank="Duke of "
elif 'd' in iterrank and sex=='Female':
iterrank="Duchess of "
elif 'k' in iterrank and sex!='Female':
iterrank="King of "
elif 'k' in iterrank and sex=='Female':
iterrank="Queen of "
elif 'e' in iterrank and sex!='Female':
iterrank="Emperor of "
elif 'e' in iterrank and sex=='Female':
iterrank="Empress of "
elif 'x' in iterrank:
iterrank="Leader of the "
else:
iterrank="Titled"
itertitle=iterrank+itertitle
titlenote='\n'+'Title: '+itertitle
itertitle= unidecode.unidecode(itertitle)
itertitle=itertitle.title()
else:
itertitle=''
titlenote=''
# LIVE BIRTH
birth=live[i]['birth']
birth=birth.replace(".", "-")
birthnote='\n'+'Born: '+birth
# LIVE DEATH
death=''
# LIVE Skills AND TRAITS
traitpic[i.zfill(maxlen)]=[]
if 'skill' in live[i]:
skill=[live[i]['skill'][j] for j in range(6)]
skill=[str(skillist[x])+str(skill[x]) for x in range(6)]
skill='Skills: '+str(skill)
else:
skill=[]
if 'traits' in live[i]:
traits=live[i]['traits']
traits=set(traits)
traits=(sorted(traits))
traits=list(traits)
traits=list(map(str,traits))
traitxml=[traitcode.get(item,item) for item in traits]
traitpic[i.zfill(maxlen)]=traitxml
if sex=='Female':
traits=[ fdictraits.get(item,item) for item in traits ]
else:
traits=[ mdictraits.get(item,item) for item in traits ]
traits='Traits: '+str(traits)
else:
traits=[]
if 'recessive_traits' in live[i]:
retraits=live[i]['recessive_traits']
retraits=set(retraits)
retraits=(sorted(retraits))
retraits=list(retraits)
retraits=list(map(str,retraits))
traitxml2=[traitcode.get(item,item) for item in retraits]
traitpic[i.zfill(maxlen)].extend(traitxml2)
if sex=='Female':
retraits=[ fdictraits.get(item,item) for item in retraits ]
else:
retraits=[ mdictraits.get(item,item) for item in retraits ]
retraits='Inherited Traits: '+str(retraits)
else:
retraits=[]
if '[]' in traits:
traits=''
if '[]' in retraits:
retraits=''
if bool(traits) is True and bool(retraits) is True:
d='\n'+traits+'\n'+retraits
elif bool(traits) is True and bool(retraits) is False:
d='\n'+traits
elif bool(traits) is False and bool(retraits) is True:
d='\n'+retraits
elif bool(traits) is False and bool(retraits) is False:
d=''
if bool(d) is False:
satli='\n'+skill
else:
satli='\n'+skill+d
# LIVE FAITH
if 'faith' in live[i]:
pfaith=live[i]['faith']
if 'name' in faithde[str(pfaith)]:
mycon=faithde[str(pfaith)]['icon']
faithpic[i.zfill(maxlen)]=mycon
myfaith=(faithde[str(pfaith)]['name'])
myfaith='Faith: '+myfaith
else:
mycon=faithde[str(pfaith)]['icon']
faithpic[i.zfill(maxlen)]=mycon
faithname=faithde[fr'{pfaith}']['template']
myfaith='Faith: '+((re.findall(fr'{faithname}\b:.*"(.*)"',allines))[0])
basename=faithre[str(faithde[fr'{pfaith}']['religion'])]['template']
mybase='Religion: '+((re.findall(fr'{basename}\b:.*"(.*)"',allines))[0])
faith='\n'+myfaith+'\n'+mybase
if myfaith not in allfaith:
allfaith.append(myfaith)
else:
pass
else:
faith=''
# LIVE CULTURE
if 'culture' in live[i]:
pcult=live[i]['culture']
cultem=cult[str(pcult)]['culture_template']
culture=(re.findall(fr'\b{cultem}\b:.*"(.*)"',loccult))[0]
culture='\n'+'Culture: '+culture
else:
culture=''
# LIVE COMBINE
notes='Game ID: '+i+titlenote+accented+birthnote+satli+faith+culture+gendernote
combine=i,i,iterhouse3,itername,sex,birth,death,itertitle,notes
allist.append(combine)
print(f'{setlist.index(i)+1}/{len(setlist)} CHARACTERS PROCESSED')
#-------------------------------------------DEAD-------------------------------------------------------
elif i in dead:
# DEAD NAME
itername=dead[i]['first_name']
if re.search(fr'(?<=\s){itername}\b',locnames) is not None:
itername=re.findall(fr'(?<=\s){itername}\b:.*"(.*)"',locnames)
itername=itername[0]
accented='\n'+'Name: '+itername
itername= unidecode.unidecode(itername)
itername=itername.title()
# DEAD DYNASTY
if 'dynasty_house' in dead[i]:
iterhouse1=dead[i]['dynasty_house']
if 'key' in dyn[str(iterhouse1)]:
iterhouse2=dyn[str(iterhouse1)]['key']
iterhouse3=re.findall('(?<=_)[^_]+$',iterhouse2)
iterhouse3=iterhouse3[0]
iterhouse3=iterhouse3.title()
accented=accented+' '+iterhouse3
iterhouse3= unidecode.unidecode(iterhouse3)
iterhouse3=iterhouse3.title()
elif 'name' in dyn[str(iterhouse1)]:
iterhouse2=dyn[str(iterhouse1)]['name']
iterhouse3=re.findall(fr'\b{iterhouse2}\b:.*"(.*)"',dynloc)
iterhouse3=iterhouse3[0]
accented=accented+' '+iterhouse3
iterhouse3= unidecode.unidecode(iterhouse3)
iterhouse3=iterhouse3.title()
elif 'localized_name' in dyn[str(iterhouse1)]:
iterhouse3=dyn[str(iterhouse1)]['localized_name']
accented=accented+' '+iterhouse3
iterhouse3= unidecode.unidecode(iterhouse3)
iterhouse3=iterhouse3.title()
#DEAD COA
if 'coat_of_arms_id' in dyn[f'{iterhouse1}']:
coa_id=dyn[f'{iterhouse1}']['coat_of_arms_id']
dyncoapic[i.zfill(maxlen)]=str(coa_id)
dynpres=dyn[f'{iterhouse1}']['dynasty']
#DEAD DYN PRES
if str(coa_id) not in dynprespic:
dynacc=coafind[f'{dynpres}']['prestige']['accumulated']
dynprespic[str(coa_id)]=dynacc
else:
nestdyn=dyn[f'{iterhouse1}']['dynasty']
if 'coat_of_arms_id' in coafind[f'{nestdyn}']:
coa_id=coafind[f'{nestdyn}']['coat_of_arms_id']
dyncoapic[i.zfill(maxlen)]=str(coa_id)
if str(coa_id) not in dynprespic:
dynacc=coafind[f'{nestdyn}']['prestige']['accumulated']
dynprespic[str(coa_id)]=dynacc
else:
iterhouse3=''
# DEAD GENDER
if 'sexuality' in dead[i]:
sexo=dead[i]['sexuality']
sexo=re.sub('as','Asexual',sexo)
sexo=re.sub('bi','Bisexual',sexo)
sexo=re.sub('ho','Homosexual',sexo)
else:
sexo='Heterosexual'
if 'female' in dead[i]:
sex='Female'
else:
sex='Male'
gendernote='\n'+'Sex: '+sex+'\n'+'Sexual Orientation: '+sexo
# DEAD TITLE
if 'domain' in dead[i]['dead_data']:
itertitle=dead[i]['dead_data']['domain'][0]
iterrank=title['landed_titles'][str(itertitle)]['key']
#DEAD TITLE COA
titlecoa=title['landed_titles'][str(itertitle)]['coat_of_arms_id']
titlecoapic[i.zfill(maxlen)]=str(titlecoa)
iterrank=re.sub('(.)_.*',r'\1',iterrank)
                if str(titlecoa) not in titlerankpic:
titlerankpic[str(titlecoa)]=iterrank
itertitle=title['landed_titles'][str(itertitle)]['name']
if 'b' in iterrank and sex!='Female':
iterrank="Baron of "
elif 'b' in iterrank and sex=='Female':
iterrank="Baroness of "
elif 'c' in iterrank and sex!='Female':
iterrank="Count of "
elif 'c' in iterrank and sex=='Female':
iterrank="Countess of "
elif 'd' in iterrank and sex!='Female':
iterrank="Duke of "
elif 'd' in iterrank and sex=='Female':
iterrank="Duchess of "
elif 'k' in iterrank and sex!='Female':
iterrank="King of "
elif 'k' in iterrank and sex=='Female':
iterrank="Queen of "
elif 'e' in iterrank and sex!='Female':
iterrank="Emperor of "
elif 'e' in iterrank and sex=='Female':
iterrank="Empress of "
elif 'x' in iterrank:
iterrank="Leader of the "
else:
iterrank="Titled"
itertitle=iterrank+itertitle
titlenote='\n'+'Title: '+itertitle
itertitle= unidecode.unidecode(itertitle)
itertitle=itertitle.title()
else:
itertitle=''
titlenote=''
# DEAD BIRTH
birth=dead[i]['birth']
birth=birth.replace(".", "-")
birthnote='\n'+'Born: '+birth
# DEAD DEATH
death=dead[i]['dead_data']['date']
death=death.replace(".","-")
deathnote='\n'+'Died: '+death
# DEAD Skills AND TRAITS
traitpic[i.zfill(maxlen)]=[]
if 'skill' in dead[i]:
skill=[dead[i]['skill'][j] for j in range(6)]
skill=[str(skillist[x])+str(skill[x]) for x in range(6)]
skill='Skills: '+str(skill)
else:
skill=[]
if 'traits' in dead[i]:
traits=dead[i]['traits']
traits=set(traits)
traits=(sorted(traits))
traits=list(traits)
traits=list(map(str,traits))
traitxml=[traitcode.get(item,item) for item in traits]
traitpic[i.zfill(maxlen)]=traitxml
if sex=='Female':
traits=[ fdictraits.get(item,item) for item in traits ]
else:
traits=[ mdictraits.get(item,item) for item in traits ]
traits='Traits: '+str(traits)
else:
traits=[]
if 'recessive_traits' in dead[i]:
retraits=dead[i]['recessive_traits']
retraits=set(retraits)
retraits=(sorted(retraits))
retraits=list(retraits)
retraits=list(map(str,retraits))
traitxml2=[traitcode.get(item,item) for item in retraits]
traitpic[i.zfill(maxlen)].extend(traitxml2)
if sex=='Female':
retraits=[ fdictraits.get(item,item) for item in retraits ]
else:
retraits=[ mdictraits.get(item,item) for item in retraits ]
retraits='Inherited Traits: '+str(retraits)
else:
retraits=[]
if '[]' in traits:
traits=''
if '[]' in retraits:
retraits=''
if bool(traits) is True and bool(retraits) is True:
d='\n'+traits+'\n'+retraits
elif bool(traits) is True and bool(retraits) is False:
d='\n'+traits
elif bool(traits) is False and bool(retraits) is True:
d='\n'+retraits
elif bool(traits) is False and bool(retraits) is False:
d=''
if bool(d) is False:
satli='\n'+skill
else:
satli='\n'+skill+d
# DEAD FAITH
if 'faith' in dead[i]:
pfaith=dead[i]['faith']
if 'name' in faithde[str(pfaith)]:
mycon=faithde[str(pfaith)]['icon']
faithpic[i.zfill(maxlen)]=mycon
myfaith=(faithde[str(pfaith)]['name'])
myfaith='Faith: '+myfaith
else:
mycon=faithde[str(pfaith)]['icon']
faithpic[i.zfill(maxlen)]=mycon
faithname=faithde[fr'{pfaith}']['template']
myfaith='Faith: '+((re.findall(fr'{faithname}\b:.*"(.*)"',allines))[0])
basename=faithre[str(faithde[fr'{pfaith}']['religion'])]['template']
mybase='Religion: '+((re.findall(fr'{basename}\b:.*"(.*)"',allines))[0])
faith='\n'+myfaith+'\n'+mybase
if myfaith not in allfaith:
allfaith.append(myfaith)
else:
pass
else:
faith=''
# DEAD CULTURE
if 'culture' in dead[i]:
pcult=dead[i]['culture']
cultem=cult[str(pcult)]['culture_template']
culture=(re.findall(fr'\b{cultem}\b:.*"(.*)"',loccult))[0]
culture='\n'+'Culture: '+culture
else:
culture=''
#DEAD CAUSE OF DEATH
if 'reason' in dead[i]['dead_data']:
reason=dead[i]['dead_data']['reason']
reason=re.sub('wounded_1','Wounded',reason)
reason=re.sub('wounded_2','Severely Injured',reason)
reason=re.sub('wounded_3','Brutally Mauled',reason)
reason=re.sub('death(.*)',r'\1',reason)
reason=reason.title()
                reason=re.sub(r'(?<=\w)_',' ',reason)
reason=re.sub('_','',reason)
reason=re.sub(' Passive','',reason)
reason='\n'+'Cause of Death: '+reason
else:
reason=''
# DEAD COMBINE
notes='Game ID: '+i+titlenote+accented+birthnote+deathnote+reason+satli+faith+culture+gendernote
combine=i,i,iterhouse3,itername,sex,birth,death,itertitle,notes
allist.append(combine)
print(f'{setlist.index(i)+1}/{len(setlist)} CHARACTERS PROCESSED')
#-------------------------------------------OTHER-------------------------------------------------------
elif i in other:
# OTHER NAME
itername=other[i]['first_name']
if re.search(fr'(?<=\s){itername}\b',locnames) is not None:
itername=re.findall(fr'(?<=\s){itername}\b:.*"(.*)"',locnames)
itername=itername[0]
accented='\n'+'Name: '+itername
itername= unidecode.unidecode(itername)
itername=itername.title()
# OTHER DYNASTY
if 'dynasty_house' in other[i]:
iterhouse1=other[i]['dynasty_house']
if 'key' in dyn[str(iterhouse1)]:
iterhouse2=dyn[str(iterhouse1)]['key']
iterhouse3=re.findall('(?<=_)[^_]+$',iterhouse2)
iterhouse3=iterhouse3[0]
iterhouse3=iterhouse3.title()
accented=accented+' '+iterhouse3
iterhouse3= unidecode.unidecode(iterhouse3)
iterhouse3=iterhouse3.title()
elif 'name' in dyn[str(iterhouse1)]:
iterhouse2=dyn[str(iterhouse1)]['name']
iterhouse3=re.findall(fr'\b{iterhouse2}\b:.*"(.*)"',dynloc)
iterhouse3=iterhouse3[0]
accented=accented+' '+iterhouse3
iterhouse3= unidecode.unidecode(iterhouse3)
iterhouse3=iterhouse3.title()
elif 'localized_name' in dyn[str(iterhouse1)]:
iterhouse3=dyn[str(iterhouse1)]['localized_name']
accented=accented+' '+iterhouse3
iterhouse3= unidecode.unidecode(iterhouse3)
iterhouse3=iterhouse3.title()
#OTHER COA
if 'coat_of_arms_id' in dyn[f'{iterhouse1}']:
coa_id=dyn[f'{iterhouse1}']['coat_of_arms_id']
dyncoapic[i.zfill(maxlen)]=str(coa_id)
dynpres=dyn[f'{iterhouse1}']['dynasty']
#OTHER DYN PRES
if str(coa_id) not in dynprespic:
dynacc=coafind[f'{dynpres}']['prestige']['accumulated']
dynprespic[str(coa_id)]=dynacc
else:
nestdyn=dyn[f'{iterhouse1}']['dynasty']
if 'coat_of_arms_id' in coafind[f'{nestdyn}']:
coa_id=coafind[f'{nestdyn}']['coat_of_arms_id']
dyncoapic[i.zfill(maxlen)]=str(coa_id)
if str(coa_id) not in dynprespic:
dynacc=coafind[f'{nestdyn}']['prestige']['accumulated']
dynprespic[str(coa_id)]=dynacc
else:
iterhouse3=''
# OTHER GENDER
if 'sexuality' in other[i]:
sexo=other[i]['sexuality']
sexo=re.sub('as','Asexual',sexo)
sexo=re.sub('bi','Bisexual',sexo)
sexo=re.sub('ho','Homosexual',sexo)
else:
sexo='Heterosexual'
if 'female' in other[i]:
sex='Female'
else:
sex='Male'
gendernote='\n'+'Sex: '+sex+'\n'+'Sexual Orientation: '+sexo
# OTHER TITLE
if 'domain' in other[i]['dead_data']:
itertitle=other[i]['dead_data']['domain'][0]
iterrank=title['landed_titles'][str(itertitle)]['key']
#OTHER TITLE COA
titlecoa=title['landed_titles'][str(itertitle)]['coat_of_arms_id']
titlecoapic[i.zfill(maxlen)]=str(titlecoa)
iterrank=re.sub('(.)_.*',r'\1',iterrank)
                if str(titlecoa) not in titlerankpic:
titlerankpic[str(titlecoa)]=iterrank
itertitle=title['landed_titles'][str(itertitle)]['name']
if 'b' in iterrank and sex!='Female':
iterrank="Baron of "
elif 'b' in iterrank and sex=='Female':
iterrank="Baroness of "
elif 'c' in iterrank and sex!='Female':
iterrank="Count of "
elif 'c' in iterrank and sex=='Female':
iterrank="Countess of "
elif 'd' in iterrank and sex!='Female':
iterrank="Duke of "
elif 'd' in iterrank and sex=='Female':
iterrank="Duchess of "
elif 'k' in iterrank and sex!='Female':
iterrank="King of "
elif 'k' in iterrank and sex=='Female':
iterrank="Queen of "
elif 'e' in iterrank and sex!='Female':
iterrank="Emperor of "
                elif 'e' in iterrank and sex=='Female':
# Repository: PanDAWMS/panda-bigmon-atlas
from django.contrib.auth.models import User
from django.contrib.messages.context_processors import messages
from django.http.response import HttpResponseBadRequest
from rest_framework.generics import get_object_or_404
from rest_framework.parsers import JSONParser
from atlas.ami.client import AMIClient
from atlas.prodtask.models import ActionStaging, ActionDefault, DatasetStaging, StepAction, TTask, \
GroupProductionAMITag, ProductionTask, GroupProductionDeletion, TDataFormat, GroupProductionStats, TRequest, \
ProductionDataset, GroupProductionDeletionExtension, GroupProductionDeletionProcessing, \
GroupProductionDeletionRequest
from atlas.dkb.views import es_by_fields, es_by_keys, es_by_keys_nested
from atlas.prodtask.ddm_api import DDM
from datetime import datetime, timedelta
import pytz
from rest_framework import serializers, generics
from django.forms.models import model_to_dict
from rest_framework import status
from atlas.settings import defaultDatetimeFormat
import logging
from django.utils import timezone
from rest_framework.decorators import api_view, authentication_classes, permission_classes
from rest_framework.response import Response
from rest_framework.authentication import TokenAuthentication, BasicAuthentication, SessionAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.decorators import parser_classes
from atlas.celerybackend.celery import app
from django.core.cache import cache
_logger = logging.getLogger('prodtaskwebui')
_jsonLogger = logging.getLogger('prodtask_ELK')
FORMAT_BASES = ['BPHY', 'EGAM', 'EXOT', 'FTAG', 'HDBS', 'HIGG', 'HION', 'JETM', 'LCALO', 'MUON', 'PHYS',
'STDM', 'SUSY', 'TAUP', 'TCAL', 'TOPQ', 'TRIG', 'TRUTH']
CP_FORMATS = ["FTAG", "EGAM", "MUON", 'PHYS', "JETM", "TAUP", "IDTR", "TCAL"]
def get_all_formats(format_base):
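    """Return the names of all DAOD data formats whose name starts with 'DAOD_<format_base>'."""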
return list(TDataFormat.objects.filter(name__startswith='DAOD_' + format_base).values_list('name', flat=True))
LIFE_TIME_DAYS = 60
def collect_stats(format_base, is_real_data):
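    """
    Recompute deletion statistics for every DAOD format of `format_base` and
    refresh the per-format cache entries ('gp_del_<data|mc>_<format>_') with the
    list of containers that are candidates for deletion.
    """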
formats = get_all_formats(format_base)
version = 1
if format_base in CP_FORMATS:
version = 2
if is_real_data:
data_prefix = 'data'
else:
data_prefix = 'mc'
for output_format in formats:
to_cache = get_stats_per_format(output_format, version, is_real_data)
result = []
for ami_tag in to_cache.keys():
if to_cache[ami_tag]:
ami_tag_info = GroupProductionAMITag.objects.get(ami_tag=ami_tag)
skim='noskim'
if ami_tag_info.skim == 's':
skim='skim'
result.append({'ami_tag':ami_tag,'cache':','.join([ami_tag_info.cache,skim]),
'containers':to_cache[ami_tag]})
cache.delete('gp_del_%s_%s_'%(data_prefix,output_format))
if result:
cache.set('gp_del_%s_%s_'%(data_prefix,output_format),result,None)
def get_stats_per_format(output_format, version, is_real_data):
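    """
    Aggregate the GroupProductionDeletion entries of one output format per AMI tag,
    update the corresponding GroupProductionStats rows (total and to-delete
    containers/bytes) and return a dict mapping each AMI tag to the serialized
    containers whose version is at least `version`.
    """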
by_tag_stats = {}
to_cache = {}
if is_real_data:
data_prefix = 'data'
else:
data_prefix = 'mc'
samples = GroupProductionDeletion.objects.filter(output_format=output_format)
for sample in samples:
if sample.container.startswith(data_prefix):
if sample.ami_tag not in by_tag_stats:
by_tag_stats[sample.ami_tag] = {'containers': 0, 'bytes': 0, 'to_delete_containers': 0, 'to_delete_bytes':0}
to_cache[sample.ami_tag] = []
if sample.version >= version:
to_cache[sample.ami_tag].append(GroupProductionDeletionUserSerializer(sample).data)
by_tag_stats[sample.ami_tag]['containers'] += 1
by_tag_stats[sample.ami_tag]['bytes'] += sample.size
if sample.days_to_delete <0:
by_tag_stats[sample.ami_tag]['to_delete_containers'] += 1
by_tag_stats[sample.ami_tag]['to_delete_bytes'] += sample.size
current_stats = GroupProductionStats.objects.filter(output_format=output_format, real_data=is_real_data)
updated_tags = []
for current_stat in current_stats:
if current_stat.ami_tag in by_tag_stats.keys():
current_stat.size = by_tag_stats[current_stat.ami_tag]['bytes']
current_stat.containers = by_tag_stats[current_stat.ami_tag]['containers']
current_stat.to_delete_size = by_tag_stats[current_stat.ami_tag]['to_delete_bytes']
current_stat.to_delete_containers = by_tag_stats[current_stat.ami_tag]['to_delete_containers']
current_stat.save()
updated_tags.append(current_stat.ami_tag)
else:
current_stat.size = 0
current_stat.containers = 0
current_stat.to_delete_size = 0
current_stat.to_delete_containers = 0
current_stat.save()
for tag in by_tag_stats.keys():
if tag not in updated_tags:
current_stat, is_created = GroupProductionStats.objects.get_or_create(ami_tag=tag, output_format=output_format, real_data=is_real_data)
current_stat.size = by_tag_stats[tag]['bytes']
current_stat.containers = by_tag_stats[tag]['containers']
current_stat.to_delete_size = by_tag_stats[tag]['to_delete_bytes']
current_stat.to_delete_containers = by_tag_stats[tag]['to_delete_containers']
current_stat.save()
return to_cache
def apply_extension(container, number_of_extension, user, message):
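    """
    Record a deletion-extension request for a container and adjust its
    extension counter. If the container is already past its deletion date,
    extra extensions are added to compensate, capped so the lifetime does not
    grow beyond roughly a year. The action is logged for bookkeeping.
    """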
container = container[container.find(':')+1:]
gp = GroupProductionDeletion.objects.get(container=container)
gp_extension = GroupProductionDeletionExtension()
gp_extension.container = gp
gp_extension.user = user
gp_extension.timestamp = timezone.now()
gp_extension.message = message
gp_extension.save()
if (number_of_extension > 0) and (gp.days_to_delete < 0):
number_of_extension += (gp.days_to_delete // GroupProductionDeletion.EXTENSIONS_DAYS) * -1
if gp.days_to_delete + (number_of_extension * GroupProductionDeletion.EXTENSIONS_DAYS) > 365:
number_of_extension = (gp.days_to_delete // GroupProductionDeletion.EXTENSIONS_DAYS) * -1 + 6
if gp.extensions_number:
gp.extensions_number += number_of_extension
else:
gp.extensions_number = number_of_extension
if gp.extensions_number < 0:
gp.extensions_number = 0
gp.save()
_logger.info(
        'GP extension by {user} for {container} on {number_of_extension} with message {message}'.format(user=user, container=container,
number_of_extension=number_of_extension,message=message))
_jsonLogger.info('Request for derivation container extension for: {message}'.format(message=message), extra={'user':user,'container':container,'number_of_extension':number_of_extension})
def remove_extension(container):
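    """Reset the extension counter of a container back to zero."""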
gp = GroupProductionDeletion.objects.get(container=container)
gp.extensions_number = 0
gp.save()
def form_gp_from_dataset(dataset):
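    """
    Build an (unsaved) GroupProductionDeletion object from a dataset name:
    derive the container name, AMI tag, DSID, output format, skim flag and the
    de-duplication input_key, creating the AMI-tag record first if needed.
    """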
gp = GroupProductionDeletion()
dataset = dataset[dataset.find(':')+1:]
container_name = get_container_name(dataset)
ami_tag = container_name.split('_')[-1]
if not GroupProductionAMITag.objects.filter(ami_tag=ami_tag).exists():
        update_tag_from_ami(ami_tag, container_name.startswith('data'))
gp.skim = GroupProductionAMITag.objects.get(ami_tag=ami_tag).skim
gp.container = container_name
gp.dsid = container_name.split('.')[1]
gp.output_format = container_name.split('.')[4]
if gp.container.startswith('data'):
key_postfix = container_name.split('.')[2]
else:
key_postfix = 'mc'
gp.input_key = '.'.join([str(gp.dsid), gp.output_format, '_'.join(container_name.split('.')[-1].split('_')[:-1]), gp.skim,key_postfix])
gp.ami_tag = ami_tag
gp.version = 0
return gp
def get_existing_datastes(output, ami_tag, ddm):
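    """
    Query Elasticsearch for tasks with the given AMI tag and output format and
    return, for every non-failed, non-validation task, the still-existing output
    dataset of that format together with its size, events, task status and end
    time, falling back to ProductionDataset/DDM metadata when ES lacks the values.
    """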
tasks = es_by_keys_nested({'ctag': ami_tag, 'output_formats': output})
if(len(tasks)>0):
print(ami_tag, len(tasks))
result = []
for task in tasks:
if 'valid' not in task['taskname'] and task['status'] not in ProductionTask.RED_STATUS:
if not task['output_dataset'] and task['status'] in ProductionTask.NOT_RUNNING:
datasets = ProductionDataset.objects.filter(task_id=task['taskid'])
for dataset in datasets:
if output in dataset.name:
if ddm.dataset_exists(dataset.name):
metadata = ddm.dataset_metadata(dataset.name)
events = metadata['events']
bytes = metadata['bytes']
if bytes is None:
break
result.append({'task': task['taskid'], 'dataset': dataset.name, 'size': bytes,
'task_status': task['status'], 'events': events, 'end_time': task['task_timestamp']})
break
for dataset in task['output_dataset']:
deleted = False
try:
deleted = dataset['deleted']
except:
print('no deleted', task['taskid'])
if output == dataset['data_format'] and not deleted and ddm.dataset_exists(dataset['name']):
if ('events' not in dataset) or (not dataset['events']):
print('no events', task['taskid'])
metadata = ddm.dataset_metadata(dataset['name'])
events = metadata['events']
if not events:
events = 0
if ('bytes' not in dataset) or dataset['bytes'] == 0:
dataset['bytes'] = metadata['bytes']
else:
events = dataset['events']
if task['status'] not in ProductionTask.NOT_RUNNING:
production_task = ProductionTask.objects.get(id=int(task['taskid']))
if production_task.status != task['status']:
print('wrong status', task['taskid'])
task['status'] = production_task.status
if dataset['bytes'] is None:
break
result.append({'task': task['taskid'], 'dataset': dataset['name'], 'size': dataset['bytes'],
'task_status': task['status'], 'events': events, 'end_time': task['task_timestamp']})
break
return result
def ami_tags_reduction_w_data(postfix, data=False):
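    """
    Collapse an AMI-tag postfix for container naming: drop any '_tid...' suffix
    and, for MC, keep only the first tag of each consecutive run starting with
    the same letter (an 's' tag following an 'a' tag is dropped as well); for
    real data only the tid suffix is removed. For example, a hypothetical
    postfix 'e1234_s5678_s5679_r9364_r9315_p4061' reduces to
    'e1234_s5678_r9364_p4061' (illustrative input only).
    """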
if 'tid' in postfix:
postfix = postfix[:postfix.find('_tid')]
if data:
return postfix
new_postfix = []
first_letter = ''
for token in postfix.split('_')[:-1]:
if token[0] != first_letter and not (token[0] == 's' and first_letter == 'a'):
new_postfix.append(token)
first_letter = token[0]
new_postfix.append(postfix.split('_')[-1])
return '_'.join(new_postfix)
def get_container_name(dataset_name):
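    """Return the container name of a dataset: the same fields with the AMI-tag postfix reduced."""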
return '.'.join(dataset_name.split('.')[:-1] + [ami_tags_reduction_w_data(dataset_name.split('.')[-1], dataset_name.startswith('data') or ('TRUTH' in dataset_name) )])
def collect_datasets(format_base, data, only_new = False):
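    """
    (Re)fill the deletion database for every DAOD format of `format_base`
    (optionally only formats that have no records yet) via fill_db and then
    refresh the cached statistics.
    """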
if data:
prefix = 'data'
else:
prefix = 'mc'
for output in get_all_formats(format_base):
if only_new:
if GroupProductionDeletion.objects.filter(output_format=output, container__startswith=prefix).exists():
continue
if data:
fill_db(output, True, True, False)
else:
fill_db(output, False, True, False)
fill_db(output, False, False, False)
collect_stats(format_base, data)
return True
def collect_datasets_per_output(output, data, is_skim):
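    """
    Collect the existing datasets of one output format and skim flag, grouped
    by container key. AMI tags are processed from the newest cache release to
    the oldest; for each tag the datasets, total size, events, running status,
    latest end time and a version index are accumulated. Returns the dict.
    """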
if is_skim:
skim = 's'
else:
skim = 'n'
_logger.info(
        'Start collecting containers for {output} ({skim})'.format(output=output, skim=skim))
ami_tags_cache = list(
GroupProductionAMITag.objects.filter(real_data=data, skim=skim).values_list('ami_tag', 'cache'))
ami_tags_cache.sort(reverse=True, key=lambda x: list(map(int, x[1].split('.'))))
ami_tags = [x[0] for x in ami_tags_cache]
result = {}
ddm = DDM()
for ami_tag in ami_tags:
for dataset in get_existing_datastes(output, ami_tag, ddm):
dataset_name = get_container_name(dataset['dataset'])
dataset_key = dataset_name[:dataset_name.rfind('_')] + '.' + skim
if dataset_key not in result:
result[dataset_key] = {'versions': -1}
if ami_tag not in result[dataset_key]:
result[dataset_key]['versions'] += 1
result[dataset_key][ami_tag] = {'datasets': [], 'size': 0, 'events': 0, 'status': 'finished',
'end_time': None, 'version': result[dataset_key]['versions']}
if dataset['end_time']:
if not result[dataset_key][ami_tag]['end_time'] or (
dataset['end_time'] > result[dataset_key][ami_tag]['end_time']):
result[dataset_key][ami_tag]['end_time'] = dataset['end_time']
result[dataset_key][ami_tag]['datasets'].append(dataset)
result[dataset_key][ami_tag]['size'] += dataset['size']
result[dataset_key][ami_tag]['events'] += dataset['events']
if dataset['task_status'] not in ProductionTask.NOT_RUNNING:
result[dataset_key][ami_tag]['status'] = 'running'
return result
def create_single_tag_container(container_name):
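    """
    Register the container in DDM if it does not exist yet: look up its
    datasets from ES and add them to a newly registered container, provided at
    least one of the datasets still has a replica.
    """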
container_name = container_name[container_name.find(':')+1:]
gp_container = GroupProductionDeletion.objects.get(container=container_name)
ddm = DDM()
if not ddm.dataset_exists(container_name):
datasets = datassets_from_es(gp_container.ami_tag, gp_container.output_format, gp_container.dsid,container_name,ddm)
if datasets:
empty_replica = True
for es_dataset in datasets:
if len(ddm.dataset_replicas(es_dataset))>0:
empty_replica = False
break
if not empty_replica:
print(str(datasets),' will be added to ',container_name)
ddm.register_container(container_name,datasets)
def range_containers(container_key):
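    """
    Re-rank all GroupProductionDeletion entries that share an input_key: order
    their AMI tags by cache release (newest first), give the newest entry
    version 0 and increase the version for each older entry once its newer
    neighbour is finished, keeping available_tags and last_extension_time in sync.
    """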
gp_containers = GroupProductionDeletion.objects.filter(input_key=container_key)
if gp_containers.count() > 1:
by_amitag = {}
for gp_container in gp_containers:
by_amitag[gp_container.ami_tag] = gp_container
ami_tags_cache = [(x, GroupProductionAMITag.objects.get(ami_tag=x).cache) for x in by_amitag.keys()]
ami_tags_cache.sort(reverse=True, key=lambda x: list(map(int, x[1].split('.'))))
ami_tags = [x[0] for x in ami_tags_cache]
available_tags = ','.join(ami_tags)
latest = by_amitag[ami_tags[0]]
version = 0
if latest.version !=0 or latest.available_tags != available_tags:
latest.version = 0
latest.last_extension_time = None
latest.available_tags = available_tags
latest.save()
for ami_tag in ami_tags[1:]:
if latest.status == 'finished':
version += 1
last_extension = max([latest.update_time,by_amitag[ami_tag].update_time])
if version != by_amitag[ami_tag].version or by_amitag[ami_tag].available_tags != available_tags or by_amitag[ami_tag].last_extension_time!=last_extension:
by_amitag[ami_tag].last_extension_time = last_extension
by_amitag[ami_tag].version = version
by_amitag[ami_tag].available_tags = available_tags
by_amitag[ami_tag].save()
latest = by_amitag[ami_tag]
else:
gp_container = GroupProductionDeletion.objects.get(input_key=container_key)
if gp_container.version != 0 or gp_container.available_tags:
gp_container.version = 0
gp_container.last_extension_time = None
gp_container.available_tags = None
gp_container.save()
def unify_dataset(dataset):
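    """Return the dataset name with its scope prefix ('scope:name'), adding the scope if it is missing."""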
if(':' in dataset):
return dataset
else:
return dataset.split('.')[0]+':'+dataset
def check_container(container_name, ddm, additional_datasets = None, warning_exists = False):
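    """
    Refresh (or create) the GroupProductionDeletion record of a container:
    remove duplicate records, gather the container's datasets from DDM and ES
    (plus any `additional_datasets`), recompute events/size/status, save the
    record and re-rank its siblings; if no datasets remain, the record is
    removed via rerange_after_deletion.
    """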
container_name = container_name[container_name.find(':')+1:]
if GroupProductionDeletion.objects.filter(container=container_name).count() >1:
gp_to_delete = list(GroupProductionDeletion.objects.filter(container=container_name))
for gp in gp_to_delete:
gp.delete()
if GroupProductionDeletion.objects.filter(container=container_name).exists():
gp_container = GroupProductionDeletion.objects.get(container=container_name)
is_new = False
else:
gp_container = form_gp_from_dataset(additional_datasets[0])
is_new = True
container_key = gp_container.input_key
datasets = ddm.dataset_in_container(container_name)
if additional_datasets:
for dataset in additional_datasets:
if dataset not in datasets:
datasets.append(unify_dataset(dataset))
events = 0
bytes = 0
is_running = False
datasets += datassets_from_es(gp_container.ami_tag, gp_container.output_format, gp_container.dsid,container_name,ddm,datasets)
if datasets:
if warning_exists:
_logger.warning(
'Container {container} has datasets which were not found from ES '.format(container=container_name))
print('Container {container} has datasets which were not found from ES '.format(container=container_name))
for dataset in datasets:
metadata = ddm.dataset_metadata(dataset)
if metadata['events']:
events += metadata['events']
if metadata['bytes']:
bytes += metadata['bytes']
task_id = metadata['task_id']
task = ProductionTask.objects.get(id=task_id)
if task.status not in ProductionTask.NOT_RUNNING:
is_running = True
gp_container.events = events
gp_container.datasets_number = len(datasets)
gp_container.size = bytes
if is_running:
gp_container.status = 'running'
gp_container.update_time = timezone.now()
else:
gp_container.status = 'finished'
if is_new:
gp_container.update_time = timezone.now()
_logger.info(
'Container {container} has been added to group production lists '.format(
container=gp_container.container))
gp_container.save()
range_containers(container_key)
else:
_logger.info(
'Container {container} has been deleted from group production lists '.format(container=container_name))
rerange_after_deletion(gp_container)
def store_dataset(item):
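    """Create or update a GroupProductionDeletion row from a plain dict (string timestamps are parsed) and return its id."""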
for x in ['update_time', 'last_extension_time']:
if item.get(x):
item[x] = datetime.strptime(item[x], "%d-%m-%Y %H:%M:%S").replace(tzinfo=pytz.utc)
gp_container = GroupProductionDeletion(**item)
if GroupProductionDeletion.objects.filter(container=item['container']).exists():
gp_container.id = GroupProductionDeletion.objects.get(container=item['container']).id
gp_container.save()
return gp_container.id
def do_gp_deletion_update():
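    """Run the periodic update for the last two days, stamp the cache with the update time and refresh the statistics of all format bases."""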
update_for_period(timezone.now()-timedelta(days=2), timezone.now()+timedelta(hours=3))
cache.set('gp_deletion_update_time',timezone.now(),None)
for f in FORMAT_BASES:
collect_stats(f,False)
collect_stats(f,True)
def update_for_period(time_since, time_till):
tasks = ProductionTask.objects.filter(timestamp__gte=time_since, timestamp__lte=time_till, provenance='GP')
import configparser
import traceback
from pathlib import Path
from shutil import copyfile
from tkinter import filedialog, messagebox
from niq_misc import replace_entry
def save_config(gui, out_file=None):
"""
Prompts user to provide a save file name and location then generates a configuration file from
the current GUI settings and statuses.
"""
if out_file is None:
try:
out_file = Path(filedialog.asksaveasfilename()).with_suffix(".ini")
except ValueError:
return
    # Copy backup_config.ini over as a template for the new file.
copyfile(gui.master_dir_path / "config_files" / "backup_config.ini", out_file)
update_config(gui, out_file)
def load_config(gui, program_startup=False, config_file_=None):
"""
Updates all GUI settings and statuses according to a configuration file. If this file is not immediately
provided to the function, the user is prompted to select a file from a dialog box.
Args:
program_startup (bool): True if is the initial call used to populate GUI upon program startup
config_file_ (pathlib.Path): Path to configuration file to be loaded
"""
# Load config button clicked
if not program_startup and config_file_ is None:
config_file = filedialog.askopenfilename()
if config_file == "":
return False
config_file = Path(config_file)
if not config_file.exists():
messagebox.showerror(("Config File Loading Error"), "Configuration file could not be found.")
return False
try:
gui.config.read(str(config_file))
except:
messagebox.showerror(("Config File Loading Error"), "Configuration file appears invalid. Please try a differnt file.")
if config_file_ is not None:
config_file = config_file_
gui.config.read(str(config_file))
try:
gui.time_interval = gui.config.get("Main Settings", "data_time_interval")
gui.show_warns_CB.select() if gui.config.get("Main Settings", "show_warnings").lower() == "true" else gui.show_warns_CB.deselect()
gui.restrict_search_CB.select() if gui.config.get("Main Settings", "restrict_bout_search").lower() == "true" else gui.restrict_search_CB.deselect()
replace_entry(gui.day_start_E, gui.config.get("Main Settings", "day_Start_Time"))
replace_entry(gui.night_start_E, gui.config.get("Main Settings", "night_Start_Time"))
replace_entry(gui.smoothing_radius_E, gui.config.get("Main Settings", "smoothing_radius"))
replace_entry(gui.dur_thresh_E, gui.config.get("Main Settings", "duration_threshold"))
gui.train_from_IV.set(0) if gui.config.get("Advanced Settings", "train_from").lower() == "0" else gui.train_from_IV.set(1)
replace_entry(gui.init_off_E, gui.config.get("Advanced Settings", "off_bout_initial"))
replace_entry(gui.init_on_E, gui.config.get("Advanced Settings", "on_bout_initial"))
replace_entry(gui.off_off_trans_E, gui.config.get("Advanced Settings", "off_off_trans"))
replace_entry(gui.off_on_trans_E, gui.config.get("Advanced Settings", "off_on_trans"))
replace_entry(gui.on_on_trans_E, gui.config.get("Advanced Settings", "on_on_trans"))
replace_entry(gui.on_off_trans_E, gui.config.get("Advanced Settings", "on_off_trans"))
replace_entry(gui.off_mean_E, gui.config.get("Advanced Settings", "off_bout_emis_mean"))
replace_entry(gui.off_stdev_E, gui.config.get("Advanced Settings", "off_bout_emis_stdev"))
replace_entry(gui.on_mean_E, gui.config.get("Advanced Settings", "on_bout_emis_mean"))
replace_entry(gui.on_stdev_E, gui.config.get("Advanced Settings", "on_bout_emis_stdev"))
gui.manual_plot_dims.set(0) if gui.config.get("Plot Options", "manual_plot_dimensions").lower() == "false" else gui.manual_plot_dims.set(1)
replace_entry(gui.plot_dim_x_E, gui.config.get("Plot Options", "plot_x_dim"))
replace_entry(gui.plot_dim_y_E, gui.config.get("Plot Options", "plot_y_dim"))
replace_entry(gui.title_font_size_E, gui.config.get("Plot Options", "title_font_size"))
replace_entry(gui.axis_title_font_size_E, gui.config.get("Plot Options", "axis_title_font_size"))
replace_entry(gui.axis_label_font_size_E, gui.config.get("Plot Options", "axis_label_font_size"))
replace_entry(gui.axis_tick_size_E, gui.config.get("Plot Options", "axis_tick_size"))
replace_entry(gui.legend_font_size_E, gui.config.get("Plot Options", "legend_font_size"))
gui.plot_egg_CB.select() if gui.config.get("Plot Options", "plot_egg_tempers").lower() == "true" else gui.plot_egg_CB.deselect()
gui.plot_air_CB.select() if gui.config.get("Plot Options", "plot_air_tempers").lower() == "true" else gui.plot_air_CB.deselect()
gui.plot_adj_CB.select() if gui.config.get("Plot Options", "plot_egg_minus_air").lower() == "true" else gui.plot_adj_CB.deselect()
gui.smooth_status_IV.set(0) if gui.config.get("Plot Options", "plot_smoothed").lower() == "false" else gui.smooth_status_IV.set(1)
gui.legend_loc.set(gui.config.get("Plot Options", "legend_location"))
gui.on_point_color.set(gui.config.get("Plot Options", "on_point_color"))
gui.off_point_color.set(gui.config.get("Plot Options", "off_point_color"))
gui.bout_line_color.set(gui.config.get("Plot Options", "bout_line_color"))
gui.air_line_color.set(gui.config.get("Plot Options", "air_line_color"))
gui.day_marker_color.set(gui.config.get("Plot Options", "day_marker_color"))
replace_entry(gui.on_point_size_E, gui.config.get("Plot Options", "on_point_size"))
replace_entry(gui.bout_line_width_E, gui.config.get("Plot Options", "bout_line_width"))
replace_entry(gui.air_line_width_E, gui.config.get("Plot Options", "air_line_width"))
replace_entry(gui.day_marker_width_E, gui.config.get("Plot Options", "day_marker_width"))
gui.show_day_markers_CB.select() if gui.config.get("Plot Options", "show_day_marker").lower() == "true" else gui.show_day_markers_CB.deselect()
gui.show_grid_CB.select() if gui.config.get("Plot Options", "show_grid").lower() == "true" else gui.show_grid_CB.deselect()
gui.day_num_CB.select() if gui.config.get("Stats Options", "day_Number").lower() == "true" else gui.day_num_CB.deselect()
gui.date_CB.select() if gui.config.get("Stats Options", "date").lower() == "true" else gui.date_CB.deselect()
gui.off_count_CB.select() if gui.config.get("Stats Options", "off_Bout_Count").lower() == "true" else gui.off_count_CB.deselect()
gui.off_dur_CB.select() if gui.config.get("Stats Options", "mean_Off_Bout_Duration").lower() == "true" else gui.off_dur_CB.deselect()
gui.off_dur_sd_CB.select() if gui.config.get("Stats Options", "off_Bout_Duration_StDev").lower() == "true" else gui.off_dur_sd_CB.deselect()
gui.off_dec_CB.select() if gui.config.get("Stats Options", "mean_Off_Bout_Temp_Drop").lower() == "true" else gui.off_dec_CB.deselect()
gui.off_dec_sd_CB.select() if gui.config.get("Stats Options", "off_Bout_Temp_Drop_StDev").lower() == "true" else gui.off_dec_sd_CB.deselect()
gui.mean_off_temper_CB.select() if gui.config.get("Stats Options", "mean_Off_Bout_Temp").lower() == "true" else gui.mean_off_temper_CB.deselect()
gui.off_time_sum_CB.select() if gui.config.get("Stats Options", "off_bout_time_sum").lower() == "true" else gui.off_time_sum_CB.deselect()
gui.on_count_CB.select() if gui.config.get("Stats Options", "on_Bout_Count").lower() == "true" else gui.on_count_CB.deselect()
gui.on_dur_CB.select() if gui.config.get("Stats Options", "mean_On_Bout_Duration").lower() == "true" else gui.on_dur_CB.deselect()
gui.on_dur_sd_CB.select() if gui.config.get("Stats Options", "on_Bout_Duration_StDev").lower() == "true" else gui.on_dur_sd_CB.deselect()
gui.on_inc_CB.select() if gui.config.get("Stats Options", "mean_On_Bout_Temp_Rise").lower() == "true" else gui.on_inc_CB.deselect()
gui.on_inc_sd_CB.select() if gui.config.get("Stats Options", "on_Bout_Temp_Rise_StDev").lower() == "true" else gui.on_inc_sd_CB.deselect()
gui.mean_on_temper_CB.select() if gui.config.get("Stats Options", "mean_On_Bout_Temp").lower() == "true" else gui.mean_on_temper_CB.deselect()
gui.on_time_sum_CB.select() if gui.config.get("Stats Options", "on_bout_time_sum").lower() == "true" else gui.on_time_sum_CB.deselect()
gui.bouts_dropped_CB.select() if gui.config.get("Stats Options", "bouts_Dropped").lower() == "true" else gui.bouts_dropped_CB.deselect()
gui.time_above_temper_CB.select() if gui.config.get(
"Stats Options", "time_above_critical"
).lower() == "true" else gui.time_above_temper_CB.deselect()
gui.time_below_temper_CB.select() if gui.config.get(
"Stats Options", "time_below_critical"
).lower() == "true" else gui.time_below_temper_CB.deselect()
gui.mean_temper_d_CB.select() if gui.config.get(
"Stats Options", "mean_Daytime_Temperature"
).lower() == "true" else gui.mean_temper_d_CB.deselect()
gui.mean_temper_d_sd_CB.select() if gui.config.get(
"Stats Options", "daytime_Temp_StDev"
).lower() == "true" else gui.mean_temper_d_sd_CB.deselect()
gui.median_temper_d_CB.select() if gui.config.get(
"Stats Options", "median_Daytime_Temp"
).lower() == "true" else gui.median_temper_d_CB.deselect()
gui.min_temper_d_CB.select() if gui.config.get("Stats Options", "min_Daytime_Temp").lower() == "true" else gui.min_temper_d_CB.deselect()
gui.max_temper_d_CB.select() if gui.config.get("Stats Options", "max_Daytime_Temp").lower() == "true" else gui.max_temper_d_CB.deselect()
gui.mean_temper_n_CB.select() if gui.config.get("Stats Options", "mean_Nighttime_Temp").lower() == "true" else gui.mean_temper_n_CB.deselect()
gui.mean_temper_n_sd_CB.select() if gui.config.get(
"Stats Options", "nighttime_Temp_StDev"
).lower() == "true" else gui.mean_temper_n_sd_CB.deselect()
gui.median_temper_n_CB.select() if gui.config.get(
"Stats Options", "median_Nighttime_Temp"
).lower() == "true" else gui.median_temper_n_CB.deselect()
gui.min_temper_n_CB.select() if gui.config.get("Stats Options", "min_Nighttime_Temp").lower() == "true" else gui.min_temper_n_CB.deselect()
gui.max_temper_n_CB.select() if gui.config.get("Stats Options", "max_Nighttime_Temp").lower() == "true" else gui.max_temper_n_CB.deselect()
gui.mean_temper_dn_CB.select() if gui.config.get("Stats Options", "mean_DayNight_Temp").lower() == "true" else gui.mean_temper_dn_CB.deselect()
gui.mean_temper_dn_sd_CB.select() if gui.config.get(
"Stats Options", "dayNight_Temp_StDev"
).lower() == "true" else gui.mean_temper_dn_sd_CB.deselect()
gui.median_temper_db_CB.select() if gui.config.get(
"Stats Options", "median_DayNight_Temp"
).lower() == "true" else gui.median_temper_db_CB.deselect()
gui.min_temper_dn_CB.select() if gui.config.get("Stats Options", "min_DayNight_Temp").lower() == "true" else gui.min_temper_dn_CB.deselect()
gui.max_temper_dn_CB.select() if gui.config.get("Stats Options", "max_DayNight_Temp").lower() == "true" else gui.max_temper_dn_CB.deselect()
gui.mean_air_temper_CB.select() if gui.config.get("Stats Options", "mean_air_temp").lower() == "true" else gui.mean_air_temper_CB.deselect()
gui.mean_air_temper_sd_CB.select() if gui.config.get(
"Stats Options", "mean_air_temp_stdev"
).lower() == "true" else gui.mean_air_temper_sd_CB.deselect()
gui.min_air_temper_CB.select() if gui.config.get("Stats Options", "min_air_temp").lower() == "true" else gui.min_air_temper_CB.deselect()
gui.max_air_temper_CB.select() if gui.config.get("Stats Options", "max_air_temp").lower() == "true" else gui.max_air_temper_CB.deselect()
replace_entry(gui.time_above_temper_E, gui.config.get("Stats Options", "custom_time_above_temperature"))
replace_entry(gui.time_below_temper_E, gui.config.get("Stats Options", "custom_time_below_temperature"))
except:
if program_startup:
messagebox.showerror(("Config File Loading Error"), "default_config.ini could not be read, reverting to backup config file.")
traceback.print_exc()
# If an error is encountered, try loading "backup_config.ini"
copyfile(gui.master_dir_path / "config_files" / "backup_config.ini", gui.master_dir_path / "config_files" / "default_config.ini")
gui.config.read(gui.master_dir_path / "config_files" / "default_config.ini")
load_config(gui, program_startup=True)
else:
messagebox.showerror(("Config File Loading Error"), str(config_file) + " could not be read.")
traceback.print_exc()
def set_defaults(gui):
"""
    Updates the default configuration file with the current GUI status.
"""
try:
update_config(gui)
messagebox.showinfo("Default Parameters Saved", "default_config.ini has been updated.")
except:
messagebox.showerror(
("Default Settings Error"), "An error was encountered while updating default parameters. Check if provided parameters are valid."
)
def init_config(gui):
"""
    Initializes the configuration parser from default_config.ini; backup_config.ini is used as a fallback if parsing fails.
"""
gui.config = configparser.RawConfigParser()
config_default_path = Path(gui.master_dir_path / "config_files" / "default_config.ini")
backup_config_path = Path(gui.master_dir_path / "config_files" / "backup_config.ini")
try:
gui.config.read(str(config_default_path))
except configparser.ParsingError:
copyfile(backup_config_path, config_default_path)
gui.config.read(str(config_default_path))
def update_config(gui, config_file=None):
"""
Generates a configuration file from the current GUI parameters. If no file name if provided,
this function saves to default_config.ini, resetting the default parameters for NestIQ.
Args:
config_file (string): path to and name of file to be saved
"""
if config_file is None:
config_file = Path(gui.master_dir_path / "config_files" / "default_config.ini")
gui.config.set("Main Settings", "show_warnings", gui.show_warns_BV.get())
gui.config.set("Main Settings", "day_start_time", gui.day_start_E.get())
gui.config.set("Main Settings", "night_start_time", gui.night_start_E.get())
gui.config.set("Main Settings", "restrict_bout_search", gui.restrict_search_BV.get())
gui.config.set("Main Settings", "smoothing_radius", gui.smoothing_radius_E.get())
gui.config.set("Main Settings", "duration_threshold", gui.dur_thresh_E.get())
gui.config.set("Advanced Settings", "train_from", int(gui.train_from_IV.get()))
gui.config.set("Advanced Settings", "off_bout_initial", gui.init_off_E.get())
gui.config.set("Advanced Settings", "on_bout_initial", gui.init_on_E.get())
gui.config.set("Advanced Settings", "off_off_trans", gui.off_off_trans_E.get())
gui.config.set("Advanced Settings", "off_on_trans", gui.off_on_trans_E.get())
gui.config.set("Advanced Settings", "on_on_trans", gui.on_on_trans_E.get())
gui.config.set("Advanced Settings", "on_off_trans", gui.on_off_trans_E.get())
gui.config.set("Advanced Settings", "off_bout_emis_mean", gui.off_mean_E.get())
gui.config.set("Advanced Settings", "off_bout_emis_stdev", gui.off_stdev_E.get())
gui.config.set("Advanced Settings", "on_bout_emis_mean", gui.on_mean_E.get())
gui.config.set("Advanced Settings", "on_bout_emis_stdev", gui.on_stdev_E.get())
gui.config.set("Plot Options", "manual_plot_dimensions", bool(gui.manual_plot_dims.get()))
gui.config.set("Plot Options", "plot_x_dim", gui.plot_dim_x_E.get())
gui.config.set("Plot Options", "plot_y_dim", gui.plot_dim_y_E.get())
gui.config.set("Plot Options", "title_font_size", gui.title_font_size_E.get())
gui.config.set("Plot Options", "axis_title_font_size", gui.axis_title_font_size_E.get())
gui.config.set("Plot Options", "axis_label_font_size", gui.axis_label_font_size_E.get())
gui.config.set("Plot Options", "axis_tick_size", gui.axis_tick_size_E.get())
gui.config.set("Plot Options", "legend_font_size", gui.legend_font_size_E.get())
gui.config.set("Plot Options", "plot_egg_tempers", gui.plot_egg_BV.get())
gui.config.set("Plot Options", "plot_air_tempers", gui.plot_air_BV.get())
gui.config.set("Plot Options", "plot_egg_minus_air", gui.plot_adj_BV.get())
gui.config.set("Plot Options", "plot_smoothed", bool(gui.smooth_status_IV.get()))
gui.config.set("Plot Options", "legend_location", gui.legend_loc.get())
gui.config.set("Plot Options", "on_point_color", gui.on_point_color.get())
gui.config.set("Plot Options", "off_point_color", gui.off_point_color.get())
gui.config.set("Plot Options", "bout_line_color", gui.bout_line_color.get())
gui.config.set("Plot Options", "air_line_color", gui.air_line_color.get())
gui.config.set("Plot Options", "day_marker_color", gui.day_marker_color.get())
gui.config.set("Plot Options", "on_point_size", gui.on_point_size_E.get())
gui.config.set("Plot Options", "bout_line_width", gui.bout_line_width_E.get())
gui.config.set("Plot Options", "air_line_width", gui.air_line_width_E.get())
gui.config.set("Plot Options", "day_marker_width", gui.day_marker_width_E.get())
gui.config.set("Plot Options", "show_day_marker", gui.show_day_markers_BV.get())
gui.config.set("Plot Options", "show_grid", gui.show_grid_BV.get())
gui.config.set("Stats Options", "day_number", gui.day_num_BV.get())
gui.config.set("Stats Options", "date", gui.date_BV.get())
gui.config.set("Stats Options", "off_bout_count", gui.off_count_BV.get())
gui.config.set("Stats Options", "mean_off_bout_duration", gui.off_dur_BV.get())
gui.config.set("Stats Options", "off_bout_duration_stdev", gui.off_dur_sd_BV.get())
gui.config.set("Stats Options", "mean_off_bout_temp_drop", gui.off_dec_BV.get())
gui.config.set("Stats Options", "off_bout_temp_drop_stdev", gui.off_dec_sd_BV.get())
gui.config.set("Stats Options", "mean_off_bout_temp", gui.mean_off_temper_BV.get())
gui.config.set("Stats Options", "off_bout_time_sum", gui.off_time_sum_BV.get())
gui.config.set("Stats Options", "on_bout_count", gui.on_count_BV.get())
gui.config.set("Stats Options", "mean_on_bout_duration", gui.on_dur_BV.get())
gui.config.set("Stats Options", "on_bout_duration_stdev", gui.on_dur_sd_BV.get())
gui.config.set("Stats Options", "mean_on_bout_temp_rise", gui.on_inc_BV.get())
gui.config.set("Stats Options", "on_bout_temp_rise_stdev", gui.on_inc_sd_BV.get())
gui.config.set("Stats Options", "mean_on_bout_temp", gui.mean_on_temper_BV.get())
gui.config.set("Stats Options", "on_bout_time_sum", gui.on_time_sum_BV.get())
gui.config.set("Stats Options", "time_above_critical", gui.time_above_temper_BV.get())
gui.config.set("Stats Options", "time_below_critical", gui.time_below_temper_BV.get())
gui.config.set("Stats Options", "bouts_dropped", gui.bouts_dropped_BV.get())
gui.config.set("Stats Options", "mean_daytime_temperature", gui.mean_temper_d_BV.get())
gui.config.set("Stats Options", "daytime_temp_stdev", gui.mean_temper_d_sd_BV.get())
gui.config.set("Stats Options", "median_daytime_temp", gui.median_temper_d_BV.get())
gui.config.set("Stats Options", "min_daytime_temp", gui.min_temper_d_BV.get())
gui.config.set("Stats Options", "max_daytime_temp", gui.max_temper_d_BV.get())
gui.config.set("Stats Options", "mean_nighttime_temp", gui.mean_temper_n_BV.get())
gui.config.set("Stats Options", "nighttime_temp_stdev", gui.mean_temper_n_sd_BV.get())
gui.config.set("Stats Options", "median_nighttime_temp", gui.median_temper_n_BV.get())
gui.config.set("Stats Options", "min_nighttime_temp", gui.min_temper_n_BV.get())
gui.config.set("Stats Options", "max_nighttime_temp", gui.max_temper_n_BV.get())
gui.config.set("Stats Options", "mean_daynight_temp", gui.mean_temper_dn_BV.get())
gui.config.set("Stats Options", "daynight_temp_stdev", gui.mean_temper_dn_sd_BV.get())
gui.config.set("Stats Options", "median_daynight_temp", gui.median_temper_dn_BV.get())
gui.config.set("Stats Options", "min_daynight_temp", gui.min_temper_dn_BV.get())
gui.config.set("Stats Options", "max_daynight_temp", gui.max_temper_dn_BV.get())
gui.config.set("Stats Options", "mean_air_temp", gui.mean_air_temper_BV.get())
gui.config.set("Stats Options", "mean_air_temp_stdev", gui.mean_air_temper_sd_BV.get())
gui.config.set("Stats | |
                qty = xl_rowcol_to_cell(row, start_col + columns['qty']['col']),
unit_price = xl_rowcol_to_cell(row, start_col + columns['unit_price']['col'])
),
wrk_formats['currency']
)
        # If any distributors are present, correlate the prices and available quantities across them.
if distributor_dict.keys():
# Enter the spreadsheet formula to find this part's minimum unit price across all distributors.
wks.write_formula(
row, start_col + columns['unit_price']['col'],
'=MINA({})'.format(','.join(dist_unit_prices)),
wrk_formats['currency']
)
# If part is unavailable from all distributors, color quantity cell red.
wks.conditional_format(
row, start_col + columns['qty']['col'],
row, start_col + columns['qty']['col'],
{
'type': 'formula',
'criteria': '=IF(SUM({})=0,1,0)'.format(','.join(dist_qty_avail)),
'format': wrk_formats['not_available']
}
)
# If total available part quantity is less than needed quantity, color cell orange.
wks.conditional_format(
row, start_col + columns['qty']['col'],
row, start_col + columns['qty']['col'],
{
'type': 'cell',
'criteria': '>',
'value': '=SUM({})'.format(','.join(dist_qty_avail)),
'format': wrk_formats['too_few_available']
}
)
# If total purchased part quantity is less than needed quantity, color cell yellow.
wks.conditional_format(
row, start_col + columns['qty']['col'],
row, start_col + columns['qty']['col'],
{
'type': 'cell',
'criteria': '>',
'value': '=SUM({})'.format(','.join(dist_qty_purchased)),
'format': wrk_formats['too_few_purchased'],
}
)
# Enter part shortage quantity.
try:
wks.write(row, start_col + columns['short']['col'],
0) # slack quantity. (Not handled, yet.)
except KeyError:
pass
row += 1 # Go to next row.
# Sum the extended prices for all the parts to get the total minimum cost.
# If have read multiple BOM file calculate it by `SUMPRODUCT()` of the
# board project quantity components 'qty_prj*' by unitary price 'Unit$'.
total_cost_col = start_col + columns['ext_price']['col']
if isinstance(qty, list):
unit_price_col = start_col + columns['unit_price']['col']
unit_price_range = xl_range(PART_INFO_FIRST_ROW, unit_price_col,
PART_INFO_LAST_ROW, unit_price_col)
# Add each project board total.
for i_prj in range(len(qty)):
qty_col = start_col + columns['qty_prj{}'.format(i_prj)]['col']
wks.write(total_cost_row + 3*i_prj, total_cost_col,
'=SUMPRODUCT({qty_range},{unit_price_range})'.format(
unit_price_range=unit_price_range,
qty_range=xl_range(PART_INFO_FIRST_ROW, qty_col,
PART_INFO_LAST_ROW, qty_col)),
wrk_formats['total_cost_currency'])
        # Add the overall spreadsheet total. This can be equal to or bigger than
        # the sum of the per-project totals above because, when a part or subpart
        # has a partial or fractional quantity, the total quantity column 'qty'
        # holds the ceiling of the sum of the per-project quantities.
total_cost_row = start_row -1 # Change the position of the total price cell.
wks.write(total_cost_row, total_cost_col, '=SUM({sum_range})'.format(
sum_range=xl_range(PART_INFO_FIRST_ROW, total_cost_col,
PART_INFO_LAST_ROW, total_cost_col)),
wrk_formats['total_cost_currency'])
# Add the total purchase.
if distributor_dict.keys():
next_line = row + 1
wks.write(next_line, start_col + columns['unit_price']['col'],
'Total Purchase:', wrk_formats['total_cost_label'])
wks.write_comment(next_line, start_col + columns['unit_price']['col'],
'This is the total of your cart across all distributors.')
wks.write(next_line, start_col + columns['ext_price']['col'],
'=SUM({})'.format(','.join(dist_ext_prices)),
wrk_formats['total_cost_currency'])
# Get the actual currency rate to use.
next_line = row + 1
used_currencies = list(set(used_currencies))
    logger.log(DEBUG_OVERVIEW, 'Getting distributor currency conversion rate {} to {}...', used_currencies, CURRENCY_ALPHA3)
if used_currencies:
if CURRENCY_ALPHA3 in used_currencies:
used_currencies.remove(CURRENCY_ALPHA3)
wks.write(next_line, start_col + columns['value']['col'],
'Used currency rates:')
next_line = next_line + 1
for used_currency in used_currencies:
if used_currency!=CURRENCY_ALPHA3:
wks.write(next_line, start_col + columns['value']['col'],
'{c}({c_s})/{d}({d_s}):'.format(c=CURRENCY_ALPHA3, d=used_currency, c_s=CURRENCY_SYMBOL,
d_s=numbers.get_currency_symbol(used_currency, locale=DEFAULT_LANGUAGE)
),
wrk_formats['currency_rate_name']
)
WORKBOOK.define_name('{c}_{d}'.format(c=CURRENCY_ALPHA3, d=used_currency),
'={wks_name}!{cell_ref}'.format(
wks_name="'" + WORKSHEET_NAME + "'",
cell_ref=xl_rowcol_to_cell(next_line, columns['value']['col'] + 1,
row_abs=True, col_abs=True)))
wks.write(next_line, columns['value']['col'] + 1,
currency_convert(1, used_currency, CURRENCY_ALPHA3)
)
next_line = next_line + 1
# Return column following the globals so we know where to start next set of cells.
# Also return the columns where the references and quantity needed of each part is stored.
return next_line, start_col + num_cols, start_col + columns['refs']['col'], start_col + columns['qty']['col'], columns
def add_dist_to_worksheet(wks, wrk_formats, columns_global, start_row, start_col,
unit_cost_row, total_cost_row, part_ref_col, part_qty_col,
dist, parts, supress_cat_url=True):
'''Add distributor-specific part data to the spreadsheet.'''
logger.log(DEBUG_OVERVIEW, '# Writing {}'.format(distributor_dict[dist]['label']))
global CURRENCY_ALPHA3
# Columns for the various types of distributor-specific part data.
columns = {
'avail': {
'col': 0,
# column offset within this distributor range of the worksheet.
'level': 1, # Outline level (or hierarchy level) for this column.
'label': 'Avail', # Column header label.
'width': None, # Column width (default in this case).
'comment': '''Available quantity of each part at the distributor.
Red -> No quantity available.
Orange -> Too little quantity available.'''
},
'purch': {
'col': 1,
'level': 2,
'label': 'Purch',
'width': None,
            'comment': 'Purchase quantity of each part from this distributor.\nYellow -> This part has a minimum purchase quantity greater than 1 (check the price breaks).\nRed -> Purchasing more than the available quantity.'
},
'unit_price': {
'col': 2,
'level': 2,
'label': 'Unit$',
'width': None,
'comment': 'Unit price of each part from this distributor.\nGreen -> lowest price across distributors.'
},
'ext_price': {
'col': 3,
'level': 0,
'label': 'Ext$',
'width': 15, # Displays up to $9,999,999.99 without "###".
'comment': '(Unit Price) x (Purchase Qty) of each part from this distributor.\nRed -> Next price break is cheaper.\nGreen -> Cheapest supplier.'
},
'part_num': {
'col': 4,
'level': 2,
'label': 'Cat#',
'width': 15,
            'comment': 'Distributor-assigned catalog number for each part and a link to its web page (ctrl-click). Extra distributor data is shown as a comment.'
},
}
if not supress_cat_url:
        # Add an extra column for the hyperlink.
columns.update({'link': {
'col': 5,
'level': 2,
'label': 'URL',
'width': 15,
'comment': 'Distributor catalog link (ctrl-click).'
}})
        columns['part_num']['comment'] = 'Distributor-assigned catalog number for each part. Extra distributor data is shown as a comment.'
num_cols = len(list(columns.keys()))
row = start_row # Start building distributor section at this row.
# Add label for this distributor.
wks.merge_range(row, start_col, row, start_col + num_cols - 1,
distributor_dict[dist]['label']['name'].title(), wrk_formats[dist])
#if distributor_dict[dist]['type']!='local':
# wks.write_url(row, start_col,
# distributor_dict[dist]['label']['url'], wrk_formats[dist],
# distributor_dict[dist]['label']['name'].title())
row += 1 # Go to next row.
# Add column headers, comments, and outline level (for hierarchy).
for k in list(columns.keys()):
col = start_col + columns[k]['col'] # Column index for this column.
wks.write_string(row, col, columns[k]['label'], wrk_formats['header'])
wks.write_comment(row, col, columns[k]['comment'])
wks.set_column(col, col, columns[k]['width'], None,
{'level': columns[k]['level']})
row += 1 # Go to next row.
num_parts = len(parts)
# To determine the number of BOM files read, check the part's 'manf#_qty' field:
# if it is a `list()` instance use its length, otherwise the count is always `1`.
num_prj = max([len(part.fields.get('manf#_qty',[])) if isinstance(part.fields.get('manf#_qty',[]),list) else 1 for part in parts])
# Add distributor data for each part.
PART_INFO_FIRST_ROW = row # Starting row of part info.
PART_INFO_LAST_ROW = PART_INFO_FIRST_ROW + num_parts - 1 # Last row of part info.
for part in parts:
dist_part_num = part.part_num[dist] # Get the distributor part number.
price_tiers = part.price_tiers[dist] # Extract price tiers from distributor HTML page tree.
dist_currency = part.currency[dist] # Extract currency used by the distributor.
# If the part number doesn't exist, just leave this row blank.
if len(dist_part_num) == 0:
row += 1 # Skip this row and go to the next.
continue
# if len(dist_part_num) == 0 or part.qty_avail[dist] is None or len(list(price_tiers.keys())) == 0:
# row += 1 # Skip this row and go to the next.
# continue
# Enter distributor part number for ordering purposes.
if dist_part_num:
wks.write(row, start_col + columns['part_num']['col'], dist_part_num, wrk_formats['part_format'])
else:
if supress_cat_url:
dist_part_num = 'Link' # To use as text for the link.
try:
# Add a comment in the 'Cat#' column with the extra information gathered from the distributor web page.
comment = '\n'.join(sorted([ k.capitalize()+SEPRTR+' '+v for k, v in part.info_dist[dist].items() if k in EXTRA_INFO_DISPLAY]))
if comment:
wks.write_comment(row, start_col + columns['part_num']['col'], comment)
except:
pass
# Enter a link to the distributor webpage for this part, even if there
# is no valid quantity or pricing for the part (see next conditional).
# Having the link present will help debug if the extraction of the
# quantity or pricing information was done correctly.
if part.url[dist]:
if supress_cat_url:
wks.write_url(row, start_col + columns['part_num']['col'],
part.url[dist], string=dist_part_num)
else:
wks.write_url(row, start_col + columns['link']['col'], part.url[dist])
# Enter quantity of part available at this distributor unless it is None
# which means the part is not stocked.
if part.qty_avail[dist]:
wks.write(row, start_col + columns['avail']['col'],
part.qty_avail[dist], wrk_formats['part_format'])
else:
wks.write(row, start_col + columns['avail']['col'],
'NonStk', wrk_formats['not_stocked'])
wks.write_comment(row, start_col + columns['avail']['col'],
'This part is listed but is not normally stocked.')
# Purchase quantity always starts as blank because nothing has been purchased yet.
wks.write(row, start_col + columns['purch']['col'], '', None)
# Add pricing information if it exists.
if len(list(price_tiers)) > 0:
# Add the price for a single unit if it doesn't already exist in the tiers.
try:
min_qty = | |
vectors of length 4 whose entries are in {0, ..., 4} enumerated up to the action of Permutation Group with generators [(1,2,3,4)]
"""
if self._sum is not None:
if self._max_part >= 0:
return "Vectors of length %s and of sum %s whose entries is in {0, ..., %s} enumerated up to the action of %s"%(self.n, self._sum, self._max_part, self.permutation_group())
else:
return "Integer vectors of length %s and of sum %s enumerated up to the action of %s"%(self.n, self._sum, self.permutation_group())
else:
return "Integer vectors of length %s whose entries is in {0, ..., %s} enumerated up to the action of %s"%(self.n, self._max_part, self.permutation_group())
def roots(self):
r"""
Returns the root of generation of ``self``. This method is
required to build the tree structure of ``self`` which
inherits from the class
:class:`~sage.combinat.backtrack.SearchForest`.
EXAMPLES::
sage: I = IntegerVectorsModPermutationGroup(PermutationGroup([[(1,2,3,4)]]))
sage: I.roots()
[[0, 0, 0, 0]]
"""
return [self.element_class(self, self.n*[0,], check=False)]
def children(self, x):
r"""
Returns the list of children of the element ``x``. This method
is required to build the tree structure of ``self`` which
inherits from the class
:class:`~sage.combinat.backtrack.SearchForest`.
EXAMPLES::
sage: I = IntegerVectorsModPermutationGroup(PermutationGroup([[(1,2,3,4)]]))
sage: I.children(I([2,1,0,0], check=False))
[[2, 2, 0, 0], [2, 1, 1, 0], [2, 1, 0, 1]]
"""
return canonical_children(self._sgs, x, -1)
def permutation_group(self):
r"""
Returns the permutation group given to define ``self``.
EXAMPLES::
sage: I = IntegerVectorsModPermutationGroup(PermutationGroup([[(1,2,3)]]), 5)
sage: I.permutation_group()
Permutation Group with generators [(1,2,3)]
"""
return self._permgroup
def __contains__(self, v):
r"""
TESTS::
sage: I = IntegerVectorsModPermutationGroup(PermutationGroup([[(1,2,3,4)]]),6)
sage: [6,0,0,0] in I
True
sage: [5,0,1,0] in I
True
sage: [0,5,1,0] in I
False
sage: [3,0,1,3] in I
False
sage: [3,3,1,0] in I
False
"""
try:
return (self(v)).parent() is self
except Exception:
return False
def __call__(self, v, check=True):
r"""
Make `v` an element living in ``self``.
TESTS::
sage: I = IntegerVectorsModPermutationGroup(PermutationGroup([[(1,2,3,4)]]), 4)
sage: v = I([2,1,0,1]); v
[2, 1, 0, 1]
sage: v.parent()
Integer vectors of length 4 and of sum 4 enumerated up to
the action of Permutation Group with generators
[(1,2,3,4)]
"""
try:
if v.parent() is self:
return v
else:
raise ValueError, '%s should be a Python list of integers'%(v)
except Exception:
return self.element_class(self, list(v), check=check)
def __iter__(self):
r"""
TESTS::
sage: I = IntegerVectorsModPermutationGroup(PermutationGroup([[(1,2,3,4)]]),4)
sage: for i in I: i
[4, 0, 0, 0]
[3, 1, 0, 0]
[3, 0, 1, 0]
[3, 0, 0, 1]
[2, 2, 0, 0]
[2, 1, 1, 0]
[2, 1, 0, 1]
[2, 0, 2, 0]
[2, 0, 1, 1]
[1, 1, 1, 1]
sage: I = IntegerVectorsModPermutationGroup(PermutationGroup([[(1,2,3,4)]]), sum=7, max_part=3)
sage: for i in I: i
[3, 3, 1, 0]
[3, 3, 0, 1]
[3, 2, 2, 0]
[3, 2, 1, 1]
[3, 2, 0, 2]
[3, 1, 3, 0]
[3, 1, 2, 1]
[3, 1, 1, 2]
[3, 0, 2, 2]
[2, 2, 2, 1]
"""
if self._max_part < 0:
return self.elements_of_depth_iterator(self._sum)
else:
SF = SearchForest((self([0]*(self.n), check=False),),
lambda x : map(lambda y : self(y, check=False), canonical_children(self._sgs, x, self._max_part)),
algorithm = 'breadth')
if self._sum is None:
return iter(SF)
else:
return SF.elements_of_depth_iterator(self._sum)
def is_canonical(self, v, check=True):
r"""
Returns ``True`` if the integer list ``v`` is maximal in its
orbit under the action of the permutation group given to
define ``self``. Such integer vectors are said to be
canonical. A vector `v` is canonical if and only if
.. math::
v = \max_{\text{lex order}} \{g \cdot v | g \in G \}
EXAMPLES::
sage: I = IntegerVectorsModPermutationGroup(PermutationGroup([[(1,2,3,4)]]), max_part=3)
sage: I.is_canonical([3,0,0,0])
True
sage: I.is_canonical([1,0,2,0])
False
sage: I.is_canonical([2,0,1,0])
True
"""
if check:
assert isinstance(v, (ClonableIntArray, list)), '%s should be a list or an integer vector'%v
assert (self.n == len(v)), '%s should be of length %s'%(v, self.n)
for p in v:
assert (p == NN(p)), 'Elements of %s should be integers'%v
return is_canonical(self._sgs, self.element_class(self, list(v), check=False))
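# Stand-alone sketch (plain Python, not Sage's strong-generating-set machinery) of the
# canonicality test above, for the cyclic group used in the doctests,
# PermutationGroup([[(1,2,3,4)]]): a vector is canonical iff it is the lex-largest
# element of its orbit, which here is the set of its cyclic rotations. The helper
# names are illustrative only and would live at module level in real code.
def _rotations(v):
    return [tuple(v[i:] + v[:i]) for i in range(len(v))]
def _is_canonical_under_rotation(v):
    return tuple(v) == max(_rotations(list(v)))
# _is_canonical_under_rotation([3, 0, 0, 0])  ->  True
# _is_canonical_under_rotation([1, 0, 2, 0])  ->  False  (its rotation (2, 0, 1, 0) is lex-larger)
# _is_canonical_under_rotation([2, 0, 1, 0])  ->  True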
def ambient(self):
r"""
Return the ambient space from which ``self`` is a quotient.
EXAMPLES::
sage: S = IntegerVectorsModPermutationGroup(PermutationGroup([[(1,2,3,4)]]), 6); S.ambient()
Integer vectors that sum to 6
sage: S = IntegerVectorsModPermutationGroup(PermutationGroup([[(1,2,3,4)]]), 6, max_part=12); S.ambient()
Integer vectors that sum to 6 with constraints: max_part=12
.. todo::
Integer vectors should accept ``max_part`` as a single argument, and the following should change::
sage: S = IntegerVectorsModPermutationGroup(PermutationGroup([[(1,2,3,4)]]), max_part=12); S.ambient()
Integer vectors
"""
if self._sum is not None:
if self._max_part <= -1:
return IntegerVectors(n=self._sum)
else:
return IntegerVectors(n=self._sum, max_part=self._max_part)
else:
# Fix me once max_part should be accepted as a single
# argument for integer vectors
return IntegerVectors(max_part=self._max_part)
def lift(self, elt):
r"""
Lift the element ``elt`` inside the ambient space from which ``self`` is a quotient.
EXAMPLES::
sage: S = IntegerVectorsModPermutationGroup(PermutationGroup([[(1,2,3,4)]]), max_part=1)
sage: v = S.lift([1,0,1,0]); v
[1, 0, 1, 0]
sage: v in IntegerVectors(max_part=1)
True
sage: S = IntegerVectorsModPermutationGroup(PermutationGroup([[(1,2,3,4)]]), sum=6)
sage: v = S.lift(S.list()[5]); v
[4, 1, 1, 0]
sage: v in IntegerVectors(n=6)
True
"""
# TODO: For now, Sage integer vectors are just python list.
# Once Integer vectors will have an element class, update this
# code properly
return list(elt)
def retract(self, elt):
r"""
Return the canonical representative of the orbit of the
integer vector ``elt`` under the action of the permutation group
defining ``self``.
If the element ``elt`` is already maximal in its orbit for
the lexicographic order, then ``elt`` is itself the canonical
representative of its orbit.
EXAMPLES::
sage: S = IntegerVectorsModPermutationGroup(PermutationGroup([[(1,2,3,4)]]), sum=2, max_part=1)
sage: S.retract([1,1,0,0])
[1, 1, 0, 0]
sage: S.retract([1,0,1,0])
[1, 0, 1, 0]
sage: S.retract([1,0,0,1])
[1, 1, 0, 0]
sage: S.retract([0,1,1,0])
[1, 1, 0, 0]
sage: S.retract([0,1,0,1])
[1, 0, 1, 0]
sage: S.retract([0,0,1,1])
[1, 1, 0, 0]
"""
# TODO: Once Sage integer vector will have a data structure
# based on ClonableIntArray, remove the conversion intarray
assert len(elt) == self.n, "%s is a quotient set of %s"%(self, self.ambient())
if self._sum is not None:
assert sum(elt) == self._sum, "%s is a quotient set of %s"%(self, self.ambient())
if self._max_part >= 0:
assert max(elt) <= self._max_part, "%s is a quotient set of %s"%(self, self.ambient())
intarray = self.element_class(self, elt, check=False)
return self.element_class(self, canonical_representative_of_orbit_of(self._sgs, intarray), check=False)
def an_element(self):
r"""
Returns an element of ``self`` or raises an EmptySetError when
``self`` is empty.
EXAMPLES::
sage: S = IntegerVectorsModPermutationGroup(PermutationGroup([[(1,2,3,4)]]), sum=0, max_part=1); S.an_element()
[0, 0, 0, 0]
sage: S = IntegerVectorsModPermutationGroup(PermutationGroup([[(1,2,3,4)]]), sum=1, max_part=1); S.an_element()
[1, 0, 0, 0]
sage: S = IntegerVectorsModPermutationGroup(PermutationGroup([[(1,2,3,4)]]), sum=2, max_part=1); S.an_element()
[1, 1, 0, 0]
sage: S = IntegerVectorsModPermutationGroup(PermutationGroup([[(1,2,3,4)]]), sum=3, max_part=1); S.an_element()
[1, 1, 1, 0]
sage: S = IntegerVectorsModPermutationGroup(PermutationGroup([[(1,2,3,4)]]), sum=4, max_part=1); S.an_element()
[1, 1, 1, 1]
sage: S = IntegerVectorsModPermutationGroup(PermutationGroup([[(1,2,3,4)]]), sum=5, max_part=1); S.an_element()
Traceback (most recent call last):
...
EmptySetError
"""
if self._max_part < 0:
return self([self._sum]+(self.n-1)*[0], check=False)
else:
try:
v = iter(self)
return v.next()
except StopIteration:
from sage.categories.sets_cat import EmptySetError
raise EmptySetError
def orbit(self, v):
r"""
Returns the orbit of the vector ``v`` under the action of the
permutation group defining ``self``. The result is a set.
INPUT:
- ``v`` - an element of ``self``, or any list whose length is the
degree of the permutation group.
EXAMPLES:
We convert the result in a list in increasing lexicographic
order, to get a reproducible doctest::
sage: from sage.combinat.enumeration_mod_permgroup import lex_cmp
sage: I = IntegerVectorsModPermutationGroup(PermutationGroup([[(1,2,3,4)]]),4)
sage: I.orbit([1,1,1,1])
set([[1, 1, 1, 1]])
sage: sorted(I.orbit([3,0,0,1]), cmp=lex_cmp)
[[0, 0, 1, 3], [0, 1, 3, 0], [1, 3, 0, 0], [3, 0, 0, 1]]
"""
assert isinstance(v, (list, ClonableIntArray)), '%s should be a Python list or an element of %s'%(v, self)
try:
if v.parent() is self:
return orbit(self._sgs, v)
except Exception:
return orbit(self._sgs, self.element_class(self, v, check=False))
class Element(ClonableIntArray):
r"""
Element class for the set of integer vectors with constraints enumerated
modulo the action of a permutation group. These vectors are clonable lists
of integers which must satisfy conditions coming from the parent, as in
the method :meth:`~sage.combinat.integer_vectors_mod_permgroup.IntegerVectorsModPermutationGroup_with_constraints.Element.check`.
TESTS::
sage: I = IntegerVectorsModPermutationGroup(PermutationGroup([[(1,2,3,4)]]), 4)
sage: v = I.element_class(I, [3,1,0,0]); v
[3, 1, 0, 0]
sage: TestSuite(v).run()
sage: v = I.element_class(I, [3,2,0,0])
Traceback (most recent call last):
...
AssertionError: [3, 2, 0, 0] should be a integer vector of sum 4
"""
def check(self):
r"""
Checks that | |
#
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import time
from os.path import (
abspath,
dirname,
join,
)
from unittest import TestCase
import numpy as np
import pandas as pd
from nose_parameterized import parameterized
from pandas import read_csv
from pandas.tslib import Timedelta
from pandas.util.testing import assert_index_equal
from pytz import timezone
from toolz import concat
from catalyst.errors import (
CalendarNameCollision,
InvalidCalendarName,
)
from catalyst.testing.predicates import assert_equal
from catalyst.utils.calendars import (
deregister_calendar,
get_calendar,
register_calendar,
)
from catalyst.utils.calendars.calendar_utils import (
_default_calendar_aliases,
_default_calendar_factories,
register_calendar_type,
)
from catalyst.utils.calendars.trading_calendar import days_at_time, \
TradingCalendar
class FakeCalendar(TradingCalendar):
@property
def name(self):
return "DMY"
@property
def tz(self):
return "Asia/Ulaanbaatar"
@property
def open_time(self):
return time(11, 13)
@property
def close_time(self):
return time(11, 49)
class CalendarRegistrationTestCase(TestCase):
def setUp(self):
self.dummy_cal_type = FakeCalendar
def tearDown(self):
deregister_calendar('DMY')
def test_register_calendar(self):
# Build a fake calendar
dummy_cal = self.dummy_cal_type()
# Try to register and retrieve the calendar
register_calendar('DMY', dummy_cal)
retr_cal = get_calendar('DMY')
self.assertEqual(dummy_cal, retr_cal)
# Try to register again, expecting a name collision
with self.assertRaises(CalendarNameCollision):
register_calendar('DMY', dummy_cal)
# Deregister the calendar and ensure that it is removed
deregister_calendar('DMY')
with self.assertRaises(InvalidCalendarName):
get_calendar('DMY')
def test_register_calendar_type(self):
register_calendar_type("DMY", self.dummy_cal_type)
retr_cal = get_calendar("DMY")
self.assertEqual(self.dummy_cal_type, type(retr_cal))
def test_both_places_are_checked(self):
dummy_cal = self.dummy_cal_type()
# if instance is registered, can't register type with same name
register_calendar('DMY', dummy_cal)
with self.assertRaises(CalendarNameCollision):
register_calendar_type('DMY', type(dummy_cal))
deregister_calendar('DMY')
# if type is registered, can't register instance with same name
register_calendar_type('DMY', type(dummy_cal))
with self.assertRaises(CalendarNameCollision):
register_calendar('DMY', dummy_cal)
def test_force_registration(self):
register_calendar("DMY", self.dummy_cal_type())
first_dummy = get_calendar("DMY")
# force-register a new instance
register_calendar("DMY", self.dummy_cal_type(), force=True)
second_dummy = get_calendar("DMY")
self.assertNotEqual(first_dummy, second_dummy)
class DefaultsTestCase(TestCase):
def test_default_calendars(self):
for name in concat([_default_calendar_factories,
_default_calendar_aliases]):
self.assertIsNotNone(get_calendar(name),
"get_calendar(%r) returned None" % name)
class DaysAtTimeTestCase(TestCase):
@parameterized.expand([
# NYSE standard day
(
'2016-07-19', 0, time(9, 31), timezone('US/Eastern'),
'2016-07-19 9:31',
),
# CME standard day
(
'2016-07-19', -1, time(17, 1), timezone('America/Chicago'),
'2016-07-18 17:01',
),
# CME day after DST start
(
'2004-04-05', -1, time(17, 1), timezone('America/Chicago'),
'2004-04-04 17:01'
),
# ICE day after DST start
(
'1990-04-02', -1, time(19, 1), timezone('America/Chicago'),
'1990-04-01 19:01',
),
])
def test_days_at_time(self, day, day_offset, time_offset, tz, expected):
days = pd.DatetimeIndex([pd.Timestamp(day, tz=tz)])
result = days_at_time(days, time_offset, tz, day_offset)[0]
expected = pd.Timestamp(expected, tz=tz).tz_convert('UTC')
self.assertEqual(result, expected)
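# A minimal sketch of the contract the parameterized cases above encode for
# days_at_time(); this is an assumption drawn from the expected values, not
# catalyst's implementation, and the helper name is illustrative only: shift the
# session date by day_offset days, attach the wall-clock time in the exchange's
# timezone, then express the result in UTC.
def _expected_day_at_time(day, t, tz, day_offset):
    import pandas as pd
    shifted = pd.Timestamp(day) + pd.Timedelta(days=day_offset)
    localized = pd.Timestamp(shifted.year, shifted.month, shifted.day,
                             t.hour, t.minute).tz_localize(tz)
    return localized.tz_convert('UTC')
# e.g. _expected_day_at_time('2016-07-19', time(9, 31), timezone('US/Eastern'), 0)
# should equal pd.Timestamp('2016-07-19 9:31', tz='US/Eastern').tz_convert('UTC')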
class ExchangeCalendarTestBase(object):
# Override in subclasses.
answer_key_filename = None
calendar_class = None
GAPS_BETWEEN_SESSIONS = True
MAX_SESSION_HOURS = 0
@staticmethod
def load_answer_key(filename):
"""
Load a CSV from tests/resources/calendars/{filename}.csv
"""
fullpath = join(
dirname(abspath(__file__)),
'../resources',
'calendars',
filename + '.csv',
)
return read_csv(
fullpath,
index_col=0,
# NOTE: Merely passing parse_dates=True doesn't cause pandas to set
# the dtype correctly, and passing all reasonable inputs to the
# dtype kwarg causes read_csv to barf.
parse_dates=[0, 1, 2],
date_parser=lambda x: pd.Timestamp(x, tz='UTC')
)
@classmethod
def setupClass(cls):
cls.answers = cls.load_answer_key(cls.answer_key_filename)
cls.start_date = cls.answers.index[0]
cls.end_date = cls.answers.index[-1]
cls.calendar = cls.calendar_class(cls.start_date, cls.end_date)
cls.one_minute = pd.Timedelta(minutes=1)
cls.one_hour = pd.Timedelta(hours=1)
def test_sanity_check_session_lengths(self):
# make sure that no session is longer than self.MAX_SESSION_HOURS hours
for session in self.calendar.all_sessions:
o, c = self.calendar.open_and_close_for_session(session)
delta = c - o
self.assertTrue((delta.seconds / 3600) <= self.MAX_SESSION_HOURS)
def test_calculated_against_csv(self):
assert_index_equal(self.calendar.schedule.index, self.answers.index)
def test_is_open_on_minute(self):
one_minute = pd.Timedelta(minutes=1)
for market_minute in self.answers.market_open:
market_minute_utc = market_minute
# The exchange should be classified as open on its first minute
self.assertTrue(self.calendar.is_open_on_minute(market_minute_utc))
if self.GAPS_BETWEEN_SESSIONS:
# Decrement minute by one, to minute where the market was not
# open
pre_market = market_minute_utc - one_minute
self.assertFalse(self.calendar.is_open_on_minute(pre_market))
for market_minute in self.answers.market_close:
close_minute_utc = market_minute
# should be open on its last minute
self.assertTrue(self.calendar.is_open_on_minute(close_minute_utc))
if self.GAPS_BETWEEN_SESSIONS:
# increment minute by one minute, should be closed
post_market = close_minute_utc + one_minute
self.assertFalse(self.calendar.is_open_on_minute(post_market))
def _verify_minute(self, calendar, minute,
next_open_answer, prev_open_answer,
next_close_answer, prev_close_answer):
self.assertEqual(
calendar.next_open(minute),
next_open_answer
)
self.assertEqual(
self.calendar.previous_open(minute),
prev_open_answer
)
self.assertEqual(
self.calendar.next_close(minute),
next_close_answer
)
self.assertEqual(
self.calendar.previous_close(minute),
prev_close_answer
)
def test_next_prev_open_close(self):
# for each session, check:
# - the minute before the open (if gaps exist between sessions)
# - the first minute of the session
# - the second minute of the session
# - the minute before the close
# - the last minute of the session
# - the first minute after the close (if gaps exist between sessions)
answers_to_use = self.answers[1:-2]
for idx, info in enumerate(answers_to_use.iterrows()):
open_minute = info[1].iloc[0]
close_minute = info[1].iloc[1]
minute_before_open = open_minute - self.one_minute
# answers_to_use starts at the second element of self.answers,
# so self.answers.iloc[idx] is one element before, and
# self.answers.iloc[idx + 2] is one element after the current
# element
previous_open = self.answers.iloc[idx].market_open
next_open = self.answers.iloc[idx + 2].market_open
previous_close = self.answers.iloc[idx].market_close
next_close = self.answers.iloc[idx + 2].market_close
# minute before open
if self.GAPS_BETWEEN_SESSIONS:
self._verify_minute(
self.calendar, minute_before_open, open_minute,
previous_open, close_minute, previous_close
)
# open minute
self._verify_minute(
self.calendar, open_minute, next_open, previous_open,
close_minute, previous_close
)
# second minute of session
self._verify_minute(
self.calendar, open_minute + self.one_minute, next_open,
open_minute, close_minute, previous_close
)
# minute before the close
self._verify_minute(
self.calendar, close_minute - self.one_minute, next_open,
open_minute, close_minute, previous_close
)
# the close
self._verify_minute(
self.calendar, close_minute, next_open, open_minute,
next_close, previous_close
)
# minute after the close
if self.GAPS_BETWEEN_SESSIONS:
self._verify_minute(
self.calendar, close_minute + self.one_minute, next_open,
open_minute, next_close, close_minute
)
def test_next_prev_minute(self):
all_minutes = self.calendar.all_minutes
# test 20,000 minutes because it takes too long to do the rest.
for idx, minute in enumerate(all_minutes[1:20000]):
self.assertEqual(
all_minutes[idx + 2],
self.calendar.next_minute(minute)
)
self.assertEqual(
all_minutes[idx],
self.calendar.previous_minute(minute)
)
# test a couple of non-market minutes
if self.GAPS_BETWEEN_SESSIONS:
for open_minute in self.answers.market_open[1:]:
hour_before_open = open_minute - self.one_hour
self.assertEqual(
open_minute,
self.calendar.next_minute(hour_before_open)
)
for close_minute in self.answers.market_close[1:]:
hour_after_close = close_minute + self.one_hour
self.assertEqual(
close_minute,
self.calendar.previous_minute(hour_after_close)
)
def test_minute_to_session_label(self):
for idx, info in enumerate(self.answers[1:-2].iterrows()):
session_label = info[1].name
open_minute = info[1].iloc[0]
close_minute = info[1].iloc[1]
hour_into_session = open_minute + self.one_hour
minute_before_session = open_minute - self.one_minute
minute_after_session = close_minute + self.one_minute
next_session_label = self.answers.iloc[idx + 2].name
previous_session_label = self.answers.iloc[idx].name
# verify that minutes inside a session resolve correctly
minutes_that_resolve_to_this_session = [
self.calendar.minute_to_session_label(open_minute),
self.calendar.minute_to_session_label(open_minute,
direction="next"),
self.calendar.minute_to_session_label(open_minute,
direction="previous"),
self.calendar.minute_to_session_label(open_minute,
direction="none"),
self.calendar.minute_to_session_label(hour_into_session),
self.calendar.minute_to_session_label(hour_into_session,
direction="next"),
self.calendar.minute_to_session_label(hour_into_session,
direction="previous"),
self.calendar.minute_to_session_label(hour_into_session,
direction="none"),
self.calendar.minute_to_session_label(close_minute),
self.calendar.minute_to_session_label(close_minute,
direction="next"),
self.calendar.minute_to_session_label(close_minute,
direction="previous"),
self.calendar.minute_to_session_label(close_minute,
direction="none"),
session_label
]
if self.GAPS_BETWEEN_SESSIONS:
minutes_that_resolve_to_this_session.append(
self.calendar.minute_to_session_label(
minute_before_session
)
)
minutes_that_resolve_to_this_session.append(
self.calendar.minute_to_session_label(
minute_before_session,
direction="next"
)
)
minutes_that_resolve_to_this_session.append(
self.calendar.minute_to_session_label(
minute_after_session,
direction="previous"
)
)
self.assertTrue(all(x == minutes_that_resolve_to_this_session[0]
for x in minutes_that_resolve_to_this_session))
minutes_that_resolve_to_next_session = [
self.calendar.minute_to_session_label(minute_after_session),
self.calendar.minute_to_session_label(minute_after_session,
direction="next"),
next_session_label
]
self.assertTrue(all(x == minutes_that_resolve_to_next_session[0]
for x in minutes_that_resolve_to_next_session))
self.assertEqual(
self.calendar.minute_to_session_label(minute_before_session,
direction="previous"),
previous_session_label
)
# make sure that exceptions are raised at the right time
with self.assertRaises(ValueError):
self.calendar.minute_to_session_label(open_minute, "asdf")
if self.GAPS_BETWEEN_SESSIONS:
with self.assertRaises(ValueError):
self.calendar.minute_to_session_label(
minute_before_session,
direction="none"
)
@parameterized.expand([
(1, 0),
(2, 0),
(2, 1),
])
def test_minute_index_to_session_labels(self, interval, offset):
minutes = self.calendar.minutes_for_sessions_in_range(
pd.Timestamp('2011-01-04', tz='UTC'),
pd.Timestamp('2011-04-04', tz='UTC'),
)
minutes = minutes[range(offset, len(minutes), interval)]
np.testing.assert_array_equal(
np.array(minutes.map(self.calendar.minute_to_session_label),
dtype='datetime64[ns]'),
self.calendar.minute_index_to_session_labels(minutes)
)
def test_next_prev_session(self):
session_labels = self.answers.index[1:-2]
max_idx = len(session_labels) - 1
# the very first session
first_session_label = self.answers.index[0]
with self.assertRaises(ValueError):
self.calendar.previous_session_label(first_session_label)
# all the sessions in the middle
for idx, session_label in enumerate(session_labels):
if idx < max_idx:
self.assertEqual(
self.calendar.next_session_label(session_label),
session_labels[idx + 1]
)
if idx > 0:
self.assertEqual(
self.calendar.previous_session_label(session_label),
session_labels[idx - 1]
)
# the very last session
last_session_label = self.answers.index[-1]
with self.assertRaises(ValueError):
self.calendar.next_session_label(last_session_label)
@staticmethod
def _find_full_session(calendar):
for session_label in calendar.schedule.index:
if session_label not in calendar.early_closes:
return session_label
return None
def test_minutes_for_period(self):
# full session
# find a session that isn't an early close. start from the first
# session, should be quick.
full_session_label = self._find_full_session(self.calendar)
if full_session_label is None:
raise ValueError("Cannot find a full session to test!")
minutes = self.calendar.minutes_for_session(full_session_label)
_open, _close = self.calendar.open_and_close_for_session(
full_session_label
)
np.testing.assert_array_equal(
minutes,
pd.date_range(start=_open, end=_close, freq="min")
)
# early close period
early_close_session_label = self.calendar.early_closes[0]
minutes_for_early_close = \
self.calendar.minutes_for_session(early_close_session_label)
_open, _close = self.calendar.open_and_close_for_session(
early_close_session_label
)
np.testing.assert_array_equal(
minutes_for_early_close,
pd.date_range(start=_open, end=_close, freq="min")
)
def test_sessions_in_range(self):
# pick two sessions
session_count = len(self.calendar.schedule.index)
first_idx = int(session_count / 3)
second_idx = 2 * first_idx
first_session_label = self.calendar.schedule.index[first_idx]
second_session_label = self.calendar.schedule.index[second_idx]
answer_key = \
self.calendar.schedule.index[first_idx:second_idx + 1]
np.testing.assert_array_equal(
answer_key,
self.calendar.sessions_in_range(first_session_label,
second_session_label)
)
def _get_session_block(self):
# find and return | |
+ rpad("RcrsRecBal", 12)
output += "\n"
ii=0
last = None
for cat in accounts:
if not cat.getAccountOrParentIsInactive(): continue
if not (cat.getBalance() == 0
# and cat.getClearedBalance() == 0
and cat.getConfirmedBalance() == 0
and cat.getCurrentBalance() == 0
# and cat.getReconcilingBalance() == 0
and cat.getRecursiveBalance() == 0
# and cat.getRecursiveClearedBalance() == 0
and cat.getRecursiveCurrentBalance() == 0): # and cat.getRecursiveReconcilingBalance() == 0
if not last or last != cat.getAccountType():
output += "\nCATEGORY TYPE: %s\n" % cat.getAccountType()
last = cat.getAccountType()
output += "%s" % pad(cat.getFullAccountName(), 85)
mult = 1
# noinspection PyUnresolvedReferences
if cat.getAccountType() == Account.AccountType.EXPENSE: mult = -1
output += " " + rpad("%s" % baseCurr.getDoubleValue(cat.getUserBalance()*mult), 12)
# output += " " + rpad("%s" % baseCurr.getDoubleValue(cat.getUserClearedBalance()*mult), 12)
# output += " " + rpad("%s" % baseCurr.getDoubleValue(cat.getUserConfirmedBalance()*mult), 12)
output += " " + rpad("%s" % baseCurr.getDoubleValue(cat.getUserCurrentBalance()*mult), 12)
# output += " " + rpad("%s" % baseCurr.getDoubleValue(cat.getUserReconcilingBalance()*mult), 12)
output += " " + rpad("%s" % baseCurr.getDoubleValue(cat.getRecursiveUserBalance()*mult), 12)
# output += " " + rpad("%s" % baseCurr.getDoubleValue(cat.getRecursiveUserClearedBalance()*mult), 12)
output += " " + rpad("%s" % baseCurr.getDoubleValue(cat.getRecursiveUserCurrentBalance()*mult), 12)
# output += " " + rpad("%s" % baseCurr.getDoubleValue(cat.getRecursiveUserReconcilingBalance()*mult), 12)
output += "\n"
ii+=1
if not ii:
output += "<NONE FOUND>\n\n"
output += "----------------------------------------------------------------------------\n\n"
output += "\nLEGEND:\n"
output += "** NOTE: The Balances shown on a Parent Category in any section may not be the sum of its Child Categories shown in the same section.\n"
output += " The calculation matches the Moneydance Tools>Categories method and will include the balances(s) from all its Child Categories whether active, inactive or otherwise....\n\n"
output += "Balance = Account Balance\n"
# output += "ClrdBal = Cleared Balance (Normally Zero on a Category). Balance excluding uncleared or reconciling txns\n"
# output += "ConfBal = Confirmed Balance (The Balance less any unconfirmed Online / Downloaded Bank txns\n"
output += "CurrBal = Current Balance\n"
# output += "RecBal = Reconciling Balance (Normally Zero on a Category)\n"
output += "RcrsBal = Recursive (through all sub categories) Account Balance (Note: may contain balances from inactive sub-categories as per Moneydance)\n"
# output += "RcrsClrdBal = Recursive (through all sub categories) Cleared Balance (Normally Zero on a Category)\n"
output += "RcrsCurrBal = Recursive (through all sub categories) Current Balance (Note: may contain balances from inactive sub-categories as per Moneydance)\n"
# output += "RcrsRecBal = Recursive (through all sub categories) Reconciling Balance (Normally Zero on a Category)\n"
output += "----------------------------------------------------------------------------\n\n"
output += "<END>"
if lFix:
output += "\nDISCLAIMER: I take no responsibility if you decide to execute the Inactivate Zero Balance Category fix script!\n"
if not lFix:
jif = QuickJFrame("View your Active Categories with Zero Balances....", output).show_the_frame()
else:
jif = QuickJFrame("View your Active Categories with Zero Balances.... CLICK OK WHEN READY TO PROCEED", output).show_the_frame()
myPrint("J", "There are %s Active Categories with Zero Balances that could be Inactivated!" % iCountForInactivation)
if not lFix:
statusLabel.setText( ("VIEW ZERO BALANCE CATEGORIES: YOU HAVE %s Zero Balance Categories..." % iCountForInactivation).ljust(800, " "))
statusLabel.setForeground(DARK_GREEN)
myPopupInformationBox(jif, "You have %s Active Categories with Zero Balances" % iCountForInactivation, "ZERO BALANCE CATEGORIES", JOptionPane.INFORMATION_MESSAGE)
return
if iCountForInactivation < 1:
statusLabel.setText(("FIX ZERO BALANCE CATEGORIES: You have no Zero Balance Categories to fix - no fixes applied...").ljust(800, " "))
statusLabel.setForeground(DARK_GREEN)
myPopupInformationBox(jif, "No Zero Balance Categories >> No fixes will be applied !", "ZERO BALANCE CATEGORIES", JOptionPane.INFORMATION_MESSAGE)
return
if not confirm_backup_confirm_disclaimer(jif,statusLabel, "FIX - INACTIVATE ZERO BALANCE CATEGORIES", "Inactivate these %s Zero Balance Categories?" %(iCountForInactivation)):
return
# OK - so we are fixing...!
myPrint("B", ">> User selected to Inactivate %s Zero Balance Categories!?" % iCountForInactivation)
moneydance_data.setRecalcBalances(False)
moneydance_ui.setSuspendRefresh(True)
for cat in categoriesToInactivate.keys():
if categoriesToInactivate.get(cat):
myPrint("B", "Cat: " + cat.getFullAccountName() + " with Zero Balances, Set to INACTIVE!")
cat.setAccountIsInactive(True)
cat.syncItem()
moneydance_ui.getMain().saveCurrentAccount()
moneydance_data.setRecalcBalances(True)
moneydance_ui.setSuspendRefresh(False) # This does this too: book.notifyAccountModified(root)
myPrint("B", "Finished Inactivating %s Categories with Zero Balances..." % iCountForInactivation)
statusLabel.setText(("FIX - I have set %s Categories with Zero Balances to Inactive as requested!" % iCountForInactivation).ljust(800, " "))
statusLabel.setForeground(Color.RED)
myPopupInformationBox(jif,"OK - I have set %s Active Categories with Zero Balances to INACTIVE!" % iCountForInactivation,"INACTIVATE ZERO BALANCE CATEGORIES",JOptionPane.WARNING_MESSAGE)
play_the_money_sound()
myPrint("D", "Exiting ", inspect.currentframe().f_code.co_name, "()")
return
def fix_account_parent(statusLabel):
global toolbox_frame_, debug
# fix_account_parent.py (and old check_root_structure.py)
myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()")
myPrint("B", "Diagnosing INVALID Parent Accounts....")
myPrint("P", "--------------------------------------")
book = moneydance.getCurrentAccountBook()
root = book.getRootAccount()
allAccounts = book.getItemsWithType(Account.SYNCABLE_TYPE_VALUE)
def validate_path(check_acct):
_acct = check_acct
iterations = 0
while True:
# noinspection PyUnresolvedReferences
if _acct.getAccountType() == Account.AccountType.ROOT: return True
if _acct is None: return False
if iterations > 100: return False
iterations+=1
_acct = _acct.getParentAccount()
continue
output = "FIX ACCOUNT(s)' INVALID PARENT ACCOUNTS:\n" \
" ========================================\n\n"
def check_fix_accounts(lFix=False):
iErrors=0
textFixed=""
for acct in allAccounts:
# skip root
# noinspection PyUnresolvedReferences
if acct == root or acct.getAccountType() == Account.AccountType.ROOT: continue
parent = acct.getParentAccount()
if ((parent is None or parent == acct)
or (parent is not None and parent != root and not validate_path(acct))):
iErrors+=1
if lFix:
myPrint("B","Resetting parent account for %s to root" %(acct.getAccountName()))
textFixed+=("Resetting parent account for %s to root\n" %(acct.getAccountName()))
acct.setParentAccount(root)
acct.syncItem()
else:
myPrint("B", "@@ ERROR - NEEDS RESET - Account: %s\n" % acct.getAccountName())
textFixed+="NEEDS RESET - Account: %s\n" % acct.getAccountName()
return iErrors, textFixed
iCountErrors, x = check_fix_accounts(lFix=False)
output += x
if iCountErrors<1:
statusLabel.setText(("'FIX: Account(s)'s Invalid Parent - CONGRATULATIONS - I found no Invalid parents.......").ljust(800, " "))
statusLabel.setForeground(Color.BLUE)
myPrint("B", "'FIX: Account(s)'s Invalid Parent - CONGRATULATIONS - I found no Invalid parents.......")
myPopupInformationBox(toolbox_frame_,"CONGRATULATIONS - I found no Accounts with Invalid parents...")
return
myPrint("B","FIX - Account(s)' Invalid Parent Accounts - found %s errors..." %(iCountErrors))
jif=QuickJFrame("VIEW ACCOUNT(s) WITH INVALID PARENT ACCOUNTS", output).show_the_frame()
if not confirm_backup_confirm_disclaimer(jif,statusLabel,"FIX ACCOUNT(S)' INVALID PARENTS","FIX %s Acct(s)'s Invalid Parent Accts?" %(iCountErrors)):
return
jif.dispose()
myPrint("B", "User accepted disclaimer to FIX Account(s)' Invalid Parent Accounts. Proceeding.....")
output += "\n\nRUNNING FIX ON PARENT ACCOUNTS\n" \
"--------------------------------\n\n"
moneydance_data.setRecalcBalances(False)
moneydance_ui.setSuspendRefresh(True)
iCountErrors, x = check_fix_accounts(lFix=True)
output += x
output += "\n<END>"
moneydance_ui.getMain().saveCurrentAccount()
moneydance_data.setRecalcBalances(True)
moneydance_ui.setSuspendRefresh(False) # This does this too: book.notifyAccountModified(root)
root = moneydance.getRootAccount()
moneydance_data.notifyAccountModified(root)
myPrint("B", "FIXED %s invalid Parent Accounts" %(iCountErrors))
play_the_money_sound()
statusLabel.setText(("FIXED %s invalid Parent Accounts" %(iCountErrors)).ljust(800, " "))
statusLabel.setForeground(DARK_GREEN)
jif=QuickJFrame("VIEW ACCOUNT(s) WITH INVALID PARENT ACCOUNTS", output).show_the_frame()
myPopupInformationBox(jif,"FIXED %s invalid Parent Accounts" %(iCountErrors), "FIX INVALID PARENT ACCOUNTS", JOptionPane.WARNING_MESSAGE)
myPrint("D", "Exiting ", inspect.currentframe().f_code.co_name, "()")
return
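# Stand-alone sketch (plain Python, not the Moneydance API) of the parent-chain check
# used by validate_path()/check_fix_accounts() above: an account is considered valid
# only if walking its parent links reaches the root within a bounded number of hops,
# so cycles and detached chains are flagged and re-parented to root. All names here
# are illustrative only.
class _SketchAcct(object):
    def __init__(self, name, parent=None):
        self.name, self.parent = name, parent
def _reaches_root(acct, root, max_hops=100):
    node, hops = acct, 0
    while node is not None and hops <= max_hops:
        if node is root: return True
        node, hops = node.parent, hops + 1
    return False  # hit a None parent, or exceeded max_hops (probable cycle)
# root = _SketchAcct('root'); ok = _SketchAcct('a', root); orphan = _SketchAcct('x')
# _reaches_root(ok, root) -> True;  _reaches_root(orphan, root) -> False
# A fix pass would then set orphan.parent = root, mirroring the reset above.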
def fix_root_account_name(statusLabel):
global toolbox_frame_, debug
myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()")
bookName = moneydance.getCurrentAccountBook().getName().strip()
root = moneydance.getCurrentAccountBook().getRootAccount()
rootName = root.getAccountName().strip()
if rootName == bookName:
myPopupInformationBox(toolbox_frame_,
"The name of your Root Account is already the same as your Dataset(or 'Book'): %s" % (bookName),
"RENAME ROOT ACCOUNT",
JOptionPane.INFORMATION_MESSAGE)
statusLabel.setText(("No changed applied as your Root Account name is already the same as your Dataset ('Book') name: %s" %(bookName)).ljust(800, " "))
statusLabel.setForeground(Color.BLUE)
myPopupInformationBox(toolbox_frame_,"The name of your Root Account is already the same as your Dataset - NO CHANGES MADE!",theMessageType=JOptionPane.WARNING_MESSAGE)
return
myPrint("B", "User requested to fix Root Account Name")
myPrint("B", "Dataset's ('Book') Name: ", bookName)
myPrint("B", "Root's Account Name: ", rootName)
MyPopUpDialogBox(toolbox_frame_,
"RENAME ROOT ACCOUNT",
"Your Dataset ('book') name is: %s (this is the name that will be used)\nYour Root Account name is: %s" %(bookName,rootName),
theTitle="RENAME ROOT ACCOUNT").go()
if not confirm_backup_confirm_disclaimer(toolbox_frame_, statusLabel, "RENAME ROOT ACCOUNT", "rename your Root Account to: %s?" %(bookName)):
return
myPrint("B", "User accepted disclaimer to reset Root Account Name. Proceeding.....")
# Flush all in memory settings to config.dict file on disk
moneydance.savePreferences()
root.setAccountName(bookName)
root.syncItem()
moneydance_data.notifyAccountModified(root)
myPrint("B", "Root account renamed to: %s" % (bookName))
play_the_money_sound()
statusLabel.setText((("Root Account Name changed to : %s - I SUGGEST YOU RESTART MONEYDANCE!" %(bookName)).ljust(800, " ")))
statusLabel.setForeground(Color.RED)
myPopupInformationBox(toolbox_frame_,"Root Account Name changed to : %s - I SUGGEST YOU RESTART MONEYDANCE!"%(bookName),"RENAME ROOT",JOptionPane.WARNING_MESSAGE)
myPrint("D", "Exiting ", inspect.currentframe().f_code.co_name, "()")
return
# noinspection PyUnresolvedReferences
def force_change_account_type(statusLabel):
global toolbox_frame_, debug
myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()")
# set_account_type.py
ask=MyPopUpDialogBox(toolbox_frame_,
theStatus="Are you sure you want to FORCE change an Account's Type?",
theTitle="FORCE CHANGE TYPE",
theMessage="This is normally a BAD idea, unless you know you want to do it....!\n"
"The typical scenario is where you have have created an Account with the wrong Type\n"
"This fix will NOT attempt to check that the Acct has Txns that are valid in the new Account | |
conf = AppConfig(minimal=True, root_controller=RootController())
conf.renderers = ['jinja']
app = conf.make_wsgi_app()
def test_setup_sqlalchemy(self):
class RootController(TGController):
@expose()
def test(self):
return 'HI!'
package = PackageWithModel()
conf = AppConfig(minimal=True, root_controller=RootController())
conf.package = package
conf.model = package.model
conf.use_sqlalchemy = True
conf['sqlalchemy.url'] = 'sqlite://'
app = conf.make_wsgi_app()
app = TestApp(app)
assert 'HI!' in app.get('/test')
assert package.model.DBSession.DBSESSION_REMOVED
def test_sqlalchemy_commit_veto(self):
class RootController(TGController):
@expose()
def test(self):
return 'HI!'
@expose()
def crash(self):
raise Exception('crash')
@expose()
def forbidden(self):
response.status = 403
return 'FORBIDDEN'
@expose()
def notfound(self):
response.status = 404
return 'NOTFOUND'
def custom_commit_veto(environ, status, headers):
if status.startswith('404'):
return True
return False
fake_transaction = FakeTransaction()
import transaction
prev_transaction_manager = transaction.manager
transaction.manager = fake_transaction
package = PackageWithModel()
conf = AppConfig(minimal=True, root_controller=RootController())
conf['package'] = package
conf['model'] = package.model
conf['use_sqlalchemy'] = True
conf['tm.enabled'] = True
conf['tm.commit_veto'] = custom_commit_veto
conf['sqlalchemy.url'] = 'sqlite://'
app = conf.make_wsgi_app()
app = TestApp(app)
assert hasattr(conf, 'use_transaction_manager') is False
app.get('/test')
assert fake_transaction.aborted == False
try:
app.get('/crash')
except:
pass
assert fake_transaction.aborted == True
app.get('/forbidden', status=403)
assert fake_transaction.aborted == False
app.get('/notfound', status=404)
assert fake_transaction.aborted == True
transaction.manager = prev_transaction_manager
def test_sqlalchemy_doom(self):
fake_transaction = FakeTransaction()
import transaction
prev_transaction_manager = transaction.manager
transaction.manager = fake_transaction
class RootController(TGController):
@expose()
def test(self):
fake_transaction.doom()
return 'HI!'
package = PackageWithModel()
conf = AppConfig(minimal=True, root_controller=RootController())
conf.package = package
conf.model = package.model
conf.use_sqlalchemy = True
conf['tm.enabled'] = True
conf['sqlalchemy.url'] = 'sqlite://'
app = conf.make_wsgi_app()
app = TestApp(app)
assert hasattr(conf, 'use_transaction_manager') is False
app.get('/test')
assert fake_transaction.aborted == True
transaction.manager = prev_transaction_manager
def test_sqlalchemy_retry(self):
fake_transaction = FakeTransaction()
import transaction
prev_transaction_manager = transaction.manager
transaction.manager = fake_transaction
from transaction.interfaces import TransientError
class RootController(TGController):
attempts = []
@expose()
def test(self):
self.attempts.append(True)
if len(self.attempts) == 3:
return 'HI!'
raise TransientError()
package = PackageWithModel()
conf = AppConfig(minimal=True, root_controller=RootController())
conf.package = package
conf.model = package.model
conf.use_sqlalchemy = True
conf['tm.enabled'] = True
conf['sqlalchemy.url'] = 'sqlite://'
conf['tm.attempts'] = 3
app = conf.make_wsgi_app()
app = TestApp(app)
assert hasattr(conf, 'use_transaction_manager') is False
resp = app.get('/test')
assert 'HI' in resp
transaction.manager = prev_transaction_manager
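# A minimal sketch (an assumption about behaviour, not TurboGears/transaction internals)
# of what the retry test above relies on: the transaction layer re-invokes the request
# handler when a TransientError escapes, up to the configured number of attempts.
# The helper name is illustrative only.
def _example_retry_loop(handler, attempts=3):
    from transaction.interfaces import TransientError
    for attempt in range(attempts):
        try:
            return handler()
        except TransientError:
            if attempt == attempts - 1:
                raise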
def test_setup_sqla_persistance(self):
conf = AppConfig(minimal=True, root_controller=RootController())
conf['sqlalchemy.url'] = 'sqlite://'
conf.use_sqlalchemy = True
conf.package = PackageWithModel()
conf.make_wsgi_app()
def test_setup_sqla_balanced(self):
conf = AppConfig(minimal=True, root_controller=RootController())
conf['sqlalchemy.master.url'] = 'sqlite://'
conf['sqlalchemy.slaves.slave1.url'] = 'sqlite://'
conf.use_sqlalchemy = True
conf.package = PackageWithModel()
conf.make_wsgi_app()
@raises(TGConfigError)
def test_setup_sqla_balanced_prevent_slave_named_master(self):
conf = AppConfig(minimal=True, root_controller=RootController())
conf['sqlalchemy.master.url'] = 'sqlite://'
conf['sqlalchemy.slaves.master.url'] = 'sqlite://'
conf.use_sqlalchemy = True
conf.package = PackageWithModel()
conf.make_wsgi_app()
@raises(TGConfigError)
def test_setup_sqla_balanced_no_slaves(self):
conf = AppConfig(minimal=True, root_controller=RootController())
conf['sqlalchemy.master.url'] = 'sqlite://'
conf.use_sqlalchemy = True
conf.package = PackageWithModel()
conf.make_wsgi_app()
def test_setup_ming_persistance(self):
class RootController(TGController):
@expose()
def test(self):
return 'HI!'
package = PackageWithModel()
conf = AppConfig(minimal=True, root_controller=RootController())
conf.package = package
conf.model = package.model
conf.use_ming = True
conf['ming.url'] = 'mim://'
conf['ming.db'] = 'inmemdb'
app = conf.make_wsgi_app()
tgapp = app.application
while not isinstance(tgapp, TGApp):
tgapp = tgapp.app
ming_handler = tgapp.wrapped_dispatch
while ming_handler != tgapp._dispatch:
if isinstance(ming_handler, MingApplicationWrapper):
break
ming_handler = ming_handler.next_handler
assert isinstance(ming_handler, MingApplicationWrapper), ming_handler
class FakeMingSession(object):
actions = []
def flush_all(self):
self.actions.append('FLUSH')
def close_all(self):
self.actions.append('CLOSE')
ming_handler.ThreadLocalODMSession = FakeMingSession()
app = TestApp(app)
resp = app.get('/test')
assert 'HI' in resp
assert ming_handler.ThreadLocalODMSession.actions == ['FLUSH']
def test_setup_ming_persistance_closes_on_failure(self):
class RootController(TGController):
@expose()
def test(self):
raise Exception('CRASH!')
package = PackageWithModel()
conf = AppConfig(minimal=True, root_controller=RootController())
conf.package = package
conf.model = package.model
conf.use_ming = True
conf['ming.url'] = 'mim://'
conf['ming.db'] = 'inmemdb'
app = conf.make_wsgi_app()
tgapp = app.application
while not isinstance(tgapp, TGApp):
tgapp = tgapp.app
ming_handler = tgapp.wrapped_dispatch
while ming_handler != tgapp._dispatch:
if isinstance(ming_handler, MingApplicationWrapper):
break
ming_handler = ming_handler.next_handler
assert isinstance(ming_handler, MingApplicationWrapper), ming_handler
class FakeMingSession(object):
actions = []
def flush_all(self):
self.actions.append('FLUSH')
def close_all(self):
self.actions.append('CLOSE')
ming_handler.ThreadLocalODMSession = FakeMingSession()
app = TestApp(app)
try:
app.get('/test', status=500)
except:
assert ming_handler.ThreadLocalODMSession.actions == ['CLOSE']
else:
assert False, 'Should have raised exception'
def test_setup_ming_persistance_with_url_alone(self):
package = PackageWithModel()
conf = AppConfig(minimal=True, root_controller=None)
conf.package = package
conf.model = package.model
conf.use_ming = True
conf['ming.url'] = 'mim://inmemdb'
app = conf.make_wsgi_app()
assert app is not None
dstore = config['tg.app_globals'].ming_datastore
dstore_name = dstore.name
# Looks like ming has empty dstore.name when using MIM.
assert dstore_name == '', dstore
def test_setup_sqla_and_ming_both(self):
package = PackageWithModel()
base_config = AppConfig(minimal=True, root_controller=None)
base_config.package = package
base_config.model = package.model
base_config.use_ming = True
base_config['ming.url'] = 'mim://inmemdb'
base_config.use_sqlalchemy = True
base_config['sqlalchemy.url'] = 'sqlite://'
app = base_config.make_wsgi_app()
assert app is not None
assert config['MingSession'], config
assert config['tg.app_globals'].ming_datastore, config['tg.app_globals']
assert config['SQLASession'], config
assert config['tg.app_globals'].sa_engine, config['tg.app_globals']
assert config['DBSession'] is config['SQLASession'], config
def test_setup_ming_persistance_with_url_and_db(self):
package = PackageWithModel()
conf = AppConfig(minimal=True, root_controller=None)
conf.package = package
conf.model = package.model
conf.use_ming = True
conf['ming.url'] = 'mim://inmemdb'
conf['ming.db'] = 'realinmemdb'
app = conf.make_wsgi_app()
assert app is not None
dstore = config['tg.app_globals'].ming_datastore
dstore_name = dstore.name
assert dstore_name == 'realinmemdb', dstore
def test_setup_ming_persistance_advanced_options(self):
package = PackageWithModel()
conf = AppConfig(minimal=True, root_controller=None)
conf.package = package
conf.model = package.model
conf.use_ming = True
conf['ming.url'] = 'mim://inmemdb'
conf['ming.connection.read_preference'] = 'PRIMARY'
app = conf.make_wsgi_app()
assert app is not None
def test_setup_ming_persistance_replica_set(self):
if sys.version_info[:2] == (2, 6):
raise SkipTest()
package = PackageWithModel()
conf = AppConfig(minimal=True, root_controller=None)
conf.package = package
conf.model = package.model
conf.use_ming = True
conf['ming.url'] = 'mongodb://localhost:27017,localhost:27018/testdb?replicaSet=test'
conf['ming.db'] = ''
app = conf.make_wsgi_app()
assert app is not None
expected_url = 'mongodb://localhost:27017,localhost:27018/testdb?replicaSet=test'
expected_db = 'testdb'
dstore = config['tg.app_globals'].ming_datastore
assert expected_db == dstore.name, dstore.name
assert dstore.bind._conn_args[0] == expected_url
def test_setup_ming_persistance_replica_set_option(self):
package = PackageWithModel()
conf = AppConfig(minimal=True, root_controller=None)
conf.package = package
conf.model = package.model
conf.use_ming = True
conf['ming.url'] = 'mongodb://localhost:27017,localhost:27018/testdb'
conf['ming.connection.replicaSet'] = 'test'
app = conf.make_wsgi_app()
assert app is not None
expected_url = 'mongodb://localhost:27017,localhost:27018/testdb'
expected_db = 'testdb'
dstore = config['tg.app_globals'].ming_datastore
assert expected_db == dstore.name, dstore.name
assert dstore.bind._conn_args[0] == expected_url
assert 'test' == dstore.bind._conn_kwargs.get('replicaSet'), dstore.bind._conn_kwargs
def test_setup_sqla_auth_repozesqla(self):
if PY3: raise SkipTest()
class RootController(TGController):
@expose()
def test(self):
return str(request.environ)
package = PackageWithModel()
conf = AppConfig(minimal=True, root_controller=RootController())
conf.package = package
conf.model = package.model
conf.use_sqlalchemy = True
conf.auth_backend = 'sqlalchemy'
conf['sa_auth'] = {'authmetadata': ApplicationAuthMetadata(),
'dbsession': None,
'user_class': None,
'cookie_secret': '12345'}
conf['sqlalchemy.url'] = 'sqlite://'
app = conf.make_wsgi_app()
app = TestApp(app)
resp = app.get('/test')
assert 'repoze.who.plugins' in resp, resp
def test_setup_sqla_auth(self):
class RootController(TGController):
@expose()
def test(self):
return str(request.environ)
package = PackageWithModel()
conf = AppConfig(minimal=True, root_controller=RootController())
conf.package = package
conf.model = package.model
conf.use_sqlalchemy = True
conf.auth_backend = 'sqlalchemy'
conf['sa_auth'] = {'authmetadata': ApplicationAuthMetadataWithAuthentication(),
'dbsession': None,
'user_class': None,
'cookie_secret': '12345'}
conf['sqlalchemy.url'] = 'sqlite://'
app = conf.make_wsgi_app()
app = TestApp(app)
resp = app.get('/test')
assert 'repoze.who.plugins' in resp, resp
def test_setup_ming_auth_tgming(self):
if PY3: raise SkipTest()
class RootController(TGController):
@expose()
def test(self):
return str(request.environ)
package = PackageWithModel()
conf = AppConfig(minimal=True, root_controller=RootController())
conf.package = package
conf.model = package.model
conf.use_ming = True
conf.auth_backend = 'ming'
conf['sa_auth'] = {'authmetadata': ApplicationAuthMetadata(),
'cookie_secret': '12345',
'user_class': None}
conf['ming.url'] = 'mim:///testdb'
app = conf.make_wsgi_app()
app = TestApp(app)
resp = app.get('/test')
assert 'repoze.who.plugins' in resp, resp
def test_setup_ming_auth(self):
class RootController(TGController):
@expose()
def test(self):
return str(request.environ)
package = PackageWithModel()
conf = AppConfig(minimal=True, root_controller=RootController())
conf.package = package
conf.model = package.model
conf.use_ming = True
conf.auth_backend = 'ming'
conf['sa_auth'] = {'authmetadata': ApplicationAuthMetadataWithAuthentication(),
'cookie_secret': '12345',
'user_class': None}
conf['ming.url'] = 'mim:///testdb'
app = conf.make_wsgi_app()
app = TestApp(app)
resp = app.get('/test')
assert 'repoze.who.plugins' in resp, resp
def test_setup_authtkt(self):
class RootController(TGController):
@expose()
def test(self):
return str(request.environ)
package = PackageWithModel()
conf = AppConfig(minimal=True, root_controller=RootController())
conf.package = package
conf.model = package.model
conf.use_sqlalchemy = True
conf.auth_backend = 'sqlalchemy'
conf['sa_auth'] = {'authmetadata': ApplicationAuthMetadataWithAuthentication(),
'dbsession': None,
'user_class': None,
'cookie_secret': '12345',
'post_login_url': '/'}
conf['sqlalchemy.url'] = 'sqlite://'
secure_app = conf.make_wsgi_app(**{'sa_auth.authtkt.secure': True})
secure_app = TestApp(secure_app)
resp = secure_app.post('/login_handler', params={'login': 'l', 'password': 'p'})
assert 'HttpOnly' in resp.headers["Set-Cookie"], resp.headers
insecure_app = conf.make_wsgi_app(**{'sa_auth.authtkt.secure': False})
insecure_app = TestApp(insecure_app)
resp = insecure_app.post('/login_handler', params={'login': 'l', 'password': 'p'})
assert 'HttpOnly' not in resp.headers["Set-Cookie"], resp.headers
def test_sessions_enabled(self):
class RootController(TGController):
@expose('json')
def test(self):
try:
tg.session['counter'] += 1
except KeyError:
tg.session['counter'] = 0
tg.session.save()
return dict(counter=tg.session['counter'])
conf = AppConfig(minimal=True, root_controller=RootController())
conf['session.enabled'] = True
app = conf.make_wsgi_app()
app = TestApp(app)
resp = app.get('/test')
assert resp.json['counter'] == 0, resp
resp = app.get('/test')
assert resp.json['counter'] == 1, resp
def test_caching_enabled(self):
class RootController(TGController):
@expose('json')
def test(self):
cache = tg.cache.get_cache('test_caching_enabled')
now = cache.get_value('test_cache_key', createfunc=datetime.utcnow)
return dict(now=now)
conf = AppConfig(minimal=True, root_controller=RootController())
conf['cache.enabled'] = True
app = conf.make_wsgi_app()
app = TestApp(app)
resp = app.get('/test')
now = resp.json['now']
for x in range(20):
resp = app.get('/test')
assert resp.json['now'] == now, (resp, now)
def test_controler_wrapper_setup(self):
from tg.configurator.components.dispatch import _call_controller
orig_caller = _call_controller
appcfg = AppConfig(minimal=True, root_controller=RootController())
conf = {}
dispatch = appcfg._configurator.get_component('dispatch')
dispatch._controller_wrappers[:] = []
dispatch._setup_controller_wrappers(conf, None)
assert conf['controller_caller'] | |
#!/usr/bin/env python
# Copyright 2020 Informatics Matters Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys, gzip, json, uuid
from rdkit import Chem
from rdkit.Chem import AllChem
from .sanifix import fix_mol
from .MolFromTypedColumnReader import MolFromTypedColumnReader
from pipelines_utils import utils
from pipelines_utils.StreamJsonListLoader import StreamJsonListLoader
def default_open_input_output(inputDef, inputFormat, outputDef, defaultOutput,
outputFormat, thinOutput=False, valueClassMappings=None,
datasetMetaProps=None, fieldMetaProps=None):
"""Default approach to handling the inputs and outputs"""
input, suppl = default_open_input(inputDef, inputFormat)
output,writer,outputBase =\
default_open_output(outputDef, defaultOutput, outputFormat,
thinOutput=thinOutput,
valueClassMappings=valueClassMappings,
datasetMetaProps=datasetMetaProps,
fieldMetaProps=fieldMetaProps)
return input,output,suppl,writer,outputBase
def default_open_input(inputDef, inputFormat):
if not inputDef and not inputFormat:
raise ValueError('Must specify either an input file name or an input format (or both)')
elif inputFormat == 'sdf' or (inputDef and (inputDef.lower().endswith('.sdf') or inputDef.lower().endswith('.sdf.gz'))):
input, suppl = default_open_input_sdf(inputDef)
elif inputFormat == 'json' or (inputDef and (inputDef.lower().endswith('.data') or inputDef.lower().endswith('.data.gz'))):
input, suppl = default_open_input_json(inputDef)
elif inputFormat == 'smiles':
input, suppl = default_open_input_typed_smiles(inputDef)
else:
raise ValueError('Unsupported input format')
return input, suppl
def default_open_input_sdf(inputDef):
"""Open the input as a SD file (possibly gzipped if ending with .gz) according to RDKit's ForwardSDMolSupplier
:param inputDef: The name of the file. If None then STDIN is used (and assumed not to be gzipped)
"""
if inputDef:
input = utils.open_file(inputDef)
else:
# We need to use the (Python 3) stdin.buffer
# (a binary representation of the input stream)
# for RDKit in Python 3.
if sys.version_info[0] >= 3:
input = sys.stdin.buffer
else:
input = sys.stdin
suppl = Chem.ForwardSDMolSupplier(input)
return input, suppl
def default_open_input_smiles(inputDef, delimiter='\t', smilesColumn=0,
nameColumn=1, titleLine=False):
"""Open the input as a file of smiles (possibly gzipped if ending with .gz)
according to RDKit's SmilesMolSupplier
:param inputDef: The name of the file. If None then STDIN is used
(and assumed not to be gzipped)
"""
if inputDef:
input = utils.open_file(inputDef, as_text=True)
else:
input = sys.stdin
# SmilesMolSupplier is a bit strange as it can't accept a file like object!
txt = input.read()
input.close()
suppl = Chem.SmilesMolSupplier()
suppl.SetData(txt, delimiter=delimiter, smilesColumn=smilesColumn,
nameColumn=nameColumn, titleLine=titleLine)
return suppl
def default_open_input_typed_smiles(inputDef):
"""Open the input as a file of typed column text containing SMILES
(possibly gzipped if ending with .gz).
The user will need to initialise() this object once it's been created
to provide an optional header and other parameters.
:param inputDef: The name of the file, or None if to use STDIN.
If filename ends with .gz will be gunzipped
"""
if inputDef:
input = utils.open_file(inputDef, as_text=True)
else:
input = sys.stdin
suppl = MolFromTypedColumnReader(input)
return input, suppl
def open_smarts(filename):
"""Very simple smarts parser that expects smarts expression
(and nothing else) on each line (no header)"""
f = open(filename)
count = 0
for line in f:
count += 1
mol = Chem.MolFromSmarts(line)
if mol:
mol.SetIntProp("idx", count)
yield mol
def default_open_input_json(inputDef, lazy=True):
"""Open the given input as JSON array of Squonk MoleculeObjects.
:param inputDef: The name of the input file, or None if to use STDIN.
If filename ends with .gz will be gunzipped
:param lazy: Use lazy loading of the JSON. If True will allow handling of
large datasets without being loaded into memory,
but may be less robust and will be slower.
"""
if inputDef:
if inputDef.lower().endswith('.gz'):
input = gzip.open(inputDef, 'rt')
else:
input = open(inputDef, 'r')
else:
input = sys.stdin
if lazy:
suppl = generate_mols_from_json(StreamJsonListLoader(input))
else:
suppl = generate_mols_from_json(json.load(input))
return input, suppl
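# Hedged usage sketch for default_open_input_json: lazily iterate the RDKit
# molecules of a Squonk dataset. The file name 'molecules.data.gz' is an
# illustrative assumption; passing None reads from STDIN instead.
def _example_read_squonk_json(inputDef='molecules.data.gz'):
    input, suppl = default_open_input_json(inputDef, lazy=True)
    count = 0
    for mol in suppl:  # suppl is a generator of RDKit Mol objects
        count += 1
    if input is not sys.stdin:
        input.close()
    return count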
def default_open_output(outputDef, defaultOutput, outputFormat, compress=True,
thinOutput=False, valueClassMappings=None,
datasetMetaProps=None, fieldMetaProps=None):
outputFormat = utils.determine_output_format(outputFormat)
if not outputDef:
outputBase = defaultOutput
else:
outputBase = outputDef
if outputFormat == 'sdf':
output,writer = default_open_output_sdf(outputDef, outputBase, thinOutput, compress)
elif outputFormat == 'json':
output,writer = default_open_output_json(outputDef, outputBase, thinOutput, compress,
valueClassMappings, datasetMetaProps, fieldMetaProps)
else:
raise ValueError('Unsupported output format')
return output,writer,outputBase
def default_open_output_sdf(outputDef, outputBase, thinOutput, compress):
output = utils.open_output(outputDef, 'sdf', compress)
if thinOutput:
writer = ThinSDWriter(output)
else:
writer = ThickSDWriter(output)
return output, writer
def default_open_output_json(outputDef, outputBase, thinOutput,
compress, valueClassMappings, datasetMetaProps,
fieldMetaProps):
# this writes the metadata that Squonk needs
utils.write_squonk_datasetmetadata(outputBase, thinOutput, valueClassMappings,
datasetMetaProps, fieldMetaProps)
output = utils.open_output(outputDef, 'data', compress)
if thinOutput:
writer = ThinJsonWriter(output)
else:
writer = ThickJsonWriter(output)
return output,writer
def read_single_molecule(filename, index=1, format=None):
"""Read a single molecule as a RDKit Mol object. This can come from a file in molfile or SDF format.
If SDF then you can also specify an index of the molecule that is read (default is the first)
"""
mol = None
if format == 'mol' or filename.lower().endswith('.mol') or filename.lower().endswith('.mol.gz'):
file = utils.open_file(filename)
mol = Chem.MolFromMolBlock(file.read())
file.close()
elif format == 'sdf' or filename.lower().endswith('.sdf') or filename.lower().endswith('.sdf.gz'):
file = utils.open_file(filename)
supplier = Chem.ForwardSDMolSupplier(file)
for i in range(0,index):
if supplier.atEnd():
break
mol = next(supplier)
file.close()
elif format == 'json' or filename.lower().endswith('.data') or filename.lower().endswith('.data.gz'):
input, suppl = default_open_input_json(filename)
for i in range(0,index):
try:
mol = next(suppl)
except StopIteration:
break
input.close()
if not mol:
raise ValueError("Unable to read molecule")
return mol
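# Hedged usage sketch: pull the third record out of a (possibly gzipped) SD
# file and return its SMILES. The file name is an illustrative assumption;
# read_single_molecule raises ValueError if nothing can be read at that index.
def _example_read_third_record(filename='ligands.sdf.gz'):
    mol = read_single_molecule(filename, index=3)
    return Chem.MolToSmiles(mol)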
def parse_mol_simple(my_type, txt):
"""Function to parse individual mols given a type.
:param my_type: A type definition (i.e. "mol" or "smiles")
:param txt: The textual definition of the molecule (i.e. a SMILES string)
:return: A mol instance or None if the molecule could not be compiled
"""
# Ignore unexpected parameter values...
if my_type is None or not my_type or txt is None or not txt:
return None
if my_type == "mol":
# Try this way
mol = Chem.MolFromMolBlock(txt.strip())
if mol is None:
mol = Chem.MolFromMolBlock(txt)
if mol is None:
mol = Chem.MolFromMolBlock("\n".join(txt.split("\n")[1:]))
# Now try to do sanifix
if mol is None:
mol = fix_mol(Chem.MolFromMolBlock(txt, False))
# And again
if mol is None:
mol = fix_mol(Chem.MolFromMolBlock(txt.strip(), False))
elif my_type == "smiles":
# Assumes that the SMILES is the first column and strips any ChemAxon-style ':' extension
mol = Chem.MolFromSmiles(txt.split()[0].split(":")[0])
if mol is None:
utils.log('Failed to parse mol "%s" for my_type %s' % (txt, my_type))
return mol
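# Minimal sketch of parse_mol_simple on a SMILES input; the ':1' suffix and the
# trailing name mimic the ChemAxon-style extensions that the 'smiles' branch
# strips before calling RDKit.
def _example_parse_smiles():
    mol = parse_mol_simple('smiles', 'c1ccccc1:1 benzene')
    return mol is not None  # True if RDKit parsed the aromatic ring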
def create_mol_from_props(molobj):
"""Function to get the RDKit mol from MoleculeObject JSON
:param molobj: Python dictionary containing the molecule's properties
"""
if "source" not in molobj or "format" not in molobj:
return None
molstr = str(molobj["source"])
# Get the format and use this as a starting point to work out how to parse the source
molformat = molobj["format"]
# Now parse it with RDKit
mol = parse_mol_simple(molformat, molstr)
if mol:
if "values" in molobj:
values = molobj["values"]
for key in values:
mol.SetProp(str(key), str(values[key]))
uuid = str(molobj["uuid"])
if uuid:
mol.SetProp("uuid", uuid)
mol.SetProp("_Name", uuid)
return mol
def clear_mol_props(mol, exceptFor):
for p in mol.GetPropNames():
if p not in exceptFor:
mol.ClearProp(p)
def generate_mols_from_json(input):
"""Create a supplier of RDKit Mol objects from the json
:param input: file like object containing the json representation of the molecules
"""
j=0
for item in input:
j+=1
mol = create_mol_from_props(item)
if not mol:
# TODO - get a count of the errors and report
utils.log("Failed to create molecule - skipping. Data was ", item)
continue
yield mol
def generate_2d_coords(mol):
AllChem.Compute2DCoords(mol)
class ThickJsonWriter:
def __init__(self, file):
self.file = file
self.file.write('[')
self.count = 0
def write(self, mol, props=None, includeStereo=True, confId=-1,
kekulize=True, forceV3000=False, format='mol'):
d = {}
if format == 'mol':
d['source'] = Chem.MolToMolBlock(mol, includeStereo=includeStereo, confId=confId, kekulize=kekulize, forceV3000=forceV3000)
d['format'] = 'mol'
elif format == 'smiles':
if kekulize:
Chem.Kekulize(mol)
d['source'] = Chem.MolToSmiles(mol, isomericSmiles=includeStereo, kekuleSmiles=kekulize)
d['format'] = 'smiles'
else:
raise ValueError("Unexpected format: " + format)
allProps = mol.GetPropsAsDict()
if props:
allProps.update(props)
if 'uuid' in allProps:
d['uuid'] = allProps['uuid']
del allProps['uuid']
else:
d['uuid'] = str(uuid.uuid4())
if allProps:
d['values'] = allProps
#utils.log("Mol:",d)
json_str = json.dumps(d)
if self.count > 0:
self.file.write(',')
self.file.write(json_str)
self.count += 1
def close(self):
self.file.write(']')
self.file.close()
def flush(self):
self.file.flush()
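# Hedged usage sketch for ThickJsonWriter: stream RDKit molecules into a Squonk
# JSON array on disk. The output path 'example.data' and the extra 'score'
# property are illustrative assumptions; in the pipelines the file object
# normally comes from default_open_output_json above.
def _example_thick_json_writer(mols, path='example.data'):
    out = open(path, 'w')
    writer = ThickJsonWriter(out)
    for mol in mols:
        writer.write(mol, props={'score': 0.5})
    writer.close()  # writes the closing ']' and closes the file
    return path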
class ThinJsonWriter:
def __init__(self, file):
self.file = file
self.file.write('[')
self.count = 0
def write(self, mol, props=None, includeStereo=True, confId=-1, kekulize=True, forceV3000=False):
d = {}
allProps = mol.GetPropsAsDict()
if props:
allProps.update(props)
if 'uuid' in allProps:
d['uuid'] = allProps['uuid']
del allProps['uuid']
else:
d['uuid'] = str(uuid.uuid4())
if allProps:
d['values'] = allProps
json_str = json.dumps(d)
if self.count > 0:
self.file.write(',')
self.file.write(json_str)
self.count += 1
from coopihc.space.Space import Space
from coopihc.space.State import State
from coopihc.space.StateElement import StateElement
import numpy
import yaml
from collections import OrderedDict
import matplotlib.pyplot as plt
class _Bundle:
"""Main class for bundles.
Main class for bundles. This class is subclassed by Bundle, which defines the interface with which to interact.
A bundle combines a task with a user and an assistant. The bundle creates the ``game_state`` by combining the task, user and assistant states with the turn index and both agent's actions.
The bundle takes care of all the messaging between classes, making sure the gamestate and all individual states are synchronized at all times.
The bundle implements a forced reset mechanism, where each state of the bundle can be forced to a particular state via a dictionary mechanism (see :py:func:reset)
The bundle also takes care of rendering each of the three components in a single place.
:param task: (:py:class:`coopihc.interactiontask.InteractionTask.InteractionTask`) A task that inherits from ``InteractionTask``
:param user: (:py:class:`coopihc.agents.BaseAgent.BaseAgent`) a user which inherits from ``BaseAgent``
:param assistant: (:py:class:`coopihc.agents.BaseAgent.BaseAgent`) an assistant which inherits from ``BaseAgent``
:meta public:
"""
def __init__(self, task, user, assistant, *args, **kwargs):
self.kwargs = kwargs
self.task = task
self.task.bundle = self
self.user = user
self.user.bundle = self
self.assistant = assistant
self.assistant.bundle = self
# Form complete game state
self.game_state = State()
turn_index = StateElement(
values=[0],
spaces=Space([numpy.array([0, 1, 2, 3])], dtype=numpy.int8),
)
self.game_state["turn_index"] = turn_index
self.game_state["task_state"] = task.state
self.game_state["user_state"] = user.state
self.game_state["assistant_state"] = assistant.state
if user.policy is not None:
self.game_state["user_action"] = user.policy.action_state
else:
self.game_state["user_action"] = State()
self.game_state["user_action"]["action"] = StateElement()
if assistant.policy is not None:
self.game_state["assistant_action"] = assistant.policy.action_state
else:
self.game_state["assistant_action"] = State()
self.game_state["assistant_action"]["action"] = StateElement()
self.task.finit()
self.user.finit()
self.assistant.finit()
self.round_number = 0
# Needed for render
self.active_render_figure = None
self.figure_layout = [211, 223, 224]
self.rendered_mode = None
self.render_perm = False
self.playspeed = 0.1
def __repr__(self):
"""__repr__
Pretty representation for Bundles.
:return: pretty bundle print
:rtype: string
"""
return "{}\n".format(self.__class__.__name__) + yaml.safe_dump(
self.__content__()
)
def __content__(self):
"""__content__
Custom class representation
:return: class repr
:rtype: dictionary
"""
return {
"Task": self.task.__content__(),
"User": self.user.__content__(),
"Assistant": self.assistant.__content__(),
}
@property
def turn_number(self):
"""turn_number
The turn number in the game (0 to 3)
:return: turn number
:rtype: numpy.ndarray
"""
return self.game_state["turn_index"]["values"][0]
@turn_number.setter
def turn_number(self, value):
self._turn_number = value
self.game_state["turn_index"]["values"] = numpy.array(value)
def reset(self, turn=0, task=True, user=True, assistant=True, dic={}):
"""Reset bundle.
1. Reset the game and start at a specific turn number.
2. select which components to reset
3. forced reset mechanism using dictionaries
Example:
.. code-block:: python
new_target_value = self.game_state["task_state"]["targets"]
new_fixation_value = self.game_state["task_state"]["fixation"]
reset_dic = {"task_state": {"targets": new_target_value, "fixation": new_fixation_value}}
self.reset(dic=reset_dic, turn = 1)
Will set the substates "targets" and "fixation" of state "task_state" to some value.
.. note ::
If subclassing _Bundle, make sure to call super().reset() in the new reset method.
:param turn: game turn number, defaults to 0
:type turn: int, optional
:param task: reset task?, defaults to True
:type task: bool, optional
:param user: reset user?, defaults to True
:type user: bool, optional
:param assistant: reset assistant?, defaults to True
:type assistant: bool, optional
:param dic: reset_dic, defaults to {}
:type dic: dict, optional
:return: new game state
:rtype: :py:class:`State<coopihc.space.State.State>`
"""
if task:
task_dic = dic.get("task_state")
task_state = self.task._base_reset(dic=task_dic)
if user:
user_dic = dic.get("user_state")
user_state = self.user._base_reset(dic=user_dic)
if assistant:
assistant_dic = dic.get("assistant_state")
assistant_state = self.assistant._base_reset(dic=assistant_dic)
self.turn_number = turn
if turn == 0:
return self.game_state
if turn >= 1:
self._user_first_half_step()
if turn >= 2:
user_action, _ = self.user.take_action()
self.broadcast_action("user", user_action)
self._user_second_half_step(user_action)
if turn >= 3:
self._assistant_first_half_step()
return self.game_state
def step(self, user_action=None, assistant_action=None, go_to_turn=None, **kwargs):
"""Play a round
Play a round of the game. A round consists of 4 turns. If go_to_turn is not None, the round is only played until that turn.
If a user action and assistant action are passed as arguments, then these are used as actions to play the round. Otherwise, these actions are sampled from each agent's policy.
:param user_action: user action
:type user_action: any
:param assistant_action: assistant action
:type assistant_action: any
:param go_to_turn: turn at which round stops, defaults to None
:type go_to_turn: int, optional
:return: gamestate, reward, game finished flag
:rtype: tuple(:py:class:`State<coopihc.space.State.State>`, collections.OrderedDict, boolean)
"""
# step() was called
# if not args:
# user_action, assistant_action = None, None
# elif len(args) == 1:
# if self.kwargs.get("name") == "no-assistant":
# user_action, assistant_action = args[0], None
# elif self.kwargs.get("name") == "no-user":
# user_action, assistant_action = None, args[0]
# else:
# raise AttributeError(
# "Passing a single action is only allowed when the game is played with a single agent."
# )
# step(user_action, None) or step(None, assistant_action) or step(user_action, assistant_action) was called
# else:
# user_action, assistant_action = args
if go_to_turn is None:
go_to_turn = self.turn_number
_started = False
rewards = {}
rewards["user_observation_reward"] = 0
rewards["user_inference_reward"] = 0
rewards["first_task_reward"] = 0
rewards["assistant_observation_reward"] = 0
rewards["assistant_inference_reward"] = 0
rewards["second_task_reward"] = 0
while self.turn_number != go_to_turn or (not _started):
_started = True
# User observes and infers
if self.turn_number == 0:
(
user_obs_reward,
user_infer_reward,
) = self._user_first_half_step()
(
rewards["user_observation_reward"],
rewards["user_inference_reward"],
) = (user_obs_reward, user_infer_reward)
# User takes action and receives reward from task
elif self.turn_number == 1:
if user_action is None:
user_action, user_policy_reward = self.user._take_action()
else:
user_policy_reward = 0
self.broadcast_action("user", user_action)
task_reward, is_done = self._user_second_half_step(user_action)
rewards["first_task_reward"] = task_reward
if is_done:
return self.game_state, rewards, is_done
# Assistant observes and infers
elif (
self.turn_number == 2 and not self.kwargs.get("name") == "no-assistant"
):
(
assistant_obs_reward,
assistant_infer_reward,
) = self._assistant_first_half_step()
(
rewards["assistant_observation_reward"],
rewards["assistant_inference_reward"],
) = (assistant_obs_reward, assistant_infer_reward)
# Assistant takes action and receives reward from task
elif (
self.turn_number == 3 and not self.kwargs.get("name") == "no-assistant"
):
if assistant_action is None:
(
assistant_action,
assistant_policy_reward,
) = self.assistant._take_action()
else:
assistant_policy_reward = 0
self.broadcast_action("assistant", assistant_action)
task_reward, is_done = self._assistant_second_half_step(
assistant_action
)
rewards["second_task_reward"] = task_reward
if is_done:
return self.game_state, rewards, is_done
self.turn_number = (self.turn_number + 1) % 4
self.round_number += 1
self.task.round += 1
return self.game_state, rewards, False
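# Hedged usage sketch (comments only, since this sits inside the class body):
# a typical interaction loop driven by step(). The MyTask/MyUser/MyAssistant
# constructors are illustrative assumptions, not part of this module.
#
#   bundle = Bundle(task=MyTask(), user=MyUser(), assistant=MyAssistant())
#   bundle.reset(turn=0)
#   while True:
#       game_state, rewards, is_done = bundle.step()
#       if is_done:
#           break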
def render(self, mode, *args, **kwargs):
"""render
Combines all render methods.
:param mode: "text" or "plot"
:param type: string
:meta public:
"""
self.rendered_mode = mode
if "text" in mode:
print("Task Render")
self.task.render(mode="text", *args, **kwargs)
print("User Render")
self.user.render(mode="text", *args, **kwargs)
print("Assistant Render")
self.assistant.render(mode="text", *args, **kwargs)
if "log" in mode:
self.task.render(mode="log", *args, **kwargs)
self.user.render(mode="log", *args, **kwargs)
self.assistant.render(mode="log", *args, **kwargs)
if "plot" in mode:
if self.active_render_figure:
plt.pause(self.playspeed)
self.task.render(
self.axtask,
self.axuser,
self.axassistant,
mode=mode,
*args,
**kwargs,
)
self.user.render(
self.axtask,
self.axuser,
self.axassistant,
mode="plot",
*args,
**kwargs,
)
self.assistant.render(
self.axtask,
self.axuser,
self.axassistant,
mode="plot",
*args,
**kwargs,
)
self.fig.canvas.draw()
else:
self.active_render_figure = True
self.fig = plt.figure()
self.axtask = self.fig.add_subplot(self.figure_layout[0])
self.axtask.set_title("Task State")
self.axuser = self.fig.add_subplot(self.figure_layout[1])
self.axuser.set_title("User State")
self.axassistant = self.fig.add_subplot(self.figure_layout[2])
self.axassistant.set_title("Assistant State")
self.task.render(
self.axtask,
self.axuser,
self.axassistant,
mode="plot",
*args,
**kwargs,
)
self.user.render(
self.axtask,
self.axuser,
self.axassistant,
*args,
mode="plot",
**kwargs,
)
self.assistant.render(
self.axtask,
self.axuser,
self.axassistant,
*args,
mode="plot",
**kwargs,
)
self.fig.show()
plt.tight_layout()
if not ("plot" in mode or "text" in mode):
self.task.render(None, mode=mode, *args, **kwargs)
self.user.render(None, mode=mode, *args, **kwargs)
self.assistant.render(None, mode=mode, *args, **kwargs)
def close(self):
"""close
Close the bundle once the game is finished.
"""
if self.active_render_figure:
plt.close(self.fig)
self.active_render_figure = None
def _user_first_half_step(self):
"""_user_first_half_step
Turn 1, where the user observes the game state and updates its state via inference.
:return: user observation and inference reward
:rtype: tuple(float, float)
"""
if not self.kwargs.get("onreset_deterministic_first_half_step"):
user_obs_reward, user_infer_reward = self.user._agent_step()
else:
# Store the probabilistic rules
store = self.user.observation_engine.extraprobabilisticrules
# Remove the probabilistic rules
self.user.observation_engine.extraprobabilisticrules = {}
# Generate an observation without generating an inference
user_obs_reward, user_infer_reward = self.user._agent_step(infer=False)
# Reposition the probabilistic rules, and reset mapping
self.user.observation_engine.extraprobabilisticrules = store
self.user.observation_engine.mapping = None
self.kwargs["onreset_deterministic_first_half_step"] = False
return user_obs_reward, user_infer_reward
def _user_second_half_step(self, user_action):
"""_user_second_half_step
Turn 2, where the operator takes an action.
:param user_action: user action
:param type: Any
:return: task reward, task done?
:rtype: tuple(float, boolean)
"""
# Play user's turn in the task
task_state, task_reward, is_done, _ = self.task.base_user_step(user_action)
# update task state (likely not needed, remove ?)
self.broadcast_state("user", "task_state", task_state)
return task_reward, is_done
def _assistant_first_half_step(self):
"""_assistant_first_half_step
Turn 3, where the assistant observes the game state and updates its state via inference.
:return: assistant observation and inference reward
(dt = ' + str(self._dt) + ') not in dt_fracs = ' + str(self._dt_fracs))
if self.stdout_msgs:
print 't = ',self._t
# If there are negative concentrations
else:
# Create fractional dt's if they haven't already been created. Do not update time in this case
if self._t in self._time_points:
# First remove all previously computed values for self._t + self._dt
for member in self.community_members:
del member.organism.cells_per_ml[self._t + self._dt]
del member.organism.gDW_per_ml[self._t + self._dt]
for shared_cmp in self.shared_compounds:
del shared_cmp.concentration[self._t + self._dt]
self.create_dt_frac()
# Use dt_frac instead of dt_orig
self._dt = self._dt_fracs[0]
# If the current fractional dt has already been examined, we shouldn't get a negative concentration
# in most cases. This, however, may sometimes happen if there is an active production source for that
# compound. Two strategies can be taken. The simplistic one sets all metabolites with a negative
# concentration to zero and moves to the next dt. The other method is to divide the current time interval
# into smaller ones. The latter may cause the runtime to increase significantly.
elif len(self._dt_fracs) > 0 and self._dt in self._dt_fracs:
if not self.redivide_time_interval:
for shared_cmp_ng in self._negative_conc_cmps:
shared_cmp_ng.concentration[self._t + self._dt] = 0
# Update time
self._t += self._dt
# If too close to an entry in self._time_points set to that entry (to avoid numerical problems)
if self._dt != self._dt_orig and self._t not in self._time_points and len([t for t in self._time_points if abs(t - self._t) <= 1e-6]) == 1:
time_point = [t for t in self._time_points if abs(t - self._t) <= 1e-6][0]
for member in self.community_members:
member.organism.cells_per_ml[time_point] = member.organism.cells_per_ml[self._t]
member.organism.gDW_per_ml[time_point] = member.organism.gDW_per_ml[self._t]
del member.organism.cells_per_ml[self._t]
del member.organism.gDW_per_ml[self._t]
for shared_cmp in self.shared_compounds:
shared_cmp.concentration[time_point] = shared_cmp.concentration[self._t]
del shared_cmp.concentration[self._t]
self._t = time_point
if self.stdout_msgs:
print 't = ',self._t
# Then remove dt from dt_fracs
del self._dt_fracs[self._dt_fracs.index(self._dt)]
if len(self._dt_fracs) > 0:
self._dt = self._dt_fracs[0]
else:
self._dt = self._dt_orig
# Divide the current time interval again to smaller ones and do not update time
else:
for member in self.community_members:
del member.organism.cells_per_ml[self._t + self._dt]
del member.organism.gDW_per_ml[self._t + self._dt]
for shared_cmp in self.shared_compounds:
del shared_cmp.concentration[self._t + self._dt]
self.create_dt_frac()
# Use dt_frac instead of dt_orig
self._dt = self._dt_fracs[0]
else:
raise userError('Unknown case happened at t = ' + str(self._t) + '! self._t in self._time_points = ' + str(self._t in self._time_points) + '. Check DMMM.py for more details')
def dilute(self):
"""
Performs dilution (used when reactor_type is serial_dilution)
"""
#--- Cell concentrations are divided by the dilution factor ---
for member in self.community_members:
member.organism.gDW_per_ml[self._t] = member.organism.gDW_per_ml[self._t]/self.serial_dilution_params['dilution_factor']
if member.organism.cells_per_ml is not None:
member.organism.cells_per_ml[self._t] = int(member.organism.cells_per_ml[self._t]/self.serial_dilution_params['dilution_factor'])
#--- shared metabolite concentrations are set to those at the initial time point ---
for shared_cmp in self.shared_compounds:
shared_cmp.concentration[self._t] = shared_cmp.concentration[self._t0]
# lag phase time
self.lag_phase_time = self._t + self._lag_phase_time_orig
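# Worked example of the dilution arithmetic above (comments only, since this
# sits inside the class body); the numbers are illustrative assumptions.
# With gDW_per_ml[t] = 0.02 and serial_dilution_params['dilution_factor'] = 10,
# the biomass after dilution is 0.02 / 10 = 0.002 gDW/ml, cells_per_ml is
# divided by the same factor, and each shared compound concentration is reset
# to its value at the initial time point t0.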
def run_batch(self):
"""
This function runs dynamic FBA simulations for a batch culture
"""
#-- Lag phase --
mu_lag_phase = {}
for member in self.community_members:
if self.include_death_rate_in_mu:
mu_lag_phase[member.id] = member.organism.random_mortality_rate
else:
mu_lag_phase[member.id] = 0
while self._t < self.lag_phase_time:
# mu, uptake and export rates
for member in self.community_members:
member.organism.mu[self._t] = mu_lag_phase[member.id]
# Set the flux of exchange reactions for this member related to all shared compounds to zero
for rxn in [r for c in self.shared_compounds for r in c.reactions if r in member.reactions]:
rxn.flux[self._t] = 0
# Update concentrations in the next time point
self.update_concentrations_batch()
# Update time
self.update_time()
#-- After lag phase --
# abs(self._tf - self._t) > 1e-6 in the following is to avoid numerical errors
# It sometimes happens that self._t is almost equal to self._tf, but because of
# numerical errors self._t stays smaller than self._tf by a tiny amount
# (e.g., 1e-10), which would otherwise trigger one extra iteration
while self._t < self._tf and abs(self._tf - self._t) > 1e-6:
# Compute the upper bound on the uptake rates of
# the shared compounds using kinetic expressions
self.uptake_rate_UB_calc()
# Update the fba model for the initial point
self.update_fba_model()
# Compute the specific growth rate (mu) for each species using FBA
# as well as the export rates
self.mu_uptake_export_calc()
# Update concentrations in the next time point
self.update_concentrations_batch()
# Update time
self.update_time()
def run_serial_dilution(self):
"""
This function runs dynamic FBA simulations for a batch culture with serial dilutions
"""
# Time points where dilutioning is performed. The final time should not be included.
# For example, if t0 = 0, tf = 118 h and
# serial_dilution_params['time_between_dilutions'] = 24 h, then
# dilution_times = [24, 48, 72, 96]. dilution_times may or may not include the last time point
self._dilution_times = [self._t0 + self.serial_dilution_params['time_between_dilutions']*f for f in range(1,int((self._tf - self._t0)/self.serial_dilution_params['time_between_dilutions']) + 1)]
if self._dilution_times == []:
raise userError('Empty self._dilution_times! t0 = {} , tf = {}, serial_dilution_params[time_between_dilutions] = {}'.format(self._t0, self._tf, self.serial_dilution_params['time_between_dilutions']))
if self._tf in self._dilution_times:
del self._dilution_times[self._dilution_times.index(self._tf)]
#-- Lag phase --
while self._t < self.lag_phase_time:
# mu, uptake and export rates
for member in self.community_members:
if self.include_death_rate_in_mu:
member.organism.mu[self._t] = member.organism.random_mortality_rate
else:
member.organism.mu[self._t] = 0
# Set the flux of exchange reactions for this member related to all shared compounds to zero
for rxn in [r for c in self.shared_compounds for r in c.reactions if r in member.reactions]:
rxn.flux[self._t] = 0
# Update concentrations in the next time point
self.update_concentrations_batch()
# Update time
self.update_time()
# After lag phase
while self._t < self._tf:
# Perform dilution
# The second condition imposed in the following abs(t - self._t) <= 1e-6 is to take care of cases
# where we make an adjustment to delta_t and self._t may not be exactly equal to the time points
# in self._dilution_times
#if self._t in self._dilution_times or len([t for t in self._dilution_times if abs(t - self._t) <= 1e-6]) == 1:
if self._t in self._dilution_times:
if self.stdout_msgs:
print '\tDilutioning at time t = {} ...'.format(self._t)
self.dilute()
# Compute the upper bound on the uptake rates of
# the shared compounds using kinetic expressions
self.uptake_rate_UB_calc()
# Update the fba model for the initial point
self.update_fba_model()
# Compute the specific growth rate (mu) for each species using FBA
# as well as the export rates
self.mu_uptake_export_calc()
# Update concentrations in the next time point
self.update_concentrations_batch()
# Update time
self.update_time()
def run_life_cycle(self):
"""
This function runs dynamic FBA simulations for a microbial life cycle
"""
pass
def run_chemostat(self):
"""
This function runs FBA simulations for a chemostat
"""
pass
def run(self):
"""
This function runs the dynamic simulations.
"""
if self.stdout_msgs:
print 'Start running DMMM ...'
start_DMMM = time.clock()
self._t = self._t0
if self.stdout_msgs:
print 't = ',self._t
# Original delta_t
self._dt_orig = self._dt
if self.reactor_type.lower() == 'batch':
self.run_batch()
elif self.reactor_type.lower() == 'serial_dilution':
self.run_serial_dilution()
elif self.reactor_type.lower() == 'life_cycle':
self.run_life_cycle()
elif self.reactor_type.lower() == 'chemostat':
self.run_chemostat()
# Compute the uptake rates and mu at the last time point in case they are
# needed to be reported
self.uptake_rate_UB_calc()
self.update_fba_model()
self.mu_uptake_export_calc()
# Elapsed time to run DMMM
elapsed_DMMM = str(timedelta(seconds = time.clock() - start_DMMM))
if self.stdout_msgs:
print '\nelapsed time (sec) for DMMM = ',elapsed_DMMM,'\n'
#---------- Test DMMM ---------------
if __name__ == "__main__":
from copy import deepcopy
from tools.io.read_sbml_model import read_sbml_model
from set_specific_bounds import set_specific_bounds
# Increase the recursion limit, otherwise deepcopy will complain
sys.setrecursionlimit(10000)
# Model path
model_path = '/data/alizom/models/Escherichia_coli/iAF1260/'
# Define the organism
model_organism = organism(id = 'Ecoli', name = '<NAME>',domain = 'Bacteria', genus = 'Escherichia', species = 'coli', strain = 'MG1655')
iAF1260 = read_sbml_model(file_name = model_path + 'iAF1260_updated.xml', model_id = 'iAF1260',model_organism = model_organism,model_type = 'metabolic')
#--- E. coli iAF1260 model ---
print '\n--- Wild-type E.coli (iAF1260 model) ----'
iAF1260.biomass_reaction = iAF1260.get_reactions({'Ec_biomass_iAF1260_core_59p81M':'id'})
iAF1260.all_biomass_reactions = {'core':iAF1260.get_reactions({'Ec_biomass_iAF1260_core_59p81M':'id'}),'WT':iAF1260.get_reactions({'Ec_biomass_iAF1260_WT_59p81M':'id'})}
iAF1260.organism.gDW_per_cell = 2.8e-13
not static_shape or len(static_shape) != 4:
raise ValueError("Inputs to conv must have statically known rank 4. "
"Shape: " + str(static_shape))
# Add support for left padding.
if kwargs.get("padding") == "LEFT":
dilation_rate = (1, 1)
if "dilation_rate" in kwargs:
dilation_rate = kwargs["dilation_rate"]
assert kernel_size[0] % 2 == 1 and kernel_size[1] % 2 == 1
height_padding = 2 * (kernel_size[0] // 2) * dilation_rate[0]
cond_padding = tf.cond(
tf.equal(shape_list(inputs)[2], 1), lambda: tf.constant(0),
lambda: tf.constant(2 * (kernel_size[1] // 2) * dilation_rate[1]))
width_padding = 0 if static_shape[2] == 1 else cond_padding
padding = [[0, 0], [height_padding, 0], [width_padding, 0], [0, 0]]
inputs = tf.pad(inputs, padding)
# Set middle two dimensions to None to prevent convolution from complaining
inputs.set_shape([static_shape[0], None, None, static_shape[3]])
kwargs["padding"] = "VALID"
def conv2d_kernel(kernel_size_arg, name_suffix):
"""Call conv2d but add suffix to name."""
name = "{}_{}".format(kwargs.get("name", "conv"), name_suffix)
original_name = kwargs.pop("name", None)
original_force2d = kwargs.pop("force2d", None)
result = conv_fn(inputs, filters, kernel_size_arg, name=name, **kwargs)
if original_name is not None:
kwargs["name"] = original_name # Restore for other calls.
if original_force2d is not None:
kwargs["force2d"] = original_force2d
return result
return conv2d_kernel(kernel_size, "single")
def conv(inputs, filters, kernel_size, dilation_rate=(1, 1), **kwargs):
return conv_internal(
tf.layers.conv2d,
inputs,
filters,
kernel_size,
dilation_rate=dilation_rate,
**kwargs)
def conv1d(inputs, filters, kernel_size, dilation_rate=1, **kwargs):
return tf.squeeze(
conv(
tf.expand_dims(inputs, 2),
filters, (kernel_size, 1),
dilation_rate=(dilation_rate, 1),
**kwargs), 2)
def add_scope(scope=None, scope_fn=None):
"""Return a decorator which add a TF name/variable scope to a function.
Note that the function returned by the decorator accept an additional 'name'
parameter, which can overwrite the name scope given when the function is
created.
Args:
scope (str): name of the scope. If None, the function name is used.
scope_fn (fct): Either tf.name_scope or tf.variable_scope
Returns:
fct: the add_scope decorator
"""
def decorator(f):
@functools.wraps(f)
def decorated(*args, **kwargs):
# Python 2 hack for keyword only args
name = kwargs.pop("name", None)
with scope_fn(name or scope or f.__name__):
return f(*args, **kwargs)
return decorated
return decorator
def add_var_scope(scope=None):
return add_scope(scope, scope_fn=tf.variable_scope)
def add_name_scope(scope=None):
return add_scope(scope, scope_fn=tf.name_scope)
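# Hedged usage sketch of the scope decorators above: the decorated function
# gains an optional 'name' keyword that overrides the default scope name. The
# 'double' helper below is purely illustrative.
@add_name_scope()
def double(x):
    return tf.multiply(x, 2.0, name="times_two")
# double(tf.constant(3.0))              -> op created under name scope "double"
# double(tf.constant(3.0), name="dbl")  -> op created under name scope "dbl"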
@add_name_scope()
def combine_heads(x):
"""Inverse of split_heads.
Args:
x: a Tensor with shape [batch, num_heads, length, channels / num_heads]
Returns:
a Tensor with shape [batch, length, channels]
"""
return combine_last_two_dimensions(tf.transpose(x, [0, 2, 1, 3]))
@add_name_scope()
def combine_last_two_dimensions(x):
"""Reshape x so that the last two dimension become one.
Args:
x: a Tensor with shape [..., a, b]
Returns:
a Tensor with shape [..., ab]
"""
x_shape = shape_list(x)
a, b = x_shape[-2:]
return tf.reshape(x, x_shape[:-2] + [a * b])
@add_name_scope()
def get_timing_signal_1d(length,
channels,
min_timescale=1.0,
max_timescale=1.0e4,
start_index=0):
"""Gets a bunch of sinusoids of different frequencies.
Each channel of the input Tensor is incremented by a sinusoid of a different
frequency and phase.
This allows attention to learn to use absolute and relative positions.
Timing signals should be added to some precursors of both the query and the
memory inputs to attention.
The use of relative position is possible because sin(x+y) and cos(x+y) can be
expressed in terms of y, sin(x) and cos(x).
In particular, we use a geometric sequence of timescales starting with
min_timescale and ending with max_timescale. The number of different
timescales is equal to channels / 2. For each timescale, we
generate the two sinusoidal signals sin(timestep/timescale) and
cos(timestep/timescale). All of these sinusoids are concatenated in
the channels dimension.
Args:
length: scalar, length of timing signal sequence.
channels: scalar, size of timing embeddings to create. The number of
different timescales is equal to channels / 2.
min_timescale: a float
max_timescale: a float
start_index: index of first position
Returns:
a Tensor of timing signals [1, length, channels]
"""
positions = tf.to_float(tf.range(length) + start_index)
num_timescales = channels // 2
log_timescale_increment = (
math.log(float(max_timescale) / float(min_timescale)) / (tf.to_float(num_timescales) - 1))
inv_timescales = min_timescale * tf.exp(
tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
scaled_time = tf.expand_dims(positions, 1) * \
tf.expand_dims(inv_timescales, 0)
signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
signal = tf.pad(signal, [[0, 0], [0, tf.mod(channels, 2)]])
signal = tf.reshape(signal, [1, length, channels])
return signal
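# Plain-NumPy sketch of the same sinusoidal timing signal, handy as a reference
# when checking the graph code above. It mirrors the defaults and returns an
# array of shape [1, length, channels]; it assumes channels is even and > 2.
def _timing_signal_1d_numpy(length, channels, min_timescale=1.0,
                            max_timescale=1.0e4, start_index=0):
    import math
    import numpy as np
    positions = np.arange(length, dtype=np.float32) + start_index
    num_timescales = channels // 2
    log_timescale_increment = (
        math.log(float(max_timescale) / float(min_timescale)) / (num_timescales - 1))
    inv_timescales = min_timescale * np.exp(
        np.arange(num_timescales, dtype=np.float32) * -log_timescale_increment)
    scaled_time = positions[:, None] * inv_timescales[None, :]
    signal = np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1)
    return signal.reshape(1, length, channels)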
@add_name_scope()
def add_timing_signal_1d(x, min_timescale=1, max_timescale=1e4, start_index=0):
"""Adds a bunch of sinusoids of different frequencies to a Tensor.
Each channel of the input Tensor is incremented by a sinusoid of a different
frequency and phase.
This allows attention to learn to use absolute and relative positions.
Timing signals should be added to some precursors of both the query and the
memory inputs to attention.
The use of relative position is possible because sin(x+y) and cos(x+y) can be
expressed in terms of y, sin(x) and cos(x).
In particular, we use a geometric sequence of timescales starting with
min_timescale and ending with max_timescale. The number of different
timescales is equal to channels / 2. For each timescale, we
generate the two sinusoidal signals sin(timestep/timescale) and
cos(timestep/timescale). All of these sinusoids are concatenated in
the channels dimension.
Args:
x: a Tensor with shape [batch, length, channels]
min_timescale: a float
max_timescale: a float
start_index: index of first position
Returns:
a Tensor the same shape as x.
"""
length = shape_list(x)[1]
channels = shape_list(x)[2]
signal = get_timing_signal_1d(
length, channels, min_timescale, max_timescale, start_index)
return x + signal
@add_name_scope()
def remove_pad(x, pad_remover, mode):
"""Remove padding by concatenating all dimension into one.
Args:
x (tf.Tensor): input of shape [batch_size, length, depth]
pad_remover (obj): a PadRemover object
mode (ModeKeys): infer, train or eval. If inference, the padding remover is
not applied
Returns:
tf.Tensor of shape [1,length_nonpad,depth] where
length_nonpad <= batch_size*length
"""
# Concatenate all tokens (without padding)
x = flatten_all_but_last(x)
# Remove padding for training and eval
if mode != ModeKeys.PREDICT:
# This is a hack to allow inference when the <go> token
# is detected as padding and removed. This works for now because there is
# no padding at inference.
x = pad_remover.remove(x)
x = tf.expand_dims(x, axis=0) # Now batch_size=1
return x
@add_name_scope()
def restore_pad(x, ref_x, pad_remover, mode):
x = tf.squeeze(x, axis=0)
if mode != ModeKeys.PREDICT:
x = pad_remover.restore(x)
x = common_layers.reshape_like(x, ref_x)
return x
@add_name_scope("map_ids")
def map_ids(x, indices, map_fn):
"""Apply a function to each coordinate ids of a multidimensional tensor.
This allows each sequence of a batch to be processed independently. This is
similar to tf.map_fn but with a tensor where the batch dim has been flattened.
Warning: The indices ids have to be contiguous and ordered in memory, as the
output vectors for each of the ids are simply concatenated after being
processed.
Ex: if your indices are [0,2,2,1,2,0], the output will contain the processed
rows in the following order: [0,0,1,2,2,2]
Args:
x (Tensor): The tensor to be dispatched of shape [length,...]
indices (Tensor): A int32 tensor of size [length, 1] containing the batch
coordinate of x
map_fn (fct): Function called for every id of the original tensor. Takes
as input a tensor of the same rank as x and of shape [length_id,...] with
length_id <= length. Isn't called if length_id == 0
Returns:
a tensor of the same shape as x, where each element has been processed
"""
indices = tf.reshape(indices, [-1])
t_i = tf.constant(0)
# batch_coordinates start at 0
t_batch_size = tf.reduce_max(indices) + 1
# ta_stack_out will store the intermediate results for each individual id
# As an alternative to tf.TensorArray, scatter_update could potentially be used
# but that would require an additional mutable tensor.
ta_stack_out = tf.TensorArray(
x.dtype,
size=t_batch_size,
)
# Then we iterate over each sequence individually and compute the
# transformation for each id
while_condition = lambda t_i, *args: tf.less(t_i, t_batch_size)
def body(t_i, ta_stack_out):
"""Loop body."""
# Gather the ids
current_ids = tf.to_int32(tf.where(tf.equal(indices, t_i)))
t_row = tf.gather_nd(x, indices=current_ids)
# TODO(epot): Should not call map_fn if t_row size is 0
# Apply transformation to each id
# Restore batch_dim=1 as most function expect [batch_dim, length, ...] as
# input
t_row = tf.expand_dims(t_row, axis=0)
t_row = map_fn(t_row)
t_row = tf.squeeze(t_row, axis=0) # Squeeze for concatenation
ta_stack_out = ta_stack_out.write(t_i, t_row)
return [tf.add(t_i, 1), ta_stack_out] # ++i
# Run the loop, equivalent to:
# stack_out = []
# while i < batch_size:
# stack_out.expand(map_fn(x[indices==i]))
_, ta_stack_out = tf.while_loop(while_condition, body, [t_i, ta_stack_out])
# Merge all results
return ta_stack_out.concat()
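# Hedged usage sketch for map_ids: reverse each sequence of a flattened batch
# independently. The toy tensors below are illustrative assumptions; note that
# the batch coordinates must be contiguous and ordered, as warned above.
def _example_map_ids():
    x = tf.constant([[1.0], [2.0], [3.0], [4.0]])   # [length, depth]
    indices = tf.constant([[0], [0], [1], [1]])     # batch coordinate per row
    reverse_fn = lambda t: tf.reverse(t, axis=[1])  # t has shape [1, length_id, depth]
    return map_ids(x, indices, reverse_fn)          # rows processed per id, then concatenated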
@add_name_scope()
def coordinate_tensor(shape, axis):
"""Return a tensor with given shape containing coordinate along given axis.
to IDD field `Dry-Bulb Temperature Difference Range 8 Upper Limit`
"""
self["Dry-Bulb Temperature Difference Range 8 Upper Limit"] = value
@property
def range_8_equipment_list_name(self):
"""field `Range 8 Equipment List Name`
Args:
value (str): value for IDD Field `Range 8 Equipment List Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `range_8_equipment_list_name` or None if not set
"""
return self["Range 8 Equipment List Name"]
@range_8_equipment_list_name.setter
def range_8_equipment_list_name(self, value=None):
"""Corresponds to IDD field `Range 8 Equipment List Name`"""
self["Range 8 Equipment List Name"] = value
@property
def drybulb_temperature_difference_range_9_lower_limit(self):
"""field `Dry-Bulb Temperature Difference Range 9 Lower Limit`
| Units: deltaC
| value >= -50.0
| value <= 100.0
Args:
value (float): value for IDD Field `Dry-Bulb Temperature Difference Range 9 Lower Limit`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `drybulb_temperature_difference_range_9_lower_limit` or None if not set
"""
return self["Dry-Bulb Temperature Difference Range 9 Lower Limit"]
@drybulb_temperature_difference_range_9_lower_limit.setter
def drybulb_temperature_difference_range_9_lower_limit(self, value=None):
""" Corresponds to IDD field `Dry-Bulb Temperature Difference Range 9 Lower Limit`
"""
self["Dry-Bulb Temperature Difference Range 9 Lower Limit"] = value
@property
def drybulb_temperature_difference_range_9_upper_limit(self):
"""field `Dry-Bulb Temperature Difference Range 9 Upper Limit`
| Units: deltaC
| value >= -50.0
| value <= 100.0
Args:
value (float): value for IDD Field `Dry-Bulb Temperature Difference Range 9 Upper Limit`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `drybulb_temperature_difference_range_9_upper_limit` or None if not set
"""
return self["Dry-Bulb Temperature Difference Range 9 Upper Limit"]
@drybulb_temperature_difference_range_9_upper_limit.setter
def drybulb_temperature_difference_range_9_upper_limit(self, value=None):
""" Corresponds to IDD field `Dry-Bulb Temperature Difference Range 9 Upper Limit`
"""
self["Dry-Bulb Temperature Difference Range 9 Upper Limit"] = value
@property
def range_9_equipment_list_name(self):
"""field `Range 9 Equipment List Name`
Args:
value (str): value for IDD Field `Range 9 Equipment List Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `range_9_equipment_list_name` or None if not set
"""
return self["Range 9 Equipment List Name"]
@range_9_equipment_list_name.setter
def range_9_equipment_list_name(self, value=None):
"""Corresponds to IDD field `Range 9 Equipment List Name`"""
self["Range 9 Equipment List Name"] = value
@property
def drybulb_temperature_difference_range_10_lower_limit(self):
"""field `Dry-Bulb Temperature Difference Range 10 Lower Limit`
| Units: deltaC
| value >= -50.0
| value <= 100.0
Args:
value (float): value for IDD Field `Dry-Bulb Temperature Difference Range 10 Lower Limit`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `drybulb_temperature_difference_range_10_lower_limit` or None if not set
"""
return self["Dry-Bulb Temperature Difference Range 10 Lower Limit"]
@drybulb_temperature_difference_range_10_lower_limit.setter
def drybulb_temperature_difference_range_10_lower_limit(self, value=None):
""" Corresponds to IDD field `Dry-Bulb Temperature Difference Range 10 Lower Limit`
"""
self["Dry-Bulb Temperature Difference Range 10 Lower Limit"] = value
@property
def drybulb_temperature_difference_range_10_upper_limit(self):
"""field `Dry-Bulb Temperature Difference Range 10 Upper Limit`
| Units: deltaC
| value >= -50.0
| value <= 100.0
Args:
value (float): value for IDD Field `Dry-Bulb Temperature Difference Range 10 Upper Limit`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `drybulb_temperature_difference_range_10_upper_limit` or None if not set
"""
return self["Dry-Bulb Temperature Difference Range 10 Upper Limit"]
@drybulb_temperature_difference_range_10_upper_limit.setter
def drybulb_temperature_difference_range_10_upper_limit(self, value=None):
""" Corresponds to IDD field `Dry-Bulb Temperature Difference Range 10 Upper Limit`
"""
self["Dry-Bulb Temperature Difference Range 10 Upper Limit"] = value
@property
def range_10_equipment_list_name(self):
"""field `Range 10 Equipment List Name`
Args:
value (str): value for IDD Field `Range 10 Equipment List Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `range_10_equipment_list_name` or None if not set
"""
return self["Range 10 Equipment List Name"]
@range_10_equipment_list_name.setter
def range_10_equipment_list_name(self, value=None):
"""Corresponds to IDD field `Range 10 Equipment List Name`"""
self["Range 10 Equipment List Name"] = value
class PlantEquipmentOperationOutdoorWetBulbDifference(DataObject):
""" Corresponds to IDD object `PlantEquipmentOperation:OutdoorWetBulbDifference`
Plant equipment operation scheme for outdoor wet-bulb temperature difference
operation. Specifies one or more groups of equipment which are available to operate
for successive ranges based on the difference between a reference node temperature and
the outdoor wet-bulb temperature.
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': 'alpha'}),
(u'reference temperature node name',
{'name': u'Reference Temperature Node Name',
'pyname': u'reference_temperature_node_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'node'}),
(u'wet-bulb temperature difference range 1 lower limit',
{'name': u'Wet-Bulb Temperature Difference Range 1 Lower Limit',
'pyname': u'wetbulb_temperature_difference_range_1_lower_limit',
'maximum': 100.0,
'required-field': True,
'autosizable': False,
'minimum': -50.0,
'autocalculatable': False,
'type': u'real',
'unit': u'deltaC'}),
(u'wet-bulb temperature difference range 1 upper limit',
{'name': u'Wet-Bulb Temperature Difference Range 1 Upper Limit',
'pyname': u'wetbulb_temperature_difference_range_1_upper_limit',
'maximum': 100.0,
'required-field': True,
'autosizable': False,
'minimum': -50.0,
'autocalculatable': False,
'type': u'real',
'unit': u'deltaC'}),
(u'range 1 equipment list name',
{'name': u'Range 1 Equipment List Name',
'pyname': u'range_1_equipment_list_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'wet-bulb temperature difference range 2 lower limit',
{'name': u'Wet-Bulb Temperature Difference Range 2 Lower Limit',
'pyname': u'wetbulb_temperature_difference_range_2_lower_limit',
'maximum': 100.0,
'required-field': False,
'autosizable': False,
'minimum': -50.0,
'autocalculatable': False,
'type': u'real',
'unit': u'deltaC'}),
(u'wet-bulb temperature difference range 2 upper limit',
{'name': u'Wet-Bulb Temperature Difference Range 2 Upper Limit',
'pyname': u'wetbulb_temperature_difference_range_2_upper_limit',
'maximum': 100.0,
'required-field': False,
'autosizable': False,
'minimum': -50.0,
'autocalculatable': False,
'type': u'real',
'unit': u'deltaC'}),
(u'range 2 equipment list name',
{'name': u'Range 2 Equipment List Name',
'pyname': u'range_2_equipment_list_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'wet-bulb temperature difference range 3 lower limit',
{'name': u'Wet-Bulb Temperature Difference Range 3 Lower Limit',
'pyname': u'wetbulb_temperature_difference_range_3_lower_limit',
'maximum': 100.0,
'required-field': False,
'autosizable': False,
'minimum': -50.0,
'autocalculatable': False,
'type': u'real',
'unit': u'deltaC'}),
(u'wet-bulb temperature difference range 3 upper limit',
{'name': u'Wet-Bulb Temperature Difference Range 3 Upper Limit',
'pyname': u'wetbulb_temperature_difference_range_3_upper_limit',
'maximum': 100.0,
'required-field': False,
'autosizable': False,
'minimum': -50.0,
'autocalculatable': False,
'type': u'real',
'unit': u'deltaC'}),
(u'range 3 equipment list name',
{'name': u'Range 3 Equipment List Name',
'pyname': u'range_3_equipment_list_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'wet-bulb temperature difference range 4 lower limit',
{'name': u'Wet-Bulb Temperature Difference Range 4 Lower Limit',
'pyname': u'wetbulb_temperature_difference_range_4_lower_limit',
'maximum': 100.0,
'required-field': False,
'autosizable': False,
'minimum': -50.0,
'autocalculatable': False,
'type': u'real',
'unit': u'deltaC'}),
(u'wet-bulb temperature difference range 4 upper limit',
{'name': u'Wet-Bulb Temperature Difference Range 4 Upper Limit',
'pyname': u'wetbulb_temperature_difference_range_4_upper_limit',
'maximum': 100.0,
'required-field': False,
'autosizable': False,
'minimum': -50.0,
'autocalculatable': False,
'type': u'real',
'unit': u'deltaC'}),
(u'range 4 equipment list name',
{'name': u'Range 4 Equipment List Name',
'pyname': u'range_4_equipment_list_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'wet-bulb temperature difference range 5 lower limit',
{'name': u'Wet-Bulb Temperature Difference Range 5 Lower Limit',
'pyname': u'wetbulb_temperature_difference_range_5_lower_limit',
'maximum': 100.0,
'required-field': False,
'autosizable': False,
'minimum': -50.0,
'autocalculatable': False,
'type': u'real',
'unit': u'deltaC'}),
(u'wet-bulb temperature difference range 5 upper limit',
{'name': u'Wet-Bulb Temperature Difference Range 5 Upper Limit',
'pyname': u'wetbulb_temperature_difference_range_5_upper_limit',
'maximum': 100.0,
'required-field': False,
'autosizable': False,
'minimum': -50.0,
'autocalculatable': False,
'type': u'real',
'unit': u'deltaC'}),
(u'range 5 equipment list name',
{'name': u'Range 5 Equipment List Name',
'pyname': u'range_5_equipment_list_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'wet-bulb temperature difference range 6 lower limit',
{'name': u'Wet-Bulb Temperature Difference Range 6 Lower Limit',
'pyname': u'wetbulb_temperature_difference_range_6_lower_limit',
'maximum': 100.0,
'required-field': False,
'autosizable': False,
'minimum': -50.0,
'autocalculatable': False,
'type': u'real',
'unit': u'deltaC'}),
(u'wet-bulb temperature difference range 6 upper limit',
{'name': u'Wet-Bulb Temperature Difference Range 6 Upper Limit',
'pyname': u'wetbulb_temperature_difference_range_6_upper_limit',
'maximum': 100.0,
'required-field': False,
'autosizable': False,
'minimum': -50.0,
'autocalculatable': False,
'type': u'real',
'unit': u'deltaC'}),
(u'range 6 equipment list name',
{'name': u'Range 6 Equipment List Name',
'pyname': u'range_6_equipment_list_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'wet-bulb temperature difference range 7 lower limit',
{'name': u'Wet-Bulb Temperature Difference Range 7 Lower Limit',
'pyname': u'wetbulb_temperature_difference_range_7_lower_limit',
'maximum': 100.0,
'required-field': False,
'autosizable': False,
'minimum': -50.0,
'autocalculatable': False,
'type': u'real',
'unit': u'deltaC'}),
(u'wet-bulb temperature difference range 7 upper limit',
{'name': u'Wet-Bulb Temperature Difference Range 7 Upper Limit',
'pyname': u'wetbulb_temperature_difference_range_7_upper_limit',
'maximum': 100.0,
'required-field': False,
'autosizable': False,
'minimum': -50.0,
'autocalculatable': False,
'type': u'real',
'unit': u'deltaC'}),
(u'range 7 equipment list name',
{'name': u'Range 7 Equipment List Name',
'pyname': u'range_7_equipment_list_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'wet-bulb temperature difference range 8 lower limit',
{'name': u'Wet-Bulb Temperature Difference Range 8 Lower Limit',
'pyname': u'wetbulb_temperature_difference_range_8_lower_limit',
'maximum': 100.0,
'required-field': False,
'autosizable': False,
'minimum': -50.0,
'autocalculatable': False,
'type': u'real',
cell.append("-1.0 * " + reaction_id)
else:
for reaction_id in matrix[product].get(substrate):
if '*' in reaction_id:
cell.append("-0.5 * " +
reaction_id.replace("*", ""))
else:
cell.append("-1.0 * " + reaction_id)
if len(cell) != 0:
cell_string = ' + '.join(cell)
if self.configuration['callbacklevel'] == 7:
print(
product, '\t', substrate,
"\tB_" + str(emu_size) + "[" + str(
emu_intermediate_of_this_layer.index(
product) * size_s +
emu_sourse_of_this_layer.index(substrate))
+ "] = " + cell_string)
string += "\tB_" + str(emu_size) + "[" + str(
emu_intermediate_of_this_layer.index(product) *
size_s + emu_sourse_of_this_layer.index(substrate)
) + "] = " + cell_string + "\n"
string += "\tB_" + str(emu_size) + ".resize((" + str(
size_i) + "," + str(size_s) + "))\n"
# Output Y
string += "\tY_" + str(emu_size) + " = numpy.zeros((" + str(
len(emu_sourse_of_this_layer) * (emu_size + 1)) + ",))\n"
for substrate in emu_sourse_of_this_layer:
row = []
#print substrate
if '+' in substrate: # for reaction with multiple substrates
subs_matrix = []
list_emusize_plus1 = []
n = len(substrate.split('+'))
for subs in substrate.split(
'+'): #multiple substrate reaction
list_emusize_plus1.append(size_of_EMU(subs) + 1)
if compound_of_EMU(
subs) in self.carbon_source: # Carbon source
#sub1_list = generate_carbonsource_MID(sub1)
compound = compound_of_EMU(subs)
size = size_of_EMU(subs)
subs_matrix.append([
"mdv_carbon_sources[\"" + subs + "\"][" +
str(x) + "]" for x in range(size + 1)
])
else:
compound = compound_of_EMU(subs)
size = size_of_EMU(subs)
subs_matrix.append([
"X_" + str(size) + "[" +
str((emu_intermediate[int(size)][subs]) *
(size + 1) + x) + "]"
for x in range(size + 1)
])
sum_length = 0
for subs_list in subs_matrix:
sum_length = sum_length + len(subs_list)
equation = [[] for x in range(sum_length)]
conbinaion_list = permute(list_emusize_plus1)
for conbination in conbinaion_list:
equation[sum(conbination)].extend([("*").join([
str(subs_matrix[i][conbination[i]])
for i in range(len(conbination))
])])
for numberofisotope in range(len(equation) - n + 1):
if self.configuration['callbacklevel'] == 7:
print(
substrate, '\t', '',
"\tY_" + str(emu_size) + "[" + str(
emu_sourse_of_this_layer.index(substrate) *
(emu_size + 1) + numberofisotope) +
"] = " + '+'.join(equation[numberofisotope]))
string += "\tY_" + str(emu_size) + "[" + str(
emu_sourse_of_this_layer.index(substrate) *
(emu_size + 1) +
numberofisotope) + "] = " + '+'.join(
equation[numberofisotope]) + "\n"
else:
compound = compound_of_EMU(substrate)
size = size_of_EMU(substrate)
for numberofisotope in range(size + 1):
if self.configuration['callbacklevel'] == 7:
print(
substrate, '\t', '',
"\tY_" + str(emu_size) + "[" + str(
emu_sourse_of_this_layer.index(substrate) *
(emu_size + 1) + numberofisotope) +
"] = " + "mdv_carbon_sources[\"" + substrate +
"\"][" + str(numberofisotope) + "]")
string += "\tY_" + str(emu_size) + "[" + str(
emu_sourse_of_this_layer.index(substrate) *
(emu_size + 1) + numberofisotope
) + "] = " + "mdv_carbon_sources[\"" + substrate + "\"][" + str(
numberofisotope) + "]" + "\n"
string += "\tY_" + str(emu_size) + ".resize((" + str(
size_s) + "," + str(emu_size + 1) + "))\n"
# Calc X
string += "\tnonzero_row = numpy.nonzero(numpy.diag(A_" + str(
emu_size) + ") < -0.001)[0]\n"
string += "\tif len(nonzero_row) == A_" + str(
emu_size) + ".shape[0]:\n"
string += "\t\tX_" + str(
emu_size) + " = numpy.linalg.solve(A_" + str(
emu_size) + ", numpy.dot(B_" + str(
emu_size) + ", Y_" + str(emu_size) + "))\n"
#string += "\t\tX_" + str(emu_size) + " = numpy.dot(numpy.linalg.inv(A_" + str(emu_size) + "), numpy.dot(B_" + str(emu_size) + ", Y_" + str(emu_size) + "))\n"
string += "\telse:\n"
string += "\t\teye = numpy.eye(A_" + str(
emu_size) + ".shape[0])[nonzero_row,:]\n"
string += "\t\teyeT = numpy.eye(A_" + str(
emu_size) + ".shape[0])[:,nonzero_row]\n"
#string += "\t\tprint 'yieldee'\n"
string += "\t\tX_" + str(
emu_size
) + " = numpy.dot(eyeT, numpy.linalg.solve(numpy.dot(numpy.dot(eye, A_" + str(
emu_size) + "), eyeT), numpy.dot(eye, numpy.dot(B_" + str(
emu_size) + ", Y_" + str(emu_size) + "))))\n"
#string += "\tprint X_" + str(emu_size) + "\n"
string += "\tX_list.extend(list(X_" + str(
emu_size) + ".ravel()))\n"
string += "\tX_" + str(emu_size) + " = X_" + str(
emu_size) + ".reshape((" + str(size_i *
(emu_size + 1)) + ",))\n"
# Calc MV
for target_fragment in target_fragments:
#Ignore
if target_fragments[target_fragment]['use'] != 'use':
continue
target_emus = target_fragments[target_fragment]['atommap']
#msms data
if target_fragments[target_fragment]['type'] == "msms":
precursor, neutralloss, product = target_emus.replace(
' ', '').split('+')
                # get size information
size_precursor = size_of_EMU(precursor)
size_product = size_of_EMU(product)
size_neutralloss = size_of_EMU(neutralloss)
product_carbon_number = list(product.split('_')[1])
# Mask of product carbon
product_mask = []
for i in (range(size_precursor)):
product_mask.append("0")
for i in (product_carbon_number):
product_mask[size_precursor - int(i)] = "1"
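                # Illustrative example: for a 5-carbon precursor whose product ion keeps
                # carbons 1, 4 and 5, product_mask becomes ['1', '1', '0', '0', '1'],
                # i.e. the bit string "11001" with carbon 1 as the least significant
                # (rightmost) bit.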
# Number of IDV
numberofidv = 2**size_precursor
                # initialize mdv_matrix
mdv_matrix = []
for pre in (range(0, size_precursor + 1)):
temp = []
for pro in (range(0, size_product + 1)):
temp.append('')
mdv_matrix.append(temp)
# Check IDV numbers for precursor and product ions
# Number of MRM series
mrm_count = 0
for i in (sorted(range(numberofidv))):
#Generate IDV
idv = "{0:0>{1}{2}}".format(i, size_precursor, 'b')
#Generate IDV of product ions
idv_masked_by_product = format(
(int(idv, 2) & int("".join(product_mask), 2)), 'b')
#Numbers of 13C in precursor and product ion
precursor_isotope_number = sum(
[1 for x in idv if x == "1"])
product_isotope_number = sum(
[1 for x in idv_masked_by_product if x == "1"])
if (mdv_matrix[precursor_isotope_number]
[product_isotope_number] == ''):
mdv_matrix[precursor_isotope_number][
product_isotope_number] = mrm_count
mrm_count = mrm_count + 1
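                # mdv_matrix now maps (13C count in the precursor, 13C count in the
                # product) to a sequential MRM channel index, and mrm_count is the
                # number of distinct channels; e.g. a 2-carbon precursor whose product
                # keeps carbon 1 yields four channels: (0,0), (1,1), (1,0) and (2,1).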
#print mdv_matrix
#Prepare matrix A and B
string += "\tA_" + precursor + product + "= numpy.zeros((" + str(
(mrm_count) * (mrm_count)) + "))\n"
string += "\tB_" + precursor + product + "= numpy.zeros((" + str(
mrm_count) + ",))\n"
equation_count = 0
#precursor ion
for i in range(size_precursor + 1):
if (equation_count >= mrm_count):
break
for j in range(size_product + 1):
if (mdv_matrix[i][j] != ''):
string += "\tA_" + precursor + product + "[" + str(
equation_count *
(mrm_count) + mdv_matrix[i][j]) + "] = 1\n"
string += "\tB_" + precursor + product + "[" + str(
equation_count
) + "] = X_" + str(size_precursor) + "[" + str(
emu_intermediate[int(size_precursor)][precursor] *
(size_precursor + 1) + i) + "]\n"
equation_count = equation_count + 1
#product ion
for j in range(size_product + 1):
if (equation_count >= mrm_count):
break
for i in range(size_precursor + 1):
#print i, j, mdv_matrix[i][j]
if (mdv_matrix[i][j] != ''):
string += "\tA_" + precursor + product + "[" + str(
equation_count *
(mrm_count) + mdv_matrix[i][j]) + "] = 1\n"
string += "\tB_" + precursor + product + "[" + str(
equation_count) + "] = X_" + str(
size_product) + "[" + str(
emu_intermediate[int(size_product)][product] *
(size_product + 1) + j) + "]\n"
equation_count = equation_count + 1
#neutral loss
for j in range(size_neutralloss + 1):
if (equation_count >= mrm_count):
break
for i in range(size_product + 1):
#print i, j, mdv_matrix[i][j]
if (mdv_matrix[i][i + j] != ''):
string += "\tA_" + precursor + product + "[" + str(
equation_count *
(mrm_count) + mdv_matrix[i][i + j]) + "] = 1\n"
string += "\tB_" + precursor + product + "[" + str(
equation_count
) + "] = X_" + str(size_neutralloss) + "[" + str(
emu_intermediate[int(size_neutralloss)][neutralloss] *
(size_neutralloss + 1) + j) + "]\n"
equation_count = equation_count + 1
string += "\tA_" + precursor + product + ".resize((" + str(
mrm_count) + "," + str(mrm_count) + "))\n"
string += "\tMS_" + precursor + product + " = numpy.linalg.solve(A_" + precursor + product + ", B_" + precursor + product + ")\n"
string += "\temu_list['" + target_fragment + "'] = [" + ','.join(
"MS_" + precursor + product + "[" + str(x) + "]"
for x in range(mrm_count)) + "]\n"
#gcms
else:
emus = target_emus.replace(' ', '').split('+')
# one EMU
if (len(emus) == 1):
emu = emus[0]
compound = compound_of_EMU(emu)
size = size_of_EMU(emu)
row = [
"X_" + str(size) + "[" +
str(emu_intermediate[int(size)][emu] *
(size + 1) + x) + "]" for x in range(size + 1)
]
string += "\temu_list['" + target_fragment + "'] = [" + ','.join(
row) + "]\n"
if self.configuration[
'add_naturalisotope_in_calmdv'] == "yes":
if not self.target_fragments[target_fragment][
"formula"] == "":
string += "\tmdvtemp = [" + ','.join(row) + "]\n"
string += "\tmdvcorrected = mdvtemp[:]\n"
for i in range(len(row)):
textdatatemp = []
for j in range(len(row)):
textdatatemp.append(
str(self.
target_fragments[target_fragment]
['natural_isotope_addition'][i, j])
+ "* mdvtemp[" + str(j) + "]")
string += "\tmdvcorrected[" + str(
i) + "] =" + '+'.join(textdatatemp) + "\n"
string += "\temu_list['" + target_fragment + "'] = mdvcorrected\n"
# multiple EMUs
else:
                    #
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import unittest
from azure_devtools.scenario_tests import AllowLargeResponse
from azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer)
TEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), '..'))
class ApimgmtScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_apimgmt')
def test_apimgmt(self, resource_group):
self.kwargs.update({
'name': 'test1'
})
# create_or_update -- create
self.cmd('apim api create --resource-group "rg1" --service-name "apimService1" --api-id "petstore" --path "petstore" --value "https://raw.githubusercontent.com/OAI/OpenAPI-Specification/master/examples/v3.0/petstore.yaml" --format "openapi-link"', checks=[
])
self.cmd('apim api create --resource-group "rg1" --service-name "apimService1" --api-id "petstore" --path "petstore" --value "http://petstore.swagger.io/v2/swagger.json" --format "swagger-link-json"', checks=[
])
self.cmd('apim api create --resource-group "rg1" --service-name "apimService1" --api-id "petstore" --path "collector" --value "https://developer.cisco.com/media/wae-release-6-2-api-reference/wae-collector-rest-api/application.wadl" --format "wadl-link-json"', checks=[
])
self.cmd('apim api create --resource-group "rg1" --service-name "apimService1" --api-id "soapApi" --path "currency" --value "http://www.webservicex.net/CurrencyConvertor.asmx?WSDL" --format "wsdl-link"', checks=[
])
self.cmd('apim api create --resource-group "rg1" --service-name "apimService1" --api-id "soapApi" --path "currency" --value "http://www.webservicex.net/CurrencyConvertor.asmx?WSDL" --format "wsdl-link" --api-type "soap"', checks=[
])
self.cmd('apim api create --resource-group "rg1" --service-name "apimService1" --api-id "tempgroup" --description "apidescription5200" --display-name "apiname1463" --service-url "http://newechoapi.cloudapp.net/api" --path "newapiPath"', checks=[
])
self.cmd('apim api create --resource-group "rg1" --service-name "apimService1" --api-id "echo-api;rev=3" --api-revision-description "Creating a Revision of an existing API" --source-api-id "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}/providers/Microsoft.ApiManagement/service/{{ service_name }}/apis/{{ api_name }}" --service-url "http://echoapi.cloudapp.net/apiv3" --path "echo"', checks=[
])
self.cmd('apim api create --resource-group "rg1" --service-name "apimService1" --api-id "echoapiv3" --description "Create Echo API into a new Version using Existing Version Set and Copy all Operations." --api-version "v4" --is-current true --api-version-set-id "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}/providers/Microsoft.ApiManagement/service/{{ service_name }}/apiVersionSets/{{ api_version_set_name }}" --subscription-required true --source-api-id "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}/providers/Microsoft.ApiManagement/service/{{ service_name }}/apis/{{ api_name }}" --display-name "Echo API2" --service-url "http://echoapi.cloudapp.net/api" --path "echo2"', checks=[
])
self.cmd('apim api create --resource-group "rg1" --service-name "apimService1" --api-id "echo-api2" --description "Copy of Existing Echo Api including Operations." --is-current true --subscription-required true --source-api-id "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}/providers/Microsoft.ApiManagement/service/{{ service_name }}/apis/{{ api_name }}" --display-name "Echo API2" --service-url "http://echoapi.cloudapp.net/api" --path "echo2"', checks=[
])
self.cmd('apim api create --resource-group "rg1" --service-name "apimService1" --api-id "tempgroup" --description "This is a sample server Petstore server. You can find out more about Swagger at [http://swagger.io](http://swagger.io) or on [irc.freenode.net, #swagger](http://swagger.io/irc/). For this sample, you can use the api key `special-key` to test the authorization filters." --display-name "Swagger Petstore" --service-url "http://petstore.swagger.io/v2" --path "petstore"', checks=[
])
self.cmd('apim api create --resource-group "rg1" --service-name "apimService1" --api-id "apidocs" --service-url "http://petstore.swagger.wordnik.com/api" --path "petstoreapi123" --value "http://apimpimportviaurl.azurewebsites.net/api/apidocs/" --format "swagger-link"', checks=[
])
self.cmd('apim api create --resource-group "rg1" --service-name "apimService1" --api-id "echo-api" --display-name "Echo API New" --service-url "http://echoapi.cloudapp.net/api2" --path "newecho"', checks=[
])
self.cmd('apim api create --resource-group "rg1" --service-name "apimService1" --api-id "echo-api"', checks=[
])
# create_or_update -- update
self.cmd('apim api update --resource-group "rg1" --service-name "apimService1" --api-id "petstore" --path "petstore" --value "https://raw.githubusercontent.com/OAI/OpenAPI-Specification/master/examples/v3.0/petstore.yaml" --format "openapi-link"', checks=[
])
self.cmd('apim api update --resource-group "rg1" --service-name "apimService1" --api-id "petstore" --path "petstore" --value "http://petstore.swagger.io/v2/swagger.json" --format "swagger-link-json"', checks=[
])
self.cmd('apim api update --resource-group "rg1" --service-name "apimService1" --api-id "petstore" --path "collector" --value "https://developer.cisco.com/media/wae-release-6-2-api-reference/wae-collector-rest-api/application.wadl" --format "wadl-link-json"', checks=[
])
self.cmd('apim api update --resource-group "rg1" --service-name "apimService1" --api-id "soapApi" --path "currency" --value "http://www.webservicex.net/CurrencyConvertor.asmx?WSDL" --format "wsdl-link"', checks=[
])
self.cmd('apim api update --resource-group "rg1" --service-name "apimService1" --api-id "soapApi" --path "currency" --value "http://www.webservicex.net/CurrencyConvertor.asmx?WSDL" --format "wsdl-link" --api-type "soap"', checks=[
])
self.cmd('apim api update --resource-group "rg1" --service-name "apimService1" --api-id "tempgroup" --description "apidescription5200" --display-name "apiname1463" --service-url "http://newechoapi.cloudapp.net/api" --path "newapiPath"', checks=[
])
self.cmd('apim api update --resource-group "rg1" --service-name "apimService1" --api-id "echo-api;rev=3" --api-revision-description "Creating a Revision of an existing API" --source-api-id "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}/providers/Microsoft.ApiManagement/service/{{ service_name }}/apis/{{ api_name }}" --service-url "http://echoapi.cloudapp.net/apiv3" --path "echo"', checks=[
])
self.cmd('apim api update --resource-group "rg1" --service-name "apimService1" --api-id "echoapiv3" --description "Create Echo API into a new Version using Existing Version Set and Copy all Operations." --api-version "v4" --is-current true --api-version-set-id "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}/providers/Microsoft.ApiManagement/service/{{ service_name }}/apiVersionSets/{{ api_version_set_name }}" --subscription-required true --source-api-id "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}/providers/Microsoft.ApiManagement/service/{{ service_name }}/apis/{{ api_name }}" --display-name "Echo API2" --service-url "http://echoapi.cloudapp.net/api" --path "echo2"', checks=[
])
self.cmd('apim api update --resource-group "rg1" --service-name "apimService1" --api-id "echo-api2" --description "Copy of Existing Echo Api including Operations." --is-current true --subscription-required true --source-api-id "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}/providers/Microsoft.ApiManagement/service/{{ service_name }}/apis/{{ api_name }}" --display-name "Echo API2" --service-url "http://echoapi.cloudapp.net/api" --path "echo2"', checks=[
])
self.cmd('apim api update --resource-group "rg1" --service-name "apimService1" --api-id "tempgroup" --description "This is a sample server Petstore server. You can find out more about Swagger at [http://swagger.io](http://swagger.io) or on [irc.freenode.net, #swagger](http://swagger.io/irc/). For this sample, you can use the api key `special-key` to test the authorization filters." --display-name "Swagger Petstore" --service-url "http://petstore.swagger.io/v2" --path "petstore"', checks=[
])
self.cmd('apim api update --resource-group "rg1" --service-name "apimService1" --api-id "apidocs" --service-url "http://petstore.swagger.wordnik.com/api" --path "petstoreapi123" --value "http://apimpimportviaurl.azurewebsites.net/api/apidocs/" --format "swagger-link"', checks=[
])
self.cmd('apim api update --resource-group "rg1" --service-name "apimService1" --api-id "echo-api" --display-name "Echo API New" --service-url "http://echoapi.cloudapp.net/api2" --path "newecho"', checks=[
])
self.cmd('apim api update --resource-group "rg1" --service-name "apimService1" --api-id "echo-api"', checks=[
])
# delete -- delete
self.cmd('apim api delete --resource-group "rg1" --service-name "apimService1" --api-id "petstore"', checks=[
])
self.cmd('apim api delete --resource-group "rg1" --service-name "apimService1" --api-id "petstore"', checks=[
])
self.cmd('apim api delete --resource-group "rg1" --service-name "apimService1" --api-id "petstore"', checks=[
])
self.cmd('apim api delete --resource-group "rg1" --service-name "apimService1" --api-id "soapApi"', checks=[
])
self.cmd('apim api delete --resource-group "rg1" --service-name "apimService1" --api-id "soapApi"', checks=[
])
self.cmd('apim api delete --resource-group "rg1" --service-name "apimService1" --api-id "tempgroup"', checks=[
])
self.cmd('apim api delete --resource-group "rg1" --service-name "apimService1" --api-id "echo-api;rev=3"', checks=[
])
self.cmd('apim api delete --resource-group "rg1" --service-name "apimService1" --api-id "echoapiv3"', checks=[
])
self.cmd('apim api delete --resource-group "rg1" --service-name "apimService1" --api-id "echo-api2"', checks=[
])
self.cmd('apim api delete --resource-group "rg1" --service-name "apimService1" --api-id "tempgroup"', checks=[
])
self.cmd('apim api delete --resource-group "rg1" --service-name "apimService1" --api-id "apidocs"', checks=[
])
self.cmd('apim api delete --resource-group "rg1" --service-name "apimService1" --api-id "echo-api"', checks=[
])
self.cmd('apim api delete --resource-group "rg1" --service-name "apimService1" --api-id "echo-api"', checks=[
])
# list_by_tags -- list
self.cmd('apim api list --resource-group "rg1" --service-name "apimService1"', checks=[
])
self.cmd('apim api list --resource-group "rg1" --service-name "apimService1"', checks=[
])
self.cmd('apim api list --resource-group "rg1" --service-name "apimService1"', checks=[
])
self.cmd('apim api list --resource-group "rg1" --service-name "apimService1"', checks=[
])
self.cmd('apim api list --resource-group "rg1" --service-name "apimService1"', checks=[
])
self.cmd('apim api list --resource-group "rg1" --service-name "apimService1"', checks=[
])
self.cmd('apim api list --resource-group "rg1" --service-name "apimService1"', checks=[
])
self.cmd('apim api list --resource-group "rg1" --service-name "apimService1"', checks=[
])
self.cmd('apim api list --resource-group "rg1" --service-name "apimService1"', checks=[
])
self.cmd('apim api list --resource-group "rg1" --service-name "apimService1"', checks=[
])
self.cmd('apim api list --resource-group "rg1" --service-name "apimService1"', checks=[
])
self.cmd('apim api list --resource-group "rg1" --service-name "apimService1"', checks=[
])
self.cmd('apim api list --resource-group "rg1" --service-name "apimService1"', checks=[
])
# list_by_service -- list
self.cmd('apim api list --resource-group "rg1" --service-name "apimService1"', checks=[
])
self.cmd('apim api list --resource-group "rg1" --service-name "apimService1"', checks=[
])
self.cmd('apim api list --resource-group "rg1" --service-name "apimService1"', checks=[
])
self.cmd('apim api list --resource-group "rg1" --service-name "apimService1"', checks=[
])
self.cmd('apim api list --resource-group "rg1" --service-name "apimService1"', checks=[
])
self.cmd('apim api list --resource-group "rg1" --service-name "apimService1"', checks=[
])
self.cmd('apim api list --resource-group "rg1" --service-name "apimService1"', checks=[
])
self.cmd('apim api list --resource-group "rg1" --service-name "apimService1"', checks=[
])
self.cmd('apim api list --resource-group "rg1" --service-name "apimService1"', checks=[
])
self.cmd('apim api list --resource-group "rg1" --service-name "apimService1"', checks=[
])
self.cmd('apim api list --resource-group "rg1" --service-name "apimService1"', checks=[
])
self.cmd('apim api list --resource-group "rg1" --service-name "apimService1"', checks=[
])
self.cmd('apim api list --resource-group "rg1" --service-name "apimService1"', checks=[
])
# get -- show
self.cmd('apim api show --resource-group "rg1" --service-name "apimService1" --api-id "petstore"', checks=[
])
self.cmd('apim api show --resource-group "rg1" --service-name "apimService1" --api-id "petstore"', checks=[
])
self.cmd('apim api show --resource-group "rg1" --service-name "apimService1" --api-id "petstore"', checks=[
])
self.cmd('apim api show --resource-group "rg1" --service-name "apimService1" --api-id "soapApi"', checks=[
])
self.cmd('apim api show --resource-group "rg1" --service-name "apimService1" --api-id "soapApi"', checks=[
])
self.cmd('apim api show --resource-group "rg1" --service-name "apimService1" --api-id "tempgroup"', checks=[
])
self.cmd('apim api show --resource-group "rg1" --service-name "apimService1" --api-id "echo-api;rev=3"', checks=[
])
self.cmd('apim api show --resource-group "rg1" --service-name "apimService1" --api-id "echoapiv3"', checks=[
])
self.cmd('apim api show --resource-group "rg1" --service-name "apimService1" --api-id "echo-api2"', checks=[
])
self.cmd('apim api show --resource-group "rg1" --service-name "apimService1" --api-id "tempgroup"', checks=[
])
self.cmd('apim api show --resource-group "rg1" --service-name "apimService1" --api-id "apidocs"', checks=[
])
self.cmd('apim api show --resource-group "rg1" --service-name "apimService1" --api-id "echo-api"', checks=[
])
self.cmd('apim api show --resource-group "rg1" --service-name "apimService1" --api-id "echo-api"', checks=[
])
# create_or_update -- create
        self.cmd('apim api release create --resource-group "rg1" --service-name "apimService1" --api-id "a1"
import numbers
import warnings
from collections import defaultdict
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
import numpy as np
from psims.xml import XMLWriterMixin, XMLDocumentWriter
from psims.utils import TableStateMachine
from .components import (
ComponentDispatcher, element,
default_cv_list, MzML, InstrumentConfiguration, IndexedMzML)
from .binary_encoding import (
encode_array, COMPRESSION_ZLIB,
encoding_map, compression_map, dtype_to_encoding)
from .utils import ensure_iterable
from .index import IndexingStream
from .element_builder import ElementBuilder, ParamManagingProperty
MZ_ARRAY = 'm/z array'
INTENSITY_ARRAY = 'intensity array'
DEFAULT_INTENSITY_UNIT = "number of detector counts"
CHARGE_ARRAY = 'charge array'
TIME_ARRAY = "time array"
DEFAULT_TIME_UNIT = "minute"
NON_STANDARD_ARRAY = 'non-standard data array'
ARRAY_TYPES = [
'm/z array',
'intensity array',
'charge array',
'signal to noise array',
'time array',
'wavelength array',
'flow rate array',
'pressure array',
'temperature array',
'mean drift time array',
'mean charge array',
'resolution array',
'baseline array'
]
class DocumentSection(ComponentDispatcher, XMLWriterMixin):
def __init__(self, section, writer, parent_context, section_args=None, **kwargs):
if section_args is None:
section_args = dict()
section_args.update(kwargs)
super(DocumentSection, self).__init__(parent_context)
self.section = section
self.writer = writer
self.section_args = section_args
def __enter__(self):
self.toplevel = element(self.writer, self.section, **self.section_args)
self.toplevel.__enter__()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.toplevel.__exit__(exc_type, exc_value, traceback)
self.writer.flush()
class SpectrumListSection(DocumentSection):
def __init__(self, writer, parent_context, section_args=None, **kwargs):
super(SpectrumListSection, self).__init__(
"spectrumList", writer, parent_context, section_args=section_args,
**kwargs)
self.section_args.setdefault("count", 0)
data_processing_method = self.section_args.pop(
"data_processing_method", None)
try:
self.section_args["defaultDataProcessingRef"] = self.context[
"DataProcessing"][data_processing_method]
except KeyError:
try:
self.section_args["defaultDataProcessingRef"] = list(
self.context["DataProcessing"].values())[0]
except IndexError:
warnings.warn(
"No Data Processing method found. mzML file may not be fully standard-compliant",
stacklevel=2)
class ChromatogramListSection(DocumentSection):
def __init__(self, writer, parent_context, section_args=None, **kwargs):
super(ChromatogramListSection, self).__init__(
"chromatogramList", writer, parent_context,
section_args=section_args, **kwargs)
self.section_args.setdefault("count", 0)
data_processing_method = self.section_args.pop(
"data_processing_method", None)
try:
self.section_args["defaultDataProcessingRef"] = self.context[
"DataProcessing"][data_processing_method]
except KeyError:
try:
self.section_args["defaultDataProcessingRef"] = list(
self.context["DataProcessing"].values())[0]
except IndexError:
warnings.warn(
"No Data Processing method found. mzML file may not be fully standard-compliant",
stacklevel=2)
class RunSection(DocumentSection):
"""Describes a `<run>` tag. Implemented as a section to provide a more
expressive API
"""
def __init__(self, writer, parent_context, section_args=None, **kwargs):
super(RunSection, self).__init__(
"run", writer, parent_context, section_args=section_args, **kwargs)
instrument_configuration_name = self.section_args.pop(
"instrument_configuration", None)
if instrument_configuration_name is not None:
self.section_args["defaultInstrumentConfigurationRef"] = self.context[
"InstrumentConfiguration"][instrument_configuration_name]
source_file_name = self.section_args.pop("source_file", None)
if source_file_name is not None:
self.section_args["defaultSourceFileRef"] = self.context[
"SourceFile"][source_file_name]
sample_id = self.section_args.pop("sample", None)
if sample_id is not None:
self.section_args["sampleRef"] = self.context['Sample'][sample_id]
class IndexedmzMLSection(DocumentSection):
def __init__(self, writer, parent_context, indexer, section_args=None, **kwargs):
super(IndexedmzMLSection, self).__init__(
'indexedmzML', writer, parent_context, section_args=section_args,
**kwargs)
self.toplevel = None
self.inner = None
self.indexer = indexer
def __enter__(self):
self.toplevel = element(self.writer, IndexedMzML())
self.toplevel.__enter__()
self.inner = element(self.writer, MzML(**self.section_args))
self.inner.__enter__()
def __exit__(self, exc_type, exc_value, traceback):
self.inner.__exit__(exc_type, exc_value, traceback)
self.writer.flush()
self.write_index()
self.toplevel.__exit__(exc_type, exc_value, traceback)
def write_index(self):
self.indexer.to_xml(self)
class PlainMzMLWriter(ComponentDispatcher, XMLDocumentWriter):
"""A high level API for generating mzML XML files from simple Python objects.
This class depends heavily on lxml's incremental file writing API which in turn
depends heavily on context managers. Almost all logic is handled inside a context
manager and in the context of a particular document. Since all operations assume
that they have access to a universal identity map for each element in the document,
that map is centralized in this class.
MzMLWriter inherits from :class:`.ComponentDispatcher`, giving it a :attr:`context`
attribute and access to all `Component` objects pre-bound to that context with attribute-access
notation.
Attributes
----------
chromatogram_count : int
A count of the number of chromatograms written
spectrum_count : int
A count of the number of spectra written
"""
DEFAULT_TIME_UNIT = DEFAULT_TIME_UNIT
DEFAULT_INTENSITY_UNIT = DEFAULT_INTENSITY_UNIT
def __init__(self, outfile, close=False, vocabularies=None, missing_reference_is_error=False,
vocabulary_resolver=None, id=None, accession=None, **kwargs):
if vocabularies is None:
vocabularies = []
vocabularies = list(default_cv_list) + list(vocabularies)
ComponentDispatcher.__init__(
self,
vocabularies=vocabularies,
vocabulary_resolver=vocabulary_resolver,
missing_reference_is_error=missing_reference_is_error)
XMLDocumentWriter.__init__(self, outfile, close, **kwargs)
self.id = id
self.accession = accession
self.spectrum_count = 0
self.chromatogram_count = 0
self.default_instrument_configuration = None
self.state_machine = TableStateMachine([
("start", ['controlled_vocabularies', ]),
("controlled_vocabularies", ['file_description', ]),
("file_description", ['reference_param_group_list', 'sample_list', 'software_list']),
("reference_param_group_list", ['sample_list', 'software_list']),
("sample_list", ['software_list', ]),
("software_list", ["scan_settings_list", 'instrument_configuration_list']),
("scan_settings_list", ['instrument_configuration_list', ]),
("instrument_configuration_list", ['data_processing_list']),
("data_processing_list", ['run']),
("run", ['spectrum_list', 'chromatogram_list']),
('spectrum_list', ['chromatogram_list']),
('chromatogram_list', [])
])
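        # The transition table above encodes the section order the writer enforces;
        # a typical document is written as controlled_vocabularies -> file_description
        # -> reference_param_group_list -> sample_list -> software_list ->
        # scan_settings_list -> instrument_configuration_list -> data_processing_list
        # -> run -> spectrum_list -> chromatogram_list, with reference_param_group_list,
        # sample_list and scan_settings_list being optional.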
def toplevel_tag(self):
return MzML(id=self.id, accession=self.accession)
def controlled_vocabularies(self):
"""Write out the `<cvList>` element and all its children,
including both this format's default controlled vocabularies
        and those passed as arguments to this method.
This method requires writing to have begun.
"""
self.state_machine.transition("controlled_vocabularies")
super(PlainMzMLWriter, self).controlled_vocabularies()
def software_list(self, software_list):
"""Writes the ``<softwareList>`` section of the document.
.. note::
List and descriptions of software used to acquire and/or process the
data in this mzML file
Parameters
----------
software_list : list
A list or other iterable of :class:`dict` or :class:`~.Software`-like objects
"""
self.state_machine.transition("software_list")
n = len(software_list)
if n:
software_list = [self.Software.ensure(sw) for sw in ensure_iterable(software_list)]
self.SoftwareList(software_list).write(self)
def file_description(self, file_contents=None, source_files=None, contacts=None):
r"""Writes the ``<fileDescription>`` section of the document.
.. note::
Information pertaining to the entire mzML file (i.e. not specific
to any part of the data set) is stored here.
Parameters
----------
file_contents : list, optional
A list or other iterable of :class:`str`, :class:`dict`, or \*Param-types which will
be placed in the ``<fileContent>`` element.
source_files : list
A list or other iterable of dict or :class:`~.SourceFile`-like objects
            to be placed in the ``<sourceFileList>`` element
        contacts : list, optional
            A list or other iterable of :class:`dict` or :class:`~.Contact`-like objects
            to be placed in the ``<contact>`` elements
        """
self.state_machine.transition("file_description")
fd = self.FileDescription(
file_contents, [self.SourceFile.ensure(sf) for sf in ensure_iterable(source_files)],
contacts=[self.Contact.ensure(c) for c in ensure_iterable(contacts)])
fd.write(self.writer)
def instrument_configuration_list(self, instrument_configurations):
"""Writes the ``<instrumentConfigurationList>`` section of the document.
.. note::
List and descriptions of instrument configurations. At least one instrument configuration MUST
be specified, even if it is only to specify that the instrument is unknown. In that case, the
"instrument model" term is used to indicate the unknown instrument in the instrumentConfiguration
Parameters
----------
instrument_configurations : list
A list or other iterable of :class:`dict` or :class:`~.InstrumentConfiguration`-like
objects
"""
self.state_machine.transition("instrument_configuration_list")
configs = [
self.InstrumentConfiguration.ensure(ic) if not isinstance(
ic, InstrumentConfiguration) else ic
for ic in ensure_iterable(
instrument_configurations)]
self.InstrumentConfigurationList(configs).write(self)
def data_processing_list(self, data_processing):
"""Writes the ``<dataProcessingList>`` section of the document.
.. note::
List and descriptions of data processing applied to this data
Parameters
----------
data_processing : list
A list or other iterable of :class:`dict` or :class:`~.DataProcessing`-like
objects
"""
self.state_machine.transition("data_processing_list")
methods = [
self.DataProcessing.ensure(dp) for dp in ensure_iterable(data_processing)]
self.DataProcessingList(methods).write(self)
def reference_param_group_list(self, groups):
"""Writes the ``<referenceableParamGroupList>`` section of the document.
Parameters
----------
groups : list
A list or other iterable of :class:`dict` or :class:`~.ReferenceableParamGroup`-like
objects
"""
self.state_machine.transition("reference_param_group_list")
groups = [
self.ReferenceableParamGroup.ensure(g) for g in ensure_iterable(groups)]
self.ReferenceableParamGroupList(groups).write(self)
def sample_list(self, samples):
"""Writes the ``<sampleList>`` section of the document
Parameters
----------
samples : list
A list or other iterable of :class:`dict` or :class:`~.mzml.components.Sample`-like
objects
"""
self.state_machine.transition("sample_list")
for i, sample in enumerate(ensure_iterable(samples)):
if isinstance(sample, Mapping):
sample_id = sample.get('id')
sample_name = sample.get("name")
if sample_id is None and sample_name is not None:
sample_id = "%s_%d_id" % (sample_name, i)
elif sample_id is not None and sample_name is None:
sample_name = str(sample_id)
elif sample_id is sample_name is None:
sample_id = "sample_%d_id" % (i,)
sample_name = "sample_%d" % (i,)
sample['id'] = sample_id
sample['name'] = sample_name
sample_entries = self.Sample.ensure_all(samples)
self.SampleList(sample_entries).write(self)
def scan_settings_list(self, scan_settings):
self.state_machine.transition("scan_settings_list")
scan_settings = self.ScanSettings.ensure_all(scan_settings)
self.ScanSettingsList(scan_settings).write(self)
def run(self, id=None, instrument_configuration=None, source_file=None, start_time=None, sample=None):
"""Begins the `<run>` section of the document, describing a single
sample run.
Parameters
----------
id : str, optional
The unique identifier for this element
instrument_configuration : str, optional
The id string for the default `InstrumentConfiguration` for this
sample
source_file : str, optional
The id string for the source file used to produce this data
start_time : str, optional
A string encoding the date and time the sample was acquired
sample: str, optional
The id string for the sample used to produce this data
Returns
-------
RunSection
"""
self.state_machine.transition("run")
kwargs = {}
if start_time is not None:
kwargs['startTimeStamp'] = start_time
if instrument_configuration is None:
keys = list(self.context['InstrumentConfiguration'].keys())
if keys:
instrument_configuration = keys[0]
else:
instrument_configuration = None
self.default_instrument_configuration = instrument_configuration
return RunSection(
self.writer, self.context, id=id,
instrument_configuration=instrument_configuration,
source_file=source_file,
sample=sample, **kwargs)
def spectrum_list(self, count, data_processing_method=None):
self.state_machine.transition('spectrum_list')
if data_processing_method is None:
dp_map = self.context['DataProcessing']
try:
data_processing_method = list(dp_map.keys())[0]
except IndexError:
warnings.warn(
"No Data Processing method found. mzML file may not be fully standard-compliant",
stacklevel=2)
return SpectrumListSection(
self.writer, self.context, count=count,
data_processing_method=data_processing_method)
def chromatogram_list(self, count, data_processing_method=None):
self.state_machine.transition('chromatogram_list')
if data_processing_method is None:
dp_map = self.context['DataProcessing']
try:
data_processing_method = list(dp_map.keys())[0]
except IndexError:
warnings.warn(
"No Data Processing method found. mzML file may not be fully standard-compliant",
stacklevel=2)
return ChromatogramListSection(
self.writer, self.context, count=count,
data_processing_method=data_processing_method)
def spectrum(self, mz_array=None, intensity_array=None, charge_array=None, id=None,
polarity='positive scan', centroided=True, precursor_information=None,
scan_start_time=None, params=None, compression=COMPRESSION_ZLIB,
                 encoding=None,
# encoding: utf-8
#
# mschtml.py
# Managed Software Center
#
# Created by <NAME> on 2/24/14.
#
import os
from operator import itemgetter
from random import shuffle
from string import Template
from unicodedata import normalize
import MunkiItems
import msclib
import msclog
import munki
from AppKit import NSApp
from Foundation import NSBundle
from Foundation import NSString, NSLocalizedString, NSUTF8StringEncoding
def quote(a_string):
'''Replacement for urllib.quote that handles Unicode strings'''
return str(NSString.stringWithString_(
a_string).stringByAddingPercentEscapesUsingEncoding_(
NSUTF8StringEncoding))
def unquote(a_string):
'''Replacement for urllib.unquote that handles Unicode strings'''
return str(NSString.stringWithString_(
a_string).stringByReplacingPercentEscapesUsingEncoding_(
NSUTF8StringEncoding))
def get_template(template_name, raw=False):
'''return an html template. If raw is True, just return the string; otherwise
return a string Template object'''
customTemplatesPath = os.path.join(msclib.html_dir(), 'custom/templates')
resourcesPath = NSBundle.mainBundle().resourcePath()
defaultTemplatesPath = os.path.join(resourcesPath, 'templates')
for directory in [customTemplatesPath, defaultTemplatesPath]:
templatePath = os.path.join(directory, template_name)
if os.path.exists(templatePath):
try:
file_ref = open(templatePath)
template_html = file_ref.read()
file_ref.close()
if raw:
return template_html.decode('utf-8')
else:
return Template(template_html.decode('utf-8'))
except (IOError, OSError):
return None
return None
def build_page(filename):
'''Dispatch request to build a page to the appropriate function'''
msclog.debug_log(u'build_page for %s' % filename)
name = os.path.splitext(filename)[0]
key, p, value = name.partition('-')
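    # e.g. filename 'detail-Firefox.html' gives name 'detail-Firefox',
    # so key == 'detail' and value == 'Firefox'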
if key == 'detail':
build_detail_page(value)
elif key == 'category':
build_list_page(category=value)
elif key == 'categories':
build_categories_page()
elif key == 'filter':
build_list_page(filter=value)
elif key == 'developer':
build_list_page(developer=value)
elif key == 'myitems':
build_myitems_page()
elif key == 'updates':
build_updates_page()
elif key == 'updatedetail':
build_updatedetail_page(value)
else:
build_item_not_found_page(filename)
def write_page(page_name, html):
'''write html to page_name in our local html directory'''
html_file = os.path.join(msclib.html_dir(), page_name)
try:
f = open(html_file, 'w')
f.write(html.encode('utf-8'))
f.close()
except (OSError, IOError), err:
msclog.debug_log('write_page error: %s', str(err))
raise
def assemble_page(main_page_template_name, page_dict, **kwargs):
'''Returns HTML for our page from one or more templates
and a dictionary of keys and values'''
# make sure our general labels are present
addGeneralLabels(page_dict)
# get our main template
main_page = get_template(main_page_template_name)
# incorporate any sub-templates
html_template = Template(main_page.safe_substitute(**kwargs))
# substitute page variables
html = html_template.safe_substitute(page_dict)
return html
def generate_page(page_name, main_page_template_name, page_dict, **kwargs):
'''Assembles HTML and writes the page to page_name in our local html directory'''
msclog.debug_log('generate_page for %s' % page_name)
html = assemble_page(main_page_template_name, page_dict, **kwargs)
write_page(page_name, html)
def escape_quotes(text):
"""Escape single and double-quotes for JavaScript"""
return text.replace("'", r"\'").replace('"', r'\"')
def escape_html(text):
"""Convert some problematic characters to entities"""
html_escape_table = {
"&": "&",
'"': """,
"'": "'",
">": ">",
"<": "<",
}
return "".join(html_escape_table.get(c, c) for c in text)
def escapeAndQuoteCommonFields(item):
'''Adds _escaped and _quoted versions of several commonly-used fields'''
item['name_escaped'] = escape_html(item['name'])
item['name_quoted'] = escape_html(escape_quotes(item['name']))
item['display_name_escaped'] = escape_html(item['display_name'])
item['developer_escaped'] = escape_html(item['developer'])
item['display_version_escaped'] = escape_html(item['display_version'])
def addGeneralLabels(page):
'''adds localized labels for Software, Categories, My Items and Updates to html pages'''
page['SoftwareLabel'] = NSLocalizedString(u"Software", u"Software label")
page['CategoriesLabel'] = NSLocalizedString(u"Categories", u"Categories label")
page['MyItemsLabel'] = NSLocalizedString(u"My Items", u"My Items label")
page['UpdatesLabel'] = NSLocalizedString(u"Updates", u"Updates label")
def addDetailSidebarLabels(page):
'''adds localized labels for the detail view sidebars'''
page['informationLabel'] = NSLocalizedString(
u"Information",
u"Sidebar Information label")
page['categoryLabel'] = NSLocalizedString(
u"Category:",
u"Sidebar Category label")
page['versionLabel'] = NSLocalizedString(
u"Version:",
u"Sidebar Version label")
page['sizeLabel'] = NSLocalizedString(
u"Size:",
u"Sidebar Size label")
page['developerLabel'] = NSLocalizedString(
u"Developer:",
u"Sidebar Developer label")
page['statusLabel'] = NSLocalizedString(
u"Status:", u"Sidebar Status label")
page['moreByDeveloperLabel'] = NSLocalizedString(
u"More by %s",
u"Sidebar More By Developer label")
page['moreInCategoryLabel'] = NSLocalizedString(
u"More in %s",
u"Sidebar More In Category label")
page['typeLabel'] = NSLocalizedString(
u"Type:", u"Sidebar Type label")
page['dueLabel'] = NSLocalizedString(
u"Due:", u"Sidebar Due label")
def build_item_not_found_page(page_name):
'''Build item not found page'''
page = {}
page['item_not_found_title'] = NSLocalizedString(
u"Not Found", u"Item Not Found title")
page['item_not_found_message'] = NSLocalizedString(
u"Cannot display the requested item.", u"Item Not Found message")
footer = get_template('footer_template.html', raw=True)
generate_page(page_name, 'page_not_found_template.html', page, footer=footer)
def build_detail_page(item_name):
'''Build page showing detail for a single optional item'''
msclog.debug_log('build_detail_page for %s' % item_name)
items = MunkiItems.getOptionalInstallItems()
page_name = u'detail-%s.html' % item_name
for item in items:
if item['name'] == item_name:
# make a copy of the item to use to build our page
page = MunkiItems.OptionalItem(item)
escapeAndQuoteCommonFields(page)
addDetailSidebarLabels(page)
# make "More in CategoryFoo" list
page['hide_more_in_category'] = u'hidden'
more_in_category_html = u''
more_in_category = []
if item.get('category'):
category = item['category']
page['category_link'] = u'category-%s.html' % quote(category)
more_in_category = [a for a in items
if a.get('category') == category
and a != item
and a.get('status') != 'installed']
if more_in_category:
page['hide_more_in_category'] = u''
page['moreInCategoryLabel'] = page['moreInCategoryLabel'] % page['category']
shuffle(more_in_category)
more_template = get_template('detail_more_items_template.html')
for more_item in more_in_category[:4]:
more_item['display_name_escaped'] = escape_html(more_item['display_name'])
more_item['second_line'] = more_item.get('developer', '')
more_in_category_html += more_template.safe_substitute(more_item)
page['more_in_category'] = more_in_category_html
# make "More by DeveloperFoo" list
page['hide_more_by_developer'] = u'hidden'
more_by_developer_html = u''
more_by_developer = []
if item.get('developer'):
developer = item['developer']
page['developer_link'] = u'developer-%s.html' % quote(developer)
more_by_developer = [a for a in items
if a.get('developer') == developer
and a != item
and a not in more_in_category
and a.get('status') != 'installed']
if more_by_developer:
page['hide_more_by_developer'] = u''
page['moreByDeveloperLabel'] = (
page['moreByDeveloperLabel'] % developer)
shuffle(more_by_developer)
more_template = get_template(
'detail_more_items_template.html')
for more_item in more_by_developer[:4]:
escapeAndQuoteCommonFields(more_item)
more_item['second_line'] = more_item.get('category', '')
more_by_developer_html += more_template.safe_substitute(more_item)
page['more_by_developer'] = more_by_developer_html
footer = get_template('footer_template.html', raw=True)
generate_page(page_name, 'detail_template.html', page, footer=footer)
return
msclog.debug_log('No detail found for %s' % item_name)
build_item_not_found_page(page_name)
def build_list_page(category=None, developer=None, filter=None):
'''Build page listing available optional items'''
items = MunkiItems.getOptionalInstallItems()
header = NSLocalizedString(u"All items", u"AllItemsHeaderText")
page_name = u'category-all.html'
if category == 'all':
category = None
if category:
header = category
page_name = u'category-%s.html' % category
if developer:
header = developer
page_name = u'developer-%s.html' % developer
if filter:
header = u'Search results for %s' % filter
page_name = u'filter-%s.html' % filter
category_list = []
for item in items:
if 'category' in item and item['category'] not in category_list:
category_list.append(item['category'])
item_html = build_list_page_items_html(
category=category, developer=developer, filter=filter)
# make HTML for Categories pop-up menu
all_categories_label = NSLocalizedString(u"All Categories", u"AllCategoriesLabel")
if category:
categories_html = u'<option>%s</option>\n' % all_categories_label
else:
categories_html = u'<option selected>%s</option>\n' % all_categories_label
for item in sorted(category_list):
if item == category:
categories_html += u'<option selected>%s</option>\n' % item
else:
categories_html += u'<option>%s</option>\n' % item
categories_html_list = ''
# make HTML for list of categories
for item in sorted(category_list):
categories_html_list += (u'<li class="link"><a href="category-%s.html">%s</a></li>\n'
% (quote(item), item))
page = {}
page['list_items'] = item_html
page['category_items'] = categories_html
page['category_list'] = categories_html_list
page['header_text'] = header
if category or filter or developer:
showcase = ''
else:
showcase = get_template('showcase_template.html', raw=True)
sidebar = get_template('sidebar_template.html', raw=True)
footer = get_template('footer_template.html', raw=True)
generate_page(page_name, 'list_template.html', page,
showcase=showcase, sidebar=sidebar, footer=footer)
def build_list_page_items_html(category=None, developer=None, filter=None):
'''Returns HTML for the items on the list page'''
items = MunkiItems.getOptionalInstallItems()
item_html = u''
if filter:
# since the filter term came through the filesystem,
# HFS+ does some unicode character decomposition which can cause issues with comparisons
# so before we do our comparison, we normalize the unicode string
# using unicodedata.normalize
filter = normalize('NFC', filter)
msclog.debug_log(u'Filtering on %s' % filter)
items = [item for item in items
if filter in item['display_name'].lower()
or filter in item['description'].lower()
or filter in item['developer'].lower()
or filter in item['category'].lower()]
if category:
items = [item for item in items
if category.lower() in item.get('category', '').lower()]
if developer:
items = [item for item in items
if developer.lower() in item.get('developer', '').lower()]
if items:
item_template = get_template('list_item_template.html')
for item in sorted(items, key=itemgetter('display_name_lower')):
escapeAndQuoteCommonFields(item)
item['category_and_developer_escaped'] = escape_html(item['category_and_developer'])
item_html += item_template.safe_substitute(item)
# pad with extra empty items so we have a multiple of 3
if len(items) % 3:
for x in range(3 - (len(items) % 3)):
item_html += u'<div class="lockup"></div>\n'
else:
# no items; build appropriate alert messages
status_results_template = get_template('status_results_template.html')
alert = {}
if filter:
alert['primary_status_text'] = NSLocalizedString(
u"Your search had no results.",
u"No Search Results primary text")
alert['secondary_status_text'] = NSLocalizedString(
u"Try searching again.", u"No Search Results secondary text")
elif category:
alert['primary_status_text'] = NSLocalizedString(
u"There are no items in this category.",
u"No Category Results primary text")
alert['secondary_status_text'] = NSLocalizedString(
u"Try selecting another category.",
u"No Category Results secondary text")
elif developer:
alert['primary_status_text'] = NSLocalizedString(
u"There are no items from this developer.",
u"No Developer Results primary text")
alert['secondary_status_text'] = NSLocalizedString(
u"Try selecting another developer.",
u"No Developer Results secondary text")
else:
alert['primary_status_text'] = NSLocalizedString(
u"There are no available software items.",
u"No Items primary text")
alert['secondary_status_text'] = NSLocalizedString(
u"Try again later.",
u"No Items secondary text")
alert['hide_progress_bar'] = u'hidden'
alert['progress_bar_value'] = u''
item_html = status_results_template.safe_substitute(alert)
return item_html
def build_categories_page():
'''Build page showing available categories and some items in each one'''
all_items = MunkiItems.getOptionalInstallItems()
header = NSLocalizedString(u"Categories", u"Categories label")
page_name = u'categories.html'
category_list = []
for item in all_items:
        if 'category' in item and
#!/usr/bin/env python3
#
# pico.workflow.executor - manages the execution of workflows
#
# Background
#
# The Workflow and Executor classes were factored out of BAP.py when its
# execution logic became too unwieldy. They are simple implementations
# of a generic workflow definition language and execution engine.
#
# This module defines the Executor and Task classes. A task is a single
# Service execution within a Workflow execution. A Task subclass is
# returned by the Service.execute() implementation on the shim.
#
# The Task subclass must implement virtual method report() to report its
# backend status.
#
# How it works (see pseudo-code below):
#
# An Executor instance controls a single run of a pipeline from start to end.
# It does this by starting services according to the state of a workflow,
# encapsulated in the .logic.Workflow object passed to it.
#
# At any time, the Workflow will indicate which services are 'runnable'. The
# executor then invokes the execute() method on the service, passing in the
# blackboard and job scheduler. The service returns the Task object that
# the executor monitors.
#
# The Executor polls the scheduler, which polls the backends, for their
# updated status. Each Task is then queried by the Executor for its current
# state. When a Task completes or fails, the executor updates the Workflow.
# The Workflow object may then present next runnable services, until the
# workflow execution as a whole has completed.
#
# The implementation is poll-based because the legacy backends run as async
# processes, and the Python docs recommend against combining threads and
# processes (otherwise a thread & event model would have been more obvious).
# The polling frequency is set in the JobScheduler.
#
# Conceptual mode of use:
#
# blackboard = .blackboard.Blackboard()
# blackboard['inputs'] = { 'contigs': '/path/to/inputs', ... }
# workflow = .logic.Workflow(deps, inputs, targets)
# executor = .executor.Executor(services, scheduler)
#
# status = executor.execute(workflow, blackboard, execution_id)
# results = blackboard.get(...)
#
import enum
from .logic import Workflow, Services as ServicesEnum
### class Task
#
# Base class for single service execution in a single workflow execution.
# It is returned by the service shim's execute() method.
# Subclasses must implement its report() method to retrieve the backend status
# and wrangle its output onto the blackboard.
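#
# A minimal sketch of a concrete Task, for illustration only (the `job` handle and
# its done/failed/error attributes are hypothetical; a real shim wraps whatever its
# backend returns and moves results onto the blackboard inside report()):
#
#     class MyServiceTask(Task):
#         def __init__(self, sid, xid, job):
#             super().__init__(sid, xid)
#             self._job = job
#             self._transition(Task.State.STARTED)
#
#         def report(self):
#             if self._job.failed:
#                 return self.fail('backend error: %s', self._job.error)
#             elif self._job.done:
#                 return self.done()
#             return self.state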
class Task:
'''Base class for a service execution within a workflow execution,
maintains the execution state.'''
class State(enum.Enum):
STARTED = 'STARTED'
COMPLETED = 'COMPLETED'
FAILED = 'FAILED'
_sid = None # Service ID
_xid = None # Workflow execution ID
_state = None # Current state of the service execution
_error = None # Set to error string on execution failure
def __init__(self, sid, xid):
'''Construct a task of service sid within workflow execution xid.
Xid is None when there can be only one workflow invocation.'''
self._sid = sid
self._xid = xid
@property
def sid(self):
'''Service ID of which this is an execution.'''
return self._sid
@property
def xid(self):
'''Workflow execution ID of which this is a task.'''
return self._xid
@property
def id(self):
'''Unique ID of this Task within the current Executor run. Is
simply the tuple (sid,xid).'''
return (self._sid, self._xid)
# @property
# def id_str(self):
# '''Human representation of this task: the string sid[xid],
# or if xid is None (legacy), then just sid.'''
# return self._sid if not self._xid else '%s[%s]' % self.id
@property
def state(self):
'''Current state of the Task, a Task.State value.'''
return self._state
@property
def error(self):
'''May hold an error string if the task failed.'''
return self._error
def report(self):
'''Pure virtual, here to signal that subclasses must implement this.'''
raise NotImplementedError()
def fail(self, err_fmt, *args):
'''Transition this task to FAILED and set its error message.
Invokes self._transition(Task.State.FAILED, err_fmt % args),
which will conveniently be the subclass method if overridden.'''
return self._transition(Task.State.FAILED, err_fmt % args)
def done(self):
'''Mark this task COMPLETED.
Invokes self._transition(Task.State.COMPLETED), which will
        conveniently be the subclass method if overridden.'''
return self._transition(Task.State.COMPLETED)
def _transition(self, new_state, error = None):
'''Update task state to new_state, setting the error iff the new
state is FAILED, intended for subclasses to extend.'''
if new_state == Task.State.FAILED and not error:
id_str = self._sid if not self._xid else '%s[%s]' % self.id
raise ValueError('FAILED task %s must set its error' % id_str)
self._state = new_state
self._error = error if new_state == Task.State.FAILED else None
return new_state
### class Executor
#
# Executes a Workflow.
class Executor:
'''Runs a Workflow from start to end, using a list of Service implementations.'''
_services = None
_scheduler = None
_tasks = dict() # Holds the running and completed service executions
def __init__(self, services, scheduler):
'''Construct executor instance the with the given services
(a dict of id -> WorkflowService mappings) and scheduler.'''
# Type check our arguments to avoid confusion
for k,v in services.items():
assert isinstance(k, ServicesEnum)
assert hasattr(v, 'execute')
self._services = services
        self._scheduler = scheduler
        self._tasks = dict()  # per-instance (sid, xid) -> Task map, so instances do not share the class-level dict
def execute(self, workflow, blackboard, xid = None):
'''Execute the workflow, using the blackboard, with optional execution id.'''
# Type check our arguments to avoid confusion
assert isinstance (workflow, Workflow)
wx_name = "workflow execution" if not xid else "workflow execution %s" % xid
        # The shared blackboard is used for communication between services
blackboard.log("executor starting %s", wx_name)
# Obtain the status of the Workflow object to control our execution
wx_status = workflow.status
blackboard.log("%s status: %s", wx_name, wx_status.value)
assert wx_status != Workflow.Status.WAITING, "no services were started yet"
# We run as long as there are runnable or running services in the Workflow
while wx_status in [ Workflow.Status.RUNNABLE, Workflow.Status.WAITING ]:
# Check that the Workflow and our idea of runnable and running match
self.assert_cross_check(workflow, xid)
more_jobs = True
# Pick the first runnable off the runnables list, if any
runnable = workflow.list_runnable()
if runnable:
# Look up the service and start it
sid = runnable[0]
self.start_task(sid, xid, workflow, blackboard)
else:
# Nothing runnable, wait on the scheduler for job to end
more_jobs = self._scheduler.listen()
# Update all started tasks with job state
for tid, task in self._tasks.items():
if task.state == Task.State.STARTED:
self.poll_task(tid, workflow, blackboard)
# Update our status by querying the Workflow
old_wx_status, wx_status = wx_status, workflow.status
if old_wx_status != wx_status:
blackboard.log("%s status: %s", wx_name, wx_status.value)
# Defensive programming: if scheduler has no more job but we think we
# are still WAITING we would get into a tight infinite loop
if not more_jobs and wx_status == Workflow.Status.WAITING:
raise Exception('fatal inconsistency between %s and scheduler' % wx_name)
# Workflow is done, log result
str_done = ', '.join(map(lambda s: s.value, workflow.list_completed()))
str_fail = ', '.join(map(lambda s: s.value, workflow.list_failed()))
str_skip = ', '.join(map(lambda s: s.value, workflow.list_skipped()))
blackboard.log("%s completed", wx_name)
blackboard.log("- done: %s", str_done if str_done else "(none)")
blackboard.log("- failed: %s", str_fail if str_fail else "(none)")
blackboard.log("- skipped: %s", str_skip if str_skip else "(none)")
return wx_status
def start_task(self, sid, xid, wf, bb):
'''Start the execution of a service within a workflow execution.
Actual startup should be asynchronous, but the service shim will
return a state we use to update our status.'''
service = self._services.get(sid)
if not service:
raise ValueError("no implementation for service id: %s" % sid.value)
tid = (sid, xid)
tshow = '%s[%s]' % (sid.value,xid) if xid else sid.value
try:
task = service.execute(sid.value, xid, bb, self._scheduler)
bb.log("task start: %s" % tshow)
self._tasks[tid] = task
self.update_state(wf, tid, task.state, bb)
except Exception as e:
bb.log("task skipped: %s: %s", tshow, str(e))
wf.mark_skipped(tid[0])
def poll_task(self, tid, wf, bb):
'''Poll the task for its current status. This is a non-blocking call on
the task to check the backend state, then update wf if applicable.'''
task = self._tasks.get(tid)
tshow = '%s[%s]' % (tid[0].value,tid[1]) if tid[1] else tid[0].value
if not task:
raise ValueError("no such task: %s" % tshow)
old_state = task.state
new_state = task.report()
if new_state != old_state:
self.update_state(wf, tid, new_state, bb)
def update_state(self, wf, tid, state, bb):
'''Update the executing/ed task and workflow with new state.'''
sid, xid = tid
tshow = '%s[%s]' % (sid.value, xid) if xid else sid.value
bb.log("task %s: %s", state.value.lower(), tshow)
if state == Task.State.STARTED:
wf.mark_started(sid)
elif state == Task.State.COMPLETED:
wf.mark_completed(sid)
elif state == Task.State.FAILED:
wf.mark_failed(sid)
else:
raise ValueError("invalid task state for %s: %s" % (tshow, state))
def assert_cross_check(self, wf, xid):
        '''Cross check
from base64 import b64decode, b64encode
from datetime import datetime, timezone
from email import utils
from re import findall, fullmatch
from typing import Dict, List, Optional, Tuple, Union
from urllib.parse import quote as url_quote
from kiss_headers.models import Header
from kiss_headers.utils import (
class_to_header_name,
header_content_split,
prettify_header_name,
quote,
unquote,
)
"""
Use https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/ to create subclasses of CustomHeader.
"""
class CustomHeader(Header):
"""
This class is a helper to create ready-to-use Header object with creation assistance.
Should NOT be instantiated.
    Use this class as a direct parent for creating ready-to-use header objects. Take inspiration from the classes
    already defined below this one.
"""
    __squash__: bool = False  # This value indicates whether the representation of multiple entries should be squashed into one content.
__tags__: List[str] = []
__override__: Optional[
str
] = None # Create this static member in your custom header when the class name does not match the target header.
def __init__(self, initial_content: str = "", **kwargs: Optional[str]):
"""
:param initial_content: Initial content of the Header if any.
        :param kwargs: Provided args. Any key associated with a None value is simply ignored.
"""
if self.__class__ == CustomHeader:
raise NotImplementedError(
"You can not instantiate CustomHeader class. You may create first your class that inherit it."
)
super().__init__(
class_to_header_name(self.__class__)
if not self.__class__.__override__
else prettify_header_name(self.__class__.__override__),
initial_content,
)
for attribute, value in kwargs.items():
if value is None:
continue
self[attribute] = value
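# Hedged illustration (not part of the original module): one way to define a new
# ready-to-use header by subclassing CustomHeader, as described in its docstring.
# The header name and attribute are hypothetical; __override__ is shown only to
# make the target header name explicit.
class XExampleTraceId(CustomHeader):
    """Sketch of a custom request header carrying an opaque trace identifier."""

    __tags__: List[str] = ["request"]
    __override__: Optional[str] = "X-Example-Trace-Id"

    def __init__(self, trace_id: str, **kwargs: Optional[str]):
        """
        :param trace_id: Opaque identifier propagated with the request.
        """
        super().__init__(trace_id, **kwargs)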
class ContentSecurityPolicy(CustomHeader):
"""
    Content-Security-Policy is the name of an HTTP response header
    that modern browsers use to enhance the security of the document (or web page).
    The Content-Security-Policy header allows you to restrict which resources, such as
    JavaScript, CSS, or pretty much anything else that the browser loads, can be loaded and from where.
"""
__tags__ = ["response"]
def __init__(self, *policies: List[str]):
"""
        :param policies: One policy consists of a list of str like ["default-src", "'none'"].
>>> header = ContentSecurityPolicy(["default-src", "'none'"], ["img-src", "'self'", "img.example.com"])
>>> repr(header)
"Content-Security-Policy: default-src 'none'; img-src 'self' img.example.com"
>>> header.get_policies_names()
['default-src', 'img-src']
>>> header.get_policy_args("img-src")
["'self'", 'img.example.com']
"""
super().__init__("")
for policy in policies:
if len(policy) == 0 or policy[0] not in {
"default-src",
"script-src",
"style-src",
"img-src",
"connect-src",
"font-src",
"object-src",
"media-src",
"frame-src",
"sandbox",
"report-uri",
"child-src",
"form-action",
"frame-ancestors",
"plugin-types",
"base-uri",
"report-to",
"worker-src",
"manifest-src",
"prefetch-src",
"navigate-to",
}:
raise ValueError(
f"Policy {policy[0]} is not a valid one. See https://content-security-policy.com/ for instructions."
)
elif len(policy) == 1:
raise ValueError(
f"Policy {policy[0]} need at least one argument to proceed."
)
self += " ".join(policy) # type: ignore
def get_policies_names(self) -> List[str]:
"""Fetch a list of policy name set in content."""
return [member.split(" ")[0] for member in self.attrs]
def get_policy_args(self, policy_name: str) -> Optional[List[str]]:
"""Retrieve given arguments for a policy."""
policy_name = policy_name.lower()
for member in self.attrs:
parts: List[str] = member.split(" ")
if parts[0].lower() == policy_name:
return parts[1:]
return None
class Accept(CustomHeader):
"""
The Accept request HTTP header advertises which content types, expressed as MIME types,
the client is able to understand. Using content negotiation, the server then selects one of
the proposals, uses it and informs the client of its choice with the Content-Type response header.
"""
__squash__: bool = True
__tags__: List[str] = ["request"]
def __init__(
self, mime: str = "*/*", qualifier: float = 1.0, **kwargs: Optional[str],
):
"""
:param mime: Describe the MIME using this syntax <MIME_type/MIME_subtype>
        :param qualifier: Any value used is placed in an order of preference expressed using a relative quality value called the weight.
:param kwargs:
>>> header = Accept("text/html", qualifier=0.8)
>>> header.content
'text/html; q="0.8"'
>>> repr(header)
'Accept: text/html; q="0.8"'
"""
if len(mime.split("/")) != 2:
raise ValueError(
f"The MIME should be described using this syntax <MIME_type/MIME_subtype> not '{mime}'"
)
args: Dict = {"q": qualifier if qualifier < 1.0 else None}
args.update(kwargs)
super().__init__(
mime, **args,
)
def get_mime(self) -> Optional[str]:
"""Return defined mime in current accept header."""
for el in self.attrs:
if "/" in el:
return el
return None
def get_qualifier(self, _default: Optional[float] = 1.0) -> Optional[float]:
"""Return defined qualifier for specified mime. If not set, output 1.0."""
return float(str(self["q"])) if self.has("q") else _default
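# Hedged usage sketch (not part of the original module): exercises the Accept
# accessors defined above; the MIME and qualifier values are arbitrary examples
# analogous to the doctest in Accept.__init__.
def _example_accept_usage() -> None:
    accept = Accept("application/json", qualifier=0.5)
    assert accept.get_mime() == "application/json"
    assert accept.get_qualifier() == 0.5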
class ContentType(CustomHeader):
"""
The Content-Type entity header is used to indicate the media type of the resource.
In responses, a Content-Type header tells the client what the content type of the returned content actually is.
Browsers will do MIME sniffing in some cases and will not necessarily follow the value of this header;
to prevent this behavior, the header X-Content-Type-Options can be set to nosniff.
"""
__tags__: List[str] = ["request", "response"]
def __init__(
self,
mime: str,
charset: Optional[str] = None,
format_: Optional[str] = None,
boundary: Optional[str] = None,
**kwargs: Optional[str],
):
"""
        :param mime: The MIME type of the resource or the data. Format <MIME_type>/<MIME_subtype>.
:param charset: The character encoding standard. Should be an IANA name.
:param format_: Mostly used in IMAP, could be one of : original or flowed.
:param boundary: For multipart entities the boundary directive is required, which consists of 1 to 70 characters from a set of characters known to be very robust through email gateways, and not ending with white space. It is used to encapsulate the boundaries of the multiple parts of the message.
:param kwargs:
>>> header = ContentType("text/html", charset="utf-8")
>>> repr(header)
'Content-Type: text/html; charset="UTF-8"'
>>> header.get_charset()
'UTF-8'
>>> header.get_mime()
'text/html'
"""
if len(mime.split("/")) != 2:
raise ValueError(
f"The MIME should be described using this syntax <MIME_type/MIME_subtype> not '{mime}'"
)
args: Dict = {
"charset": charset.upper() if charset else None,
"format": format_,
"boundary": boundary,
}
args.update(kwargs)
super().__init__(mime, **args)
def get_mime(self) -> Optional[str]:
"""Return defined mime in content type."""
for el in self.attrs:
if "/" in el:
return el
return None
def get_charset(self, _default: Optional[str] = "ISO-8859-1") -> Optional[str]:
"""Extract defined charset, if not present will return 'ISO-8859-1' by default."""
return str(self["charset"]) if self.has("charset") else _default
class XContentTypeOptions(CustomHeader):
"""
The X-Content-Type-Options response HTTP header is a marker used by the server to indicate that
the MIME types advertised in the Content-Type headers should not be changed and be followed.
    This allows opting out of MIME type sniffing; in other words, it is a way to say that
the webmasters knew what they were doing.
"""
__tags__: List[str] = ["response"]
def __init__(self, nosniff: bool = True, **kwargs: Optional[str]):
"""
:param nosniff: see https://fetch.spec.whatwg.org/#x-content-type-options-header
:param kwargs:
>>> header = XContentTypeOptions(nosniff=True)
>>> repr(header)
'X-Content-Type-Options: nosniff'
"""
super().__init__("nosniff" if nosniff else "", **kwargs)
class ContentDisposition(CustomHeader):
"""
    In a regular HTTP response, the Content-Disposition response header indicates
    whether the content is expected to be displayed inline in the browser, that is, as a Web page or
    as part of a Web page, or as an attachment that is downloaded and saved locally.
"""
__tags__: List[str] = ["request", "response"]
def __init__(
self,
disposition: str = "inline",
name: Optional[str] = None,
filename: Optional[str] = None,
fallback_filename: Optional[str] = None,
boundary: Optional[str] = None,
**kwargs: Optional[str],
):
"""
:param disposition: Could be either inline, form-data, attachment or empty. Choose one. Default to inline.
:param name: Is a string containing the name of the HTML field in the form that the content of this subpart refers to.
        :param filename: Is a string containing the original name of the file transmitted. The filename is always optional and must not be used blindly by the application. US-ASCII only.
        :param fallback_filename: Fallback filename if the filename parameter does not use the encoding defined in RFC 5987.
:param boundary: For multipart entities the boundary directive is required, which consists of 1 to 70 characters from a set of characters known to be very robust through email gateways, and not ending with white space. It is used to encapsulate the boundaries of the multiple parts of the message.
:param kwargs:
"""
if disposition not in ["attachment", "inline", "form-data", ""]:
raise ValueError(
"Disposition should be either inline, form-data, attachment or empty. Choose one."
)
if fallback_filename:
try:
fallback_filename.encode("ASCII")
except UnicodeEncodeError:
raise ValueError(
"The fallback filename should only contain valid ASCII characters. Not '{fb_filename}'. Use fallback_filename instead.".format(
fb_filename=fallback_filename
)
)
args: Dict = {
"name": name,
"filename": filename,
"filename*": ("UTF-8''" + url_quote(fallback_filename))
            if fallback_filename
            else None,
            "boundary": boundary,
        }
        args.update(kwargs)
        super().__init__(disposition, **args)
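# Hedged usage sketch (not part of the original module): building a typical
# attachment header with the class above. The filename is hypothetical.
def _example_content_disposition_usage() -> None:
    header = ContentDisposition(
        "attachment",
        filename="quarterly-report.pdf",
        fallback_filename="quarterly-report.pdf",
    )
    assert "attachment" in repr(header)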
#!/usr/bin/python
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from nose import SkipTest
from uuid import uuid4
from swift.common.utils import json
from test.functional import check_response, retry, requires_acls, \
requires_policies
import test.functional as tf
class TestObject(unittest.TestCase):
def setUp(self):
if tf.skip:
raise SkipTest
self.container = uuid4().hex
self.containers = []
self._create_container(self.container)
self.obj = uuid4().hex
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s/%s' % (
parsed.path, self.container, self.obj), 'test',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 201)
def _create_container(self, name=None, headers=None):
if not name:
name = uuid4().hex
self.containers.append(name)
headers = headers or {}
def put(url, token, parsed, conn, name):
new_headers = dict({'X-Auth-Token': token}, **headers)
conn.request('PUT', parsed.path + '/' + name, '',
new_headers)
return check_response(conn)
resp = retry(put, name)
resp.read()
self.assertEqual(resp.status, 201)
return name
def tearDown(self):
if tf.skip:
raise SkipTest
# get list of objects in container
def get(url, token, parsed, conn, container):
conn.request(
'GET', parsed.path + '/' + container + '?format=json', '',
{'X-Auth-Token': token})
return check_response(conn)
# delete an object
def delete(url, token, parsed, conn, container, obj):
conn.request(
'DELETE', '/'.join([parsed.path, container, obj['name']]), '',
{'X-Auth-Token': token})
return check_response(conn)
for container in self.containers:
while True:
resp = retry(get, container)
body = resp.read()
if resp.status == 404:
break
self.assert_(resp.status // 100 == 2, resp.status)
objs = json.loads(body)
if not objs:
break
for obj in objs:
resp = retry(delete, container, obj)
resp.read()
self.assertEqual(resp.status, 204)
# delete the container
def delete(url, token, parsed, conn, name):
conn.request('DELETE', parsed.path + '/' + name, '',
{'X-Auth-Token': token})
return check_response(conn)
for container in self.containers:
resp = retry(delete, container)
resp.read()
self.assert_(resp.status in (204, 404))
def test_if_none_match(self):
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s/%s' % (
parsed.path, self.container, 'if_none_match_test'), '',
{'X-Auth-Token': token,
'Content-Length': '0',
'If-None-Match': '*'})
return check_response(conn)
resp = retry(put)
resp.read()
self.assertEquals(resp.status, 201)
resp = retry(put)
resp.read()
self.assertEquals(resp.status, 412)
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s/%s' % (
parsed.path, self.container, 'if_none_match_test'), '',
{'X-Auth-Token': token,
'Content-Length': '0',
'If-None-Match': 'somethingelse'})
return check_response(conn)
resp = retry(put)
resp.read()
self.assertEquals(resp.status, 400)
def test_copy_object(self):
if tf.skip:
raise SkipTest
source = '%s/%s' % (self.container, self.obj)
dest = '%s/%s' % (self.container, 'test_copy')
# get contents of source
def get_source(url, token, parsed, conn):
conn.request('GET',
'%s/%s' % (parsed.path, source),
'', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(get_source)
source_contents = resp.read()
self.assertEqual(resp.status, 200)
self.assertEqual(source_contents, 'test')
# copy source to dest with X-Copy-From
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s' % (parsed.path, dest), '',
{'X-Auth-Token': token,
'Content-Length': '0',
'X-Copy-From': source})
return check_response(conn)
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 201)
# contents of dest should be the same as source
def get_dest(url, token, parsed, conn):
conn.request('GET',
'%s/%s' % (parsed.path, dest),
'', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(get_dest)
dest_contents = resp.read()
self.assertEqual(resp.status, 200)
self.assertEqual(dest_contents, source_contents)
# delete the copy
def delete(url, token, parsed, conn):
conn.request('DELETE', '%s/%s' % (parsed.path, dest), '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(delete)
resp.read()
self.assertEqual(resp.status, 204)
# verify dest does not exist
resp = retry(get_dest)
resp.read()
self.assertEqual(resp.status, 404)
# copy source to dest with COPY
def copy(url, token, parsed, conn):
conn.request('COPY', '%s/%s' % (parsed.path, source), '',
{'X-Auth-Token': token,
'Destination': dest})
return check_response(conn)
resp = retry(copy)
resp.read()
self.assertEqual(resp.status, 201)
# contents of dest should be the same as source
resp = retry(get_dest)
dest_contents = resp.read()
self.assertEqual(resp.status, 200)
self.assertEqual(dest_contents, source_contents)
# delete the copy
resp = retry(delete)
resp.read()
self.assertEqual(resp.status, 204)
def test_public_object(self):
if tf.skip:
raise SkipTest
def get(url, token, parsed, conn):
conn.request('GET',
'%s/%s/%s' % (parsed.path, self.container, self.obj))
return check_response(conn)
try:
resp = retry(get)
raise Exception('Should not have been able to GET')
except Exception as err:
self.assert_(str(err).startswith('No result after '))
def post(url, token, parsed, conn):
conn.request('POST', parsed.path + '/' + self.container, '',
{'X-Auth-Token': token,
'X-Container-Read': '.r:*'})
return check_response(conn)
resp = retry(post)
resp.read()
self.assertEqual(resp.status, 204)
resp = retry(get)
resp.read()
self.assertEqual(resp.status, 200)
def post(url, token, parsed, conn):
conn.request('POST', parsed.path + '/' + self.container, '',
{'X-Auth-Token': token, 'X-Container-Read': ''})
return check_response(conn)
resp = retry(post)
resp.read()
self.assertEqual(resp.status, 204)
try:
resp = retry(get)
raise Exception('Should not have been able to GET')
except Exception as err:
self.assert_(str(err).startswith('No result after '))
def test_private_object(self):
if tf.skip or tf.skip3:
raise SkipTest
# Ensure we can't access the object with the third account
def get(url, token, parsed, conn):
conn.request('GET', '%s/%s/%s' % (
parsed.path, self.container, self.obj), '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(get, use_account=3)
resp.read()
self.assertEqual(resp.status, 403)
# create a shared container writable by account3
shared_container = uuid4().hex
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s' % (
parsed.path, shared_container), '',
{'X-Auth-Token': token,
'X-Container-Read': tf.swift_test_perm[2],
'X-Container-Write': tf.swift_test_perm[2]})
return check_response(conn)
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 201)
# verify third account can not copy from private container
def copy(url, token, parsed, conn):
conn.request('PUT', '%s/%s/%s' % (
parsed.path, shared_container, 'private_object'), '',
{'X-Auth-Token': token,
'Content-Length': '0',
'X-Copy-From': '%s/%s' % (self.container, self.obj)})
return check_response(conn)
resp = retry(copy, use_account=3)
resp.read()
self.assertEqual(resp.status, 403)
# verify third account can write "obj1" to shared container
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s/%s' % (
parsed.path, shared_container, 'obj1'), 'test',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(put, use_account=3)
resp.read()
self.assertEqual(resp.status, 201)
# verify third account can copy "obj1" to shared container
def copy2(url, token, parsed, conn):
conn.request('COPY', '%s/%s/%s' % (
parsed.path, shared_container, 'obj1'), '',
{'X-Auth-Token': token,
'Destination': '%s/%s' % (shared_container, 'obj1')})
return check_response(conn)
resp = retry(copy2, use_account=3)
resp.read()
self.assertEqual(resp.status, 201)
# verify third account STILL can not copy from private container
def copy3(url, token, parsed, conn):
conn.request('COPY', '%s/%s/%s' % (
parsed.path, self.container, self.obj), '',
{'X-Auth-Token': token,
'Destination': '%s/%s' % (shared_container,
'private_object')})
return check_response(conn)
resp = retry(copy3, use_account=3)
resp.read()
self.assertEqual(resp.status, 403)
# clean up "obj1"
def delete(url, token, parsed, conn):
conn.request('DELETE', '%s/%s/%s' % (
parsed.path, shared_container, 'obj1'), '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(delete)
resp.read()
self.assertEqual(resp.status, 204)
# clean up shared_container
def delete(url, token, parsed, conn):
conn.request('DELETE',
parsed.path + '/' + shared_container, '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(delete)
resp.read()
self.assertEqual(resp.status, 204)
@requires_acls
def test_read_only(self):
if tf.skip3:
            raise SkipTest
def get_listing(url, token, parsed, conn):
conn.request('GET', '%s/%s' % (parsed.path, self.container), '',
{'X-Auth-Token': token})
return check_response(conn)
def post_account(url, token, parsed, conn, headers):
new_headers = dict({'X-Auth-Token': token}, **headers)
conn.request('POST', parsed.path, '', new_headers)
return check_response(conn)
def get(url, token, parsed, conn, name):
conn.request('GET', '%s/%s/%s' % (
parsed.path, self.container, name), '',
{'X-Auth-Token': token})
return check_response(conn)
def put(url, token, parsed, conn, name):
conn.request('PUT', '%s/%s/%s' % (
parsed.path, self.container, name), 'test',
{'X-Auth-Token': token})
return check_response(conn)
def delete(url, token, parsed, conn, name):
            conn.request('DELETE', '%s/%s/%s' % (
parsed.path, self.container, name), '',
{'X-Auth-Token': token})
return check_response(conn)
# cannot list objects
resp = retry(get_listing, use_account=3)
resp.read()
self.assertEquals(resp.status, 403)
# cannot get object
resp = retry(get, self.obj, use_account=3)
resp.read()
self.assertEquals(resp.status, 403)
# grant read-only access
acl_user = tf.swift_test_user[2]
acl = {'read-only': [acl_user]}
headers = {'x-account-access-control': json.dumps(acl)}
resp = retry(post_account, headers=headers, use_account=1)
resp.read()
self.assertEqual(resp.status, 204)
# can list objects
resp = retry(get_listing, use_account=3)
listing = resp.read()
self.assertEquals(resp.status, 200)
self.assert_(self.obj in listing)
# can get object
resp = retry(get, self.obj, use_account=3)
body = resp.read()
self.assertEquals(resp.status, 200)
self.assertEquals(body, 'test')
# can not put an object
obj_name = str(uuid4())
resp = retry(put, obj_name, use_account=3)
body = resp.read()
self.assertEquals(resp.status, 403)
# can not delete an object
resp = retry(delete, self.obj, use_account=3)
body = resp.read()
self.assertEquals(resp.status, 403)
        # sanity check: the failed PUT did not create the object and the original object is still listed
resp = retry(get_listing, use_account=3)
listing = resp.read()
self.assertEquals(resp.status, 200)
self.assert_(obj_name not in listing)
self.assert_(self.obj in listing)
@requires_acls
def test_read_write(self):
if tf.skip3:
raise SkipTest
def get_listing(url, token, parsed, conn):
conn.request('GET', '%s/%s' % (parsed.path, self.container), '',
{'X-Auth-Token': token})
return check_response(conn)
def post_account(url, token, parsed, conn, headers):
new_headers = dict({'X-Auth-Token': token}, **headers)
conn.request('POST', parsed.path, '', new_headers)
return check_response(conn)
def get(url, token, parsed, conn, name):
conn.request('GET', '%s/%s/%s' % (
parsed.path, self.container, name), '',
{'X-Auth-Token': token})
return check_response(conn)
        def put(url,
import json
import os
import re
from io import BufferedReader, TextIOWrapper
from tempfile import TemporaryDirectory
import pytest
from cognite.client import CogniteClient
from cognite.client._api.files import FileMetadata, FileMetadataList, FileMetadataUpdate
from cognite.client.data_classes import (
FileMetadataFilter,
GeoLocation,
GeoLocationFilter,
Geometry,
GeometryFilter,
Label,
LabelFilter,
TimestampRange,
)
from cognite.client.exceptions import CogniteAPIError
from tests.utils import jsgz_load, set_request_limit
FILES_API = CogniteClient(max_workers=1).files
@pytest.fixture
def mock_geo_location():
geometry = Geometry(type="Point", coordinates=[35, 10])
yield GeoLocation(type="Feature", geometry=geometry)
@pytest.fixture
def mock_files_response(rsps, mock_geo_location):
response_body = {
"items": [
{
"externalId": "string",
"name": "string",
"source": "string",
"mimeType": "string",
"metadata": {"metadata-key": "metadata-value"},
"assetIds": [1],
"labels": [{"externalId": "WELL LOG"}],
"geoLocation": mock_geo_location,
"id": 1,
"uploaded": True,
"uploadedTime": 0,
"createdTime": 0,
"lastUpdatedTime": 0,
}
]
}
url_pattern = re.compile(re.escape(FILES_API._get_base_url_with_base_path()) + "/.+")
rsps.assert_all_requests_are_fired = False
rsps.add(rsps.POST, url_pattern, status=200, json=response_body)
rsps.add(rsps.GET, url_pattern, status=200, json=response_body)
yield rsps
@pytest.fixture
def mock_file_upload_response(rsps, mock_geo_location):
response_body = {
"externalId": "string",
"name": "string",
"source": "string",
"mimeType": "string",
"metadata": {},
"assetIds": [1],
"labels": [{"externalId": "WELL LOG"}],
"geoLocation": mock_geo_location,
"id": 1,
"uploaded": True,
"uploadedTime": 0,
"createdTime": 0,
"lastUpdatedTime": 0,
"uploadUrl": "https://upload.here",
}
rsps.add(rsps.POST, FILES_API._get_base_url_with_base_path() + "/files", status=200, json=response_body)
rsps.add(rsps.PUT, "https://upload.here", status=200)
yield rsps
@pytest.fixture
def mock_file_create_response(rsps, mock_geo_location):
response_body = {
"externalId": "string",
"name": "string",
"source": "string",
"mimeType": "string",
"metadata": {},
"assetIds": [1],
"labels": [{"externalId": "WELL LOG"}],
"geoLocation": mock_geo_location,
"id": 1,
"uploaded": False,
"uploadedTime": 0,
"createdTime": 0,
"lastUpdatedTime": 0,
"uploadUrl": "https://upload.here",
}
rsps.add(rsps.POST, FILES_API._get_base_url_with_base_path() + "/files", status=200, json=response_body)
yield rsps
@pytest.fixture
def mock_file_download_response(rsps):
rsps.add(
rsps.POST,
FILES_API._get_base_url_with_base_path() + "/files/byids",
status=200,
json={"items": [{"id": 1, "name": "file1"}, {"externalId": "2", "name": "file2"}]},
)
def download_link_callback(request):
identifier = jsgz_load(request.body)["items"][0]
response = {}
if "id" in identifier:
response = {"items": [{"id": 1, "downloadUrl": "https://download.file1.here"}]}
elif "externalId" in identifier:
response = {"items": [{"externalId": "2", "downloadUrl": "https://download.file2.here"}]}
return 200, {}, json.dumps(response)
rsps.add_callback(
rsps.POST,
FILES_API._get_base_url_with_base_path() + "/files/downloadlink",
callback=download_link_callback,
content_type="application/json",
)
rsps.add(rsps.GET, "https://download.file1.here", status=200, body="content1")
rsps.add(rsps.GET, "https://download.file2.here", status=200, body="content2")
yield rsps
@pytest.fixture
def mock_file_download_response_one_fails(rsps):
rsps.add(
rsps.POST,
FILES_API._get_base_url_with_base_path() + "/files/byids",
status=200,
json={
"items": [
{"id": 1, "externalId": "success", "name": "file1"},
{"externalId": "fail", "id": 2, "name": "file2"},
]
},
)
def download_link_callback(request):
identifier = jsgz_load(request.body)["items"][0]
if "id" in identifier:
return 200, {}, json.dumps({"items": [{"id": 1, "downloadUrl": "https://download.file1.here"}]})
elif "externalId" in identifier:
return (400, {}, json.dumps({"error": {"message": "User error", "code": 400}}))
rsps.add_callback(
rsps.POST,
FILES_API._get_base_url_with_base_path() + "/files/downloadlink",
callback=download_link_callback,
content_type="application/json",
)
rsps.add(rsps.GET, "https://download.file1.here", status=200, body="content1")
yield rsps
class TestFilesAPI:
def test_create(self, mock_file_create_response):
file_metadata = FileMetadata(name="bla")
returned_file_metadata, upload_url = FILES_API.create(file_metadata)
response_body = mock_file_create_response.calls[0].response.json()
assert FileMetadata._load(response_body) == returned_file_metadata
assert response_body["uploadUrl"] == upload_url
def test_create_with_label(self, mock_file_create_response):
file_metadata = FileMetadata(name="bla", labels=[Label(external_id="WELL LOG")])
returned_file_metadata, upload_url = FILES_API.create(file_metadata)
response_body = mock_file_create_response.calls[0].response.json()
assert FileMetadata._load(response_body) == returned_file_metadata
assert response_body["uploadUrl"] == upload_url
assert response_body["labels"][0]["externalId"] == "WELL LOG"
def test_create_with_label_request(self, mock_file_create_response):
file_metadata = FileMetadata(name="bla", labels=[Label(external_id="WELL LOG")])
returned_file_metadata, upload_url = FILES_API.create(file_metadata)
response_body = mock_file_create_response.calls[0].response.json()
request_body = jsgz_load(mock_file_create_response.calls[0].request.body)
assert FileMetadata._load(response_body) == returned_file_metadata
assert all(body["labels"][0]["externalId"] == "WELL LOG" for body in [request_body, response_body])
def test_create_with_geoLocation(self, mock_file_create_response, mock_geo_location):
file_metadata = FileMetadata(name="bla", geo_location=mock_geo_location)
returned_file_metadata, upload_url = FILES_API.create(file_metadata)
response_body = mock_file_create_response.calls[0].response.json()
assert FileMetadata._load(response_body) == returned_file_metadata
assert response_body["geoLocation"] == mock_geo_location
def test_create_geoLocation_with_invalid_geometry_type(self):
with pytest.raises(ValueError):
_ = Geometry(type="someInvalidType", coordinates=[1, 2])
def test_create_geoLocation_with_invalid_geojson_type(self):
g = Geometry(type="Point", coordinates=[1, 2])
with pytest.raises(ValueError):
_ = GeoLocation(type="FeatureCollection", geometry=g)
def test_create_with_geoLocation_request(self, mock_file_create_response, mock_geo_location):
file_metadata = FileMetadata(name="bla", geo_location=mock_geo_location)
returned_file_metadata, upload_url = FILES_API.create(file_metadata)
response_body = mock_file_create_response.calls[0].response.json()
request_body = jsgz_load(mock_file_create_response.calls[0].request.body)
assert FileMetadata._load(response_body) == returned_file_metadata
assert all(body["geoLocation"] == mock_geo_location for body in [request_body, response_body])
def test_retrieve_single(self, mock_files_response):
res = FILES_API.retrieve(id=1)
assert isinstance(res, FileMetadata)
assert mock_files_response.calls[0].response.json()["items"][0] == res.dump(camel_case=True)
def test_retrieve_multiple(self, mock_files_response):
res = FILES_API.retrieve_multiple(ids=[1])
assert isinstance(res, FileMetadataList)
assert mock_files_response.calls[0].response.json()["items"] == res.dump(camel_case=True)
def test_list(self, mock_files_response):
res = FILES_API.list(source="bla", limit=10)
assert isinstance(res, FileMetadataList)
assert mock_files_response.calls[0].response.json()["items"] == res.dump(camel_case=True)
assert "bla" == jsgz_load(mock_files_response.calls[0].request.body)["filter"]["source"]
assert 10 == jsgz_load(mock_files_response.calls[0].request.body)["limit"]
def test_list_params(self, mock_files_response):
FILES_API.list(root_asset_ids=[1, 2], root_asset_external_ids=["a"], data_set_external_ids=["x"], limit=10)
calls = mock_files_response.calls
assert 1 == len(calls)
assert {
"cursor": None,
"limit": 10,
"filter": {
"rootAssetIds": [{"id": 1}, {"id": 2}, {"externalId": "a"}],
"dataSetIds": [{"externalId": "x"}],
},
} == jsgz_load(calls[0].request.body)
def test_list_subtrees(self, mock_files_response):
FILES_API.list(asset_subtree_ids=[1], asset_subtree_external_ids=["a"], limit=10)
calls = mock_files_response.calls
assert 1 == len(calls)
assert {
"cursor": None,
"limit": 10,
"filter": {"assetSubtreeIds": [{"id": 1}, {"externalId": "a"}]},
} == jsgz_load(calls[0].request.body)
def test_filter_directory(self, mock_files_response):
FILES_API.list(directory_prefix="/test", limit=10)
calls = mock_files_response.calls
assert len(calls) == 1
assert jsgz_load(calls[0].request.body) == {"cursor": None, "filter": {"directoryPrefix": "/test"}, "limit": 10}
def test_filter_geoLocation(self, mock_files_response):
FILES_API.list(
geo_location=GeoLocationFilter(relation="within", shape=GeometryFilter(type="Point", coordinates=[35, 10])),
limit=10,
)
calls = mock_files_response.calls
assert len(calls) == 1
assert jsgz_load(calls[0].request.body) == {
"cursor": None,
"filter": {"geoLocation": {"relation": "within", "shape": {"type": "Point", "coordinates": [35, 10]}}},
"limit": 10,
}
def test_list_with_time_dict(self, mock_files_response):
FILES_API.list(created_time={"min": 20})
assert 20 == jsgz_load(mock_files_response.calls[0].request.body)["filter"]["createdTime"]["min"]
assert "max" not in jsgz_load(mock_files_response.calls[0].request.body)["filter"]["createdTime"]
def test_list_with_timestamp_range(self, mock_files_response):
FILES_API.list(created_time=TimestampRange(min=20))
assert 20 == jsgz_load(mock_files_response.calls[0].request.body)["filter"]["createdTime"]["min"]
assert "max" not in jsgz_load(mock_files_response.calls[0].request.body)["filter"]["createdTime"]
def test_delete_single(self, mock_files_response):
res = FILES_API.delete(id=1)
assert {"items": [{"id": 1}]} == jsgz_load(mock_files_response.calls[0].request.body)
assert res is None
def test_delete_multiple(self, mock_files_response):
res = FILES_API.delete(id=[1])
assert {"items": [{"id": 1}]} == jsgz_load(mock_files_response.calls[0].request.body)
assert res is None
def test_update_with_resource_class(self, mock_files_response):
res = FILES_API.update(FileMetadata(id=1, source="bla"))
assert isinstance(res, FileMetadata)
assert {"items": [{"id": 1, "update": {"source": {"set": "bla"}}}]} == jsgz_load(
mock_files_response.calls[0].request.body
)
def test_update_with_update_class(self, mock_files_response):
res = FILES_API.update(FileMetadataUpdate(id=1).source.set("bla"))
assert isinstance(res, FileMetadata)
assert {"items": [{"id": 1, "update": {"source": {"set": "bla"}}}]} == jsgz_load(
mock_files_response.calls[0].request.body
)
def test_update_labels_single(self, mock_files_response):
FILES_API.update([FileMetadataUpdate(id=1).labels.add("PUMP").labels.remove("WELL LOG")])
expected = {"labels": {"add": [{"externalId": "PUMP"}], "remove": [{"externalId": "WELL LOG"}]}}
assert jsgz_load(mock_files_response.calls[0].request.body)["items"][0]["update"] == expected
def test_update_labels_multiple(self, mock_files_response):
FILES_API.update(
[FileMetadataUpdate(id=1).labels.add(["PUMP", "ROTATING_EQUIPMENT"]).labels.remove(["WELL LOG"])]
)
expected = {
"labels": {
"add": [{"externalId": "PUMP"}, {"externalId": "ROTATING_EQUIPMENT"}],
"remove": [{"externalId": "WELL LOG"}],
}
}
assert jsgz_load(mock_files_response.calls[0].request.body)["items"][0]["update"] == expected
# resource.update doesn't support full replacement of labels (set operation)
def test_ignore_labels_resource_class(self, mock_files_response):
FILES_API.update(FileMetadata(id=1, labels=[Label(external_id="Pump")], external_id="newId"))
assert jsgz_load(mock_files_response.calls[0].request.body)["items"][0]["update"] == {
"externalId": {"set": "newId"}
}
def test_labels_filter_contains_all(self, mock_files_response):
my_label_filter = LabelFilter(contains_all=["WELL LOG", "VERIFIED"])
FILES_API.list(labels=my_label_filter)
assert jsgz_load(mock_files_response.calls[0].request.body)["filter"]["labels"] == {
"containsAll": [{"externalId": "WELL LOG"}, {"externalId": "VERIFIED"}]
}
def test_labels_filter_contains_any(self, mock_files_response):
my_label_filter = LabelFilter(contains_any=["WELL LOG", "WELL REPORT"])
FILES_API.list(labels=my_label_filter)
assert jsgz_load(mock_files_response.calls[0].request.body)["filter"]["labels"] == {
"containsAny": [{"externalId": "WELL LOG"}, {"externalId": "WELL REPORT"}]
}
def test_update_multiple(self, mock_files_response):
res = FILES_API.update([FileMetadataUpdate(id=1).source.set(None), FileMetadata(external_id="2", source="bla")])
assert isinstance(res, FileMetadataList)
assert {
"items": [
{"id": 1, "update": {"source": {"setNull": True}}},
{"externalId": "2", "update": {"source": {"set": "bla"}}},
]
} == jsgz_load(mock_files_response.calls[0].request.body)
def test_iter_single(self, mock_files_response):
for file in FILES_API:
assert isinstance(file, FileMetadata)
assert mock_files_response.calls[0].response.json()["items"][0] == file.dump(camel_case=True)
def test_iter_chunk(self, mock_files_response):
for file in FILES_API(chunk_size=1):
assert isinstance(file, FileMetadataList)
assert mock_files_response.calls[0].response.json()["items"] == file.dump(camel_case=True)
def test_search(self, mock_files_response):
res = FILES_API.search(filter=FileMetadataFilter(external_id_prefix="abc"))
assert mock_files_response.calls[0].response.json()["items"] == res.dump(camel_case=True)
assert {"search": {"name": None}, "filter": {"externalIdPrefix": "abc"}, "limit": 100} == jsgz_load(
mock_files_response.calls[0].request.body
)
@pytest.mark.parametrize("filter_field", ["external_id_prefix", "externalIdPrefix"])
def test_search_dict_filter(self, mock_files_response, filter_field):
res = FILES_API.search(filter={filter_field: "abc"})
assert mock_files_response.calls[0].response.json()["items"] == res.dump(camel_case=True)
assert {"search": {"name": None}, "filter": {"externalIdPrefix": "abc"}, "limit": 100} == jsgz_load(
mock_files_response.calls[0].request.body
)
def test_upload(self, mock_file_upload_response):
dir = os.path.join(os.path.dirname(__file__), "files_for_test_upload")
path = os.path.join(dir, "file_for_test_upload_1.txt")
res = FILES_API.upload(path, name="bla", directory=dir)
response_body = mock_file_upload_response.calls[0].response.json()
del response_body["uploadUrl"]
assert FileMetadata._load(response_body) == res
assert "https://upload.here/" == mock_file_upload_response.calls[1].request.url
assert {"name": "bla", "directory": dir} == jsgz_load(mock_file_upload_response.calls[0].request.body)
assert isinstance(mock_file_upload_response.calls[1].request.body, BufferedReader)
def test_upload_with_external_id(self, mock_file_upload_response):
path = os.path.join(os.path.dirname(__file__), "files_for_test_upload", "file_for_test_upload_1.txt")
FILES_API.upload(path, external_id="blabla", name="bla", data_set_id=42)
def test_upload_no_name(self, mock_file_upload_response):
dir = os.path.join(os.path.dirname(__file__), "files_for_test_upload")
path = os.path.join(dir, "file_for_test_upload_1.txt")
FILES_API.upload(path, directory=dir)
assert {"name": "file_for_test_upload_1.txt", "directory": dir} == jsgz_load(
mock_file_upload_response.calls[0].request.body
)
def test_upload_set_directory(self, mock_file_upload_response):
set_dir = "/Some/custom/directory"
dir = os.path.join(os.path.dirname(__file__), "files_for_test_upload")
path = os.path.join(dir, "file_for_test_upload_1.txt")
FILES_API.upload(path, directory=set_dir)
assert {"name": "file_for_test_upload_1.txt", "directory": set_dir} == jsgz_load(
mock_file_upload_response.calls[0].request.body
)
def test_upload_from_directory(self, mock_file_upload_response):
path = os.path.join(os.path.dirname(__file__), "files_for_test_upload")
res = FILES_API.upload(path=path, asset_ids=[1, 2])
response_body = mock_file_upload_response.calls[0].response.json()
del response_body["uploadUrl"]
assert FileMetadataList([FileMetadata._load(response_body), FileMetadata._load(response_body)]) == res
assert 4 == len(mock_file_upload_response.calls)
for call in mock_file_upload_response.calls:
payload = call.request.body
if isinstance(payload, BufferedReader):
continue
else:
json = jsgz_load(payload)
assert [1, 2] == json["assetIds"]
assert json["name"] in ["file_for_test_upload_1.txt", "file_for_test_upload_2.txt"]
def test_upload_from_directory_fails(self, rsps):
rsps.add(rsps.POST, FILES_API._get_base_url_with_base_path() + "/files", status=400, json={})
path = os.path.join(os.path.dirname(__file__), "files_for_test_upload")
with pytest.raises(CogniteAPIError) as e:
FILES_API.upload(path=path)
assert "file_for_test_upload_1.txt" in e.value.failed
assert "file_for_test_upload_2.txt" in e.value.failed
def test_upload_from_directory_recursively(self, mock_file_upload_response):
path = os.path.join(os.path.dirname(__file__), "files_for_test_upload")
res = FILES_API.upload(path=path, recursive=True, asset_ids=[1, 2])
response_body = mock_file_upload_response.calls[0].response.json()
del response_body["uploadUrl"]
assert FileMetadataList([FileMetadata._load(response_body) for _ in range(3)]) == res
assert 6 == len(mock_file_upload_response.calls)
for call in mock_file_upload_response.calls:
payload = call.request.body
if isinstance(payload, BufferedReader):
continue
else:
json = jsgz_load(payload)
assert json["name"] in [
"file_for_test_upload_1.txt",
"file_for_test_upload_2.txt",
"file_for_test_upload_3.txt",
]
assert [1, 2] == json["assetIds"]
def test_upload_from_memory(self, mock_file_upload_response):
res = FILES_API.upload_bytes(content=b"content", name="bla")
response_body = mock_file_upload_response.calls[0].response.json()
del response_body["uploadUrl"]
assert FileMetadata._load(response_body) == res
assert "https://upload.here/" == mock_file_upload_response.calls[1].request.url
assert {"name": "bla"} == jsgz_load(mock_file_upload_response.calls[0].request.body)
assert b"content" == mock_file_upload_response.calls[1].request.body
def test_upload_using_file_handle(self, mock_file_upload_response):
path = os.path.join(os.path.dirname(__file__), "files_for_test_upload", "file_for_test_upload_1.txt")
with open(path) as fh:
res = FILES_API.upload_bytes(fh, name="bla")
response_body = mock_file_upload_response.calls[0].response.json()
del response_body["uploadUrl"]
assert FileMetadata._load(response_body) == res
assert "https://upload.here/" == | |
#!/usr/bin/python
# This file is part of Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
---
module: aws_s3
version_added: 1.0.0
short_description: manage objects in S3.
description:
- This module allows the user to manage S3 buckets and the objects within them. Includes support for creating and
deleting both objects and buckets, retrieving objects as files or strings and generating download links.
This module has a dependency on boto3 and botocore.
options:
bucket:
description:
- Bucket name.
required: true
type: str
dest:
description:
- The destination file path when downloading an object/key with a GET operation.
type: path
encrypt:
description:
- When set for PUT mode, asks for server-side encryption.
default: true
type: bool
encryption_mode:
description:
- What encryption mode to use if I(encrypt=true).
default: AES256
choices:
- AES256
- aws:kms
type: str
expiry:
description:
- Time limit (in seconds) for the URL generated and returned by S3/Walrus when performing a I(mode=put) or I(mode=geturl) operation.
default: 600
aliases: ['expiration']
type: int
headers:
description:
- Custom headers for PUT operation, as a dictionary of C(key=value) and C(key=value,key=value).
type: dict
marker:
description:
- Specifies the key to start with when using list mode. Object keys are returned in alphabetical order, starting with key after the marker in order.
type: str
max_keys:
description:
- Max number of results to return in list mode, set this if you want to retrieve fewer than the default 1000 keys.
default: 1000
type: int
metadata:
description:
- Metadata for PUT operation, as a dictionary of C(key=value) and C(key=value,key=value).
type: dict
mode:
description:
- Switches the module behaviour between C(put) (upload), C(get) (download), C(geturl) (return download url, Ansible 1.3+),
C(getstr) (download object as string (1.3+)), C(list) (list keys, Ansible 2.0+), C(create) (bucket), C(delete) (bucket),
and delobj (delete object, Ansible 2.0+).
required: true
choices: ['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list']
type: str
object:
description:
- Keyname of the object inside the bucket. Can be used to create "virtual directories", see examples.
type: str
permission:
description:
- This option lets the user set the canned permissions on the object/bucket that are created.
The permissions that can be set are C(private), C(public-read), C(public-read-write), C(authenticated-read) for a bucket or
C(private), C(public-read), C(public-read-write), C(aws-exec-read), C(authenticated-read), C(bucket-owner-read),
C(bucket-owner-full-control) for an object. Multiple permissions can be specified as a list.
default: ['private']
type: list
elements: str
prefix:
description:
- Limits the response to keys that begin with the specified prefix for list mode.
default: ""
type: str
version:
description:
- Version ID of the object inside the bucket. Can be used to get a specific version of a file if versioning is enabled in the target bucket.
type: str
overwrite:
description:
- Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations.
- Must be a Boolean, C(always), C(never) or C(different).
- C(true) is the same as C(always).
- C(false) is equal to C(never).
- When this is set to C(different) the MD5 sum of the local file is compared with the 'ETag' of the object/key in S3.
The ETag may or may not be an MD5 digest of the object data. See the ETag response header here
U(https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html).
default: 'always'
aliases: ['force']
type: str
retries:
description:
- On recoverable failure, how many times to retry before actually failing.
default: 0
type: int
aliases: ['retry']
s3_url:
description:
- S3 URL endpoint for usage with Ceph, Eucalyptus and fakes3 etc. Otherwise assumes AWS.
aliases: [ S3_URL ]
type: str
dualstack:
description:
- Enables Amazon S3 Dual-Stack Endpoints, allowing S3 communications using both IPv4 and IPv6.
- Requires at least botocore version 1.4.45.
type: bool
default: false
rgw:
description:
- Enable Ceph RGW S3 support. This option requires an explicit url via I(s3_url).
default: false
type: bool
src:
description:
- The source file path when performing a PUT operation.
- Either I(content), I(content_base64) or I(src) must be specified for a PUT operation. Ignored otherwise.
type: path
content:
description:
- The content to PUT into an object.
- The parameter value will be treated as a string and converted to UTF-8 before sending it to S3.
To send binary data, use the I(content_base64) parameter instead.
- Either I(content), I(content_base64) or I(src) must be specified for a PUT operation. Ignored otherwise.
version_added: "1.3.0"
type: str
content_base64:
description:
- The base64-encoded binary data to PUT into an object.
- Use this if you need to put raw binary data, and don't forget to encode in base64.
- Either I(content), I(content_base64) or I(src) must be specified for a PUT operation. Ignored otherwise.
version_added: "1.3.0"
type: str
ignore_nonexistent_bucket:
description:
- "Overrides initial bucket lookups in case bucket or iam policies are restrictive. Example: a user may have the
GetObject permission but no other permissions. In this case using the option mode: get will fail without specifying
I(ignore_nonexistent_bucket=true)."
type: bool
default: false
encryption_kms_key_id:
description:
      - KMS key id to use when encrypting objects using I(encryption_mode=aws:kms). Ignored if I(encryption_mode) is not C(aws:kms).
type: str
requirements: [ "boto3", "botocore" ]
author:
- "<NAME> (@lwade)"
- "<NAME> (@s-hertel)"
extends_documentation_fragment:
- amazon.aws.aws
- amazon.aws.ec2
'''
EXAMPLES = '''
- name: Simple PUT operation
amazon.aws.aws_s3:
bucket: mybucket
object: /my/desired/key.txt
src: /usr/local/myfile.txt
mode: put
- name: PUT operation from a rendered template
amazon.aws.aws_s3:
bucket: mybucket
object: /object.yaml
content: "{{ lookup('template', 'templates/object.yaml.j2') }}"
mode: put
- name: Simple PUT operation in Ceph RGW S3
amazon.aws.aws_s3:
bucket: mybucket
object: /my/desired/key.txt
src: /usr/local/myfile.txt
mode: put
rgw: true
s3_url: "http://localhost:8000"
- name: Simple GET operation
amazon.aws.aws_s3:
bucket: mybucket
object: /my/desired/key.txt
dest: /usr/local/myfile.txt
mode: get
- name: Get a specific version of an object.
amazon.aws.aws_s3:
bucket: mybucket
object: /my/desired/key.txt
version: 48c9ee5131af7a716edc22df9772aa6f
dest: /usr/local/myfile.txt
mode: get
- name: PUT/upload with metadata
amazon.aws.aws_s3:
bucket: mybucket
object: /my/desired/key.txt
src: /usr/local/myfile.txt
mode: put
metadata: 'Content-Encoding=gzip,Cache-Control=no-cache'
- name: PUT/upload with custom headers
amazon.aws.aws_s3:
bucket: mybucket
object: /my/desired/key.txt
src: /usr/local/myfile.txt
mode: put
headers: 'x-amz-grant-full-control=emailAddress=<EMAIL>'
- name: List keys simple
amazon.aws.aws_s3:
bucket: mybucket
mode: list
- name: List keys all options
amazon.aws.aws_s3:
bucket: mybucket
mode: list
prefix: /my/desired/
marker: /my/desired/0023.txt
max_keys: 472
- name: Create an empty bucket
amazon.aws.aws_s3:
bucket: mybucket
mode: create
permission: public-read
- name: Create a bucket with key as directory, in the EU region
amazon.aws.aws_s3:
bucket: mybucket
object: /my/directory/path
mode: create
region: eu-west-1
- name: Delete a bucket and all contents
amazon.aws.aws_s3:
bucket: mybucket
mode: delete
- name: GET an object but don't download if the file checksums match. New in 2.0
amazon.aws.aws_s3:
bucket: mybucket
object: /my/desired/key.txt
dest: /usr/local/myfile.txt
mode: get
overwrite: different
- name: Delete an object from a bucket
amazon.aws.aws_s3:
bucket: mybucket
object: /my/desired/key.txt
mode: delobj
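# Hedged, illustrative addition (not from the original examples): PUT raw binary
# data via content_base64; b64encode is the standard Ansible filter.
- name: PUT operation with base64-encoded binary content
  amazon.aws.aws_s3:
    bucket: mybucket
    object: /my/desired/key.bin
    content_base64: "{{ 'some binary-ish payload' | b64encode }}"
    mode: put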
'''
RETURN = '''
msg:
description: Message indicating the status of the operation.
returned: always
type: str
sample: PUT operation complete
url:
description: URL of the object.
returned: (for put and geturl operations)
type: str
sample: https://my-bucket.s3.amazonaws.com/my-key.txt?AWSAccessKeyId=<access-key>&Expires=1506888865&Signature=<signature>
expiry:
description: Number of seconds the presigned url is valid for.
returned: (for geturl operation)
type: int
sample: 600
contents:
description: Contents of the object as string.
returned: (for getstr operation)
type: str
sample: "Hello, world!"
s3_keys:
description: List of object keys.
returned: (for list operation)
type: list
elements: str
sample:
- prefix1/
- prefix1/key1
- prefix1/key2
'''
import mimetypes
import os
import io
from ssl import SSLError
import base64
try:
import botocore
except ImportError:
pass # Handled by AnsibleAWSModule
from ansible.module_utils.basic import to_text
from ansible.module_utils.basic import to_native
from ansible.module_utils.six.moves.urllib.parse import urlparse
from ..module_utils.core import AnsibleAWSModule
from ..module_utils.core import is_boto3_error_code
from ..module_utils.core import is_boto3_error_message
from ..module_utils.ec2 import AWSRetry
from ..module_utils.ec2 import boto3_conn
from ..module_utils.ec2 import get_aws_connection_info
from ..module_utils.s3 import HAS_MD5
from ..module_utils.s3 import calculate_etag
from ..module_utils.s3 import calculate_etag_content
IGNORE_S3_DROP_IN_EXCEPTIONS = ['XNotImplemented', 'NotImplemented']
class Sigv4Required(Exception):
pass
def key_check(module, s3, bucket, obj, version=None, validate=True):
try:
if version:
s3.head_object(Bucket=bucket, Key=obj, VersionId=version)
else:
s3.head_object(Bucket=bucket, Key=obj)
except is_boto3_error_code('404'):
return False
except is_boto3_error_code('403') as e: # pylint: disable=duplicate-except
if validate is True:
module.fail_json_aws(e, msg="Failed while looking up object (during key check) %s." % obj)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, msg="Failed while looking up object (during key check) %s." % obj)
return True
def etag_compare(module, s3, bucket, obj, version=None, local_file=None, content=None):
    s3_etag = get_etag(s3, bucket, obj, version=version)
every epoch). Defaults to
`'epoch'`
**kwargs: Additional arguments for backwards compatibility. Possible key
is `period`.
"""
def __init__(
self,
filepath,
monitor="val_loss",
verbose=0,
save_best_only=False,
save_weights_only=False,
mode="auto",
save_freq="epoch",
exclude_initial_epochs=0,
**kwargs,
):
super(ModelCheckpoint, self).__init__()
self.monitor = monitor
self.verbose = verbose
self.filepath = filepath
self.save_best_only = save_best_only
self.save_weights_only = save_weights_only
self.save_freq = save_freq
self.exclude_initial_epochs = exclude_initial_epochs
self.epochs_since_last_save = 0
self._samples_seen_since_last_saving = 0
# Deprecated field `load_weights_on_restart` is for loading the checkpoint
# file from `filepath` at the start of `model.fit()`
# TODO(rchao): Remove the arg during next breaking release.
if "load_weights_on_restart" in kwargs:
self.load_weights_on_restart = kwargs["load_weights_on_restart"]
logging.warning(
"`load_weights_on_restart` argument is deprecated. "
"Please use `model.load_weights()` for loading weights "
"before the start of `model.fit()`."
)
else:
self.load_weights_on_restart = False
# Deprecated field `period` is for the number of epochs between which
# the model is saved.
if "period" in kwargs:
self.period = kwargs["period"]
logging.warning(
"`period` argument is deprecated. Please use `save_freq` "
"to specify the frequency in number of samples seen."
)
else:
self.period = 1
if mode not in ["auto", "min", "max"]:
logging.warning(
"ModelCheckpoint mode %s is unknown, " "fallback to auto mode.", mode
)
mode = "auto"
if mode == "min":
self.monitor_op = np.less
self.best = np.Inf
elif mode == "max":
self.monitor_op = np.greater
self.best = -np.Inf
else:
if "acc" in self.monitor or self.monitor.startswith("fmeasure"):
self.monitor_op = np.greater
self.best = -np.Inf
else:
self.monitor_op = np.less
self.best = np.Inf
if self.save_freq != "epoch" and not isinstance(self.save_freq, int):
raise ValueError("Unrecognized save_freq: {}".format(self.save_freq))
# Only the chief worker writes model checkpoints, but all workers
# restore checkpoint at on_train_begin().
self._chief_worker_only = False
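    # Hedged usage sketch (not part of the original class): wiring this modified
    # callback into a training loop; the filepath pattern, model and data below
    # are hypothetical.
    #
    #     checkpoint = ModelCheckpoint(
    #         "weights.{epoch:02d}-{val_loss:.2f}.h5",
    #         monitor="val_loss",
    #         save_best_only=True,
    #         exclude_initial_epochs=5,  # skip the typically noisy first epochs
    #     )
    #     model.fit(x_train, y_train, validation_split=0.1,
    #               epochs=50, callbacks=[checkpoint])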
def set_model(self, model):
self.model = model
# Use name matching rather than `isinstance` to avoid circular dependencies.
if (
not self.save_weights_only
and not model._is_graph_network
and model.__class__.__name__ # pylint: disable=protected-access
!= "Sequential"
):
self.save_weights_only = True
def on_train_begin(self, logs=None):
# pylint: disable=protected-access
if self.model._in_multi_worker_mode():
# MultiWorkerTrainingState is used to manage the training state needed
# for preemption-recovery of a worker in multi-worker training.
self.model._training_state = training_state.MultiWorkerTrainingState(
self.model, self.filepath
)
self._training_state = self.model._training_state
if self._training_state.restore():
# If the training state needs to be and is successfully restored,
# it is recovering from a previous failure (or preemption). In such
# case, do not load the weights from user specified file path.
return
# If this is not multi worker training, restoring is not needed, or
# restoring failed, check if it should load weights on restart.
if self.load_weights_on_restart:
if (
not self.model._in_multi_worker_mode()
or multi_worker_util.should_load_checkpoint()
):
filepath_to_load = self._get_most_recently_modified_file_matching_pattern(
self.filepath
)
if filepath_to_load is not None and training_state.checkpoint_exists(
filepath_to_load
):
try:
# `filepath` may contain placeholders such as `{epoch:02d}`, and
# thus it attempts to load the most recently modified file with file
# name matching the pattern.
self.model.load_weights(filepath_to_load)
except (IOError, ValueError) as e:
raise ValueError(
"Error loading file from {}. Reason: {}".format(
filepath_to_load, e
)
)
def on_train_end(self, logs=None):
# pylint: disable=protected-access
if self.model._in_multi_worker_mode():
# In multi-worker training, on successful exit of training, delete the
# training state backup file that was saved for the purpose of worker
# recovery.
self._training_state.delete_backup()
# Restore the training state so the model is ready for next (possible)
# multi worker training.
del self._training_state
del self.model._training_state
def on_batch_end(self, batch, logs=None):
logs = logs or {}
if isinstance(self.save_freq, int):
self._samples_seen_since_last_saving += logs.get("size", 1)
if self._samples_seen_since_last_saving >= self.save_freq:
self._save_model(epoch=self._current_epoch, logs=logs)
self._samples_seen_since_last_saving = 0
def on_epoch_begin(self, epoch, logs=None):
self._current_epoch = epoch
def on_epoch_end(self, epoch, logs=None):
self.epochs_since_last_save += 1
# pylint: disable=protected-access
if self.save_freq == "epoch":
if self.model._in_multi_worker_mode():
# Exclude training state variables in user-requested checkpoint file.
with self._training_state.untrack_vars():
self._save_model(epoch=epoch, logs=logs)
else:
self._save_model(epoch=epoch, logs=logs)
if self.model._in_multi_worker_mode():
# For multi-worker training, back up the weights and current training
# state for possible future recovery.
# TODO(rchao): Call `back_up` at finer period such as N steps.
self._training_state.back_up(epoch)
def _save_model(self, epoch, logs):
"""Saves the model.
Arguments:
epoch: the epoch this iteration is in.
logs: the `logs` dict passed in to `on_batch_end` or `on_epoch_end`.
"""
logs = logs or {}
if (
isinstance(self.save_freq, int)
or self.epochs_since_last_save >= self.period
):
self.epochs_since_last_save = 0
filepath = self._get_file_path(epoch, logs)
if self.save_best_only:
                # Modified: skip monitoring/saving of the best model during the initial epochs (see `exclude_initial_epochs`).
if epoch > self.exclude_initial_epochs:
if epoch == self.exclude_initial_epochs + 1 and self.verbose > 0:
print(
f"\nModelCheckpoint has started monitoring {self.monitor} from epoch {epoch + 1}"
)
current = logs.get(self.monitor)
if current is None:
logging.warning(
"Can save best model only with %s available, " "skipping.",
self.monitor,
)
else:
if self.monitor_op(current, self.best):
if self.verbose > 0:
print(
"\nEpoch %05d: %s improved from %0.5f to %0.5f,"
" saving model to %s"
% (
epoch + 1,
self.monitor,
self.best,
current,
filepath,
)
)
self.best = current
if self.save_weights_only:
self.model.save_weights(filepath, overwrite=True)
else:
self.model.save(filepath, overwrite=True)
else:
if self.verbose > 0:
print(
"\nEpoch %05d: %s did not improve from %0.5f"
% (epoch + 1, self.monitor, self.best)
)
else:
if self.verbose > 0:
print("\nEpoch %05d: saving model to %s" % (epoch + 1, filepath))
if self.save_weights_only:
self.model.save_weights(filepath, overwrite=True)
else:
self.model.save(filepath, overwrite=True)
self._maybe_remove_file()
def _get_file_path(self, epoch, logs):
"""Returns the file path for checkpoint."""
# pylint: disable=protected-access
if (
not self.model._in_multi_worker_mode()
or multi_worker_util.should_save_checkpoint()
):
return self.filepath.format(epoch=epoch + 1, **logs)
else:
# If this is multi-worker training, and this worker should not
# save checkpoint, we use a temp filepath to store a dummy checkpoint, so
# it writes to a file that will be removed at the end of `_save_model()`
# call. This is because the SyncOnReadVariable needs to be synced across
# all the workers in order to be read, and all workers need to initiate
# that.
self._temp_file_dir = tempfile.mkdtemp()
extension = os.path.splitext(self.filepath)[1]
return os.path.join(self._temp_file_dir, "temp" + extension)
def _maybe_remove_file(self):
# Remove the checkpoint directory in multi-worker training where this worker
# should not checkpoint. It is a dummy directory previously saved for sync
# distributed training.
if (
self.model._in_multi_worker_mode()
and not multi_worker_util.should_save_checkpoint() # pylint: disable=protected-access
):
file_io.delete_recursively(self._temp_file_dir)
del self._temp_file_dir
def _get_most_recently_modified_file_matching_pattern(self, pattern):
"""Returns the most recently modified filepath matching pattern.
        Pattern may contain Python formatting placeholders. If
        `tf.train.latest_checkpoint()` does not return None, use that; otherwise,
        check for the most recently modified file that matches the pattern.
In the rare case where there are more than one pattern-matching file having
the same modified time that is most recent among all, return the filepath
that is largest (by `>` operator, lexicographically using the numeric
equivalents). This provides a tie-breaker when multiple files are most
recent. Note that a larger `filepath` can sometimes indicate a later time of
modification (for instance, when epoch/batch is used as formatting option),
        but not necessarily (when accuracy or loss is used). The tie-breaker is
        a best-effort attempt to return the most recent file and to avoid a
        nondeterministic result.
Modified time of a file is obtained with `os.path.getmtime()`.
This utility function is best demonstrated via an example:
```python
file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5'
test_dir = self.get_temp_dir()
path_pattern = os.path.join(test_dir, file_pattern)
file_paths = [
os.path.join(test_dir, file_name) for file_name in
['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.batch01epoch01.h5']
]
for file_path in file_paths:
# Write something to each of the files
self.assertEqual(
_get_most_recently_modified_file_matching_pattern(path_pattern),
file_paths[-1])
```
Arguments:
pattern: The file pattern that may optionally contain python placeholder
such as `{epoch:02d}`.
Returns:
        The most recently modified file's full filepath matching `pattern`. If
        `pattern` does not contain any placeholder, this returns the filepath
        that exactly matches `pattern`. Returns `None` if no match is found.
"""
dir_name = os.path.dirname(pattern)
base_name = os.path.basename(pattern)
base_name_regex = "^" + re.sub(r"{.*}", r".*", base_name) + "$"
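        # e.g. 'weights.{epoch:02d}.h5' -> '^weights..*.h5$'. The substitution is
        # greedy (several placeholders would collapse into a single '.*') and the
        # literal dots are left unescaped, so the resulting match is loose.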
# If tf.train.latest_checkpoint tells us there exists a latest checkpoint,
# use that as it is more robust than `os.path.getmtime()`.
latest_tf_checkpoint = checkpoint_management.latest_checkpoint(dir_name)
if latest_tf_checkpoint is not None and re.match(
base_name_regex, os.path.basename(latest_tf_checkpoint)
):
return latest_tf_checkpoint
latest_mod_time = 0
file_path_with_latest_mod_time = None
n_file_with_latest_mod_time = 0
file_path_with_largest_file_name = None
if file_io.file_exists(dir_name):
for file_name in os.listdir(dir_name):
# Only consider if `file_name` matches the pattern.
if re.match(base_name_regex, file_name):
file_path = os.path.join(dir_name, file_name)
mod_time = os.path.getmtime(file_path)
if (
# pacu/models/awsapi/codecommit.py
# generated by datamodel-codegen:
# filename: openapi.yaml
# timestamp: 2021-12-31T02:46:26+00:00
from __future__ import annotations
from datetime import datetime
from enum import Enum
from typing import Annotated, Any, List, Optional
from pydantic import BaseModel, Extra, Field
class ApprovalRuleTemplateNameRequiredException(BaseModel):
__root__: Any
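# datamodel-codegen collapses every CodeCommit error shape into a pydantic v1
# custom root type (`__root__: Any`); the exception models that follow simply
# subclass this first one, so they all accept an arbitrary payload.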
class InvalidApprovalRuleTemplateNameException(
ApprovalRuleTemplateNameRequiredException
):
pass
class ApprovalRuleTemplateDoesNotExistException(
ApprovalRuleTemplateNameRequiredException
):
pass
class MaximumRuleTemplatesAssociatedWithRepositoryException(
ApprovalRuleTemplateNameRequiredException
):
pass
class RepositoryNameRequiredException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidRepositoryNameException(ApprovalRuleTemplateNameRequiredException):
pass
class RepositoryDoesNotExistException(ApprovalRuleTemplateNameRequiredException):
pass
class EncryptionIntegrityChecksFailedException(
ApprovalRuleTemplateNameRequiredException
):
pass
class EncryptionKeyAccessDeniedException(ApprovalRuleTemplateNameRequiredException):
pass
class EncryptionKeyDisabledException(ApprovalRuleTemplateNameRequiredException):
pass
class EncryptionKeyNotFoundException(ApprovalRuleTemplateNameRequiredException):
pass
class EncryptionKeyUnavailableException(ApprovalRuleTemplateNameRequiredException):
pass
class RepositoryNamesRequiredException(ApprovalRuleTemplateNameRequiredException):
pass
class MaximumRepositoryNamesExceededException(
ApprovalRuleTemplateNameRequiredException
):
pass
class MergeOptionRequiredException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidMergeOptionException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidContinuationTokenException(ApprovalRuleTemplateNameRequiredException):
pass
class CommitRequiredException(ApprovalRuleTemplateNameRequiredException):
pass
class CommitDoesNotExistException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidCommitException(ApprovalRuleTemplateNameRequiredException):
pass
class TipsDivergenceExceededException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidMaxConflictFilesException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidMaxMergeHunksException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidConflictDetailLevelException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidConflictResolutionStrategyException(
ApprovalRuleTemplateNameRequiredException
):
pass
class MaximumFileContentToLoadExceededException(
ApprovalRuleTemplateNameRequiredException
):
pass
class MaximumItemsToCompareExceededException(ApprovalRuleTemplateNameRequiredException):
pass
class CommitIdsListRequiredException(ApprovalRuleTemplateNameRequiredException):
pass
class CommitIdsLimitExceededException(ApprovalRuleTemplateNameRequiredException):
pass
class ApprovalRuleTemplateNameAlreadyExistsException(
ApprovalRuleTemplateNameRequiredException
):
pass
class ApprovalRuleTemplateContentRequiredException(
ApprovalRuleTemplateNameRequiredException
):
pass
class InvalidApprovalRuleTemplateContentException(
ApprovalRuleTemplateNameRequiredException
):
pass
class InvalidApprovalRuleTemplateDescriptionException(
ApprovalRuleTemplateNameRequiredException
):
pass
class NumberOfRuleTemplatesExceededException(ApprovalRuleTemplateNameRequiredException):
pass
class BranchNameRequiredException(ApprovalRuleTemplateNameRequiredException):
pass
class BranchNameExistsException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidBranchNameException(ApprovalRuleTemplateNameRequiredException):
pass
class CommitIdRequiredException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidCommitIdException(ApprovalRuleTemplateNameRequiredException):
pass
class ParentCommitIdRequiredException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidParentCommitIdException(ApprovalRuleTemplateNameRequiredException):
pass
class ParentCommitDoesNotExistException(ApprovalRuleTemplateNameRequiredException):
pass
class ParentCommitIdOutdatedException(ApprovalRuleTemplateNameRequiredException):
pass
class BranchDoesNotExistException(ApprovalRuleTemplateNameRequiredException):
pass
class BranchNameIsTagNameException(ApprovalRuleTemplateNameRequiredException):
pass
class FileEntryRequiredException(ApprovalRuleTemplateNameRequiredException):
pass
class MaximumFileEntriesExceededException(ApprovalRuleTemplateNameRequiredException):
pass
class PutFileEntryConflictException(ApprovalRuleTemplateNameRequiredException):
pass
class SourceFileOrContentRequiredException(ApprovalRuleTemplateNameRequiredException):
pass
class FileContentAndSourceFileSpecifiedException(
ApprovalRuleTemplateNameRequiredException
):
pass
class PathRequiredException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidPathException(ApprovalRuleTemplateNameRequiredException):
pass
class SamePathRequestException(ApprovalRuleTemplateNameRequiredException):
pass
class FileDoesNotExistException(ApprovalRuleTemplateNameRequiredException):
pass
class FileContentSizeLimitExceededException(ApprovalRuleTemplateNameRequiredException):
pass
class FolderContentSizeLimitExceededException(
ApprovalRuleTemplateNameRequiredException
):
pass
class InvalidDeletionParameterException(ApprovalRuleTemplateNameRequiredException):
pass
class RestrictedSourceFileException(ApprovalRuleTemplateNameRequiredException):
pass
class FileModeRequiredException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidFileModeException(ApprovalRuleTemplateNameRequiredException):
pass
class NameLengthExceededException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidEmailException(ApprovalRuleTemplateNameRequiredException):
pass
class CommitMessageLengthExceededException(ApprovalRuleTemplateNameRequiredException):
pass
class NoChangeException(ApprovalRuleTemplateNameRequiredException):
pass
class FileNameConflictsWithDirectoryNameException(
ApprovalRuleTemplateNameRequiredException
):
pass
class DirectoryNameConflictsWithFileNameException(
ApprovalRuleTemplateNameRequiredException
):
pass
class FilePathConflictsWithSubmodulePathException(
ApprovalRuleTemplateNameRequiredException
):
pass
class ClientRequestTokenRequiredException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidClientRequestTokenException(ApprovalRuleTemplateNameRequiredException):
pass
class IdempotencyParameterMismatchException(ApprovalRuleTemplateNameRequiredException):
pass
class ReferenceNameRequiredException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidReferenceNameException(ApprovalRuleTemplateNameRequiredException):
pass
class ReferenceDoesNotExistException(ApprovalRuleTemplateNameRequiredException):
pass
class ReferenceTypeNotSupportedException(ApprovalRuleTemplateNameRequiredException):
pass
class TitleRequiredException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidTitleException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidDescriptionException(ApprovalRuleTemplateNameRequiredException):
pass
class TargetsRequiredException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidTargetsException(ApprovalRuleTemplateNameRequiredException):
pass
class TargetRequiredException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidTargetException(ApprovalRuleTemplateNameRequiredException):
pass
class MultipleRepositoriesInPullRequestException(
ApprovalRuleTemplateNameRequiredException
):
pass
class MaximumOpenPullRequestsExceededException(
ApprovalRuleTemplateNameRequiredException
):
pass
class SourceAndDestinationAreSameException(ApprovalRuleTemplateNameRequiredException):
pass
class ApprovalRuleNameRequiredException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidApprovalRuleNameException(ApprovalRuleTemplateNameRequiredException):
pass
class ApprovalRuleNameAlreadyExistsException(ApprovalRuleTemplateNameRequiredException):
pass
class ApprovalRuleContentRequiredException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidApprovalRuleContentException(ApprovalRuleTemplateNameRequiredException):
pass
class NumberOfRulesExceededException(ApprovalRuleTemplateNameRequiredException):
pass
class PullRequestDoesNotExistException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidPullRequestIdException(ApprovalRuleTemplateNameRequiredException):
pass
class PullRequestIdRequiredException(ApprovalRuleTemplateNameRequiredException):
pass
class PullRequestAlreadyClosedException(ApprovalRuleTemplateNameRequiredException):
pass
class RepositoryNameExistsException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidRepositoryDescriptionException(ApprovalRuleTemplateNameRequiredException):
pass
class RepositoryLimitExceededException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidTagsMapException(ApprovalRuleTemplateNameRequiredException):
pass
class TooManyTagsException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidSystemTagUsageException(ApprovalRuleTemplateNameRequiredException):
pass
class TagPolicyException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidConflictResolutionException(ApprovalRuleTemplateNameRequiredException):
pass
class ManualMergeRequiredException(ApprovalRuleTemplateNameRequiredException):
pass
class MaximumConflictResolutionEntriesExceededException(
ApprovalRuleTemplateNameRequiredException
):
pass
class MultipleConflictResolutionEntriesException(
ApprovalRuleTemplateNameRequiredException
):
pass
class ReplacementTypeRequiredException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidReplacementTypeException(ApprovalRuleTemplateNameRequiredException):
pass
class ReplacementContentRequiredException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidReplacementContentException(ApprovalRuleTemplateNameRequiredException):
pass
class ConcurrentReferenceUpdateException(ApprovalRuleTemplateNameRequiredException):
pass
class ApprovalRuleTemplateInUseException(ApprovalRuleTemplateNameRequiredException):
pass
class DefaultBranchCannotBeDeletedException(ApprovalRuleTemplateNameRequiredException):
pass
class CommentDoesNotExistException(ApprovalRuleTemplateNameRequiredException):
pass
class CommentIdRequiredException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidCommentIdException(ApprovalRuleTemplateNameRequiredException):
pass
class CommentDeletedException(ApprovalRuleTemplateNameRequiredException):
pass
class CannotDeleteApprovalRuleFromTemplateException(
ApprovalRuleTemplateNameRequiredException
):
pass
class InvalidPullRequestEventTypeException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidActorArnException(ApprovalRuleTemplateNameRequiredException):
pass
class ActorDoesNotExistException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidMaxResultsException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidRevisionIdException(ApprovalRuleTemplateNameRequiredException):
pass
class RevisionIdRequiredException(ApprovalRuleTemplateNameRequiredException):
pass
class RevisionNotCurrentException(ApprovalRuleTemplateNameRequiredException):
pass
class BlobIdRequiredException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidBlobIdException(ApprovalRuleTemplateNameRequiredException):
pass
class BlobIdDoesNotExistException(ApprovalRuleTemplateNameRequiredException):
pass
class FileTooLargeException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidReactionUserArnException(ApprovalRuleTemplateNameRequiredException):
pass
class RepositoryNotAssociatedWithPullRequestException(
ApprovalRuleTemplateNameRequiredException
):
pass
class CommitIdDoesNotExistException(ApprovalRuleTemplateNameRequiredException):
pass
class PathDoesNotExistException(ApprovalRuleTemplateNameRequiredException):
pass
class FolderDoesNotExistException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidDestinationCommitSpecifierException(
ApprovalRuleTemplateNameRequiredException
):
pass
class InvalidSourceCommitSpecifierException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidPullRequestStatusException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidAuthorArnException(ApprovalRuleTemplateNameRequiredException):
pass
class AuthorDoesNotExistException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidSortByException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidOrderException(ApprovalRuleTemplateNameRequiredException):
pass
class ResourceArnRequiredException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidResourceArnException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidTargetBranchException(ApprovalRuleTemplateNameRequiredException):
pass
class TipOfSourceReferenceIsDifferentException(
ApprovalRuleTemplateNameRequiredException
):
pass
class PullRequestApprovalRulesNotSatisfiedException(
ApprovalRuleTemplateNameRequiredException
):
pass
class InvalidOverrideStatusException(ApprovalRuleTemplateNameRequiredException):
pass
class OverrideStatusRequiredException(ApprovalRuleTemplateNameRequiredException):
pass
class OverrideAlreadySetException(ApprovalRuleTemplateNameRequiredException):
pass
class CommentContentRequiredException(ApprovalRuleTemplateNameRequiredException):
pass
class CommentContentSizeLimitExceededException(
ApprovalRuleTemplateNameRequiredException
):
pass
class InvalidFileLocationException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidRelativeFileVersionEnumException(
ApprovalRuleTemplateNameRequiredException
):
pass
class InvalidFilePositionException(ApprovalRuleTemplateNameRequiredException):
pass
class BeforeCommitIdAndAfterCommitIdAreSameException(
ApprovalRuleTemplateNameRequiredException
):
pass
class InvalidReactionValueException(ApprovalRuleTemplateNameRequiredException):
pass
class ReactionValueRequiredException(ApprovalRuleTemplateNameRequiredException):
pass
class ReactionLimitExceededException(ApprovalRuleTemplateNameRequiredException):
pass
class FileContentRequiredException(ApprovalRuleTemplateNameRequiredException):
pass
class SameFileContentException(ApprovalRuleTemplateNameRequiredException):
pass
class RepositoryTriggersListRequiredException(
ApprovalRuleTemplateNameRequiredException
):
pass
class MaximumRepositoryTriggersExceededException(
ApprovalRuleTemplateNameRequiredException
):
pass
class InvalidRepositoryTriggerNameException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidRepositoryTriggerDestinationArnException(
ApprovalRuleTemplateNameRequiredException
):
pass
class InvalidRepositoryTriggerRegionException(
ApprovalRuleTemplateNameRequiredException
):
pass
class InvalidRepositoryTriggerCustomDataException(
ApprovalRuleTemplateNameRequiredException
):
pass
class MaximumBranchesExceededException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidRepositoryTriggerBranchNameException(
ApprovalRuleTemplateNameRequiredException
):
pass
class InvalidRepositoryTriggerEventsException(
ApprovalRuleTemplateNameRequiredException
):
pass
class RepositoryTriggerNameRequiredException(ApprovalRuleTemplateNameRequiredException):
pass
class RepositoryTriggerDestinationArnRequiredException(
ApprovalRuleTemplateNameRequiredException
):
pass
class RepositoryTriggerBranchNameListRequiredException(
ApprovalRuleTemplateNameRequiredException
):
pass
class RepositoryTriggerEventsListRequiredException(
ApprovalRuleTemplateNameRequiredException
):
pass
class TagsMapRequiredException(ApprovalRuleTemplateNameRequiredException):
pass
class TagKeysListRequiredException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidTagKeysListException(ApprovalRuleTemplateNameRequiredException):
pass
class InvalidRuleContentSha256Exception(ApprovalRuleTemplateNameRequiredException):
pass
class CommentNotCreatedByCallerException(ApprovalRuleTemplateNameRequiredException):
pass
class ApprovalRuleDoesNotExistException(ApprovalRuleTemplateNameRequiredException):
pass
class CannotModifyApprovalRuleFromTemplateException(
ApprovalRuleTemplateNameRequiredException
):
pass
class InvalidApprovalStateException(ApprovalRuleTemplateNameRequiredException):
pass
class ApprovalStateRequiredException(ApprovalRuleTemplateNameRequiredException):
pass
class PullRequestCannotBeApprovedByAuthorException(
ApprovalRuleTemplateNameRequiredException
):
pass
class MaximumNumberOfApprovalsExceededException(
ApprovalRuleTemplateNameRequiredException
):
pass
class InvalidPullRequestStatusUpdateException(
ApprovalRuleTemplateNameRequiredException
):
pass
class PullRequestStatusRequiredException(ApprovalRuleTemplateNameRequiredException):
pass
class AccountId(BaseModel):
__root__: str
class AdditionalData(AccountId):
pass
class Arn(AccountId):
pass
class ApprovalState(Enum):
APPROVE = 'APPROVE'
REVOKE = 'REVOKE'
class Approval(BaseModel):
"""
Returns information about a specific approval on a pull request.
"""
userArn: Optional[Arn] = None
approvalState: Optional[ApprovalState] = None
class ApprovalList(BaseModel):
__root__: List[Approval]
class ApprovalRuleId(AccountId):
pass
class ApprovalRuleName(BaseModel):
__root__: Annotated[str, Field(max_length=100, min_length=1)]
class ApprovalRuleContent(BaseModel):
__root__: Annotated[str, Field(max_length=3000, min_length=1)]
class RuleContentSha256(AccountId):
pass
class LastModifiedDate(BaseModel):
__root__: datetime
class CreationDate(LastModifiedDate):
pass
class ApprovalRuleEventMetadata(BaseModel):
"""
Returns information about an event for an approval rule.
"""
approvalRuleName: Optional[ApprovalRuleName] = None
approvalRuleId: Optional[ApprovalRuleId] = None
approvalRuleContent: Optional[ApprovalRuleContent] = None
class RevisionId(AccountId):
pass
class OverrideStatus(Enum):
OVERRIDE = 'OVERRIDE'
REVOKE = 'REVOKE'
class ApprovalRuleOverriddenEventMetadata(BaseModel):
"""
Returns information about an override event for approval rules for a pull request.
"""
revisionId: Optional[RevisionId] = None
overrideStatus: Optional[OverrideStatus] = None
class ApprovalRuleTemplateId(AccountId):
pass
class ApprovalRuleTemplateName(ApprovalRuleName):
pass
class ApprovalRuleTemplateDescription(BaseModel):
__root__: Annotated[str, Field(max_length=1000, min_length=0)]
class ApprovalRuleTemplateContent(ApprovalRuleContent):
pass
class ApprovalRuleTemplate(BaseModel):
"""
Returns information about an approval rule template.
"""
approvalRuleTemplateId: Optional[ApprovalRuleTemplateId] = None
approvalRuleTemplateName: Optional[ApprovalRuleTemplateName] = None
approvalRuleTemplateDescription: Optional[ApprovalRuleTemplateDescription] = None
approvalRuleTemplateContent: Optional[ApprovalRuleTemplateContent] = None
ruleContentSha256: Optional[RuleContentSha256] = None
lastModifiedDate: Optional[LastModifiedDate] = None
creationDate: Optional[CreationDate] = None
lastModifiedUser: Optional[Arn] = None
class ApprovalRuleTemplateNameList(BaseModel):
__root__: List[ApprovalRuleTemplateName]
class ApprovalRulesNotSatisfiedList(BaseModel):
__root__: List[ApprovalRuleName]
class ApprovalRulesSatisfiedList(ApprovalRulesNotSatisfiedList):
pass
class ApprovalStateChangedEventMetadata(BaseModel):
"""
Returns information about a change in the approval state for a pull request.
"""
revisionId: Optional[RevisionId] = None
approvalStatus: Optional[ApprovalState] = None
class Approved(BaseModel):
__root__: bool
class RepositoryName(BaseModel):
__root__: Annotated[str, Field(max_length=100, min_length=1, regex='[\\w\\.-]+')]
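# Hedged usage sketch for these pydantic v1 root models (values are illustrative):
#   RepositoryName.parse_obj('my-repo.v2')   # validates length and regex bounds
#   RepositoryName.parse_obj('')             # raises pydantic.ValidationError
#                                            # (min_length=1 is violated)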
class ErrorCode(AccountId):
pass
class ErrorMessage(AccountId):
pass
class BatchAssociateApprovalRuleTemplateWithRepositoriesError(BaseModel):
"""
Returns information about errors in a BatchAssociateApprovalRuleTemplateWithRepositories operation.
"""
repositoryName: Optional[RepositoryName] = None
errorCode: Optional[ErrorCode] = None
errorMessage: Optional[ErrorMessage] = None
class BatchAssociateApprovalRuleTemplateWithRepositoriesErrorsList(BaseModel):
__root__: List[BatchAssociateApprovalRuleTemplateWithRepositoriesError]
class RepositoryNameList(BaseModel):
__root__: List[RepositoryName]
class Path(AccountId):
pass
class ExceptionName(AccountId):
pass
class Message(AccountId):
pass
class BatchDescribeMergeConflictsError(BaseModel):
"""
Returns information about errors in a BatchDescribeMergeConflicts operation.
"""
filePath: Path
exceptionName: ExceptionName
message: Message
class BatchDescribeMergeConflictsErrors(BaseModel):
__root__: List[BatchDescribeMergeConflictsError]
class CommitName(AccountId):
pass
class MergeOptionTypeEnum(Enum):
FAST_FORWARD_MERGE = 'FAST_FORWARD_MERGE'
SQUASH_MERGE = 'SQUASH_MERGE'
THREE_WAY_MERGE = 'THREE_WAY_MERGE'
class MaxResults(BaseModel):
__root__: int
class FilePaths(BaseModel):
__root__: List[Path]
class ConflictDetailLevelTypeEnum(Enum):
FILE_LEVEL = 'FILE_LEVEL'
LINE_LEVEL = 'LINE_LEVEL'
class ConflictResolutionStrategyTypeEnum(Enum):
NONE = 'NONE'
ACCEPT_SOURCE = 'ACCEPT_SOURCE'
ACCEPT_DESTINATION = 'ACCEPT_DESTINATION'
AUTOMERGE = 'AUTOMERGE'
class NextToken(AccountId):
pass
class ObjectId(AccountId):
pass
class BatchDisassociateApprovalRuleTemplateFromRepositoriesError(
BatchAssociateApprovalRuleTemplateWithRepositoriesError
):
"""
Returns information about errors in a BatchDisassociateApprovalRuleTemplateFromRepositories operation.
"""
pass
class BatchDisassociateApprovalRuleTemplateFromRepositoriesErrorsList(BaseModel):
__root__: List[BatchDisassociateApprovalRuleTemplateFromRepositoriesError]
class BatchGetCommitsError(BaseModel):
"""
Returns information about errors in a BatchGetCommits operation.
"""
commitId: Optional[ObjectId] = None
errorCode: Optional[ErrorCode] = None
errorMessage: Optional[ErrorMessage] = None
class BatchGetCommitsErrorsList(BaseModel):
__root__: List[BatchGetCommitsError]
class CommitIdsInputList(BaseModel):
__root__: List[ObjectId]
class RepositoryNotFoundList(RepositoryNameList):
pass
class Mode(AccountId):
pass
class BlobMetadata(BaseModel):
"""
Returns information about a specific Git blob object.
"""
blobId: Optional[ObjectId] = None
path: Optional[Path] = None
mode: Optional[Mode] = None
class BranchName(BaseModel):
__root__: Annotated[str, Field(max_length=256, min_length=1)]
class CommitId(AccountId):
pass
class BranchInfo(BaseModel):
"""
Returns information about a branch.
"""
branchName: Optional[BranchName] = None
commitId: Optional[CommitId] = None
class BranchNameList(BaseModel):
__root__: List[BranchName]
class ReactionValue(AccountId):
pass
class CallerReactions(BaseModel):
__root__: List[ReactionValue]
class CapitalBoolean(Approved):
pass
class ChangeTypeEnum(Enum):
A = 'A'
M = 'M'
D = 'D'
class ClientRequestToken(AccountId):
pass
class CloneUrlHttp(AccountId):
pass
class CloneUrlSsh(AccountId):
pass
class CommentId(AccountId):
pass
class Content(AccountId):
pass
class IsCommentDeleted(Approved):
pass
class ReactionCountsMap(BaseModel):
pass
class Config:
extra = Extra.allow
class Comment(BaseModel):
"""
Returns information about a specific comment.
"""
commentId: Optional[CommentId] = None
content: Optional[Content] = None
inReplyTo: Optional[CommentId] = None
creationDate: Optional[CreationDate] = None
lastModifiedDate: Optional[LastModifiedDate] = None
authorArn: Optional[Arn] = None
deleted: Optional[IsCommentDeleted] = None
clientRequestToken: Optional[ClientRequestToken] = None
callerReactions: Optional[CallerReactions] = None
reactionCounts: Optional[ReactionCountsMap] = None
class Comments(BaseModel):
__root__: List[Comment]
class PullRequestId(AccountId):
pass
class ParentList(CommitIdsInputList):
pass
class NumberOfConflicts(MaxResults):
pass
class IsBinaryFile(BaseModel):
"""
Information about whether a file is binary or textual in a merge or pull request operation.
"""
source: Optional[CapitalBoolean] = None
destination: Optional[CapitalBoolean] = None
base: Optional[CapitalBoolean] = None
class IsContentConflict(Approved):
pass
class IsFileModeConflict(Approved):
pass
class IsObjectTypeConflict(Approved):
pass
class MergeOperations(BaseModel):
"""
Information about the file operation conflicts in a merge operation.
"""
source: Optional[ChangeTypeEnum] = None
destination: Optional[ChangeTypeEnum] = None
class Count(MaxResults):
pass
class Name(AccountId):
pass
class Email(AccountId):
pass
class KeepEmptyFolders(Approved):
pass
class Title(BaseModel):
__root__: Annotated[str, Field(max_length=150)]
class Description(BaseModel):
__root__: Annotated[str, Field(max_length=10240)]
class RepositoryDescription(BaseModel):
__root__: Annotated[str, Field(max_length=1000)]
class TagsMap(BaseModel):
pass
class Config:
extra = Extra.allow
class Date(AccountId):
pass
class DeleteFileEntry(BaseModel):
"""
A file that is deleted as part of a commit.
"""
filePath: Path
class RepositoryId(AccountId):
pass
class PullRequestEventType(Enum):
PULL_REQUEST_CREATED = 'PULL_REQUEST_CREATED'
PULL_REQUEST_STATUS_CHANGED = 'PULL_REQUEST_STATUS_CHANGED'
PULL_REQUEST_SOURCE_REFERENCE_UPDATED = 'PULL_REQUEST_SOURCE_REFERENCE_UPDATED'
PULL_REQUEST_MERGE_STATE_CHANGED = 'PULL_REQUEST_MERGE_STATE_CHANGED'
PULL_REQUEST_APPROVAL_RULE_CREATED = 'PULL_REQUEST_APPROVAL_RULE_CREATED'
PULL_REQUEST_APPROVAL_RULE_UPDATED = 'PULL_REQUEST_APPROVAL_RULE_UPDATED'
PULL_REQUEST_APPROVAL_RULE_DELETED = 'PULL_REQUEST_APPROVAL_RULE_DELETED'
PULL_REQUEST_APPROVAL_RULE_OVERRIDDEN = 'PULL_REQUEST_APPROVAL_RULE_OVERRIDDEN'
PULL_REQUEST_APPROVAL_STATE_CHANGED = 'PULL_REQUEST_APPROVAL_STATE_CHANGED'
class Difference(BaseModel):
"""
Returns information about a set of differences for a commit specifier.
"""
beforeBlob: Optional[BlobMetadata] = None
afterBlob: Optional[BlobMetadata] = None
changeType: Optional[ChangeTypeEnum] = None
class DifferenceList(BaseModel):
__root__: List[Difference]
class Overridden(Approved):
pass
class EventDate(LastModifiedDate):
pass
class FileModeTypeEnum(Enum):
EXECUTABLE = 'EXECUTABLE'
NORMAL = 'NORMAL'
SYMLINK = 'SYMLINK'
class File(BaseModel):
"""
Returns information about a file in a repository.
"""
blobId: Optional[ObjectId] = None
absolutePath: Optional[Path] = None
relativePath: Optional[Path] = None
fileMode: Optional[FileModeTypeEnum] = None
class FileContent(BaseModel):
__root__: Annotated[str, Field(max_length=6291456)]
class FileList(BaseModel):
__root__: List[File]
class FileMetadata(BaseModel):
"""
A file to be added, updated, or deleted as part of a commit.
"""
absolutePath: Optional[Path] = None
blobId: Optional[ObjectId] = None
fileMode: Optional[FileModeTypeEnum] = None
class FileSize(MaxResults):
pass
class Folder(BaseModel):
"""
Returns information about a folder in a repository.
"""
treeId: Optional[ObjectId] = None
absolutePath: Optional[Path] = None
relativePath: Optional[Path] = None
class FolderList(BaseModel):
__root__: List[Folder]
class Blob(AccountId):
pass
class Limit(MaxResults):
pass
class ObjectSize(MaxResults):
pass
class IsMergeable(Approved):
pass
class MergeOptions(BaseModel):
__root__: List[MergeOptionTypeEnum]
class RepositoryTriggersConfigurationId(AccountId):
pass
class HunkContent(AccountId):
pass
class IsHunkConflict(Approved):
pass
class IsMerged(Approved):
pass
class IsMove(Approved):
pass
class LineNumber(MaxResults):
pass
class PullRequestStatusEnum(Enum):
OPEN = 'OPEN'
CLOSED = 'CLOSED'
class PullRequestIdList(BaseModel):
__root__: List[PullRequestId]
class SortByEnum(Enum):
repositoryName = 'repositoryName'
lastModifiedDate = 'lastModifiedDate'
class OrderEnum(Enum):
ascending = 'ascending'
descending = 'descending'
class ResourceArn(AccountId):
pass
class Position(MaxResults):
pass
class RelativeFileVersionEnum(Enum):
BEFORE = 'BEFORE'
AFTER = 'AFTER'
class MergeHunkDetail(BaseModel):
"""
    Information about the details of a merge hunk that contains a conflict in
# Predict approximated geometric range
delta_r = gnss_meas[i, 2:5].T - est_r_ea_e
approx_range = np.sqrt(delta_r.T * delta_r)
# Calculate frame rotation during signal transit time
ctm_e_i = ecef_to_eci_ctm(OMEGA_ie, approx_range, c)
# Calculate geometric range
delta_r = ctm_e_i * gnss_meas[i, 2:5].T - est_r_ea_e
rangex = np.sqrt(delta_r.T * delta_r)
# Calculate line of sight
u_as_e = delta_r / rangex
# Predict pseudo-range rate
range_rate = u_as_e.T * (ctm_e_i * (gnss_meas[i, 5:8].T + omega_ie_matrix * gnss_meas[i, 2:5].T) -
(x_pred[0:3, 0] + omega_ie_matrix * est_r_ea_e))
pred_meas[i, 0] = range_rate + x_pred[3, 0]
# Predict line of sight and deploy in measurement matrix
geo_matrix[i, 0:3] = -u_as_e.T
geo_matrix[i, 3] = 1
# End of For Loop to Compute the Geometric matrix
# Unweighted least-squares solution
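    # i.e. x_est = x_pred + inv(H^T H) * H^T * dz, with H the geometry matrix
    # assembled above and dz the pseudo-range-rate innovations.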
x_est = x_pred + (geo_matrix.T * geo_matrix).I * geo_matrix.T * (gnss_meas[:, 1] - pred_meas)
# Test convergence
tolerance = np.sqrt((x_est - x_pred).T * (x_est - x_pred))
# Set predictions to estimates for next iteration
x_pred = x_est
# End of While Loop (LS Estimation Converged)
# Set outputs to estimates
est_v_ea_e[:, 0] = x_est[0:3, 0]
est_clock[0, 1] = x_est[3, 0]
return est_r_ea_e, est_v_ea_e, est_clock
# End of Computing GNSS Least Square Positions and Velocities
'''
---------------------------------------------------------------------------
8. Initialize the State Estimate Covariance Matrix, P for LC_EKF and TC_EKF
---------------------------------------------------------------------------
'''
def init_p_matrix(tightness, ekf_config):
if tightness == 'loose':
# Initialize error covariance matrix
p_matrix = np.zeros((15, 15))
# Determine each element of the covariance matrix
p_matrix[0:3, 0:3] = np.eye(3) * ekf_config.init_pos_unc ** 2
p_matrix[3:6, 3:6] = np.eye(3) * ekf_config.init_vel_unc ** 2
p_matrix[6:9, 6:9] = np.eye(3) * ekf_config.init_att_unc ** 2
p_matrix[9:12, 9:12] = np.eye(3) * ekf_config.init_b_a_unc ** 2
p_matrix[12:15, 12:15] = np.eye(3) * ekf_config.init_b_g_unc ** 2
elif tightness == 'tight':
# Initialize error covariance matrix
p_matrix = np.zeros((17, 17))
# Determine each element of the covariance matrix
p_matrix[0:3, 0:3] = np.eye(3) * ekf_config.init_pos_unc ** 2
p_matrix[3:6, 3:6] = np.eye(3) * ekf_config.init_vel_unc ** 2
p_matrix[6:9, 6:9] = np.eye(3) * ekf_config.init_att_unc ** 2
p_matrix[9:12, 9:12] = np.eye(3) * ekf_config.init_b_a_unc ** 2
p_matrix[12:15, 12:15] = np.eye(3) * ekf_config.init_b_g_unc ** 2
p_matrix[15, 15] = ekf_config.init_clock_offset_unc ** 2
p_matrix[16, 16] = ekf_config.init_clock_drift_unc ** 2
return p_matrix
# End of Initializing Single EKF Matrix
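# A minimal usage sketch (not from the original module): `ekf_config` only needs
# the attributes read above, so a SimpleNamespace with illustrative, assumed
# uncertainty values is enough to exercise init_p_matrix.
if __name__ == "__main__":  # illustration only
    from types import SimpleNamespace
    _demo_cfg = SimpleNamespace(init_pos_unc=10.0, init_vel_unc=0.1,
                                init_att_unc=0.01, init_b_a_unc=9.81e-3,
                                init_b_g_unc=1.0e-4,
                                init_clock_offset_unc=10.0,
                                init_clock_drift_unc=0.1)
    assert init_p_matrix('loose', _demo_cfg).shape == (15, 15)
    assert init_p_matrix('tight', _demo_cfg).shape == (17, 17)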
'''
------------------------------------------------------------------
9. Initialize the State Estimate Covariance Matrix, P for Dual EKF
------------------------------------------------------------------
'''
def init_dual_p_matrix(lc_ekf_config, tc_ekf_config):
# Initialize error covariance matrix
lc_p_matrix = np.zeros((15, 15))
tc_p_matrix = np.zeros((17, 17))
# Determine each element of the covariance matrix
# For loosely coupled EKF
lc_p_matrix[0:3, 0:3] = np.eye(3) * lc_ekf_config.init_pos_unc ** 2
lc_p_matrix[3:6, 3:6] = np.eye(3) * lc_ekf_config.init_vel_unc ** 2
lc_p_matrix[6:9, 6:9] = np.eye(3) * lc_ekf_config.init_att_unc ** 2
lc_p_matrix[9:12, 9:12] = np.eye(3) * lc_ekf_config.init_b_a_unc ** 2
lc_p_matrix[12:15, 12:15] = np.eye(3) * lc_ekf_config.init_b_g_unc ** 2
# For tightly coupled EKF
tc_p_matrix[0:3, 0:3] = np.eye(3) * tc_ekf_config.init_pos_unc ** 2
tc_p_matrix[3:6, 3:6] = np.eye(3) * tc_ekf_config.init_vel_unc ** 2
tc_p_matrix[6:9, 6:9] = np.eye(3) * tc_ekf_config.init_att_unc ** 2
tc_p_matrix[9:12, 9:12] = np.eye(3) * tc_ekf_config.init_b_a_unc ** 2
tc_p_matrix[12:15, 12:15] = np.eye(3) * tc_ekf_config.init_b_g_unc ** 2
tc_p_matrix[15, 15] = tc_ekf_config.init_clock_offset_unc ** 2
tc_p_matrix[16, 16] = tc_ekf_config.init_clock_drift_unc ** 2
return lc_p_matrix, tc_p_matrix
# End of Initializing Dual EKF Covariance Matrices
'''
------------------------------------------------------------
10. Calculate Specific Forces and Angular Rates from the IMU
------------------------------------------------------------
'''
def kinematics_ecef(tau_i, ctm_b_e, old_ctm_b_e, v_eb_e, old_v_eb_e, r_eb_e):
# Allocate the alpha_ib_b vector
alpha_ib_b = np.nan * np.matrix([[1], [1], [1]])
if tau_i > 0:
# Determine the Earth rotation over the update interval
[ctm_earth, alpha_ie] = c_earth(tau_i)
# Obtain coordinate transformation matrix from the old attitude (w.r.t. an inertial frame) to the new
ctm_old_new = ctm_b_e.T * ctm_earth * old_ctm_b_e
# Calculate the approximate angular rate w.r.t. an inertial frame
alpha_ib_b[0, 0] = 0.5 * (ctm_old_new[1, 2] - ctm_old_new[2, 1])
alpha_ib_b[1, 0] = 0.5 * (ctm_old_new[2, 0] - ctm_old_new[0, 2])
alpha_ib_b[2, 0] = 0.5 * (ctm_old_new[0, 1] - ctm_old_new[1, 0])
# Calculate and apply the scaling factor
scale = np.arccos(0.5 * (ctm_old_new[0, 0] + ctm_old_new[1, 1] + ctm_old_new[2, 2] - 1.0))
if scale > 2E-05: # scaling factor is 1 if "scale" is less than this minimum limit.
alpha_ib_b = alpha_ib_b * scale / np.sin(scale)
# Calculate the angular rate using
omega_ib_b = alpha_ib_b / tau_i
# Calculate the specific force resolved about ECEF-frame axes
f_ib_e = ((v_eb_e - old_v_eb_e) / tau_i) - gravity_ecef(r_eb_e) + 2 * skew_sym(
np.matrix([[0], [0], [OMEGA_ie]])) * old_v_eb_e
# Calculate the average body-to-ECEF-frame coordinate transformation matrix over the update interval
mag_alpha = np.sqrt(alpha_ib_b.T * alpha_ib_b)
alpha_ib_b_matrix = skew_sym(alpha_ib_b)
if mag_alpha > 1.0E-8:
term_1 = ((1 - np.cos(mag_alpha[0, 0])) / (mag_alpha[0, 0] ** 2)) * alpha_ib_b_matrix
term_2 = ((1 - np.sin(mag_alpha[0, 0]) / mag_alpha[0, 0]) / (mag_alpha[0, 0] ** 2)) * (alpha_ib_b_matrix *
alpha_ib_b_matrix)
term_3 = 0.5 * skew_sym(np.matrix([[0], [0], [alpha_ie]])) * tau_i
            # Note: the Earth-rotation correction enters with a minus sign, matching
            # the else-branch below and the same expression in nav_eqs_ecef().
            ave_ctm_b_e = old_ctm_b_e * (np.matrix(np.eye(3)) + term_1 + term_2) - term_3 * old_ctm_b_e
else:
ave_ctm_b_e = old_ctm_b_e - 0.5 * skew_sym(np.matrix([[0], [0], [alpha_ie]])) * old_ctm_b_e * tau_i
# End of "if" mag_alpha
# Transform specific force to body-frame resolving axes
f_ib_b = ave_ctm_b_e.I * f_ib_e
else:
# If time interval is zero, set angular rate and specific force to zero
omega_ib_b = np.matrix(np.zeros((3, 1)))
f_ib_b = np.matrix(np.zeros((3, 1)))
# End of "if" tau_i
return f_ib_b, omega_ib_b
# End of Calculating Specific Forces and Angular Rates
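# Note: kinematics_ecef() is the inverse of the strapdown update in
# nav_eqs_ecef() below. Given two consecutive truth states it recovers the
# body-frame specific force f_ib_b and angular rate omega_ib_b that an ideal
# IMU would have measured over tau_i; imu_model() then corrupts those with the
# configured error model.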
'''
----------------------------
11. Simulating the IMU Model
----------------------------
'''
def imu_model(tau_i, true_f_ib_b, true_omega_ib_b, imu_config, old_quant_residuals):
# Generate noise for accelerometer and gyroscope
if tau_i > 0:
accel_noise = np.matrix(rnd.randn(3, 1)) * imu_config.accel_noise_root_PSD / np.sqrt(tau_i)
gyro_noise = np.matrix(rnd.randn(3, 1)) * imu_config.gyro_noise_root_PSD / np.sqrt(tau_i)
else:
accel_noise = np.matrix([0, 0, 0]).T
gyro_noise = np.matrix([0, 0, 0]).T
# End of If tau_i
# Calculate accelerometer and gyro outputs
uq_f_ib_b = imu_config.b_a + (np.matrix(np.eye(3)) + imu_config.M_a) * true_f_ib_b + accel_noise
uq_omega_ib_b = imu_config.b_g + imu_config.G_g * true_f_ib_b + gyro_noise + (np.matrix(np.eye(3)) +
imu_config.M_g) * true_omega_ib_b
# Quantize accelerometer outputs
quant_residuals = np.nan * np.matrix(np.ones((6, 1)))
if imu_config.accel_quant_level > 0:
meas_f_ib_b = imu_config.accel_quant_level * np.round((uq_f_ib_b + old_quant_residuals[0:3, 0]) /
imu_config.accel_quant_level)
quant_residuals[0:3, 0] = uq_f_ib_b + old_quant_residuals[0:3, 0] - meas_f_ib_b
else:
meas_f_ib_b = uq_f_ib_b
quant_residuals[0:3, 0] = np.matrix([0, 0, 0]).T
# End of If IMU_errors.accel_quant_level
# Quantize gyro outputs
if imu_config.gyro_quant_level > 0:
meas_omega_ib_b = imu_config.gyro_quant_level * np.round((uq_omega_ib_b + old_quant_residuals[3:6, 0]) /
imu_config.gyro_quant_level)
quant_residuals[3:6, 0] = uq_omega_ib_b + old_quant_residuals[3:6, 0] - meas_omega_ib_b
else:
meas_omega_ib_b = uq_omega_ib_b
quant_residuals[3:6, 0] = np.matrix([0, 0, 0]).T
return meas_f_ib_b, meas_omega_ib_b, quant_residuals
# End of Simulating IMU Model
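# Hedged usage sketch (attribute names follow the code above; the zero residuals
# are an assumed initial state):
#   q0 = np.matrix(np.zeros((6, 1)))
#   meas_f, meas_w, q1 = imu_model(0.01, true_f_ib_b, true_omega_ib_b,
#                                  imu_config, q0)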
'''
-----------------------------------------
12. Update Estimated Navigation Solutions
-----------------------------------------
'''
def nav_eqs_ecef(tau_i, old_r_eb_e, old_v_eb_e, old_ctm_b_e, f_ib_b, omega_ib_b):
# ******************************************************************************************************************
# UPDATE ATTITUDE
# ******************************************************************************************************************
[ctm_earth, alpha_ie] = c_earth(tau_i)
# Calculate attitude increment, magnitude, and skew-symmetric matrix
alpha_ib_b = omega_ib_b * tau_i
mag_alpha = np.sqrt(alpha_ib_b.T * alpha_ib_b)
alpha_ib_b_matrix = skew_sym(alpha_ib_b)
# Obtain coordinate transformation matrix from the new attitude w.r.t. an inertial frame to the old using
# Rodrigues' formula
if mag_alpha > 1.0E-08:
c_term_1 = (np.sin(mag_alpha[0, 0]) / mag_alpha[0, 0]) * alpha_ib_b_matrix
c_term_2 = ((1 - np.cos(mag_alpha[0, 0])) / (mag_alpha[0, 0] ** 2)) * (alpha_ib_b_matrix * alpha_ib_b_matrix)
ctm_new_old = np.matrix(np.eye(3)) + c_term_1 + c_term_2
else:
ctm_new_old = np.matrix(np.eye(3)) + alpha_ib_b_matrix
# End of "if" mag_alpha
# Update attitude
ctm_b_e = ctm_earth * old_ctm_b_e * ctm_new_old
# ******************************************************************************************************************
# SPECIFIC FORCE FRAME TRANSFORMATION
# ******************************************************************************************************************
# Calculate the average body-to-ECEF-frame coordinate transformation matrix over the update interval
if mag_alpha > 1.0E-08:
a_term_1 = ((1 - np.cos(mag_alpha[0, 0])) / (mag_alpha[0, 0] ** 2)) * alpha_ib_b_matrix
a_term_2 = ((1 - np.sin(mag_alpha[0, 0]) / mag_alpha[0, 0]) / (mag_alpha[0, 0] ** 2)) * (alpha_ib_b_matrix *
alpha_ib_b_matrix)
a_term_3 = 0.5 * skew_sym(np.matrix([[0], [0], [alpha_ie]])) * tau_i
ave_ctm_b_e = old_ctm_b_e * (np.matrix(np.eye(3)) + a_term_1 + a_term_2) - a_term_3 * old_ctm_b_e
else:
ave_ctm_b_e = old_ctm_b_e - 0.5 * skew_sym(np.matrix([[0], [0], [alpha_ie]])) * old_ctm_b_e * tau_i
# End of "if" mag_alpha
# Transform specific force to ECEF-frame resolving axes
f_ib_e = ave_ctm_b_e * f_ib_b
# ******************************************************************************************************************
# UPDATE VELOCITY
# ******************************************************************************************************************
v_eb_e = old_v_eb_e + tau_i * (f_ib_e + gravity_ecef(old_r_eb_e) -
2 * skew_sym(np.matrix([[0], [0], [OMEGA_ie]])) * old_v_eb_e)
# ******************************************************************************************************************
    # UPDATE CARTESIAN POSITION
stack frame, keeping all locals in that frame alive
until the next garbage collection occurs.
Before an except clause's suite is executed, details about the
exception are stored in the "sys" module and can be accessed via
"sys.exc_info()". "sys.exc_info()" returns a 3-tuple consisting of the
exception class, the exception instance and a traceback object (see
section The standard type hierarchy) identifying the point in the
program where the exception occurred. "sys.exc_info()" values are
restored to their previous values (before the call) when returning
from a function that handled an exception.
The optional "else" clause is executed if and when control flows off
the end of the "try" clause. [2] Exceptions in the "else" clause are
not handled by the preceding "except" clauses.
If "finally" is present, it specifies a 'cleanup' handler. The "try"
clause is executed, including any "except" and "else" clauses. If an
exception occurs in any of the clauses and is not handled, the
exception is temporarily saved. The "finally" clause is executed. If
there is a saved exception it is re-raised at the end of the "finally"
clause. If the "finally" clause raises another exception, the saved
exception is set as the context of the new exception. If the "finally"
clause executes a "return" or "break" statement, the saved exception
is discarded:
>>> def f():
... try:
... 1/0
... finally:
... return 42
...
>>> f()
42
The exception information is not available to the program during
execution of the "finally" clause.
When a "return", "break" or "continue" statement is executed in the
"try" suite of a "try"..."finally" statement, the "finally" clause is
also executed 'on the way out.' A "continue" statement is illegal in
the "finally" clause. (The reason is a problem with the current
implementation --- this restriction may be lifted in the future).
The return value of a function is determined by the last "return"
statement executed. Since the "finally" clause always executes, a
"return" statement executed in the "finally" clause will always be the
last one executed:
>>> def foo():
... try:
... return 'try'
... finally:
... return 'finally'
...
>>> foo()
'finally'
Additional information on exceptions can be found in section
Exceptions, and information on using the "raise" statement to generate
exceptions may be found in section The raise statement.
The "with" statement
====================
The "with" statement is used to wrap the execution of a block with
methods defined by a context manager (see section With Statement
Context Managers). This allows common "try"..."except"..."finally"
usage patterns to be encapsulated for convenient reuse.
with_stmt ::= "with" with_item ("," with_item)* ":" suite
with_item ::= expression ["as" target]
The execution of the "with" statement with one "item" proceeds as
follows:
1. The context expression (the expression given in the "with_item")
is evaluated to obtain a context manager.
2. The context manager's "__exit__()" is loaded for later use.
3. The context manager's "__enter__()" method is invoked.
4. If a target was included in the "with" statement, the return
value from "__enter__()" is assigned to it.
Note: The "with" statement guarantees that if the "__enter__()"
method returns without an error, then "__exit__()" will always be
called. Thus, if an error occurs during the assignment to the
target list, it will be treated the same as an error occurring
within the suite would be. See step 6 below.
5. The suite is executed.
6. The context manager's "__exit__()" method is invoked. If an
exception caused the suite to be exited, its type, value, and
traceback are passed as arguments to "__exit__()". Otherwise, three
"None" arguments are supplied.
If the suite was exited due to an exception, and the return value
from the "__exit__()" method was false, the exception is reraised.
If the return value was true, the exception is suppressed, and
execution continues with the statement following the "with"
statement.
If the suite was exited for any reason other than an exception, the
return value from "__exit__()" is ignored, and execution proceeds
at the normal location for the kind of exit that was taken.
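As an illustrative sketch (the class below is an assumption for demonstration,
not part of the reference text), a minimal context manager implementing the
"__enter__()"/"__exit__()" protocol described in the steps above is:

   class suppress_zero_division:
       def __enter__(self):
           return self
       def __exit__(self, exc_type, exc_value, traceback):
           # A true return value suppresses ZeroDivisionError (see step 6).
           return exc_type is ZeroDivisionError

   with suppress_zero_division():
       1 / 0
   # Execution continues here because the exception was suppressed.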
With more than one item, the context managers are processed as if
multiple "with" statements were nested:
with A() as a, B() as b:
suite
is equivalent to
with A() as a:
with B() as b:
suite
Changed in version 3.1: Support for multiple context expressions.
See also:
**PEP 343** - The "with" statement
The specification, background, and examples for the Python "with"
statement.
Function definitions
====================
A function definition defines a user-defined function object (see
section The standard type hierarchy):
funcdef ::= [decorators] "def" funcname "(" [parameter_list] ")" ["->" expression] ":" suite
decorators ::= decorator+
decorator ::= "@" dotted_name ["(" [argument_list [","]] ")"] NEWLINE
dotted_name ::= identifier ("." identifier)*
parameter_list ::= defparameter ("," defparameter)* ["," [parameter_list_starargs]]
| parameter_list_starargs
parameter_list_starargs ::= "*" [parameter] ("," defparameter)* ["," ["**" parameter [","]]]
| "**" parameter [","]
parameter ::= identifier [":" expression]
defparameter ::= parameter ["=" expression]
funcname ::= identifier
A function definition is an executable statement. Its execution binds
the function name in the current local namespace to a function object
(a wrapper around the executable code for the function). This
function object contains a reference to the current global namespace
as the global namespace to be used when the function is called.
The function definition does not execute the function body; this gets
executed only when the function is called. [3]
A function definition may be wrapped by one or more *decorator*
expressions. Decorator expressions are evaluated when the function is
defined, in the scope that contains the function definition. The
result must be a callable, which is invoked with the function object
as the only argument. The returned value is bound to the function name
instead of the function object. Multiple decorators are applied in
nested fashion. For example, the following code
@f1(arg)
@f2
def func(): pass
is roughly equivalent to
def func(): pass
func = f1(arg)(f2(func))
except that the original function is not temporarily bound to the name
"func".
When one or more *parameters* have the form *parameter* "="
*expression*, the function is said to have "default parameter values."
For a parameter with a default value, the corresponding *argument* may
be omitted from a call, in which case the parameter's default value is
substituted. If a parameter has a default value, all following
parameters up until the ""*"" must also have a default value --- this
is a syntactic restriction that is not expressed by the grammar.
**Default parameter values are evaluated from left to right when the
function definition is executed.** This means that the expression is
evaluated once, when the function is defined, and that the same "pre-
computed" value is used for each call. This is especially important
to understand when a default parameter is a mutable object, such as a
list or a dictionary: if the function modifies the object (e.g. by
appending an item to a list), the default value is in effect modified.
This is generally not what was intended. A way around this is to use
"None" as the default, and explicitly test for it in the body of the
function, e.g.:
def whats_on_the_telly(penguin=None):
if penguin is None:
penguin = []
penguin.append("property of the zoo")
return penguin
Function call semantics are described in more detail in section Calls.
A function call always assigns values to all parameters mentioned in
the parameter list, either from position arguments, from keyword
arguments, or from default values. If the form ""*identifier"" is
present, it is initialized to a tuple receiving any excess positional
parameters, defaulting to the empty tuple. If the form
""**identifier"" is present, it is initialized to a new ordered
mapping receiving any excess keyword arguments, defaulting to a new
empty mapping of the same type. Parameters after ""*"" or
""*identifier"" are keyword-only parameters and may only be passed
by keyword arguments.
Parameters may have annotations of the form "": expression"" following
the parameter name. Any parameter may have an annotation, even those
of the form "*identifier" or "**identifier". Functions may have a
"return" annotation of the form ""-> expression"" after the parameter
list. These annotations can be any valid Python expression and are
evaluated when the function definition is executed. Annotations may
be evaluated in a different order than they appear in the source code.
The presence of annotations does not change the semantics of a
function. The annotation values are available as values of a
dictionary keyed by the parameters' names in the "__annotations__"
attribute of the function object.
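For example (an illustrative sketch, not taken from the reference text):

   def scale(value: float, factor: float = 2.0) -> float:
       return value * factor

   scale.__annotations__
   # {'value': <class 'float'>, 'factor': <class 'float'>, 'return': <class 'float'>}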