filename | text |
---|---|
the-stack_0_484 |
#%% load the background
from __future__ import print_function, division
import torch
from torchvision import datasets, transforms
import os
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
import pandas as pd
import numpy as np
import torch.nn as nn
#%% define the datasets
list_datasets = ['/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/patchCamelyon/patches/original',
'/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/patchCamelyon/patches/normalized_to_HE',
'/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/patchCamelyon/patches/normalized_to_tumorLymphnode_165',
'/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/patchCamelyon/patches/normalized_to_onlyH',
'/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/tumorLymphnode/patches/size_165/original',
'/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/tumorLymphnode/patches/size_165/normalized_to_HE_165',
'/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/tumorLymphnode/patches/size_165/normalized_to_camelyon_165',
'/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/tumorLymphnode/patches/size_165/normalized_to_onlyH_165'
]
list_dataset_names = ['camelyon_ori', 'camelyon_to_HE', 'camelyon_to_tL', 'camelyon_to_H',
'tumorLymphnode_ori', 'tumorLymphnode_to_HE', 'tumorLymphnode_to_ca', 'tumorLymphnode_to_H']
list_models = ['/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/patchCamelyon/patches/original/model_ResNet152.pt',
'/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/patchCamelyon/patches/normalized_to_HE/model_ResNet152.pt',
'/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/patchCamelyon/patches/normalized_to_tumorLymphnode_165/model_ResNet152.pt',
'/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/patchCamelyon/patches/normalized_to_onlyH/model_ResNet152.pt' ]
list_model_names = ['ResNet_original', "ResNet_normalized_to_HE", "ResNet_normalized_to_tumorLymphnode", "ResNet_normalized_to_H"]
#%% iterate over all datasets (and later over all models)
list_model = []
list_dataset = []
list_kappa = []
list_accuracy = []
list_loss = []
for idataset, tdataset in enumerate(list_datasets):
#print(idataset)
#%% define the folder
if tdataset.find("patches") > 0:
dataset2use = "val"
else:
dataset2use = 'test'
# %%define the function to get the data
def get_datatransform(inputSize, data_dir):
data_transforms = {
dataset2use: transforms.Compose([
transforms.Resize([inputSize, inputSize]),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])
for x in [dataset2use]}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4,
shuffle=False, num_workers=4)
for x in [dataset2use]}
return(data_transforms, image_datasets, dataloaders)
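# Note on the data layout (illustrative file names): get_datatransform relies on
# torchvision's ImageFolder convention, i.e. images are expected under
# <data_dir>/<split>/<class_name>/, for example <data_dir>/val/normal/tile_0001.png
# and <data_dir>/val/tumor/tile_0042.png, so class indices are inferred
# alphabetically from the sub-directory names.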
#%% prepare the transformations and the dataset
data_transforms , image_datasets, dataloaders= get_datatransform(259, tdataset)
class_names = dataloaders[dataset2use].dataset.classes
nb_classes = len(class_names)
confusion_matrix = torch.zeros(nb_classes, nb_classes)
#%% visualize the input data (to check whether every class is evenly represented)
class_names = ['normal', 'tumor']
df = pd.DataFrame(dataloaders[dataset2use].dataset.samples)
df.columns = ['file', 'class_nr']
df.class_nr = np.array(df.class_nr)
class_labels = ['NaN' for x in range(df.shape[0])]
for i in range(0,df.shape[0]):
class_labels[i] = class_names[df.class_nr[int(i)]]
df = df.assign(class_labels = class_labels)
sns.set_palette("Set1", n_colors = 12)
sns.countplot(df.class_labels)
plt.xlabel('Pattern')
plt.ylabel('Count [n]')
plt.savefig('DataBase_' + dataset2use + '.jpg')
plt.show()
plt.close()
n_normal = sum(map(lambda x : x == "normal", class_labels))
n_tumor = sum(map(lambda x: x == "tumor", class_labels))
print("n = " + str(n_normal) + " tiles without and n = " + str(n_tumor) + " tiles with tumor.")
#%% iterate over the models
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import accuracy_score
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
n = 0
df_values = pd.DataFrame(list(range(0,len(dataloaders[dataset2use].sampler.data_source.imgs))))
for imodel, tmodel in enumerate(list_models):
print(imodel)
#%% prepare the dataset
inputSize = 224
data_transforms, image_datasets, dataloaders = get_datatransform(inputSize, tdataset)
#%% apply model on test data set (and get a confusion matrix)
model_ft = torch.load(tmodel)
model_ft.eval()
vector_prd = []
vector_exp = []
with torch.no_grad():
for i, (inputs, classes) in enumerate(dataloaders[dataset2use]):
inputs = inputs.to(device)
classes = classes.to(device)
outputs = model_ft(inputs)
_, preds = torch.max(outputs, 1)
if i == 0:
outputs_matrix = outputs
else:
outputs_matrix = torch.cat((outputs_matrix, outputs), 0)
vector_prd = vector_prd + preds.view(-1).cpu().tolist()
vector_exp = vector_exp + classes.view(-1).cpu().tolist()
confusion_matrix = torch.zeros(nb_classes, nb_classes)
for x, y in zip(vector_exp, vector_prd):
confusion_matrix[y, x] += 1
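# Indexing note: x comes from vector_exp (ground truth) and y from vector_prd
# (prediction), so rows of confusion_matrix are predictions and columns are the
# ground truth. With class_names = ['normal', 'tumor'] as defined above,
# confusion_matrix[1, 0] counts tiles that are truly 'normal' but predicted 'tumor'.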
loss_function = nn.CrossEntropyLoss()
loss_value = loss_function(outputs_matrix.to('cpu'), torch.tensor(vector_exp))
print(confusion_matrix)
#%% calculate the comparison values
list_model.append(list_model_names[imodel])
list_dataset.append(list_dataset_names[idataset])
list_kappa.append(cohen_kappa_score(vector_prd, vector_exp))
list_accuracy.append(accuracy_score(vector_prd, vector_exp))
list_loss.append(loss_value.tolist())
print('Kappa-value: ' + str(list_kappa[-1]))
print('Accuracy-value: ' + str(list_accuracy[-1]))
#%% plot a confusion matrix
matrix2plot = confusion_matrix.numpy()
matrix2plot = matrix2plot.astype(int)
ax = sns.heatmap(matrix2plot,
annot = True, linewidths=5, annot_kws={"size": 10},
xticklabels=class_names, yticklabels=class_names,
cmap = "Blues")
plt.xlabel('Ground Truth')
plt.ylabel('Model ' + list_model[-1] + " on " + list_dataset[-1])
plt.savefig('ConfMat_' +'Model ' + list_model[-1] + " on " + list_dataset[-1] + '.jpg')
plt.show()
plt.close()
#%% make a dataframe
df = pd.DataFrame(list(zip(list_model, list_dataset, list_kappa)), columns=['model', 'data', 'kappa'])
df = df.pivot_table(index = ["model"], columns = ["data"], values = "kappa")
df.to_csv('/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/table.csv')
df.to_excel('/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/table.xlsx')
with open('/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/table.tex', 'w') as tf:
tf.write(df.to_latex()) |
the-stack_0_486 | """
Limits
======
Implemented according to the PhD thesis
http://www.cybertester.com/data/gruntz.pdf, which contains very thorough
descriptions of the algorithm including many examples. We summarize here
the gist of it.
All functions are sorted according to how rapidly varying they are at
infinity using the following rules. Any two functions f and g can be
compared using the properties of L:
L=lim log|f(x)| / log|g(x)| (for x -> oo)
We define >, < ~ according to::
1. f > g .... L=+-oo
we say that:
- f is greater than any power of g
- f is more rapidly varying than g
- f goes to infinity/zero faster than g
2. f < g .... L=0
we say that:
- f is lower than any power of g
3. f ~ g .... L!=0, +-oo
we say that:
- both f and g are bounded from above and below by suitable integral
powers of the other
Examples
========
::
2 < x < exp(x) < exp(x**2) < exp(exp(x))
2 ~ 3 ~ -5
x ~ x**2 ~ x**3 ~ 1/x ~ x**m ~ -x
exp(x) ~ exp(-x) ~ exp(2x) ~ exp(x)**2 ~ exp(x+exp(-x))
f ~ 1/f
So we can divide all the functions into comparability classes (x and x^2
belong to one class, exp(x) and exp(-x) belong to some other class). In
principle, we could compare any two functions, but in our algorithm, we
do not compare anything below the class 2~3~-5 (for example log(x) is
below this), so we set 2~3~-5 as the lowest comparability class.
Given the function f, we find the list of most rapidly varying (mrv set)
subexpressions of it. This list belongs to the same comparability class.
Let's say it is {exp(x), exp(2x)}. Using the rule f ~ 1/f we find an
element "w" (either from the list or a new one) from the same
comparability class which goes to zero at infinity. In our example we
set w=exp(-x) (but we could also set w=exp(-2x) or w=exp(-3x) ...). We
rewrite the mrv set using w, in our case {1/w, 1/w^2}, and substitute it
into f. Then we expand f into a series in w::
f = c0*w^e0 + c1*w^e1 + ... + O(w^en), where e0<e1<...<en, c0!=0
but for x->oo, lim f = lim c0*w^e0, because all the other terms go to zero,
because w goes to zero faster than the ci and ei. So::
for e0>0, lim f = 0
for e0<0, lim f = +-oo (the sign depends on the sign of c0)
for e0=0, lim f = lim c0
We need to recursively compute limits at several places of the algorithm, but
as is shown in the PhD thesis, it always finishes.
Important functions from the implementation:
compare(a, b, x) compares "a" and "b" by computing the limit L.
mrv(e, x) returns list of most rapidly varying (mrv) subexpressions of "e"
rewrite(e, Omega, x, wsym) rewrites "e" in terms of w
leadterm(f, x) returns the lowest power term in the series of f
mrv_leadterm(e, x) returns the lead term (c0, e0) for e
limitinf(e, x) computes lim e (for x->oo)
limit(e, z, z0) computes any limit by converting it to the case x->oo
All the functions are really simple and straightforward except
rewrite(), which is the most difficult/complex part of the algorithm.
When the algorithm fails, the bugs are usually in the series expansion
(i.e. in SymPy) or in rewrite.
This code is an almost exact rewrite of the Maple code inside the Gruntz
thesis.
Debugging
---------
Because the gruntz algorithm is highly recursive, it's difficult to
figure out what went wrong inside a debugger. Instead, turn on nice
debug prints by defining the environment variable SYMPY_DEBUG. For
example:
[user@localhost]: SYMPY_DEBUG=True ./bin/isympy
In [1]: limit(sin(x)/x, x, 0)
limitinf(_x*sin(1/_x), _x) = 1
+-mrv_leadterm(_x*sin(1/_x), _x) = (1, 0)
| +-mrv(_x*sin(1/_x), _x) = set([_x])
| | +-mrv(_x, _x) = set([_x])
| | +-mrv(sin(1/_x), _x) = set([_x])
| | +-mrv(1/_x, _x) = set([_x])
| | +-mrv(_x, _x) = set([_x])
| +-mrv_leadterm(exp(_x)*sin(exp(-_x)), _x, set([exp(_x)])) = (1, 0)
| +-rewrite(exp(_x)*sin(exp(-_x)), set([exp(_x)]), _x, _w) = (1/_w*sin(_w), -_x)
| +-sign(_x, _x) = 1
| +-mrv_leadterm(1, _x) = (1, 0)
+-sign(0, _x) = 0
+-limitinf(1, _x) = 1
And check manually which line is wrong. Then go to the source code and
debug this function to figure out the exact problem.
"""
from functools import reduce
from sympy.core import Basic, S, Mul, PoleError
from sympy.core.cache import cacheit
from sympy.core.numbers import ilcm, I, oo
from sympy.core.symbol import Dummy, Wild
from sympy.core.traversal import bottom_up
from sympy.functions import log, exp, sign as _sign
from sympy.series.order import Order
from sympy.simplify import logcombine
from sympy.simplify.powsimp import powsimp, powdenest
from sympy.utilities.misc import debug_decorator as debug
from sympy.utilities.timeutils import timethis
timeit = timethis('gruntz')
def compare(a, b, x):
"""Returns "<" if a<b, "=" for a == b, ">" for a>b"""
# log(exp(...)) must always be simplified here for termination
la, lb = log(a), log(b)
if isinstance(a, Basic) and (isinstance(a, exp) or (a.is_Pow and a.base == S.Exp1)):
la = a.exp
if isinstance(b, Basic) and (isinstance(b, exp) or (b.is_Pow and b.base == S.Exp1)):
lb = b.exp
c = limitinf(la/lb, x)
if c == 0:
return "<"
elif c.is_infinite:
return ">"
else:
return "="
class SubsSet(dict):
"""
Stores (expr, dummy) pairs, and how to rewrite expr-s.
Explanation
===========
The gruntz algorithm needs to rewrite certain expressions in term of a new
variable w. We cannot use subs, because it is just too smart for us. For
example::
> Omega=[exp(exp(_p - exp(-_p))/(1 - 1/_p)), exp(exp(_p))]
> O2=[exp(-exp(_p) + exp(-exp(-_p))*exp(_p)/(1 - 1/_p))/_w, 1/_w]
> e = exp(exp(_p - exp(-_p))/(1 - 1/_p)) - exp(exp(_p))
> e.subs(Omega[0],O2[0]).subs(Omega[1],O2[1])
-1/w + exp(exp(p)*exp(-exp(-p))/(1 - 1/p))
is really not what we want!
So we do it the hard way and keep track of all the things we potentially
want to substitute by dummy variables. Consider the expression::
exp(x - exp(-x)) + exp(x) + x.
The mrv set is {exp(x), exp(-x), exp(x - exp(-x))}.
We introduce corresponding dummy variables d1, d2, d3 and rewrite::
d3 + d1 + x.
This class first of all keeps track of the mapping expr->variable, i.e.
will at this stage be a dictionary::
{exp(x): d1, exp(-x): d2, exp(x - exp(-x)): d3}.
[It turns out to be more convenient this way round.]
But sometimes expressions in the mrv set have other expressions from the
mrv set as subexpressions, and we need to keep track of that as well. In
this case, d3 is really exp(x - d2), so rewrites at this stage is::
{d3: exp(x-d2)}.
The function rewrite uses all this information to correctly rewrite our
expression in terms of w. In this case w can be chosen to be exp(-x),
i.e. d2. The correct rewriting then is::
exp(-w)/w + 1/w + x.
"""
def __init__(self):
self.rewrites = {}
def __repr__(self):
return super().__repr__() + ', ' + self.rewrites.__repr__()
def __getitem__(self, key):
if not key in self:
self[key] = Dummy()
return dict.__getitem__(self, key)
def do_subs(self, e):
"""Substitute the variables with expressions"""
for expr, var in self.items():
e = e.xreplace({var: expr})
return e
def meets(self, s2):
"""Tell whether or not self and s2 have non-empty intersection"""
return set(self.keys()).intersection(list(s2.keys())) != set()
def union(self, s2, exps=None):
"""Compute the union of self and s2, adjusting exps"""
res = self.copy()
tr = {}
for expr, var in s2.items():
if expr in self:
if exps:
exps = exps.xreplace({var: res[expr]})
tr[var] = res[expr]
else:
res[expr] = var
for var, rewr in s2.rewrites.items():
res.rewrites[var] = rewr.xreplace(tr)
return res, exps
def copy(self):
"""Create a shallow copy of SubsSet"""
r = SubsSet()
r.rewrites = self.rewrites.copy()
for expr, var in self.items():
r[expr] = var
return r
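# Sketch of how SubsSet behaves (illustrative; assumes x = Symbol('x') and exp from
# SymPy): accessing a missing key creates a fresh Dummy, and do_subs maps the
# dummies back to the expressions they stand for.
#
#   >>> s = SubsSet()
#   >>> d = s[exp(x)]        # a new Dummy is created and stored for exp(x)
#   >>> s.do_subs(1/d)       # the dummy is replaced by exp(x) again
#   exp(-x)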
@debug
def mrv(e, x):
"""Returns a SubsSet of most rapidly varying (mrv) subexpressions of 'e',
and e rewritten in terms of these"""
e = powsimp(e, deep=True, combine='exp')
if not isinstance(e, Basic):
raise TypeError("e should be an instance of Basic")
if not e.has(x):
return SubsSet(), e
elif e == x:
s = SubsSet()
return s, s[x]
elif e.is_Mul or e.is_Add:
i, d = e.as_independent(x) # throw away x-independent terms
if d.func != e.func:
s, expr = mrv(d, x)
return s, e.func(i, expr)
a, b = d.as_two_terms()
s1, e1 = mrv(a, x)
s2, e2 = mrv(b, x)
return mrv_max1(s1, s2, e.func(i, e1, e2), x)
elif e.is_Pow and e.base != S.Exp1:
e1 = S.One
while e.is_Pow:
b1 = e.base
e1 *= e.exp
e = b1
if b1 == 1:
return SubsSet(), b1
if e1.has(x):
base_lim = limitinf(b1, x)
if base_lim is S.One:
return mrv(exp(e1 * (b1 - 1)), x)
return mrv(exp(e1 * log(b1)), x)
else:
s, expr = mrv(b1, x)
return s, expr**e1
elif isinstance(e, log):
s, expr = mrv(e.args[0], x)
return s, log(expr)
elif isinstance(e, exp) or (e.is_Pow and e.base == S.Exp1):
# We know from the theory of this algorithm that exp(log(...)) may always
# be simplified here, and doing so is vital for termination.
if isinstance(e.exp, log):
return mrv(e.exp.args[0], x)
# if a product has an infinite factor the result will be
# infinite if there is no zero, otherwise NaN; here, we
# consider the result infinite if any factor is infinite
li = limitinf(e.exp, x)
if any(_.is_infinite for _ in Mul.make_args(li)):
s1 = SubsSet()
e1 = s1[e]
s2, e2 = mrv(e.exp, x)
su = s1.union(s2)[0]
su.rewrites[e1] = exp(e2)
return mrv_max3(s1, e1, s2, exp(e2), su, e1, x)
else:
s, expr = mrv(e.exp, x)
return s, exp(expr)
elif e.is_Function:
l = [mrv(a, x) for a in e.args]
l2 = [s for (s, _) in l if s != SubsSet()]
if len(l2) != 1:
# e.g. something like BesselJ(x, x)
raise NotImplementedError("MRV set computation for functions in"
" several variables not implemented.")
s, ss = l2[0], SubsSet()
args = [ss.do_subs(x[1]) for x in l]
return s, e.func(*args)
elif e.is_Derivative:
raise NotImplementedError("MRV set computation for derviatives"
" not implemented yet.")
raise NotImplementedError(
"Don't know how to calculate the mrv of '%s'" % e)
def mrv_max3(f, expsf, g, expsg, union, expsboth, x):
"""
Computes the maximum of two sets of expressions f and g, which
are in the same comparability class, i.e. max() compares (two elements of)
f and g and returns either (f, expsf) [if f is larger], (g, expsg)
[if g is larger] or (union, expsboth) [if f, g are of the same class].
"""
if not isinstance(f, SubsSet):
raise TypeError("f should be an instance of SubsSet")
if not isinstance(g, SubsSet):
raise TypeError("g should be an instance of SubsSet")
if f == SubsSet():
return g, expsg
elif g == SubsSet():
return f, expsf
elif f.meets(g):
return union, expsboth
c = compare(list(f.keys())[0], list(g.keys())[0], x)
if c == ">":
return f, expsf
elif c == "<":
return g, expsg
else:
if c != "=":
raise ValueError("c should be =")
return union, expsboth
def mrv_max1(f, g, exps, x):
"""Computes the maximum of two sets of expressions f and g, which
are in the same comparability class, i.e. mrv_max1() compares (two elements of)
f and g and returns the set, which is in the higher comparability class
of the union of both, if they have the same order of variation.
Also returns exps, with the appropriate substitutions made.
"""
u, b = f.union(g, exps)
return mrv_max3(f, g.do_subs(exps), g, f.do_subs(exps),
u, b, x)
@debug
@cacheit
@timeit
def sign(e, x):
"""
Returns a sign of an expression e(x) for x->oo.
::
e > 0 for x sufficiently large ... 1
e == 0 for x sufficiently large ... 0
e < 0 for x sufficiently large ... -1
The result of this function is currently undefined if e changes sign
arbitrarily often for arbitrarily large x (e.g. sin(x)).
Note that this returns zero only if e is *constantly* zero
for x sufficiently large. [If e is constant, of course, this is just
the same thing as the sign of e.]
"""
if not isinstance(e, Basic):
raise TypeError("e should be an instance of Basic")
if e.is_positive:
return 1
elif e.is_negative:
return -1
elif e.is_zero:
return 0
elif not e.has(x):
e = logcombine(e)
return _sign(e)
elif e == x:
return 1
elif e.is_Mul:
a, b = e.as_two_terms()
sa = sign(a, x)
if not sa:
return 0
return sa * sign(b, x)
elif isinstance(e, exp):
return 1
elif e.is_Pow:
if e.base == S.Exp1:
return 1
s = sign(e.base, x)
if s == 1:
return 1
if e.exp.is_Integer:
return s**e.exp
elif isinstance(e, log):
return sign(e.args[0] - 1, x)
# if all else fails, do it the hard way
c0, e0 = mrv_leadterm(e, x)
return sign(c0, x)
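# Examples of the convention above (illustrative, all for x -> oo):
#   sign(x**2 - 5, x) == 1     # eventually positive
#   sign(-exp(x), x) == -1     # eventually negative
#   sign(1/x, x) == 1          # positive for large x, even though the limit is 0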
@debug
@timeit
@cacheit
def limitinf(e, x, leadsimp=False):
"""Limit e(x) for x-> oo.
Explanation
===========
If ``leadsimp`` is True, an attempt is made to simplify the leading
term of the series expansion of ``e``. That may succeed even if
``e`` cannot be simplified.
"""
# rewrite e in terms of tractable functions only
if not e.has(x):
return e # e is a constant
if e.has(Order):
e = e.expand().removeO()
if not x.is_positive or x.is_integer:
# We make sure that x.is_positive is True and x.is_integer is None
# so we get all the correct mathematical behavior from the expression.
# We need a fresh variable.
p = Dummy('p', positive=True)
e = e.subs(x, p)
x = p
e = e.rewrite('tractable', deep=True, limitvar=x)
e = powdenest(e)
c0, e0 = mrv_leadterm(e, x)
sig = sign(e0, x)
if sig == 1:
return S.Zero # e0>0: lim f = 0
elif sig == -1: # e0<0: lim f = +-oo (the sign depends on the sign of c0)
if c0.match(I*Wild("a", exclude=[I])):
return c0*oo
s = sign(c0, x)
# the leading term shouldn't be 0:
if s == 0:
raise ValueError("Leading term should not be 0")
return s*oo
elif sig == 0:
if leadsimp:
c0 = c0.simplify()
return limitinf(c0, x, leadsimp) # e0=0: lim f = lim c0
else:
raise ValueError("{} could not be evaluated".format(sig))
def moveup2(s, x):
r = SubsSet()
for expr, var in s.items():
r[expr.xreplace({x: exp(x)})] = var
for var, expr in s.rewrites.items():
r.rewrites[var] = s.rewrites[var].xreplace({x: exp(x)})
return r
def moveup(l, x):
return [e.xreplace({x: exp(x)}) for e in l]
@debug
@timeit
def calculate_series(e, x, logx=None):
""" Calculates at least one term of the series of ``e`` in ``x``.
This is a place that fails most often, so it is in its own function.
"""
from sympy.polys import cancel
for t in e.lseries(x, logx=logx):
# bottom_up function is required for a specific case - when e is
# -exp(p/(p + 1)) + exp(-p**2/(p + 1) + p). No current simplification
# methods reduce this to 0 while not expanding polynomials.
t = bottom_up(t, lambda w: getattr(w, 'normal', lambda: w)())
t = cancel(t, expand=False).factor()
if t.has(exp) and t.has(log):
t = powdenest(t)
if not t.is_zero:
break
return t
@debug
@timeit
@cacheit
def mrv_leadterm(e, x):
"""Returns (c0, e0) for e."""
Omega = SubsSet()
if not e.has(x):
return (e, S.Zero)
if Omega == SubsSet():
Omega, exps = mrv(e, x)
if not Omega:
# e really does not depend on x after simplification
return exps, S.Zero
if x in Omega:
# move the whole omega up (exponentiate each term):
Omega_up = moveup2(Omega, x)
exps_up = moveup([exps], x)[0]
# NOTE: there is no need to move this down!
Omega = Omega_up
exps = exps_up
#
# The positive dummy, w, is used here so log(w*2) etc. will expand;
# a unique dummy is needed in this algorithm
#
# For limits of complex functions, the algorithm would have to be
# improved, or just find limits of Re and Im components separately.
#
w = Dummy("w", real=True, positive=True)
f, logw = rewrite(exps, Omega, x, w)
series = calculate_series(f, w, logx=logw)
try:
lt = series.leadterm(w, logx=logw)
except (ValueError, PoleError):
lt = f.as_coeff_exponent(w)
# as_coeff_exponent won't always split in required form. It may simply
# return (f, 0) when a better form may be obtained. Example (-x)**(-pi)
# can be written as (-1**(-pi), -pi) which as_coeff_exponent does not return
if lt[0].has(w):
base = f.as_base_exp()[0].as_coeff_exponent(w)
ex = f.as_base_exp()[1]
lt = (base[0]**ex, base[1]*ex)
return (lt[0].subs(log(w), logw), lt[1])
def build_expression_tree(Omega, rewrites):
r""" Helper function for rewrite.
We need to sort Omega (mrv set) so that we replace an expression before
we replace any expression in terms of which it has to be rewritten::
e1 ---> e2 ---> e3
         \
          -> e4
Here we can do e1, e2, e3, e4 or e1, e2, e4, e3.
To do this we assemble the nodes into a tree, and sort them by height.
This function builds the tree, rewrites then sorts the nodes.
"""
class Node:
def __init__(self):
self.before = []
self.expr = None
self.var = None
def ht(self):
return reduce(lambda x, y: x + y,
[x.ht() for x in self.before], 1)
nodes = {}
for expr, v in Omega:
n = Node()
n.var = v
n.expr = expr
nodes[v] = n
for _, v in Omega:
if v in rewrites:
n = nodes[v]
r = rewrites[v]
for _, v2 in Omega:
if r.has(v2):
n.before.append(nodes[v2])
return nodes
@debug
@timeit
def rewrite(e, Omega, x, wsym):
"""e(x) ... the function
Omega ... the mrv set
wsym ... the symbol which is going to be used for w
Returns the rewritten e in terms of w and log(w). See test_rewrite1()
for examples and correct results.
"""
if not isinstance(Omega, SubsSet):
raise TypeError("Omega should be an instance of SubsSet")
if len(Omega) == 0:
raise ValueError("Length cannot be 0")
# all items in Omega must be exponentials
for t in Omega.keys():
if not isinstance(t, exp):
raise ValueError("Value should be exp")
rewrites = Omega.rewrites
Omega = list(Omega.items())
nodes = build_expression_tree(Omega, rewrites)
Omega.sort(key=lambda x: nodes[x[1]].ht(), reverse=True)
# make sure we know the sign of each exp() term; after the loop,
# g is going to be the "w" - the simplest one in the mrv set
for g, _ in Omega:
sig = sign(g.exp, x)
if sig != 1 and sig != -1:
raise NotImplementedError('Result depends on the sign of %s' % sig)
if sig == 1:
wsym = 1/wsym # if g goes to oo, substitute 1/w
# O2 is a list, which results by rewriting each item in Omega using "w"
O2 = []
denominators = []
for f, var in Omega:
c = limitinf(f.exp/g.exp, x)
if c.is_Rational:
denominators.append(c.q)
arg = f.exp
if var in rewrites:
if not isinstance(rewrites[var], exp):
raise ValueError("Value should be exp")
arg = rewrites[var].args[0]
O2.append((var, exp((arg - c*g.exp).expand())*wsym**c))
# Remember that Omega contains subexpressions of "e". So now we find
# them in "e" and substitute them for our rewriting, stored in O2
# the following powsimp is necessary to automatically combine exponentials,
# so that the .xreplace() below succeeds:
# TODO this should not be necessary
f = powsimp(e, deep=True, combine='exp')
for a, b in O2:
f = f.xreplace({a: b})
for _, var in Omega:
assert not f.has(var)
# finally compute the logarithm of w (logw).
logw = g.exp
if sig == 1:
logw = -logw # log(w)->log(1/w)=-log(w)
# Some parts of SymPy have difficulty computing series expansions with
# non-integral exponents. The following heuristic improves the situation:
exponent = reduce(ilcm, denominators, 1)
f = f.subs({wsym: wsym**exponent})
logw /= exponent
return f, logw
def gruntz(e, z, z0, dir="+"):
"""
Compute the limit of e(z) at the point z0 using the Gruntz algorithm.
Explanation
===========
``z0`` can be any expression, including oo and -oo.
For ``dir="+"`` (default) it calculates the limit from the right
(z->z0+) and for ``dir="-"`` the limit from the left (z->z0-). For infinite z0
(oo or -oo), the dir argument doesn't matter.
This algorithm is fully described in the module docstring in the gruntz.py
file. It relies heavily on the series expansion. Most frequently, gruntz()
is only used if the faster limit() function (which uses heuristics) fails.
"""
if not z.is_symbol:
raise NotImplementedError("Second argument must be a Symbol")
# convert all limits to the limit z->oo; sign of z is handled in limitinf
r = None
if z0 == oo:
e0 = e
elif z0 == -oo:
e0 = e.subs(z, -z)
else:
if str(dir) == "-":
e0 = e.subs(z, z0 - 1/z)
elif str(dir) == "+":
e0 = e.subs(z, z0 + 1/z)
else:
raise NotImplementedError("dir must be '+' or '-'")
try:
r = limitinf(e0, z)
except ValueError:
r = limitinf(e0, z, leadsimp=True)
# This is a bit of a heuristic for nice results... we always rewrite
# tractable functions in terms of familiar intractable ones.
# It might be nicer to rewrite the exactly to what they were initially,
# but that would take some work to implement.
return r.rewrite('intractable', deep=True)
|
the-stack_0_488 | from unicodedata import name
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Ingredient,Recipe
from recipe.serializers import IngredientSerializer
INGREDIENTS_URL = reverse('recipe:ingredient-list')
class PublicIngredientsApiTests(TestCase):
# Test the publicly available ingredients API
def setUp(self):
self.client = APIClient()
def test_login_required(self):
# Test that login is required to access the endpoint
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateIngredientsApiTests(TestCase):
# Test the private (authenticated) ingredients API
def setUp(self) -> None:
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'[email protected]',
'pss123'
)
self.client.force_authenticate(self.user)
def test_retrive_ingredient_list(self):
# Test retrieving the list of ingredients
Ingredient.objects.create(user = self.user,name='Kale')
Ingredient.objects.create(user = self.user,name='Salt')
res = self.client.get(INGREDIENTS_URL)
ingredients = Ingredient.objects.all().order_by('-name')
serializer = IngredientSerializer(ingredients, many=True)
self.assertEqual(res.status_code,status.HTTP_200_OK)
self.assertEqual(res.data,serializer.data)
def test_ingredients_limits_to_user(self):
# Test that only ingredients for the authenticated user are returned
user2 = get_user_model().objects.create_user(
'[email protected]',
'pass321'
)
Ingredient.objects.create(user=user2,name='Vinegar')
ingredient = Ingredient.objects.create(user = self.user,name = 'Tumeric')
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code,status.HTTP_200_OK)
self.assertEqual(len(res.data),1)
self.assertEqual(res.data[0]['name'], ingredient.name)
def test_create_ingredient_successful(self):
#Test creating a new ingredient
payload = {'name':'Cabbage'}
self.client.post(INGREDIENTS_URL,payload)
exists = Ingredient.objects.filter(
user=self.user,
name=payload['name']
).exists()
self.assertTrue(exists)
def test_create_ingredient_invalid(self):
#Test creating a new ingredient with invalid payload
payload = {'name':''}
res = self.client.post(INGREDIENTS_URL,payload)
self.assertEqual(res.status_code,status.HTTP_400_BAD_REQUEST)
def test_retrieve_ingredients_assigned_to_recipes(self):
#Test filtering ingredients by those assigned to recipes
ingredient1 = Ingredient.objects.create(user=self.user,name='Apples')
ingredient2 = Ingredient.objects.create(user=self.user,name='Turkey')
recipe = Recipe.objects.create(
title ='Apple crumble',
time_minutes=5,
price=10,
user = self.user
)
recipe.ingredients.add(ingredient1)
res = self.client.get(INGREDIENTS_URL, {'assigned_only':1})
serializer1 = IngredientSerializer(ingredient1)
serializer2 = IngredientSerializer(ingredient2)
self.assertIn(serializer1.data, res.data)
self.assertNotIn(serializer2.data, res.data)
def test_retreive_ingredients_assigned_unique(self):
# Test that filtering ingredients by assigned returns unique items
ingredient = Ingredient.objects.create(user=self.user,name='Eggs')
Ingredient.objects.create(user=self.user,name='Cheese')
recipe1 = Recipe.objects.create(
title = 'Eggs benedict',
time_minutes = 30,
price = 12.00,
user=self.user
)
recipe1.ingredients.add(ingredient)
recipe2 = Recipe.objects.create(
title = 'Coriander eggs',
time_minutes=20,
price=5.00,
user=self.user
)
recipe2.ingredients.add(ingredient)
res = self.client.get(INGREDIENTS_URL,{'assigned_only':1})
self.assertEqual(len(res.data),1) |
the-stack_0_490 |
import random
from tqdm import tqdm
import glob
import numpy as np
import torch
from sparse_ct.reconstructor_2d.n2self import (
N2SelfReconstructor)
from sparse_ct.reconstructor_2d.dataset import (
DeepLesionDataset, EllipsesDataset)
if __name__ == "__main__":
params= {'batch_size': 8,
'shuffle': True,
'num_workers': 8}
N_PROJ = 64
pwd_train = '/external/CT_30_000/train'
pwd_test = '/external/CT_30_000/test'
file_list_train = glob.glob(pwd_train+'/*/*/*/*.png')
file_list_test = glob.glob(pwd_test+'/*/*/*/*.png')
print("file_list_train", len(file_list_train))
print("file_list_test", len(file_list_test))
# train_loader = torch.utils.data.DataLoader(
# DeepLesionDataset(
# file_list_train,
# return_gt=False,
# n_proj=N_PROJ,
# img_size=512),
# **params
# )
# test_loader = torch.utils.data.DataLoader(
# DeepLesionDataset(
# random.choices(file_list_test, k=1000),
# return_gt=True,
# n_proj=N_PROJ,
# img_size=512),
# **params
# )
train_loader = torch.utils.data.DataLoader(
EllipsesDataset(
ellipses_type='train',
return_gt=False,
n_proj=N_PROJ,
img_size=512),
**params
)
test_loader = torch.utils.data.DataLoader(
EllipsesDataset(
ellipses_type='validation',
return_gt=True,
n_proj=N_PROJ,
img_size=512),
**params
)
theta = np.linspace(0.0, 180.0, N_PROJ, endpoint=False)
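# theta holds the 64 projection angles, evenly spaced over [0, 180) degrees:
# 0.0, 2.8125, 5.625, ..., 177.1875 (step 180/64 = 2.8125).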
recon_n2self = N2SelfReconstructor(
'N2SelfTrained',
net='unet', lr=0.0001,
n2self_weights=None,#'selfsuper-ellipses-64-l1-train1/iter_180000.pth',#'iter_15000.pth',
#'selfsuper-ellipses-64-train8/iter_58800.pth', #'self-super-train9/iter_199800.pth',
learnable_filter=False
)
recon_n2self.init_train(theta)
recon_n2self._eval(test_loader)
for i in range(50):
print('--------------- ',i)
recon_n2self._train_one_epoch(train_loader, test_loader)
recon_n2self._eval(test_loader)
recon_n2self._save('epoch_{}.pth'.format(i))
recon_n2self._save('end.pth')
|
the-stack_0_493 | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : Functional test
Case Name : overlay() with Chinese-character input arguments
Description :
1. Use the overlay function to process Chinese characters
Expect :
1. The returned result is correct
History :
"""
import unittest
import sys
from yat.test import Node
from yat.test import macro
sys.path.append(sys.path[0] + "/../")
from testcase.utils.Logger import Logger
from testcase.utils.CommonSH import CommonSH
logger = Logger()
class Function(unittest.TestCase):
def setUp(self):
logger.info("--------Opengauss_Function_Innerfunc_Overlay_Case0004.py开始执行--------")
self.commonsh = CommonSH('dbuser')
self.userNode = Node('dbuser')
self.DB_ENV_PATH = macro.DB_ENV_PATH
def test_right(self):
encoding = ['SQL_ASCII', 'UTF-8']
sql_cmd = "SELECT overlay('和卡拉和梵蒂冈' placing '猕猴桃666' from 4 for 9 );"
result = ["和猕猴桃666梵蒂冈", "和卡拉猕猴桃666"]
for i in range(2):
# Create the test database
db_create = f"""drop database if exists aaa;
create database aaa encoding = '{encoding[i]}';"""
msg1 = self.commonsh.execut_db_sql(db_create)
logger.info(msg1)
self.assertTrue('CREATE' in msg1)
# Connect to the newly created database with the given encoding and run the SQL statement
cmd1 = f'''source {self.DB_ENV_PATH};
gsql -d aaa -p {self.userNode.db_port} -c "{sql_cmd}"'''
msg2 = self.userNode.sh(cmd1).result()
logger.info(msg2)
self.assertTrue(msg2.splitlines()[-2].strip() == result[i])
# Drop the test database
db_drop = f'''drop database aaa;'''
msg3 = self.commonsh.execut_db_sql(db_drop)
logger.info(msg3)
self.assertTrue('DROP' in msg3)
def tearDown(self):
logger.info('--------Opengauss_Function_Innerfunc_Overlay_Case0004.py finished--------') |
the-stack_0_496 | """
Ethereum Virtual Machine (EVM) Interpreter
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. contents:: Table of Contents
:backlinks: none
:local:
Introduction
------------
A straightforward interpreter that executes EVM code.
"""
from dataclasses import dataclass
from typing import Iterable, Set, Tuple, Union
from ethereum.base_types import U256, Bytes0, Uint
from ethereum.utils.ensure import EnsureError, ensure
from ..eth_types import Address, Log
from ..state import (
account_has_code_or_nonce,
begin_transaction,
commit_transaction,
get_account,
increment_nonce,
move_ether,
rollback_transaction,
set_code,
touch_account,
)
from ..utils.address import to_address
from ..vm import Message
from ..vm.error import (
InsufficientFunds,
InvalidJumpDestError,
InvalidOpcode,
OutOfGasError,
StackDepthLimitError,
StackOverflowError,
StackUnderflowError,
)
from ..vm.gas import GAS_CODE_DEPOSIT, REFUND_SELF_DESTRUCT, subtract_gas
from ..vm.precompiled_contracts.mapping import PRE_COMPILED_CONTRACTS
from . import Environment, Evm
from .instructions import Ops, op_implementation
from .runtime import get_valid_jump_destinations
STACK_DEPTH_LIMIT = U256(1024)
MAX_CODE_SIZE = 0x6000
RIPEMD160_ADDRESS = to_address(Uint(3))
@dataclass
class MessageCallOutput:
"""
Output of a particular message call
Contains the following:
1. `gas_left`: remaining gas after execution.
2. `refund_counter`: gas to refund after execution.
3. `logs`: list of `Log` generated during execution.
4. `accounts_to_delete`: Contracts which have self-destructed.
5. `touched_accounts`: Accounts that have been touched.
6. `has_erred`: True if execution has caused an error.
"""
gas_left: U256
refund_counter: U256
logs: Union[Tuple[()], Tuple[Log, ...]]
accounts_to_delete: Set[Address]
touched_accounts: Iterable[Address]
has_erred: bool
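# Illustrative shape of the value produced by process_message_call below: e.g. an
# address collision during contract creation is reported as
#
#   MessageCallOutput(U256(0), U256(0), tuple(), set(), set(), True)
#
# i.e. no gas left, no refund, no logs, nothing to delete or touch, has_erred=True.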
def process_message_call(
message: Message, env: Environment
) -> MessageCallOutput:
"""
If `message.target` is empty then it creates a smart contract
else it executes a call from the `message.caller` to the `message.target`.
Parameters
----------
message :
Transaction specific items.
env :
External items required for EVM execution.
Returns
-------
output : `MessageCallOutput`
Output of the message call
"""
if message.target == Bytes0(b""):
is_collision = account_has_code_or_nonce(
env.state, message.current_target
)
if is_collision:
return MessageCallOutput(
U256(0), U256(0), tuple(), set(), set(), True
)
else:
evm = process_create_message(message, env)
else:
evm = process_message(message, env)
accounts_to_delete = collect_accounts_to_delete(evm, set())
evm.refund_counter += len(accounts_to_delete) * REFUND_SELF_DESTRUCT
return MessageCallOutput(
gas_left=evm.gas_left,
refund_counter=evm.refund_counter,
logs=evm.logs,
accounts_to_delete=accounts_to_delete,
touched_accounts=collect_touched_accounts(evm),
has_erred=evm.has_erred,
)
def process_create_message(message: Message, env: Environment) -> Evm:
"""
Executes a call to create a smart contract.
Parameters
----------
message :
Transaction specific items.
env :
External items required for EVM execution.
Returns
-------
evm: :py:class:`~ethereum.spurious_dragon.vm.Evm`
Items containing execution specific objects.
"""
# take snapshot of state before processing the message
begin_transaction(env.state)
increment_nonce(env.state, message.current_target)
evm = process_message(message, env)
if not evm.has_erred:
contract_code = evm.output
contract_code_gas = len(contract_code) * GAS_CODE_DEPOSIT
try:
evm.gas_left = subtract_gas(evm.gas_left, contract_code_gas)
ensure(len(contract_code) <= MAX_CODE_SIZE, OutOfGasError)
except OutOfGasError:
rollback_transaction(env.state)
evm.gas_left = U256(0)
evm.logs = ()
evm.accounts_to_delete = dict()
evm.refund_counter = U256(0)
evm.has_erred = True
else:
set_code(env.state, message.current_target, contract_code)
commit_transaction(env.state)
else:
rollback_transaction(env.state)
return evm
def process_message(message: Message, env: Environment) -> Evm:
"""
Executes a message call: moves value (if any) and runs the associated code.
Parameters
----------
message :
Transaction specific items.
env :
External items required for EVM execution.
Returns
-------
evm: :py:class:`~ethereum.spurious_dragon.vm.Evm`
Items containing execution specific objects
"""
if message.depth > STACK_DEPTH_LIMIT:
raise StackDepthLimitError("Stack depth limit reached")
# take snapshot of state before processing the message
begin_transaction(env.state)
touch_account(env.state, message.current_target)
sender_balance = get_account(env.state, message.caller).balance
if message.should_transfer_value and message.value != 0:
if sender_balance < message.value:
rollback_transaction(env.state)
raise InsufficientFunds(
f"Insufficient funds: {sender_balance} < {message.value}"
)
move_ether(
env.state, message.caller, message.current_target, message.value
)
evm = execute_code(message, env)
if evm.has_erred:
# revert state to the last saved checkpoint
# since the message call resulted in an error
rollback_transaction(env.state)
else:
commit_transaction(env.state)
return evm
def execute_code(message: Message, env: Environment) -> Evm:
"""
Executes bytecode present in the `message`.
Parameters
----------
message :
Transaction specific items.
env :
External items required for EVM execution.
Returns
-------
evm: `ethereum.vm.EVM`
Items containing execution specific objects
"""
code = message.code
valid_jump_destinations = get_valid_jump_destinations(code)
evm = Evm(
pc=Uint(0),
stack=[],
memory=bytearray(),
code=code,
gas_left=message.gas,
env=env,
valid_jump_destinations=valid_jump_destinations,
logs=(),
refund_counter=U256(0),
running=True,
message=message,
output=b"",
accounts_to_delete=dict(),
has_erred=False,
children=[],
)
try:
if evm.message.code_address in PRE_COMPILED_CONTRACTS:
PRE_COMPILED_CONTRACTS[evm.message.code_address](evm)
return evm
while evm.running and evm.pc < len(evm.code):
try:
op = Ops(evm.code[evm.pc])
except ValueError:
raise InvalidOpcode(evm.code[evm.pc])
op_implementation[op](evm)
except (
OutOfGasError,
InvalidOpcode,
InvalidJumpDestError,
InsufficientFunds,
StackOverflowError,
StackUnderflowError,
StackDepthLimitError,
):
evm.gas_left = U256(0)
evm.logs = ()
evm.accounts_to_delete = dict()
evm.refund_counter = U256(0)
evm.has_erred = True
except (
EnsureError,
ValueError,
):
evm.has_erred = True
finally:
return evm
def collect_touched_accounts(
evm: Evm, ancestor_had_error: bool = False
) -> Iterable[Address]:
"""
Collect all of the accounts that *may* need to be deleted based on
`EIP-161 <https://eips.ethereum.org/EIPS/eip-161>`_.
Checking whether they *do* need to be deleted happens in the caller.
See also: https://github.com/ethereum/EIPs/issues/716
Parameters
----------
evm :
The current EVM frame.
ancestor_had_error :
True if the ancestors of the evm object erred else False
Returns
-------
touched_accounts: `typing.Iterable`
returns all the accounts that were touched and may need to be deleted.
"""
# collect the coinbase account if it was touched via zero-fee transfer
if (evm.message.caller == evm.env.origin) and evm.env.gas_price == 0:
yield evm.env.coinbase
# collect those explicitly marked for deletion
# ("beneficiary" is of SELFDESTRUCT)
for beneficiary in sorted(set(evm.accounts_to_delete.values())):
if evm.has_erred or ancestor_had_error:
# Special case to account for geth+parity bug
# https://github.com/ethereum/EIPs/issues/716
if beneficiary == RIPEMD160_ADDRESS:
yield beneficiary
continue
else:
yield beneficiary
# collect account directly addressed
if not isinstance(evm.message.target, Bytes0):
if evm.has_erred or ancestor_had_error:
# collect RIPEMD160 precompile even if ancestor evm had error.
# otherwise, skip collection from children of erred-out evm objects
if evm.message.target == RIPEMD160_ADDRESS:
yield evm.message.target
else:
yield evm.message.target
# recurse into nested computations
# (even erred ones, since looking for RIPEMD160)
for child in evm.children:
yield from collect_touched_accounts(
child, ancestor_had_error=(evm.has_erred or ancestor_had_error)
)
def collect_accounts_to_delete(
evm: Evm, accounts_to_delete: Set[Address]
) -> Set[Address]:
"""
Collects all the accounts that need to deleted from the `evm` object and
its children
Parameters
----------
evm :
The current EVM frame.
accounts_to_delete :
list of accounts that need to be deleted.
Note: An empty set should be passed to this parameter. This set
is used to store the results obtained by recursively iterating over the
child evm objects
Returns
-------
touched_accounts: `set`
returns all the accounts that were touched and may need to be deleted.
"""
if not evm.has_erred:
for address in evm.accounts_to_delete.keys():
accounts_to_delete.add(address)
for child in evm.children:
collect_accounts_to_delete(child, accounts_to_delete)
return accounts_to_delete
|
the-stack_0_497 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="simplevae", # Replace with your own username
version="1.0.0",
author="Chenxi Wu, Yizi Zhang",
author_email="[email protected], [email protected]",
description="Final project of STA 663: Implementation of Variational Autoencoder",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/yizi0511/sta_663_vae",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
install_requires=[
'numpy',
'tensorflow',
],
)
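# Typical packaging commands for this file (illustrative; they assume pip, wheel
# and twine are available in the environment):
#
#   python -m pip install .                 # install the package locally
#   python setup.py sdist bdist_wheel       # build source and wheel archives
#   python -m twine upload dist/*           # publish to PyPI (needs credentials)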
|
the-stack_0_499 | from lstm import BilstmAttention
from config import LSTMConfig
import torch
import pandas as pd
import numpy as np
from tqdm import tqdm
import os
import directory
def load_model(weight_path):
print(weight_path)
model = BilstmAttention(embed_num=859)
model.load_state_dict(torch.load(weight_path)) # returns an OrderedDict storing the network layer names and their parameters
model.to(device)
model.eval()
return model
@torch.no_grad()
def predict(texts):
pres_all = []
for text in tqdm(texts):
text = [int(i) for i in text.split(' ')]
# Unify the sample length: 55 tokens are used as the sample length here; longer texts are truncated, shorter ones are padded (with 858)
seq_len = LSTMConfig.seq_len
if len(text) > seq_len:
text = text[:seq_len]
else:
text = text + [858] * (seq_len - len(text))
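# e.g. with seq_len = 5: [3, 1, 4] is padded to [3, 1, 4, 858, 858], while
# [9, 2, 6, 5, 3, 5, 8] is truncated to [9, 2, 6, 5, 3].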
text = torch.from_numpy(np.array(text))
text = text.unsqueeze(0)
text = text.type(torch.LongTensor).cuda()
#
for i in range(len(model_list)):
model = model_list[i]
outputs = model(text)
outputs = outputs.sigmoid().detach().cpu().numpy()[0]
if i == 0:
pres_fold = outputs / len(model_list)
else:
pres_fold += outputs / len(model_list)
# print("bilstm+attention_pres_fold:",pres_fold)
# print("bilstm+attention_pres_fold:",type(pres_fold))
pres_fold = [str(p) for p in pres_fold]
pres_fold = ' '.join(pres_fold)
pres_all.append(pres_fold)
return pres_all
if __name__ == "__main__":
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model_list = []
n_splits = LSTMConfig.n_splits
for i in range(n_splits):
model_list.append(load_model('./dl/user_data/model_data/label1/LSTMfold_' + str(i + 1) + '_best.pth'))
test_df = pd.read_csv(directory.SEMI_TEST_SET_B_PATH, header=None)
test_df.columns = ['report_ID', 'description']
submit = test_df.copy()
print("test_df:{}".format(test_df.shape))
new_des = [i.strip('|').strip() for i in test_df['description'].values]
'''
# load the stopword list
stopwords_path = './dl/code/test/label1/stopwords.txt'
stopwords = []
with open(stopwords_path, 'r', encoding='utf-8') as f:
for line in f:
if len(line) > 0:
stopwords.append(line.strip())
# remove the stopwords from new_des_test
for j in range(0, len(new_des)):
str2lst = new_des[j].split()
copy = str2lst[:]
for i in copy:
if i in stopwords:
copy.remove(i)
str2lst = copy
lst2str = " ".join(str(i) for i in str2lst)
new_des[j] = lst2str
'''
test_df['description'] = new_des
sub_id = test_df['report_ID'].values
print(sub_id[0])
save_dir = './dl/prediction_result/label1/'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
pres_all = predict(new_des)
str_w = ''
with open(save_dir + 'submit_lstm.csv', 'w') as f:
for i in range(len(sub_id)):
str_w += sub_id[i] + ',' + '|' + pres_all[i] + '\n'
str_w = str_w.strip('\n')
f.write(str_w)
|
the-stack_0_500 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Util class for job-related operations.
"""
import contextlib
import os
from taskflow import engines
from taskflow.persistence import logbook
from oslo_utils import uuidutils
from pipeline.pipelines import pipeline_factory
from pipeline.utils import backend_helper
def post_remote_pipeline_job(pipeline):
ME = os.getpid()
print("Starting poster with pid: %s" % ME)
my_name = "poster-%s" % ME
persist_backend = backend_helper.default_persistence_backend()
with contextlib.closing(persist_backend):
with contextlib.closing(persist_backend.get_connection()) as conn:
conn.upgrade()
job_backend = backend_helper.default_jobboard_backend(my_name)
job_backend.connect()
with contextlib.closing(job_backend):
# Create information in the persistence backend about the
# unit of work we want to complete and the factory that
# can be called to create the tasks that the work unit needs
# to be done.
lb = logbook.LogBook("post-from-%s" % my_name)
fd = logbook.FlowDetail("sample-from-%s" % my_name,
uuidutils.generate_uuid())
lb.add(fd)
with contextlib.closing(persist_backend.get_connection()) as conn:
conn.save_logbook(lb)
engines.save_factory_details(fd,
pipeline_factory.make_pipeline_flow,
[pipeline.name],
pipeline.kwargs,
backend=persist_backend)
# Post, and be done with it!
jb = job_backend.post("sample-job-from-%s" % my_name, book=lb)
print("Posted: %s" % jb)
return jb
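# Illustrative call: the pipeline argument only needs `name` and `kwargs` attributes,
# matching how it is used above; the class below is a hypothetical stand-in, not part
# of the real pipeline package.
#
#   class _DummyPipeline:
#       name = "example_pipeline"
#       kwargs = {}
#
#   post_remote_pipeline_job(_DummyPipeline())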
|
the-stack_0_501 | import csv
import random
from functools import partial
from typing import Callable, Optional
from pdb import set_trace as st
import os
import random
import pandas as pd
from typing import Any, Callable, Dict, Iterable, List, Tuple, Union
import numpy as np
import tensorflow as tf
from foolbox.attacks import (
FGSM,
Attack,
DeepFoolAttack,
IterativeGradientSignAttack,
SaliencyMapAttack,
)
# from foolbox.criteria import TargetClass
# from foolbox.models import TensorFlowModel
from tensorflow.python.training import saver
from tensorflow.python.training.session_manager import SessionManager
import tensorflow as tf
import numpy as np
import pickle
import sklearn.metrics as metrics
import scipy.ndimage as ndimage
import matplotlib.pyplot as plt
plt.switch_backend('Agg')
from model.config import LENET
from model import LeNet
import nninst_mode as mode
from dataset import mnist
from dataset.config import MNIST_TRAIN, MNIST_PATH
from dataset.mnist_transforms import *
from trace.lenet_mnist_class_trace_v2 import (
data_config,
)
from trace.common import (
class_trace,
)
from tf_utils import new_session_config
from nninst_statistics import calc_trace_side_overlap
from nninst_trace import TraceKey
from nninst_utils.numpy import arg_approx
from nninst_utils.ray import ray_init
from nninst_utils.fs import ensure_dir, IOAction, CsvIOAction, abspath
from eval.common import get_overlay_summary, clean_overlap_ratio, \
translation_overlap_ratio, attack_overlap_ratio, \
lenet_mnist_example
from eval.cw_attack import cw_generate_adversarial_example
from eval.eval_mnist import foolbox_generate_adversarial_example
from eval.cw_attacks import CarliniL2
from nninst_graph import AttrMap, Graph, GraphAttrKey
from nninst_utils.ray import ray_iter
from tf_graph import (
MaskWeightWithTraceHook,
model_fn_with_fetch_hook,
)
from trace.common import (
get_predicted_value,
get_rank,
predict,
reconstruct_class_trace_from_tf,
reconstruct_trace_from_tf,
reconstruct_trace_from_tf_brute_force,
)
from eval.eval_by_reduced_point import reconstruct_point
from nninst_op import *
from nninst_trace import calc_padding
threshold = 0.9
dilation_iter = 1
dilation_structure = ndimage.generate_binary_structure(2, 2)
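# generate_binary_structure(2, 2) is the full 3x3 True mask (8-connectivity), so a
# single binary_dilation pass with this structure grows a region by one pixel in
# every direction, including the diagonals.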
# Model config
model_label = "augmentation"
model_dir = f"result/lenet/model_{model_label}"
# Trace config
trace_dir = f"{model_dir}/traces_{threshold}"
trace_name = "noop"
training_trace_dir = f"{model_dir}/per_image_trace_{threshold}/train"
# Result dir
result_name = "test"
result_dir = f"{model_dir}/birelation/{threshold}_{dilation_iter}"
# result_dir = f"result/lenet/test"
images_per_class = 1000
attack_name = "FGSM"
attacks = {
"FGSM": [FGSM],
"BIM": [IterativeGradientSignAttack],
"JSMA": [SaliencyMapAttack],
"DeepFool": [DeepFoolAttack],
# "DeepFool_full": [DeepFoolAttack, dict(subsample=None)],
# "CWL2": [CarliniL2],
}
# DeepFool will shut down when num_gpus < 0.2
num_gpus = 0.2
overlap_fn = calc_trace_side_overlap
per_channel = False
lenet_mnist_class_trace = class_trace(
trace_name,
model_config=LENET,
data_config=data_config,
)
graph = LENET.network_class.graph().load()
def reconstruct_edge_from_trace(
trace,
graph,
node_name,
key = TraceKey.EDGE,
):
attrs = trace.nodes[node_name]
op = graph.op(graph.id(node_name))
if key not in attrs:
return None
else:
attr = attrs[key]
edge = TraceKey.to_array(attr)
return edge
def reconstruct_point_from_trace_contrib(
trace,
graph,
node_name,
key = TraceKey.POINT,
):
attrs = trace.nodes[node_name]
def to_bitmap(shape, attr, contrib):
mask = np.zeros(np.prod(shape), dtype=np.int8)
pos_attr = attr[contrib > 0]
mask[TraceKey.to_array(pos_attr)] = 1
neg_attr = attr[contrib < 0]
mask[TraceKey.to_array(neg_attr)] = -1
return mask.reshape(shape)
if key in attrs:
return to_bitmap(attrs[key + "_shape"], attrs[key], attrs[TraceKey.POINT_CONTRIB])
else:
for attr_name, attr in attrs.items():
if attr_name.startswith(TraceKey.POINT + ".") and attr is not None:
return to_bitmap(attrs[TraceKey.POINT_SHAPE], attr)
RuntimeError(f"Point key not found in {node_name}")
def reconstruct_point_from_trace(
trace,
graph,
node_name,
key = TraceKey.POINT,
):
attrs = trace.nodes[node_name]
def to_bitmap(shape, attr):
mask = np.zeros(np.prod(shape), dtype=np.int8)
mask[TraceKey.to_array(attr)] = 1
return mask.reshape(shape)
if key in attrs:
return to_bitmap(attrs[key + "_shape"], attrs[key])
else:
for attr_name, attr in attrs.items():
if attr_name.startswith(TraceKey.POINT + ".") and attr is not None:
return to_bitmap(attrs[TraceKey.POINT_SHAPE], attr)
RuntimeError(f"Point key not found in {node_name}")
def reconstruct_weight_from_trace_contrib(
trace,
graph,
node_name,
key = TraceKey.WEIGHT,
):
attrs = trace.nodes[node_name]
def to_bitmap(shape, attr, contrib):
mask = np.zeros(np.prod(shape), dtype=np.int8)
mask[TraceKey.to_array(attr)] = contrib
return mask.reshape(shape)
if key in attrs:
return to_bitmap(attrs[key + "_shape"], attrs[key], attrs[TraceKey.WEIGHT_CONTRIB])
else:
RuntimeError(f"Weight key not found in {node_name}")
def reconstruct_weight_from_trace(
trace,
graph,
node_name,
key = TraceKey.WEIGHT,
):
attrs = trace.nodes[node_name]
def to_bitmap(shape, attr):
mask = np.zeros(np.prod(shape), dtype=np.int8)
mask[TraceKey.to_array(attr)] = 1
return mask.reshape(shape)
if key in attrs:
return to_bitmap(attrs[key + "_shape"], attrs[key])
else:
RuntimeError(f"Weight key not found in {node_name}")
def reconstruct_point_fn(
trace,
):
node_names = []
key = TraceKey.POINT
for attr_name, attr in trace.nodes.items():
if key in attr:
node_names.append(attr_name)
point_dict = {}
for node_name in [
"conv2d/Relu:0",
"conv2d_1/Relu:0",
"dense/BiasAdd:0",
"dense_1/BiasAdd:0",
]:
point_dict[node_name] = reconstruct_point_from_trace(
trace,
graph,
node_name,
)
# print(node_name, point_dict[node_name].shape)
return point_dict
def reconstruct_weight_fn(
trace,
):
weight_dict = {}
for node_name in [
"conv2d/Conv2D",
"conv2d_1/Conv2D",
]:
weight = reconstruct_weight_from_trace(
trace,
graph,
node_name,
)
weight = weight.reshape(-1, weight.shape[-2], weight.shape[-1])
weight_dict[node_name] = weight
return weight_dict
reconstruct_edge_fn = partial(
reconstruct_edge_from_trace,
graph = graph,
key = TraceKey.EDGE
)
|
the-stack_0_504 | # -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
import json
import urllib.parse
import aiohttp
from aiohttp import web
from foglamp.common import utils
from foglamp.common import logger
from foglamp.common.service_record import ServiceRecord
from foglamp.common.storage_client.exceptions import StorageServerError
from foglamp.common.configuration_manager import ConfigurationManager
from foglamp.services.core import connect
from foglamp.services.core.service_registry.service_registry import ServiceRegistry
from foglamp.services.core.service_registry import exceptions as service_registry_exceptions
from foglamp.common.audit_logger import AuditLogger
__author__ = "Amarendra K Sinha"
__copyright__ = "Copyright (c) 2018 Dianomic Systems"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
_help = """
-------------------------------------------------------------------------------
| GET | /foglamp/notification/plugin |
| GET POST PUT DELETE | /foglamp/notification |
-------------------------------------------------------------------------------
"""
_logger = logger.setup()
NOTIFICATION_TYPE = ["one shot", "retriggered", "toggled"]
async def get_plugin(request):
""" GET lists of rule plugins and delivery plugins
:Example:
curl -X GET http://localhost:8081/foglamp/notification/plugin
"""
try:
notification_service = ServiceRegistry.get(s_type=ServiceRecord.Type.Notification.name)
_address, _port = notification_service[0]._address, notification_service[0]._port
except service_registry_exceptions.DoesNotExist:
raise web.HTTPNotFound(reason="No Notification service available.")
try:
url = 'http://{}:{}/notification/rules'.format(_address, _port)
rule_plugins = json.loads(await _hit_get_url(url))
url = 'http://{}:{}/notification/delivery'.format(_address, _port)
delivery_plugins = json.loads(await _hit_get_url(url))
except Exception as ex:
raise web.HTTPInternalServerError(reason=ex)
else:
return web.json_response({'rules': rule_plugins, 'delivery': delivery_plugins})
async def get_type(request):
""" GET the list of available notification types
:Example:
curl -X GET http://localhost:8081/foglamp/notification/type
"""
return web.json_response({'notification_type': NOTIFICATION_TYPE})
async def get_notification(request):
""" GET an existing notification
:Example:
curl -X GET http://localhost:8081/foglamp/notification/<notification_name>
"""
try:
notif = request.match_info.get('notification_name', None)
if notif is None:
raise ValueError("Notification name is required.")
notification = {}
storage = connect.get_storage_async()
config_mgr = ConfigurationManager(storage)
notification_config = await config_mgr._read_category_val(notif)
if notification_config:
rule_config = await config_mgr._read_category_val("rule{}".format(notif))
delivery_config = await config_mgr._read_category_val("delivery{}".format(notif))
notification = {
"name": notification_config['name']['value'],
"description": notification_config['description']['value'],
"rule": notification_config['rule']['value'],
"ruleConfig": rule_config,
"channel": notification_config['channel']['value'],
"deliveryConfig": delivery_config,
"notificationType": notification_config['notification_type']['value'],
"enable": notification_config['enable']['value'],
}
else:
raise ValueError("The Notification: {} does not exist.".format(notif))
except ValueError as ex:
raise web.HTTPBadRequest(reason=str(ex))
except Exception as ex:
raise web.HTTPInternalServerError(reason=ex)
else:
return web.json_response({'notification': notification})
async def get_notifications(request):
""" GET list of notifications
:Example:
curl -X GET http://localhost:8081/foglamp/notification
"""
try:
storage = connect.get_storage_async()
config_mgr = ConfigurationManager(storage)
all_notifications = await config_mgr._read_all_child_category_names("Notifications")
notifications = []
for notification in all_notifications:
notification_config = await config_mgr._read_category_val(notification['child'])
notification = {
"name": notification_config['name']['value'],
"rule": notification_config['rule']['value'],
"channel": notification_config['channel']['value'],
"notificationType": notification_config['notification_type']['value'],
"enable": notification_config['enable']['value'],
}
notifications.append(notification)
except Exception as ex:
raise web.HTTPInternalServerError(reason=ex)
else:
return web.json_response({'notifications': notifications})
async def post_notification(request):
"""
Create a new notification to run a specific plugin
:Example:
curl -X POST http://localhost:8081/foglamp/notification -d '{"name": "Test Notification", "description":"Test Notification", "rule": "threshold", "channel": "email", "notification_type": "one shot", "enabled": false}'
curl -X POST http://localhost:8081/foglamp/notification -d '{"name": "Test Notification", "description":"Test Notification", "rule": "threshold", "channel": "email", "notification_type": "one shot", "enabled": false, "rule_config": {}, "delivery_config": {}}'
"""
try:
notification_service = ServiceRegistry.get(s_type=ServiceRecord.Type.Notification.name)
_address, _port = notification_service[0]._address, notification_service[0]._port
except service_registry_exceptions.DoesNotExist:
raise web.HTTPNotFound(reason="No Notification service available.")
try:
data = await request.json()
if not isinstance(data, dict):
raise ValueError('Data payload must be a valid JSON')
name = data.get('name', None)
description = data.get('description', None)
rule = data.get('rule', None)
channel = data.get('channel', None)
notification_type = data.get('notification_type', None)
enabled = data.get('enabled', None)
rule_config = data.get('rule_config', {})
delivery_config = data.get('delivery_config', {})
if name is None or name.strip() == "":
raise ValueError('Missing name property in payload.')
if description is None:
raise ValueError('Missing description property in payload.')
if rule is None:
raise ValueError('Missing rule property in payload.')
if channel is None:
raise ValueError('Missing channel property in payload.')
if notification_type is None:
raise ValueError('Missing notification_type property in payload.')
if utils.check_reserved(name) is False:
raise ValueError('Invalid name property in payload.')
if utils.check_reserved(rule) is False:
raise ValueError('Invalid rule property in payload.')
if utils.check_reserved(channel) is False:
raise ValueError('Invalid channel property in payload.')
if notification_type not in NOTIFICATION_TYPE:
raise ValueError('Invalid notification_type property in payload.')
if enabled is not None:
if enabled not in ['true', 'false', True, False]:
raise ValueError('Only "true", "false", true, false are allowed for value of enabled.')
is_enabled = "true" if ((type(enabled) is str and enabled.lower() in ['true']) or (
(type(enabled) is bool and enabled is True))) else "false"
storage = connect.get_storage_async()
config_mgr = ConfigurationManager(storage)
curr_config = await config_mgr.get_category_all_items(name)
if curr_config is not None:
raise ValueError("A Category with name {} already exists.".format(name))
try:
# Get default config for rule and channel plugins
url = '{}/plugin'.format(request.url)
try:
# When authentication is mandatory we need to pass token in request header
auth_token = request.token
except AttributeError:
auth_token = None
list_plugins = json.loads(await _hit_get_url(url, auth_token))
r = list(filter(lambda rules: rules['name'] == rule, list_plugins['rules']))
c = list(filter(lambda channels: channels['name'] == channel, list_plugins['delivery']))
if len(r) == 0 or len(c) == 0: raise KeyError
rule_plugin_config = r[0]['config']
delivery_plugin_config = c[0]['config']
except KeyError:
raise ValueError("Invalid rule plugin {} and/or delivery plugin {} supplied.".format(rule, channel))
# Verify if rule_config contains valid keys
if rule_config != {}:
for k, v in rule_config.items():
if k not in rule_plugin_config:
raise ValueError("Invalid key {} in rule_config {} supplied for plugin {}.".format(k, rule_config, rule))
# Verify if delivery_config contains valid keys
if delivery_config != {}:
for k, v in delivery_config.items():
if k not in delivery_plugin_config:
raise ValueError(
"Invalid key {} in delivery_config {} supplied for plugin {}.".format(k, delivery_config, channel))
# First create templates for notification and rule, channel plugins
post_url = 'http://{}:{}/notification/{}'.format(_address, _port, urllib.parse.quote(name))
await _hit_post_url(post_url) # Create Notification template
post_url = 'http://{}:{}/notification/{}/rule/{}'.format(_address, _port, urllib.parse.quote(name),
urllib.parse.quote(rule))
await _hit_post_url(post_url) # Create Notification rule template
post_url = 'http://{}:{}/notification/{}/delivery/{}'.format(_address, _port, urllib.parse.quote(name),
urllib.parse.quote(channel))
await _hit_post_url(post_url) # Create Notification delivery template
# Create configurations
notification_config = {
"description": description,
"rule": rule,
"channel": channel,
"notification_type": notification_type,
"enable": is_enabled,
}
await _update_configurations(config_mgr, name, notification_config, rule_config, delivery_config)
audit = AuditLogger(storage)
await audit.information('NTFAD', {"name": name})
except ValueError as ex:
raise web.HTTPBadRequest(reason=str(ex))
except Exception as e:
raise web.HTTPInternalServerError(reason=str(e))
else:
return web.json_response({'result': "Notification {} created successfully".format(name)})
class NotFoundError(Exception):
pass
async def put_notification(request):
"""
Update an existing notification
:Example:
curl -X PUT http://localhost:8081/foglamp/notification/<notification_name> -d '{"description":"Test Notification modified"}'
curl -X PUT http://localhost:8081/foglamp/notification/<notification_name> -d '{"rule": "threshold", "channel": "email"}'
curl -X PUT http://localhost:8081/foglamp/notification/<notification_name> -d '{"notification_type": "one shot", "enabled": false}'
curl -X PUT http://localhost:8081/foglamp/notification/<notification_name> -d '{"enabled": false}'
curl -X PUT http://localhost:8081/foglamp/notification/<notification_name> -d '{"description":"Test Notification", "rule": "threshold", "channel": "email", "notification_type": "one shot", "enabled": false, "rule_config": {}, "delivery_config": {}}'
"""
try:
notification_service = ServiceRegistry.get(s_type=ServiceRecord.Type.Notification.name)
_address, _port = notification_service[0]._address, notification_service[0]._port
except service_registry_exceptions.DoesNotExist:
raise web.HTTPNotFound(reason="No Notification service available.")
try:
notif = request.match_info.get('notification_name', None)
if notif is None:
raise ValueError("Notification name is required for updation.")
# TODO: Stop notification before update
data = await request.json()
if not isinstance(data, dict):
raise ValueError('Data payload must be a valid JSON')
description = data.get('description', None)
rule = data.get('rule', None)
channel = data.get('channel', None)
notification_type = data.get('notification_type', None)
enabled = data.get('enabled', None)
rule_config = data.get('rule_config', {})
delivery_config = data.get('delivery_config', {})
if utils.check_reserved(notif) is False:
raise ValueError('Invalid notification instance name.')
if rule is not None and utils.check_reserved(rule) is False:
raise ValueError('Invalid rule property in payload.')
if channel is not None and utils.check_reserved(channel) is False:
raise ValueError('Invalid channel property in payload.')
if notification_type is not None and notification_type not in NOTIFICATION_TYPE:
raise ValueError('Invalid notification_type property in payload.')
if enabled is not None:
if enabled not in ['true', 'false', True, False]:
raise ValueError('Only "true", "false", true, false are allowed for value of enabled.')
is_enabled = "true" if ((type(enabled) is str and enabled.lower() in ['true']) or (
(type(enabled) is bool and enabled is True))) else "false"
storage = connect.get_storage_async()
config_mgr = ConfigurationManager(storage)
current_config = await config_mgr._read_category_val(notif)
if current_config is None:
raise NotFoundError('No {} notification instance found'.format(notif))
rule_changed = True if rule is not None and rule != current_config['rule']['value'] else False
channel_changed = True if channel is not None and channel != current_config['channel']['value'] else False
try:
# Get default config for rule and channel plugins
url = str(request.url)
url_parts = url.split("/foglamp/notification")
url = '{}/foglamp/notification/plugin'.format(url_parts[0])
try:
# When authentication is mandatory we need to pass token in request header
auth_token = request.token
except AttributeError:
auth_token = None
list_plugins = json.loads(await _hit_get_url(url, auth_token))
search_rule = rule if rule_changed else current_config['rule']['value']
r = list(filter(lambda rules: rules['name'] == search_rule, list_plugins['rules']))
if len(r) == 0:
raise KeyError
rule_plugin_config = r[0]['config']
search_channel = channel if channel_changed else current_config['channel']['value']
c = list(filter(lambda channels: channels['name'] == search_channel, list_plugins['delivery']))
if len(c) == 0:
raise KeyError
delivery_plugin_config = c[0]['config']
except KeyError:
raise ValueError("Invalid rule plugin:{} and/or delivery plugin:{} supplied.".format(rule, channel))
# Verify if rule_config contains valid keys
if rule_config != {}:
for k, v in rule_config.items():
if k not in rule_plugin_config:
raise ValueError("Invalid key:{} in rule plugin:{}".format(k, rule_plugin_config))
# Verify if delivery_config contains valid keys
if delivery_config != {}:
for k, v in delivery_config.items():
if k not in delivery_plugin_config:
raise ValueError(
"Invalid key:{} in delivery plugin:{}".format(k, delivery_plugin_config))
if rule_changed: # A new rule has been supplied
category_desc = rule_plugin_config['plugin']['description']
category_name = "rule{}".format(notif)
await config_mgr.create_category(category_name=category_name,
category_description=category_desc,
category_value=rule_plugin_config,
keep_original_items=False)
if channel_changed: # A new delivery has been supplied
category_desc = delivery_plugin_config['plugin']['description']
category_name = "delivery{}".format(notif)
await config_mgr.create_category(category_name=category_name,
category_description=category_desc,
category_value=delivery_plugin_config,
keep_original_items=False)
notification_config = {}
if description is not None:
notification_config.update({"description": description})
if rule is not None:
notification_config.update({"rule": rule})
if channel is not None:
notification_config.update({"channel": channel})
if notification_type is not None:
notification_config.update({"notification_type": notification_type})
if enabled is not None:
notification_config.update({"enable": is_enabled})
await _update_configurations(config_mgr, notif, notification_config, rule_config, delivery_config)
except ValueError as e:
raise web.HTTPBadRequest(reason=str(e))
except NotFoundError as e:
raise web.HTTPNotFound(reason=str(e))
except Exception as ex:
raise web.HTTPInternalServerError(reason=str(ex))
else:
# TODO: Start notification after update
return web.json_response({'result': "Notification {} updated successfully".format(notif)})
async def delete_notification(request):
""" Delete an existing notification
:Example:
curl -X DELETE http://localhost:8081/foglamp/notification/<notification_name>
"""
try:
notification_service = ServiceRegistry.get(s_type=ServiceRecord.Type.Notification.name)
_address, _port = notification_service[0]._address, notification_service[0]._port
except service_registry_exceptions.DoesNotExist:
raise web.HTTPNotFound(reason="No Notification service available.")
try:
notif = request.match_info.get('notification_name', None)
if notif is None:
raise ValueError("Notification name is required for deletion.")
# Stop & remove notification
url = 'http://{}:{}/notification/{}'.format(_address, _port, urllib.parse.quote(notif))
notification = json.loads(await _hit_delete_url(url))
        # Remove the child categories for the rule and delivery plugins, and the category for the notification itself
storage = connect.get_storage_async()
config_mgr = ConfigurationManager(storage)
await config_mgr.delete_category_and_children_recursively(notif)
audit = AuditLogger(storage)
await audit.information('NTFDL', {"name": notif})
except ValueError as ex:
raise web.HTTPBadRequest(reason=str(ex))
except Exception as ex:
raise web.HTTPInternalServerError(reason=str(ex))
else:
return web.json_response({'result': 'Notification {} deleted successfully.'.format(notif)})
async def _hit_get_url(get_url, token=None):
headers = {"Authorization": token} if token else None
try:
async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(verify_ssl=False)) as session:
async with session.get(get_url, headers=headers) as resp:
status_code = resp.status
jdoc = await resp.text()
if status_code not in range(200, 209):
_logger.error("Error code: %d, reason: %s, details: %s, url: %s", resp.status, resp.reason, jdoc,
get_url)
raise StorageServerError(code=resp.status, reason=resp.reason, error=jdoc)
except Exception:
raise
else:
return jdoc
async def _hit_post_url(post_url, data=None):
try:
async with aiohttp.ClientSession() as session:
async with session.post(post_url, data=data) as resp:
status_code = resp.status
jdoc = await resp.text()
if status_code not in range(200, 209):
_logger.error("Error code: %d, reason: %s, details: %s, url: %s", resp.status, resp.reason, jdoc,
post_url)
raise StorageServerError(code=resp.status, reason=resp.reason, error=jdoc)
except Exception:
raise
else:
return jdoc
async def _update_configurations(config_mgr, name, notification_config, rule_config, delivery_config):
try:
# Update main notification
if notification_config != {}:
await config_mgr.update_configuration_item_bulk(name, notification_config)
# Replace rule configuration
if rule_config != {}:
category_name = "rule{}".format(name)
await config_mgr.update_configuration_item_bulk(category_name, rule_config)
# Replace delivery configuration
if delivery_config != {}:
category_name = "delivery{}".format(name)
await config_mgr.update_configuration_item_bulk(category_name, delivery_config)
except Exception as ex:
_logger.exception("Failed to update notification configuration. %s", str(ex))
raise web.HTTPInternalServerError(reason='Failed to update notification configuration. {}'.format(ex))
async def _hit_delete_url(delete_url, data=None):
try:
async with aiohttp.ClientSession() as session:
async with session.delete(delete_url, data=data) as resp:
status_code = resp.status
jdoc = await resp.text()
if status_code not in range(200, 209):
_logger.error("Error code: %d, reason: %s, details: %s, url: %s",
resp.status,
resp.reason,
jdoc,
delete_url)
raise StorageServerError(code=resp.status,
reason=resp.reason,
error=jdoc)
except Exception:
raise
else:
return jdoc
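# Wiring sketch (added for illustration): the real FogLAMP core registers these
# handlers through its own routes module, but for local experimentation they can
# be mounted on a bare aiohttp application as below. The URL paths are taken
# from the docstrings above; everything else here is an assumption.
def setup_notification_routes(app: web.Application) -> None:
    app.router.add_get('/foglamp/notification/plugin', get_plugin)
    app.router.add_get('/foglamp/notification/type', get_type)
    app.router.add_get('/foglamp/notification', get_notifications)
    app.router.add_get('/foglamp/notification/{notification_name}', get_notification)
    app.router.add_post('/foglamp/notification', post_notification)
    app.router.add_put('/foglamp/notification/{notification_name}', put_notification)
    app.router.add_delete('/foglamp/notification/{notification_name}', delete_notification)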
|
the-stack_0_506 | # dataset settings
ann_type = 'bast_eval' # * change accordingly
num_classes = 9 if ann_type == 'bast_base' else 42
# model settings
model = dict(
type='Recognizer3D',
backbone=dict(
type='ResNet3dSlowOnly',
depth=50,
pretrained=None,
in_channels=17,
base_channels=32,
num_stages=3,
out_indices=(2, ),
stage_blocks=(4, 6, 3),
conv1_stride_s=1,
pool1_stride_s=1,
inflate=(0, 1, 1),
spatial_strides=(2, 2, 2),
temporal_strides=(1, 1, 2),
dilations=(1, 1, 1)),
cls_head=dict(
type='I3DHead',
in_channels=512,
num_classes=num_classes,
spatial_type='avg',
dropout_ratio=0.5),
train_cfg=dict(),
test_cfg=dict(average_clips='prob'))
# dataset settings
dataset_type = 'PoseDataset'
ann_file_train = f'data/skeleton/{ann_type}/bast_train.pkl'
ann_file_val = f'data/skeleton/{ann_type}/bast_val.pkl'
ann_file_test = f'data/skeleton/{ann_type}/bast_test.pkl'
left_kp = [1, 3, 5, 7, 9, 11, 13, 15]
right_kp = [2, 4, 6, 8, 10, 12, 14, 16]
train_pipeline = [
dict(type='UniformSampleFrames', clip_len=54),
dict(type='PoseDecode'),
dict(type='PoseCompact', hw_ratio=1., allow_imgpad=True),
dict(type='Resize', scale=(-1, 64)),
dict(type='RandomResizedCrop', area_range=(0.56, 1.0)),
dict(type='Resize', scale=(56, 56), keep_ratio=False),
dict(type='Flip', flip_ratio=0.5, left_kp=left_kp, right_kp=right_kp),
dict(
type='GeneratePoseTarget',
sigma=0.6,
use_score=True,
with_kp=True,
with_limb=False),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
dict(type='UniformSampleFrames', clip_len=54, num_clips=1, test_mode=True),
dict(type='PoseDecode'),
dict(type='PoseCompact', hw_ratio=1., allow_imgpad=True),
dict(type='Resize', scale=(-1, 64)),
dict(type='CenterCrop', crop_size=64),
dict(
type='GeneratePoseTarget',
sigma=0.6,
use_score=True,
with_kp=True,
with_limb=False),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
dict(
type='UniformSampleFrames', clip_len=54, num_clips=10, test_mode=True),
dict(type='PoseDecode'),
dict(type='PoseCompact', hw_ratio=1., allow_imgpad=True),
dict(type='Resize', scale=(-1, 64)),
dict(type='CenterCrop', crop_size=64),
dict(
type='GeneratePoseTarget',
sigma=0.6,
use_score=True,
with_kp=True,
with_limb=False,
double=True,
left_kp=left_kp,
right_kp=right_kp),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
data = dict(
videos_per_gpu=6,
workers_per_gpu=1,
test_dataloader=dict(videos_per_gpu=1),
train=dict(
type=dataset_type,
ann_file=ann_file_train,
data_prefix='',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=ann_file_val,
data_prefix='',
pipeline=val_pipeline),
test=dict(
type=dataset_type,
ann_file=ann_file_test,
data_prefix='',
pipeline=test_pipeline))
# optimizer
optimizer = dict(
type='SGD', lr=0.0094, momentum=0.9,
weight_decay=0.0003) # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='CosineAnnealing', by_epoch=False, min_lr=0)
total_epochs = 280
checkpoint_config = dict(interval=10)
workflow = [('train', 10)]
evaluation = dict(
interval=5,
metrics=['top_k_accuracy', 'mean_class_accuracy'],
topk=(1, 2, 3, 4, 5))
eval_config = dict(
metric_options=dict(
top_k_accuracy=dict(topk=(1, 2, 3, 4, 5))),)
log_config = dict(
interval=20, hooks=[
dict(type='TextLoggerHook'),
])
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = ('https://download.openmmlab.com/mmaction/skeleton/posec3d/'
'slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint/'
'slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint-76ffdd8b.pth')
resume_from = None
find_unused_parameters = False
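# Usage note (added for illustration): a config like this is normally passed to
# mmaction2's stock entry points rather than imported directly, e.g.
#   python tools/train.py <path/to/this_config>.py --validate --seed 0
#   python tools/test.py <path/to/this_config>.py <checkpoint>.pth --eval top_k_accuracy
# The paths and the checkpoint name are placeholders, not files from this repo.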
|
the-stack_0_508 | import numpy as np
import pandas as pd
from napari.qt.threading import thread_worker
from skimage.measure import regionprops_table
from imlib.pandas.misc import initialise_df
from imlib.general.list import unique_elements_lists
from brainreg_segment.atlas.utils import lateralise_atlas_image
@thread_worker
def region_analysis(
label_layers,
atlas_layer_image,
atlas,
regions_directory,
output_csv_file=None,
volumes=True,
summarise=True,
):
regions_directory.mkdir(parents=True, exist_ok=True)
if volumes:
print("Calculating region volume distribution")
print(f"Saving summary volumes to: {regions_directory}")
for label_layer in label_layers:
analyse_region_brain_areas(
label_layer,
atlas_layer_image,
regions_directory,
atlas,
)
if summarise:
if output_csv_file is not None:
print("Summarising regions")
summarise_brain_regions(
label_layers, output_csv_file, atlas.resolution
)
print("Finished!\n")
def summarise_brain_regions(label_layers, filename, atlas_resolution):
summaries = []
for label_layer in label_layers:
summaries.append(summarise_single_brain_region(label_layer))
result = pd.concat(summaries)
# TODO: use atlas.space to make these more intuitive
volume_header = "volume_mm3"
length_columns = [
"axis_0_min_um",
"axis_1_min_um",
"axis_2_min_um",
"axis_0_max_um",
"axis_1_max_um",
"axis_2_max_um",
"axis_0_center_um",
"axis_1_center_um",
"axis_2_center_um",
]
result.columns = ["region"] + [volume_header] + length_columns
voxel_volume_in_mm = np.prod(atlas_resolution) / (1000 ** 3)
result[volume_header] = result[volume_header] * voxel_volume_in_mm
    # Convert the voxel-based measurements to microns: match each header to its
    # axis index and scale by the atlas resolution (in um) along that axis.
    for header in length_columns:
        for axis_index, axis_resolution in enumerate(atlas_resolution):
            if header.startswith(f"axis_{axis_index}"):
                scale = float(axis_resolution)
                assert scale > 0
                result[header] = result[header] * scale
result.to_csv(filename, index=False)
def summarise_single_brain_region(
label_layer,
ignore_empty=True,
properties_to_fetch=[
"area",
"bbox",
"centroid",
],
):
data = label_layer.data
if ignore_empty:
if data.sum() == 0:
return
regions_table = regionprops_table(data, properties=properties_to_fetch)
df = pd.DataFrame.from_dict(regions_table)
df.insert(0, "Region", label_layer.name)
return df
def analyse_region_brain_areas(
label_layer,
atlas_layer_data,
destination_directory,
atlas,
extension=".csv",
ignore_empty=True,
):
"""
:param label_layer: napari labels layer (with segmented regions)
:param ignore_empty: If True, don't analyse empty regions
"""
data = label_layer.data
if ignore_empty:
if data.sum() == 0:
return
name = label_layer.name
masked_annotations = data.astype(bool) * atlas_layer_data
annotations_left, annotations_right = lateralise_atlas_image(
masked_annotations,
atlas.hemispheres,
left_hemisphere_value=atlas.left_hemisphere_value,
right_hemisphere_value=atlas.right_hemisphere_value,
)
unique_vals_left, counts_left = np.unique(
annotations_left, return_counts=True
)
unique_vals_right, counts_right = np.unique(
annotations_right, return_counts=True
)
voxel_volume_in_mm = np.prod(atlas.resolution) / (1000 ** 3)
df = initialise_df(
"structure_name",
"left_volume_mm3",
"left_percentage_of_total",
"right_volume_mm3",
"right_percentage_of_total",
"total_volume_mm3",
"percentage_of_total",
)
sampled_structures = unique_elements_lists(
list(unique_vals_left) + list(unique_vals_right)
)
total_volume_region = get_total_volume_regions(
unique_vals_left, unique_vals_right, counts_left, counts_right
)
for atlas_value in sampled_structures:
if atlas_value != 0:
try:
df = add_structure_volume_to_df(
df,
atlas_value,
atlas.structures,
unique_vals_left,
unique_vals_right,
counts_left,
counts_right,
voxel_volume_in_mm,
total_volume_voxels=total_volume_region,
)
except KeyError:
print(
f"Value: {atlas_value} is not in the atlas structure"
f" reference file. Not calculating the volume"
)
filename = destination_directory / (name + extension)
df.to_csv(filename, index=False)
def get_total_volume_regions(
unique_vals_left,
unique_vals_right,
counts_left,
counts_right,
):
zero_index_left = np.where(unique_vals_left == 0)[0][0]
counts_left = list(counts_left)
counts_left.pop(zero_index_left)
zero_index_right = np.where(unique_vals_right == 0)[0][0]
counts_right = list(counts_right)
counts_right.pop(zero_index_right)
return sum(counts_left + counts_right)
def add_structure_volume_to_df(
df,
atlas_value,
atlas_structures,
unique_vals_left,
unique_vals_right,
counts_left,
counts_right,
voxel_volume,
total_volume_voxels=None,
):
name = atlas_structures[atlas_value]["name"]
left_volume, left_percentage = get_volume_in_hemisphere(
atlas_value,
unique_vals_left,
counts_left,
total_volume_voxels,
voxel_volume,
)
right_volume, right_percentage = get_volume_in_hemisphere(
atlas_value,
unique_vals_right,
counts_right,
total_volume_voxels,
voxel_volume,
)
if total_volume_voxels is not None:
total_percentage = left_percentage + right_percentage
else:
total_percentage = 0
df = df.append(
{
"structure_name": name,
"left_volume_mm3": left_volume,
"left_percentage_of_total": left_percentage,
"right_volume_mm3": right_volume,
"right_percentage_of_total": right_percentage,
"total_volume_mm3": left_volume + right_volume,
"percentage_of_total": total_percentage,
},
ignore_index=True,
)
return df
def get_volume_in_hemisphere(
atlas_value, unique_vals, counts, total_volume_voxels, voxel_volume
):
try:
index = np.where(unique_vals == atlas_value)[0][0]
volume = counts[index] * voxel_volume
if total_volume_voxels is not None:
percentage = 100 * (counts[index] / total_volume_voxels)
else:
percentage = 0
except IndexError:
volume = 0
percentage = 0
return volume, percentage
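# Worked example (added for illustration): the voxel-to-mm^3 conversion used by
# the functions above. The 25 um isotropic resolution is a made-up value, not
# something read from a real atlas here.
def example_voxel_volume_mm3(atlas_resolution=(25, 25, 25)):
    # Each voxel spans 25 * 25 * 25 = 15625 cubic microns; dividing by 1000**3
    # converts cubic microns to cubic millimetres (1.5625e-05 mm^3 per voxel).
    return np.prod(atlas_resolution) / (1000 ** 3)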
|
the-stack_0_509 | import psycopg2
class Conn:
def __init__(self, connstr):
self.conn = psycopg2.connect(connstr)
self.setversion()
self.nexttmp = 0
def setversion(self):
cur = self.conn.cursor()
cur.execute("select version()")
verstr = cur.fetchone()
if "Greenplum Database 4" in verstr[0]:
self.ver = 4
elif "Greenplum Database 5" in verstr[0]:
self.ver = 5
else:
raise RuntimeError('Unknown Deepgreen Version')
self.typemap = {}
cur.execute("select oid, typname from pg_type")
rows = cur.fetchall()
for row in rows:
self.typemap[row[0]] = row[1]
cur.close()
self.conn.commit()
def close(self):
self.conn.close()
def next_tmpname(self):
self.nexttmp += 1
return "tmp_{0}".format(self.nexttmp)
def execute(self, sql):
cur = self.conn.cursor()
cur.execute(sql)
rows = cur.fetchall()
cur.close()
self.conn.commit()
return rows
def cursor(self, sql):
cur = self.conn.cursor()
cur.execute(sql)
return cur
if __name__ == '__main__':
conn = Conn("host=localhost user=ftian dbname=ftian")
print("Connected to deepgreen database, version is ", conn.ver)
|
the-stack_0_512 | from typing import Optional
import logging
import boto3
from botocore.exceptions import ClientError
from kermes_infra.models import User
class UserAdapter:
def __init__(self, endpoint_url: str, table_name: str, logger: logging.Logger) -> None:
self.dynamodb = boto3.resource("dynamodb", endpoint_url=endpoint_url)
self.table = self.dynamodb.Table(table_name)
self.logger = logger
def get(self, user_id: str) -> Optional[User]:
try:
item = self.table.get_item(Key={"user_id": user_id})
return User.from_dynamo(item["Item"])
except ClientError:
self.logger.error(f"error while getting record from Dynamo: user_id {user_id}", exc_info=True)
return None
def put(self, user: User) -> bool:
try:
self.table.put_item(Item=user.to_dynamo())
return True
except ClientError:
self.logger.error(
f"error while writing record to Dynamo: user_id {user.user_id}",
exc_info=True,
)
return False
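# Usage sketch (added for illustration): the endpoint URL, table name and logger
# name below are placeholders, not values taken from this project's settings.
def make_local_user_adapter() -> UserAdapter:
    logger = logging.getLogger("kermes.user_adapter")
    # Point at a local DynamoDB endpoint such as dynamodb-local.
    return UserAdapter(endpoint_url="http://localhost:8000", table_name="users", logger=logger)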
|
the-stack_0_513 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class StorageAccountPaged(Paged):
"""
    A paging container for iterating over a list of :class:`StorageAccount <azure.mgmt.storage.v2017_06_01.models.StorageAccount>` objects
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[StorageAccount]'}
}
def __init__(self, *args, **kwargs):
super(StorageAccountPaged, self).__init__(*args, **kwargs)
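# Usage note (added for illustration): instances of this class are normally
# produced by the generated operations layer rather than constructed directly,
# e.g. (assuming a configured StorageManagementClient named `client`):
#   for account in client.storage_accounts.list():
#       print(account.name)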
|
the-stack_0_514 | ##
## File: utils.py
##
## Author: Schuyler Martin <[email protected]>
##
## Description: Python file that contains basic utility functions
##
from utils.macros import *
import sys
#### GLOBALS ####
#### FUNCTIONS ####
def printd(msg):
'''
Prints debugging messages if debugging is enabled
:param: msg Message to print
'''
if (DEBUG_MACRO):
print("DEBUG: " + msg)
def read_file(fd):
'''
Reads in the file, line by line
:param: fd Name of the file
:return: Contents of the file, as an array of line strings
'''
data = []
for line in open(fd):
data += [line]
return data
def write_file(data, fd):
'''
Writes to a file, line by line
:param: data Lines of the file to write
:param: fd Name of the file to write
'''
fptr = open(fd, 'w')
for line in data:
fptr.write(line)
fptr.close()
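def copy_file(src_fd, dst_fd):
    '''
    Convenience helper added for illustration: round-trips a file through the
    two functions above, keeping their line-per-element convention
    :param: src_fd Name of the file to read
    :param: dst_fd Name of the file to write to
    '''
    write_file(read_file(src_fd), dst_fd)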
|
the-stack_0_517 | import argparse
import sys
import time
from typing import Optional, Union
from moonstreamdb.db import yield_db_session_ctx
from moonstreamdb.models import ESDEventSignature, ESDFunctionSignature
from sqlalchemy.orm import Session
import requests
CRAWL_URLS = {
"functions": "https://www.4byte.directory/api/v1/signatures/",
"events": "https://www.4byte.directory/api/v1/event-signatures/",
}
DB_MODELS = {
"functions": ESDFunctionSignature,
"events": ESDEventSignature,
}
def crawl_step(
db_session: Session,
crawl_url: str,
db_model: Union[ESDEventSignature, ESDFunctionSignature],
) -> Optional[str]:
attempt = 0
current_interval = 2
success = False
response: Optional[requests.Response] = None
while (not success) and attempt < 3:
attempt += 1
try:
response = requests.get(crawl_url)
response.raise_for_status()
success = True
except:
current_interval *= 2
time.sleep(current_interval)
if response is None:
print(f"Could not process URL: {crawl_url}", file=sys.stderr)
return None
page = response.json()
results = page.get("results", [])
rows = [
db_model(
id=row.get("id"),
text_signature=row.get("text_signature"),
hex_signature=row.get("hex_signature"),
created_at=row.get("created_at"),
)
for row in results
]
db_session.bulk_save_objects(rows)
db_session.commit()
return page.get("next")
def crawl(crawl_type: str, interval: float) -> None:
crawl_url: Optional[str] = CRAWL_URLS[crawl_type]
db_model = DB_MODELS[crawl_type]
with yield_db_session_ctx() as db_session:
while crawl_url is not None:
print(f"Crawling: {crawl_url}")
crawl_url = crawl_step(db_session, crawl_url, db_model)
time.sleep(interval)
def main():
parser = argparse.ArgumentParser(
description="Crawls function and event signatures from the Ethereum Signature Database (https://www.4byte.directory/)"
)
parser.add_argument(
"crawl_type",
choices=CRAWL_URLS,
help="Specifies whether to crawl function signatures or event signatures",
)
parser.add_argument(
"--interval",
type=float,
default=0.1,
help="Number of seconds to wait between requests to the Ethereum Signature Database API",
)
args = parser.parse_args()
crawl(args.crawl_type, args.interval)
if __name__ == "__main__":
main()
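# Example invocations (illustrative; the script's filename is an assumption):
#   python esd_crawler.py functions
#   python esd_crawler.py events --interval 0.5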
|
the-stack_0_518 | import mock
import pytest
from os.path import abspath, dirname, join
import sys
from praw.models import (Button, ButtonWidget, Calendar, CommunityList,
CustomWidget, Menu, MenuLink, IDCard, Image,
ImageData, ImageWidget, ModeratorsWidget,
PostFlairWidget, Redditor, RulesWidget, Submenu,
Subreddit, TextArea, Widget)
from ... import IntegrationTest
if sys.version_info.major > 2:
basestring = str # pylint: disable=invalid-name
class TestButtonWidget(IntegrationTest):
@staticmethod
def image_path(name):
test_dir = abspath(dirname(sys.modules[__name__].__file__))
return join(test_dir, '..', '..', 'files', name)
def test_button_widget(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):
button_widget = None
for widget in widgets.sidebar:
if isinstance(widget, ButtonWidget):
button_widget = widget
break
assert isinstance(button_widget, ButtonWidget)
assert len(button_widget) >= 1
assert all(isinstance(button, Button) for button in
button_widget.buttons)
assert button_widget == button_widget
assert button_widget.id == button_widget
assert button_widget in widgets.sidebar
assert button_widget[0].text
assert button_widget.shortName
assert hasattr(button_widget, 'description')
assert subreddit == button_widget.subreddit
@mock.patch('time.sleep', return_value=None)
def test_create_and_update_and_delete(self, _):
self.reddit.read_only = False
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette(
'TestButtonWidget.test_create_and_update_and_delete'):
styles = {'headerColor': '#123456', 'backgroundColor': '#bb0e00'}
my_image = widgets.mod.upload_image(self.image_path('test.png'))
buttons = [
{
'kind': 'text',
'text': 'View source',
'url': 'https://github.com/praw-dev/praw',
'color': '#FF0000',
'textColor': '#00FF00',
'fillColor': '#0000FF',
'hoverState': {
'kind': 'text',
'text': 'VIEW SOURCE!',
'color': '#FFFFFF',
'textColor': '#000000',
'fillColor': '#0000FF'
}
},
{
'kind': 'image',
'text': 'View documentation',
'linkUrl': 'https://praw.readthedocs.io',
'url': my_image,
'height': 200,
'width': 200,
'hoverState': {
'kind': 'image',
'url': my_image,
'height': 200,
'width': 200
}
},
{
'kind': 'text',
'text': '/r/redditdev',
'url': 'https://reddit.com/r/redditdev',
'color': '#000000',
'textColor': '#FF00FF',
'fillColor': '#005500'
}
]
widget = widgets.mod.add_button_widget(
'Things to click', 'Click some of these *cool* links!',
buttons, styles)
assert isinstance(widget, ButtonWidget)
assert len(widget) == 3
assert all(isinstance(item, Button) for item in widget)
assert widget.shortName == 'Things to click'
assert widget.description == 'Click some of these *cool* links!'
assert widget.styles == styles
assert widget[0].text == 'View source'
assert widget[0].url == 'https://github.com/praw-dev/praw'
assert widget[2].text == '/r/redditdev'
assert widget[2].url == 'https://reddit.com/r/redditdev'
assert widget[1].text == 'View documentation'
assert widget[1].linkUrl == 'https://praw.readthedocs.io'
assert widget[1].hoverState['kind'] == 'image'
assert widget[1].hoverState['height'] == 200
widgets.refresh() # the links are initially invalid
for new_widget in widgets.sidebar:
if new_widget == widget:
widget = new_widget
break
widget = widget.mod.update(shortName='New short name')
assert isinstance(widget, ButtonWidget)
assert len(widget) == 3
assert all(isinstance(item, Button) for item in widget)
assert widget.shortName == 'New short name'
assert widget.description == 'Click some of these *cool* links!'
assert widget.styles == styles
assert widget[0].text == 'View source'
assert widget[0].url == 'https://github.com/praw-dev/praw'
assert widget[2].text == '/r/redditdev'
assert widget[2].url == 'https://reddit.com/r/redditdev'
assert widget[1].text == 'View documentation'
assert widget[1].linkUrl == 'https://praw.readthedocs.io'
assert widget[1].hoverState['kind'] == 'image'
assert widget[1].hoverState['height'] == 200
buttons.reverse()
widget = widget.mod.update(buttons=buttons)
assert isinstance(widget, ButtonWidget)
assert len(widget) == 3
assert all(isinstance(item, Button) for item in widget)
assert widget.shortName == 'New short name'
assert widget.description == 'Click some of these *cool* links!'
assert widget.styles == styles
assert widget[0].text == '/r/redditdev'
assert widget[0].url == 'https://reddit.com/r/redditdev'
assert widget[2].text == 'View source'
assert widget[2].url == 'https://github.com/praw-dev/praw'
assert widget[1].text == 'View documentation'
assert widget[1].linkUrl == 'https://praw.readthedocs.io'
assert widget[1].hoverState['kind'] == 'image'
assert widget[1].hoverState['height'] == 200
widget.mod.delete()
class TestCalendar(IntegrationTest):
def test_calendar(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):
calendar = None
for widget in widgets.sidebar:
if isinstance(widget, Calendar):
calendar = widget
break
assert isinstance(calendar, Calendar)
assert calendar == calendar
assert calendar.id == calendar
assert calendar in widgets.sidebar
assert isinstance(calendar.configuration, dict)
assert hasattr(calendar, 'requiresSync')
assert subreddit == calendar.subreddit
@mock.patch('time.sleep', return_value=None)
def test_create_and_update_and_delete(self, _):
self.reddit.read_only = False
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette(
'TestCalendar.test_create_and_update_and_delete'):
styles = {'headerColor': '#123456', 'backgroundColor': '#bb0e00'}
config = {'numEvents': 10,
'showDate': True,
'showDescription': False,
'showLocation': False,
'showTime': True,
'showTitle': True}
            cal_id = 'ccahu0rstno2jrvioq4ccffn78@group.calendar.google.com'
widget = widgets.mod.add_calendar('Upcoming Events', cal_id, True,
config, styles)
assert isinstance(widget, Calendar)
assert widget.shortName == 'Upcoming Events'
assert widget.googleCalendarId == 'ccahu0rstno2jrvioq4ccffn78@' \
'group.calendar.google.com'
assert widget.configuration == config
assert widget.styles == styles
widget = widget.mod.update(shortName='Past Events :(')
assert isinstance(widget, Calendar)
assert widget.shortName == 'Past Events :('
assert widget.googleCalendarId == 'ccahu0rstno2jrvioq4ccffn78@' \
'group.calendar.google.com'
assert widget.configuration == config
assert widget.styles == styles
widget.mod.delete()
class TestCommunityList(IntegrationTest):
def test_community_list(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):
comm_list = None
for widget in widgets.sidebar:
if isinstance(widget, CommunityList):
comm_list = widget
break
assert isinstance(comm_list, CommunityList)
assert len(comm_list) >= 1
assert all(isinstance(subreddit, Subreddit) for subreddit in
comm_list)
assert comm_list == comm_list
assert comm_list.id == comm_list
assert comm_list in widgets.sidebar
assert comm_list.shortName
assert comm_list[0] in comm_list
assert subreddit == comm_list.subreddit
@mock.patch('time.sleep', return_value=None)
def test_create_and_update_and_delete(self, _):
self.reddit.read_only = False
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette(
'TestCommunityList.test_create_and_update_and_delete'):
styles = {'headerColor': '#123456', 'backgroundColor': '#bb0e00'}
subreddits = ['learnpython', self.reddit.subreddit('redditdev')]
widget = widgets.mod.add_community_list('My fav subs', subreddits,
styles)
assert isinstance(widget, CommunityList)
assert widget.shortName == 'My fav subs'
assert widget.styles == styles
assert self.reddit.subreddit('learnpython') in widget
assert 'redditdev' in widget
widget = widget.mod.update(shortName='My least fav subs :(',
data=['redesign'])
assert isinstance(widget, CommunityList)
assert widget.shortName == 'My least fav subs :('
assert widget.styles == styles
assert self.reddit.subreddit('redesign') in widget
widget.mod.delete()
class TestCustomWidget(IntegrationTest):
@staticmethod
def image_path(name):
test_dir = abspath(dirname(sys.modules[__name__].__file__))
return join(test_dir, '..', '..', 'files', name)
@mock.patch('time.sleep', return_value=None)
def test_create_and_update_and_delete(self, _):
self.reddit.read_only = False
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette(
'TestCustomWidget.test_create_and_update_and_delete'):
image_dicts = [{'width': 0,
'height': 0,
'name': 'a',
'url': widgets.mod.upload_image(self.image_path(
'test.png'))}]
styles = {'headerColor': '#123456', 'backgroundColor': '#bb0e00'}
widget = widgets.mod.add_custom_widget('My widget',
'# Hello world!', '/**/',
200, image_dicts, styles)
assert isinstance(widget, CustomWidget)
assert widget.shortName == 'My widget'
assert widget.text == '# Hello world!'
assert widget.css == '/**/'
assert widget.height == 200
assert widget.styles == styles
assert len(widget.imageData) == 1
assert all(isinstance(img, ImageData) for img in widget.imageData)
# initially, image URLs are incorrect, so we much refresh to get
# the proper ones.
widgets.refresh()
refreshed = widgets.sidebar[-1]
assert refreshed == widget
widget = refreshed
new_css = 'h1,h2,h3,h4,h5,h6 {color: #00ff00;}'
widget = widget.mod.update(css=new_css)
assert isinstance(widget, CustomWidget)
assert widget.shortName == 'My widget'
assert widget.text == '# Hello world!'
assert widget.css == new_css
assert widget.height == 200
assert widget.styles == styles
assert len(widget.imageData) == 1
assert all(isinstance(img, ImageData) for img in widget.imageData)
widget.mod.delete()
def test_custom_widget(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):
custom = None
for widget in widgets.sidebar:
if isinstance(widget, CustomWidget):
custom = widget
break
assert isinstance(custom, CustomWidget)
assert len(custom.imageData) > 0
assert all(isinstance(img_data, ImageData) for img_data in
custom.imageData)
assert custom == custom
assert custom.id == custom
assert custom in widgets.sidebar
assert 500 >= custom.height >= 50
assert custom.text
assert custom.css
assert custom.shortName
assert subreddit == custom.subreddit
class TestIDCard(IntegrationTest):
def test_id_card(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):
card = widgets.id_card
assert isinstance(card, IDCard)
assert card == card
assert card.id == card
assert card.shortName
assert card.currentlyViewingText
assert card.subscribersText
assert subreddit == card.subreddit
class TestImageWidget(IntegrationTest):
@staticmethod
def image_path(name):
test_dir = abspath(dirname(sys.modules[__name__].__file__))
return join(test_dir, '..', '..', 'files', name)
@mock.patch('time.sleep', return_value=None)
def test_create_and_update_and_delete(self, _):
self.reddit.read_only = False
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette(
'TestImageWidget.test_create_and_update_and_delete'):
image_paths = (self.image_path(name) for name in
('test.jpg', 'test.png'))
image_dicts = [{'width': 0, 'height': 0, 'linkUrl': '',
'url': widgets.mod.upload_image(img_path)}
for img_path in image_paths]
styles = {'headerColor': '#123456', 'backgroundColor': '#bb0e00'}
widget = widgets.mod.add_image_widget(short_name='My new pics!',
data=image_dicts,
styles=styles)
assert isinstance(widget, ImageWidget)
assert widget.shortName == 'My new pics!'
assert widget.styles == styles
assert len(widget) == 2
assert all(isinstance(img, Image) for img in widget)
widget = widget.mod.update(shortName='My old pics :(',
data=image_dicts[:1])
assert isinstance(widget, ImageWidget)
assert widget.shortName == 'My old pics :('
assert widget.styles == styles
assert len(widget) == 1
assert all(isinstance(img, Image) for img in widget)
widget.mod.delete()
def test_image_widget(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):
img_widget = None
for widget in widgets.sidebar:
if isinstance(widget, ImageWidget):
img_widget = widget
break
assert isinstance(img_widget, ImageWidget)
assert len(img_widget) >= 1
assert all(isinstance(image, Image) for image in img_widget)
assert img_widget == img_widget
assert img_widget.id == img_widget
assert img_widget in widgets.sidebar
assert img_widget[0].linkUrl
assert img_widget.shortName
assert subreddit == img_widget.subreddit
class TestMenu(IntegrationTest):
@mock.patch('time.sleep', return_value=None)
def test_create_and_update_and_delete(self, _):
self.reddit.read_only = False
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
menu_contents = [
{'text': 'My homepage', 'url': 'https://example.com'},
{'text': 'Python packages',
'children': [
{'text': 'PRAW', 'url': 'https://praw.readthedocs.io/'},
{'text': 'requests', 'url': 'http://python-requests.org'}
]},
{'text': 'Reddit homepage', 'url': 'https://reddit.com'}
]
with self.recorder.use_cassette(
'TestMenu.test_create_and_update_and_delete'):
widget = widgets.mod.add_menu(menu_contents)
assert isinstance(widget, Menu)
assert len(widget) == 3
assert all(isinstance(item, (Submenu, MenuLink))
for item in widget)
assert all(all(isinstance(item, MenuLink) for item in subm)
for subm in widget if isinstance(subm, Submenu))
assert widget[0].text == 'My homepage'
assert widget[0].url == 'https://example.com'
assert widget[2].text == 'Reddit homepage'
assert widget[2].url == 'https://reddit.com'
assert widget[1].text == 'Python packages'
assert widget[1][0].text == 'PRAW'
assert widget[1][0].url == 'https://praw.readthedocs.io/'
assert widget[1][1].text == 'requests'
assert widget[1][1].url == 'http://python-requests.org'
menu_contents.reverse()
widget = widget.mod.update(data=menu_contents)
assert isinstance(widget, Menu)
assert len(widget) == 3
assert all(isinstance(item, (Submenu, MenuLink))
for item in widget)
assert all(all(isinstance(item, MenuLink) for item in subm)
for subm in widget if isinstance(subm, Submenu))
assert widget[0].text == 'Reddit homepage'
assert widget[0].url == 'https://reddit.com'
assert widget[2].text == 'My homepage'
assert widget[2].url == 'https://example.com'
assert widget[1].text == 'Python packages'
assert widget[1][0].text == 'PRAW'
assert widget[1][0].url == 'https://praw.readthedocs.io/'
assert widget[1][1].text == 'requests'
assert widget[1][1].url == 'http://python-requests.org'
widget.mod.delete()
def test_menu(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):
menu = None
for widget in widgets.topbar:
if isinstance(widget, Menu):
menu = widget
break
assert isinstance(menu, Menu)
assert all(isinstance(item, (MenuLink, Submenu)) for item in menu)
assert menu == menu
assert menu.id == menu
assert menu in widgets.topbar
assert len(menu) >= 1
assert menu[0].text
assert subreddit == menu.subreddit
submenu = None
for child in menu:
if isinstance(child, Submenu):
submenu = child
break
assert isinstance(submenu, Submenu)
assert len(submenu) >= 0
assert all(isinstance(child, MenuLink) for child in submenu)
assert submenu[0].text
assert submenu[0].url
class TestModeratorsWidget(IntegrationTest):
def test_moderators_widget(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):
mods = widgets.moderators_widget
assert isinstance(mods, ModeratorsWidget)
assert all(isinstance(mod, Redditor) for mod in mods)
assert mods == mods
assert mods.id == mods
assert len(mods) >= 1
assert isinstance(mods[0], Redditor)
assert subreddit == mods.subreddit
class TestPostFlairWidget(IntegrationTest):
@mock.patch('time.sleep', return_value=None)
def test_create_and_update_and_delete(self, _):
self.reddit.read_only = False
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette(
'TestPostFlairWidget.test_create_and_update_and_delete'):
flairs = [f['id'] for f in subreddit.flair.link_templates]
styles = {'headerColor': '#123456', 'backgroundColor': '#bb0e00'}
widget = widgets.mod.add_post_flair_widget('Some flairs', 'list',
flairs, styles)
assert isinstance(widget, PostFlairWidget)
assert widget.shortName == 'Some flairs'
assert widget.display == 'list'
assert widget.order == flairs
assert widget.styles == styles
assert len(widget) == 2
assert all(flair_id in widget.templates for flair_id in widget)
widget = widget.mod.update(display='cloud')
assert isinstance(widget, PostFlairWidget)
assert widget.shortName == 'Some flairs'
assert widget.display == 'cloud'
assert widget.order == flairs
assert widget.styles == styles
assert len(widget) == 2
assert all(flair_id in widget.templates for flair_id in widget)
widget = widget.mod.update(order=widget.order[1:])
assert isinstance(widget, PostFlairWidget)
assert widget.shortName == 'Some flairs'
assert widget.display == 'cloud'
assert widget.order == flairs[1:]
assert widget.styles == styles
assert len(widget) == 1
assert all(flair_id in widget.templates for flair_id in widget)
widget.mod.delete()
def test_post_flair_widget(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):
pf_widget = None
for widget in widgets.sidebar:
if isinstance(widget, PostFlairWidget):
pf_widget = widget
break
assert isinstance(pf_widget, PostFlairWidget)
assert len(pf_widget) >= 1
assert all(flair_id in widget.templates for flair_id in widget)
assert pf_widget == pf_widget
assert pf_widget.id == pf_widget
assert pf_widget in widgets.sidebar
assert pf_widget.shortName
assert all(flair in pf_widget for flair in pf_widget)
assert subreddit == pf_widget.subreddit
class TestRulesWidget(IntegrationTest):
def test_rules_widget(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):
rules = None
for widget in widgets.sidebar:
if isinstance(widget, RulesWidget):
rules = widget
break
assert isinstance(rules, RulesWidget)
assert rules == rules
assert rules.id == rules
assert rules.display
assert len(rules) > 0
assert subreddit == rules.subreddit
class TestSubredditWidgets(IntegrationTest):
def test_bad_attribute(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):
with pytest.raises(AttributeError):
widgets.nonexistant_attribute
def test_items(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):
assert isinstance(widgets.items, dict)
def test_progressive_images(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
def has_progressive(widgets_):
# best way I could figure if an image is progressive
sign = 'fm=pjpg'
for widget in widgets_.sidebar:
if isinstance(widget, ImageWidget):
for image in widget:
if sign in image.url:
return True
elif isinstance(widget, CustomWidget):
for image_data in widget.imageData:
if sign in image_data.url:
return True
return False
with self.recorder.use_cassette(
'TestSubredditWidgets.test_progressive_images'):
widgets.progressive_images = True
assert has_progressive(widgets)
widgets.progressive_images = False
widgets.refresh()
assert not has_progressive(widgets)
widgets.progressive_images = True
widgets.refresh()
assert has_progressive(widgets)
def test_refresh(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette('TestSubredditWidgets.test_refresh'):
assert widgets.sidebar # to fetch
old_sidebar = widgets.sidebar # reference, not value
widgets.refresh()
assert old_sidebar is not widgets.sidebar # should be new list
def test_repr(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
assert ("SubredditWidgets(subreddit=Subreddit(display_name='"
"{}'))").format(pytest.placeholders.test_subreddit) == repr(
widgets)
def test_sidebar(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):
assert len(widgets.sidebar) >= 1 # also tests lazy-loading
# all items should be Widget subclasses
assert all(isinstance(widget, Widget) and type(widget) != Widget
for widget in widgets.sidebar)
def test_specials(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):
assert isinstance(widgets.id_card, IDCard)
assert isinstance(widgets.moderators_widget, ModeratorsWidget)
def test_topbar(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):
assert 1 <= len(widgets.topbar)
assert all(isinstance(widget, Widget) and type(widget) != Widget
for widget in widgets.topbar)
class TestSubredditWidgetsModeration(IntegrationTest):
@staticmethod
def image_path(name):
test_dir = abspath(dirname(sys.modules[__name__].__file__))
return join(test_dir, '..', '..', 'files', name)
@mock.patch('time.sleep', return_value=None)
def test_reorder(self, _):
self.reddit.read_only = False
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette(
'TestSubredditWidgetsModeration.test_reorder'):
old_order = list(widgets.sidebar)
new_order = list(reversed(old_order))
widgets.mod.reorder(new_order)
widgets.refresh()
assert list(widgets.sidebar) == new_order
widgets.mod.reorder(old_order)
widgets.refresh()
assert list(widgets.sidebar) == old_order
mixed_types = [thing if i % 2 == 0 else thing.id
for i, thing in enumerate(new_order)]
# mixed_types has some str and some Widget.
assert any(isinstance(thing, basestring) for thing in mixed_types)
assert any(isinstance(thing, Widget) for thing in mixed_types)
widgets.mod.reorder(mixed_types)
widgets.refresh()
assert list(widgets.sidebar) == new_order
@mock.patch('time.sleep', return_value=None)
def test_upload_image(self, _):
self.reddit.read_only = False
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette(
'TestSubredditWidgetsModeration.test_upload_image'):
for image in ('test.jpg', 'test.png'):
image_url = widgets.mod.upload_image(self.image_path(image))
assert image_url
class TestTextArea(IntegrationTest):
@mock.patch('time.sleep', return_value=None)
def test_create_and_update_and_delete(self, _):
self.reddit.read_only = False
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette(
'TestTextArea.test_create_and_update_and_delete'):
styles = {'headerColor': '#123456', 'backgroundColor': '#bb0e00'}
widget = widgets.mod.add_text_area(short_name='My new widget!',
text='Hello world!',
styles=styles)
assert isinstance(widget, TextArea)
assert widget.shortName == 'My new widget!'
assert widget.styles == styles
assert widget.text == 'Hello world!'
widget = widget.mod.update(shortName='My old widget :(',
text='Feed me')
assert isinstance(widget, TextArea)
assert widget.shortName == 'My old widget :('
assert widget.styles == styles
assert widget.text == 'Feed me'
widget.mod.delete()
def test_text_area(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):
text = None
for widget in widgets.sidebar:
if isinstance(widget, TextArea):
text = widget
break
assert isinstance(text, TextArea)
assert text == text
assert text.id == text
assert text in widgets.sidebar
assert text in widgets.sidebar
assert text.shortName
assert text.text
assert subreddit == text.subreddit
class TestWidget(IntegrationTest):
def test_inequality(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
widgets = subreddit.widgets
with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):
assert len(widgets.sidebar) >= 2
assert widgets.sidebar[0] != widgets.sidebar[1]
assert widgets.sidebar[0] != widgets.sidebar[1].id
assert u'\xf0\x9f\x98\x80' != widgets.sidebar[0] # for python 2
|
the-stack_0_519 | #!/usr/bin/env python
#
# Script inspired by bud:
# https://github.com/indutny/bud
#
import platform
import os
import subprocess
import sys
CC = os.environ.get('CC', 'cc')
script_dir = os.path.dirname(__file__)
root = os.path.normpath(os.path.join(script_dir, '..'))
output_dir = os.path.join(os.path.abspath(root), 'out')
sys.path.insert(0, os.path.join(root, 'deps', 'gyp', 'pylib'))
try:
import gyp
except ImportError:
print('Error: you need to install gyp in deps/gyp first, run:')
print(' ./scripts/get-dep.sh gyp')
sys.exit(42)
def host_arch():
machine = platform.machine()
if machine == 'i386': return 'ia32'
if machine == 'x86_64': return 'x64'
if machine == 'aarch64': return 'arm64'
if machine == 'mips64': return 'mips64el'
if machine.startswith('arm'): return 'arm'
if machine.startswith('mips'): return 'mips'
return machine # Return as-is and hope for the best.
def compiler_version():
proc = subprocess.Popen(CC.split() + ['--version'], stdout=subprocess.PIPE)
is_clang = b'clang' in proc.communicate()[0].split(b'\n')[0]
proc = subprocess.Popen(CC.split() + ['-dumpversion'], stdout=subprocess.PIPE)
version = proc.communicate()[0].split(b'.')
major_version = int(version[:1][0])
if is_clang is False and major_version >= 7:
proc = subprocess.Popen(CC.split() + ['-dumpfullversion'], stdout=subprocess.PIPE)
version = proc.communicate()[0].split(b'.')
version = map(int, version[:2])
version = tuple(version)
return (version, is_clang)
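# Illustrative note (added commentary, not from the original script): with a GCC 9.3
# toolchain the call above would typically return ((9, 3), False), while a clang
# toolchain returns its GCC-compatible version tuple with is_clang=True. The
# -dumpfullversion fallback exists because on many distributions gcc >= 7 prints only
# the major version for -dumpversion.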
def run_gyp(args):
rc = gyp.main(args)
if rc != 0:
print('Error running GYP')
sys.exit(rc)
if __name__ == '__main__':
args = sys.argv[1:]
# GYP bug.
# On msvs it will crash if it gets an absolute path.
# On Mac/make it will crash if it doesn't get an absolute path.
# NOTE ibc: Not sure that it requires absolute path in Mac/make...
if sys.platform == 'win32':
args.append(os.path.join(root, 'mediasoup-worker.gyp'))
common_fn = os.path.join(root, 'common.gypi')
# We force VS 2010 over 2008, which would otherwise be the default for gyp.
if not os.environ.get('GYP_MSVS_VERSION'):
os.environ['GYP_MSVS_VERSION'] = '2010'
else:
args.append(os.path.join(os.path.abspath(root), 'mediasoup-worker.gyp'))
common_fn = os.path.join(os.path.abspath(root), 'common.gypi')
if os.path.exists(common_fn):
args.extend(['-I', common_fn])
args.append('--depth=' + root)
# There's a bug with windows which doesn't allow this feature.
if sys.platform != 'win32':
if '-f' not in args:
args.extend('-f make'.split())
if 'ninja' not in args:
args.extend(['-Goutput_dir=' + output_dir])
args.extend(['--generator-output', output_dir])
(major, minor), is_clang = compiler_version()
args.append('-Dgcc_version=%d' % (10 * major + minor))
args.append('-Dclang=%d' % int(is_clang))
if is_clang is False and major == 4 and minor <= 8:
raise RuntimeError('gcc <= 4.8 not supported, please upgrade your gcc')
if not any(a.startswith('-Dhost_arch=') for a in args):
args.append('-Dhost_arch=%s' % host_arch())
if not any(a.startswith('-Dtarget_arch=') for a in args):
args.append('-Dtarget_arch=%s' % host_arch())
if any(a.startswith('-Dopenssl_fips=') for a in args):
fips_fn = os.path.join(os.path.abspath(root), 'fips.gypi')
args.extend(['-I', fips_fn])
else:
args.append('-Dopenssl_fips=')
if 'asan' in args:
args.append('-Dmediasoup_asan=true')
args = list(filter(lambda arg: arg != 'asan', args))
else:
args.append('-Dmediasoup_asan=false')
args.append('-Dnode_byteorder=' + sys.byteorder)
gyp_args = list(args)
print(gyp_args)
run_gyp(gyp_args)
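# Usage sketch (hypothetical invocation; the script path and flags are illustrative):
#
#   python scripts/configure.py -f make -Dtarget_arch=x64
#
# which generates build files for mediasoup-worker.gyp under out/ using the chosen
# GYP generator.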
|
the-stack_0_520 | '''
Created on 2020-08-11
@author: wf
'''
import unittest
import time
from lodstorage.sparql import SPARQL
from lodstorage.lod import LOD
from ptp.location import CountryManager, ProvinceManager, CityManager
import datetime
from collections import Counter
import getpass
class TestLocations(unittest.TestCase):
'''
check countries, provinces/states and cities
'''
def setUp(self):
self.debug=False
pass
def tearDown(self):
pass
def testCityStorage(self):
'''
try storing city data in cache
'''
cim=CityManager(name="github")
cim.fromLutangar()
cim.store(cim.cityList)
def testCities(self):
'''
test consolidating cities from different sources
'''
cim=CityManager('lutangarVersusOpenResearch')
startTime=time.time()
cim.fromLutangar()
self.assertEqual(128769,(len(cim.cityList)))
print ("reading %d cities from github took %5.1f secs" % (len(cim.cityList),time.time()-startTime))
startTime=time.time()
orCities=cim.fromOpenResearch(showProgress=True)
cityCounter=Counter(orCities)
uniqueCities=list(cityCounter.most_common())
print ("reading %d cities from %d events from openresearch took %5.1f secs" % (len(uniqueCities),len(orCities),time.time()-startTime))
print (cityCounter.most_common(1000))
orCityList=[]
for cityName,count in uniqueCities:
orCityList.append({'name': cityName, 'count': count})
startTime=time.time()
validCities=LOD.intersect(cim.cityList, orCityList, 'name')
print ("validating %d cities from openresearch took %5.1f secs" % (len(validCities),time.time()-startTime))
def getDBPedia(self,mode='query',debug=False):
endpoint="http://dbpedia.org/sparql"
dbpedia=SPARQL(endpoint,mode=mode,debug=debug)
return dbpedia
def testDBPediaCities(self):
'''
https://github.com/LITMUS-Benchmark-Suite/dbpedia-graph-convertor/blob/master/get_data.py
'''
# kglf
return
dbpedia=self.getDBPedia()
limit=100
# Query to get the population of cities
citiesWithPopulationQuery = """
PREFIX dbo: <http://dbpedia.org/ontology/>
PREFIX dbp: <http://dbpedia.org/property/>
PREFIX dbr: <http://dbpedia.org/resource/>
SELECT DISTINCT ?dbCity ?country ?name ?website ?population
WHERE {
?dbCity a dbo:City .
?dbCity dbp:name ?name .
?dbCity dbo:country ?country .
OPTIONAL { ?dbCity dbo:populationTotal ?population . }
OPTIONAL { ?dbCity dbp:website ?website . }
}
LIMIT %d
""" % limit
cityList=dbpedia.queryAsListOfDicts(citiesWithPopulationQuery)
cim=CityManager("dbpedia")
LOD.setNone4List(cityList, ["population","website"])
cim.store(cityList)
def testDBPediaCountries(self):
'''
http://dbpedia.org/ontology/Country
'''
# kglf
return
dbpedia=self.getDBPedia()
countriesQuery="""
# https://opendata.stackexchange.com/a/7660/18245 - dbp:iso3166code not set ...
PREFIX dbo: <http://dbpedia.org/ontology/>
SELECT ?country_name ?population ?isocode
WHERE {
?country_name a dbo:Country .
?country_name dbp:iso3166code ?isocode.
OPTIONAL { ?country_name dbo:populationTotal ?population . }
}
"""
countriesResult=dbpedia.query(countriesQuery)
print(countriesResult)
print(len(countriesResult))
def getEndPoint(self):
endpoint="https://query.wikidata.org/sparql"
# check we have local wikidata copy:
if getpass.getuser()=="travis":
endpoint=None
elif getpass.getuser()=="wf":
# use 2018 wikidata copy
#endpoint="http://blazegraph.bitplan.com/sparql"
# use 2020 wikidata copy
endpoint="http://jena.zeus.bitplan.com/wikidata"
return endpoint
def testWikiDataCities(self):
'''
test getting cities (human settlements, to be precise)
from Wikidata
'''
#endpoint=self.getEndPoint()
# force caching - 3.5 hour query if done via endpoint!
endpoint=None
cm=CityManager("wikidata")
cm.endpoint=endpoint
cm.fromCache()
print("found %d cities" % len(cm.cityList))
self.assertTrue(len(cm.cityList)>=200000)
def testWikiDataProvinces(self):
'''
test getting provinces from wikidata
'''
pm=ProvinceManager("wikidata")
pm.endpoint=self.getEndPoint()
pm.fromCache()
print("found %d provinces" % len(pm.provinceList))
self.assertTrue(len(pm.provinceList)>=195)
def testWikiDataCountries(self):
'''
check local wikidata
'''
cm=CountryManager("wikidata")
cm.endpoint=self.getEndPoint()
cm.fromCache()
self.assertTrue(len(cm.countryList)>=195)
# sparql=TestJena.getJena(debug=self.debug)
# errors=cm.storeToRDF(sparql)
# self.assertFalse(sparql.printErrors(errors))
# doimport=True
# if doimport:
# cm2=CountryManager()
# cm2.fromRDF(sparql)
# self.assertEqual(cm.countryList,cm2.countryList)
def testCountryManager(self):
'''
test storing countries in SQL format
'''
cm=CountryManager("github",debug=True)
cm.fromErdem()
cm.store(cm.countryList)
def testIntersection(self):
'''
test creating the intersection of a list of dictionaries
'''
list1 = [{'count': 351, 'evt_datetime': datetime.datetime(2015, 10, 23, 8, 45), 'att_value': 'red'},
{'count': 332, 'evt_datetime': datetime.datetime(2015, 10, 23, 8, 45), 'att_value': 'red'},
{'count': 336, 'evt_datetime': datetime.datetime(2015, 10, 23, 8, 45), 'att_value': 'red'},
{'count': 359, 'evt_datetime': datetime.datetime(2015, 10, 23, 8, 45), 'att_value': 'red'},
{'count': 309, 'evt_datetime': datetime.datetime(2015, 10, 23, 8, 45), 'att_value': 'red'}]
list2 = [{'count': 359, 'evt_datetime': datetime.datetime(2015, 10, 23, 8, 45), 'att_value': 'red'},
{'count': 351, 'evt_datetime': datetime.datetime(2015, 10, 23, 8, 45), 'att_value': 'red'},
{'count': 381, 'evt_datetime': datetime.datetime(2015, 10, 22, 8, 45), 'att_value': 'red'}]
listi=LOD.intersect(list1, list2,'count')
print(listi)
self.assertEqual(2,len(listi))
listi=LOD.intersect(list1, list2)
print(listi)
self.assertEqual(2,len(listi))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
the-stack_0_523 | from argparse import ArgumentParser
from ._version import __version__
def build_args_parser(
prog: str,
description: str = '',
epilog: str = ''
) -> ArgumentParser:
parser = ArgumentParser(
prog = prog,
description = description,
epilog = epilog
)
# Build Parser
parser = add_arguments(parser)
return parser
def add_arguments(parser: ArgumentParser) -> ArgumentParser:
parser.add_argument(
'input',
type=str,
help='Path to an .xml SBOL file containing construct designs and sequences'
)
parser.add_argument(
'output',
type=str,
help='Path to the output spreadsheet'
)
parser.add_argument(
'assembly_method',
type=str,
choices=["gibson", "golden_gate", "any_method"],
help='If "any_method" is selected, each construct can be built with any method. However, Golden Gate Assembly will have priority over Gibson Assembly'
)
parser.add_argument(
'--nb_constructs',
type=int,
help='Maximum number of constructs to build (only used in tests)'
)
parser.add_argument(
'--version',
action='version',
version='%(prog)s {}'.format(__version__),
help='show the version number and exit'
)
return parser
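# Usage sketch (added for illustration; the prog name and file names are hypothetical):
#
#   parser = build_args_parser(prog='assembly_planner', description='Plan constructs')
#   args = parser.parse_args(['designs.xml', 'plan.xlsx', 'golden_gate'])
#   # args.input == 'designs.xml', args.assembly_method == 'golden_gate'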
|
the-stack_0_524 | from Instrucciones.Declaracion import Declaracion
from Instrucciones.Sql_create.Tipo_Constraint import Tipo_Constraint, Tipo_Dato_Constraint
from Instrucciones.TablaSimbolos.Tipo import Tipo
from Instrucciones.TablaSimbolos.Instruccion import Instruccion
from Instrucciones.TablaSimbolos.Tabla import Tabla
from Instrucciones.Excepcion import Excepcion
from storageManager.jsonMode import *
from Instrucciones.Tablas.Tablas import Tablas
from Instrucciones.TablaSimbolos.Tipo import Tipo, Tipo_Dato
from Instrucciones.TablaSimbolos.Tipo import Tipo, Tipo_Dato
from Instrucciones.Tablas.Campo import Campo
from Optimizador.C3D import *
from Instrucciones.TablaSimbolos import Instruccion3D as c3d
class CreateTable(Instruccion):
def __init__(self, tabla, tipo, campos, herencia, strGram ,linea, columna):
Instruccion.__init__(self,tipo,linea,columna, strGram)
self.tabla = tabla
self.campos = campos
self.herencia = herencia
def ejecutar(self, tabla, arbol):
super().ejecutar(tabla,arbol)
# Scope for the table
tablaLocal = Tabla(tabla)
compuesta = True
# Check that a database has been selected
if arbol.bdUsar != None:
for camp in self.campos:
if isinstance(camp, Tipo_Constraint):
tc=self.campos.pop(int(self.campos.index(camp)))
if tc.tipo == Tipo_Dato_Constraint.UNIQUE or tc.tipo == Tipo_Dato_Constraint.PRIMARY_KEY or tc.tipo == Tipo_Dato_Constraint.FOREIGN_KEY:
for id in tc.expresion:
bid=False
for ct in self.campos:
if ct.nombre== id:
if self.campos[self.campos.index(ct)].constraint == None:
self.campos[self.campos.index(ct)].constraint=[]
if tc.tipo == Tipo_Dato_Constraint.UNIQUE:
self.campos[self.campos.index(ct)].constraint.append(Tipo_Constraint(self.tabla+"_"+ct.nombre+"_pkey", Tipo_Dato_Constraint.UNIQUE, None))
if tc.tipo == Tipo_Dato_Constraint.PRIMARY_KEY:
compuesta = False
self.campos[self.campos.index(ct)].constraint.append(Tipo_Constraint(self.tabla+"_pkey", Tipo_Dato_Constraint.PRIMARY_KEY, None))
#if tc.tipo == Tipo_Dato_Constraint.FOREIGN_KEY:
#self.campos[self.campos.index(ct)].constraint.append(Tipo_Constraint(None, Tipo_Dato_Constraint.UNIQUE, None))
bid=True
if not bid:
error = Excepcion("42P10","Semantico",f"La columna <<{id}>> no existe, Error en el Constraint",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return
# Check whether the table inherits from another table
if self.herencia!=None:
# Look up whether the inherited table exists
htabla = arbol.devolverBaseDeDatos().getTabla(self.herencia)
if htabla != None:
tabla_temp=[]
# Walk all the columns of the inherited table to merge repeated fields
for campo_her in htabla.lista_de_campos:
indice=0
bandera_campo=True
for campo_nuevo in self.campos:
if campo_her.nombre==campo_nuevo.nombre:
tabla_temp.append(campo_nuevo)
arbol.consola.append(f"NOTICE: mezclando la columna <<{campo_nuevo.nombre}>> con la definición heredada.")
self.campos.pop(indice)
indice+=1
bandera_campo=False
break
if bandera_campo:
tabla_temp.append(campo_her)
tabla_temp = tabla_temp + self.campos
self.campos= tabla_temp
else:
error = Excepcion("42P01","Semantico",f"No existe la relación <<{self.herencia}>>.",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return
# PRIMARY KEY VERIFICATION
listaPrimarias = []
for camp in self.campos:
if isinstance(camp.tipo,Tipo):
if camp.constraint != None:
for s in camp.constraint:
if s.tipo == Tipo_Dato_Constraint.PRIMARY_KEY:
listaPrimarias.append(camp)
if len(listaPrimarias) > 1 and compuesta:
error = Excepcion("42P16","Semantico","No se permiten múltiples llaves primarias para la tabla «"+self.tabla+"»",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
# Create a scope for the table
tablaNueva = Tablas(self.tabla,None)
# Fill the table in memory
for camp in self.campos:
if isinstance(camp.tipo,Tipo):
if camp.tipo.tipo == Tipo_Dato.TIPOENUM:
existe = arbol.getEnum(camp.tipo.nombre)
if existe == None:
error = Excepcion('42P00',"Semántico","El tipo "+camp.tipo.nombre+" no existe",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
if camp.constraint != None:
for s in camp.constraint:
if s.tipo == Tipo_Dato_Constraint.CHECK:
arbol.comprobacionCreate = True
objeto = Declaracion(camp.nombre, camp.tipo, s.expresion)
checkBueno = objeto.ejecutar(tablaLocal, arbol)
if not isinstance(checkBueno,Excepcion):
if s.id == None:
s.id = self.tabla+"_"+camp.nombre+"_"+"check1"
#tablaNueva.agregarColumna(camp.nombre,camp.tipo.toString(),None, camp.constraint)
#continue
pass
else:
#arbol.consola.append(checkBueno.toString())
return
elif s.tipo == Tipo_Dato_Constraint.PRIMARY_KEY:
if s.id == None:
s.id = self.tabla+"_pkey"
elif s.tipo == Tipo_Dato_Constraint.UNIQUE:
if s.id == None:
s.id = self.tabla+"_"+camp.nombre+"_pkey"
tablaNueva.agregarColumna(camp.nombre,camp.tipo,None, camp.constraint)
#tablaNueva.lista_constraint.append(camp.constraint)
else:
tablaNueva.agregarColumna(camp.nombre,camp.tipo,None, camp.constraint)
#tablaNueva.lista_constraint.append(camp.constraint)
arbol.comprobacionCreate = False
# Create the table on disk
ctable = createTable(arbol.bdUsar,self.tabla,len(self.campos))
if ctable==0: # the table was created successfully
arbol.consola.append(f"La Tabla: <<{self.tabla}>> se creo correctamente.")
arbol.agregarTablaABd(tablaNueva)
elif ctable==3: # the table already exists
error = Excepcion("100","Semantico","La Tabla ya Existe.",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
elif ctable==2: # the table could not be created due to some internal error
error = Excepcion("100","Semantico","Error Interno.",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
# Add the primary keys to the table
listaIndices = []
resultado=0
for i in listaPrimarias:
listaIndices.append(tablaNueva.devolverColumna(i.nombre))
if len(listaIndices) >0:
#print("an index was added")
resultado = alterAddPK(arbol.getBaseDatos(), self.tabla, listaIndices)
if resultado == 1:
error = Excepcion('XX000',"Semántico","Error interno",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
elif resultado == 2:
error = Excepcion('42P00',"Semántico","La base de datos "+str(arbol.getBaseDatos())+" no existe",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
elif resultado == 3:
error = Excepcion('42P01',"Semántico","No existe la relación "+self.tabla,self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
elif resultado == 4:
error = Excepcion('42P16',"Semántico","No se permiten múltiples llaves primarias para la tabla «"+self.tabla+"»",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
elif resultado == 5:
error = Excepcion('XX002',"Semántico","Columna fuera de limites."+self.tabla,self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
else:
error = Excepcion("100","Semantico","No ha seleccionado ninguna Base de Datos.",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
def generar3D(self, tabla, arbol):
super().generar3D(tabla,arbol)
code = []
t0 = c3d.getTemporal()
code.append(c3d.asignacionString(t0, "CREATE TABLE " + self.tabla + " (\\n"))
sizeCol = len(self.campos)
contador = 1
for col in self.campos:
if isinstance(col, Campo):
sizeCol -= 1
elif not isinstance(col, Campo):
lista = col.generar3D(tabla, arbol)
code += lista
tLast = c3d.getLastTemporal()
if contador != sizeCol:
t3 = c3d.getTemporal()
code.append(c3d.operacion(t3, Identificador(tLast), Valor('",\\n"', "STRING"), OP_ARITMETICO.SUMA))
contador += 1
tLast = t3
t2 = c3d.getTemporal()
code.append(c3d.operacion(t2, Identificador(t0), Identificador(tLast), OP_ARITMETICO.SUMA))
t0 = t2
t1 = c3d.getTemporal()
if self.herencia != None:
code.append(c3d.operacion(t1, Identificador(t0), Valor('"\\n) INHERITS (' + self.herencia + '"', "STRING"), OP_ARITMETICO.SUMA))
t0 = t1
t1 = c3d.getTemporal()
code.append(c3d.operacion(t1, Identificador(t0), Valor('");"', "STRING"), OP_ARITMETICO.SUMA))
code.append(c3d.asignacionTemporalStack(t1))
code.append(c3d.aumentarP())
return code
class IdentificadorColumna(Instruccion):
def __init__(self, id, linea, columna, strGram=''):
self.id = id
Instruccion.__init__(self,Tipo(Tipo_Dato.ID),linea,columna,strGram)
def ejecutar(self, tabla, arbol):
super().ejecutar(tabla,arbol)
variable = tabla.getVariable(self.id)
if variable == None:
error = Excepcion("42P10","Semantico","La columna "+str(self.id)+" no existe",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
self.tipo = variable.tipo
return variable.valor.ejecutar(tabla, arbol)
def generar3D(self, tabla, arbol):
super().generar3D(tabla,arbol) |
the-stack_0_525 | import logging
import sys
from requests import HTTPError
from .readwritelock import ReadWriteLock
from .interfaces import CachePolicy
log = logging.getLogger(sys.modules[__name__].__name__)
class ManualPollingCachePolicy(CachePolicy):
def __init__(self, config_fetcher, config_cache):
self._config_fetcher = config_fetcher
self._config_cache = config_cache
self._lock = ReadWriteLock()
def get(self):
try:
self._lock.acquire_read()
config = self._config_cache.get()
return config
finally:
self._lock.release_read()
def force_refresh(self):
force_fetch = False
try:
self._lock.acquire_read()
config = self._config_cache.get()
force_fetch = not bool(config)
finally:
self._lock.release_read()
try:
configuration_response = self._config_fetcher.get_configuration_json(
force_fetch
)
if configuration_response.is_fetched():
configuration = configuration_response.json()
try:
self._lock.acquire_write()
self._config_cache.set(configuration)
finally:
self._lock.release_write()
except HTTPError as e:
log.error(
"Double-check your SDK Key at https://app.configcat.com/sdkkey."
" Received unexpected response: [%s]" % str(e.response)
)
except Exception:
log.exception(sys.exc_info()[0])
def stop(self):
pass
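# Usage sketch (assumes a fetcher/cache pair implementing the interfaces imported
# above; variable names are illustrative):
#
#   policy = ManualPollingCachePolicy(config_fetcher, config_cache)
#   policy.force_refresh()   # fetch on demand and update the cache
#   config = policy.get()    # read whatever is currently cached
#   policy.stop()            # no-op for manual polling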
|
the-stack_0_527 | from .util import Configurable, Openable, pretty_str
@pretty_str
class Hook(Configurable, Openable):
"""
Base of all hook classes, performs any form of processing on messages from all connected
plugs, via the provided host instance.
Instantiation may raise :class:`.ConfigError` if the provided configuration is invalid.
Attributes:
virtual (bool):
``True`` if managed by another component (e.g. a hook that exposes plug functionality).
"""
def __init__(self, name, config, host, virtual=False):
super().__init__(name, config, host)
self.virtual = virtual
async def start(self):
"""
Perform any setup tasks.
"""
async def stop(self):
"""
Perform any teardown tasks.
"""
def on_load(self):
"""
Perform any additional one-time setup that requires other plugs or hooks to be loaded.
"""
async def channel_migrate(self, old, new):
"""
Move any private data between channels on admin request. This is intended to cover data
keyed by channel sources and plug network identifiers.
Args:
old (.Channel):
Existing channel with local data.
new (.Channel):
Target replacement channel to migrate data to.
Returns:
bool:
``True`` if any data was migrated for the requested channel.
"""
return False
async def before_send(self, channel, msg):
"""
Modify an outgoing message before it's pushed to the network. The ``(channel, msg)`` pair
must be returned, so hooks may modify in-place or return a different pair. This method is
called for each hook, one after another. If ``channel`` is modified, the sending will
restart on the new channel, meaning this method will be called again for all hooks.
Hooks may also suppress a message (e.g. if their actions caused it, but it bears no value
to the network) by returning ``None``.
Args:
channel (.Channel):
Original source of this message.
msg (.Message):
Raw message received from another plug.
Returns:
(.Channel, .Message) tuple:
The augmented or replacement pair, or ``None`` to suppress this message.
"""
return (channel, msg)
async def before_receive(self, sent, source, primary):
"""
Modify an incoming message before it's pushed to other hooks. The ``sent`` object must be
returned, so hooks may modify in-place or return a different object. This method is called
for each hook, one after another, so any time-consuming tasks should be deferred to
:meth:`process` (which is run for all hooks in parallel).
Hooks may also suppress a message (e.g. if their actions caused it, but it bears no value
to the rest of the system) by returning ``None``.
Args:
sent (.SentMessage):
Raw message received from another plug.
source (.Message):
Original message data used to generate the raw message, if sent via the plug (e.g.
from another hook), equivalent to ``msg`` if the source is otherwise unknown.
primary (bool):
``False`` for supplementary messages if the source message required multiple raw
messages in order to represent it (e.g. messages with multiple attachments where
the underlying network doesn't support it), otherwise ``True``.
Returns:
.SentMessage:
The augmented or replacement message, or ``None`` to suppress this message.
"""
return sent
async def on_receive(self, sent, source, primary):
"""
Handle an incoming message received by any plug.
Args:
sent (.SentMessage):
Raw message received from another plug.
source (.Message):
Original message data used to generate the raw message, if sent via the plug (e.g.
from another hook), equivalent to ``msg`` if the source is otherwise unknown.
primary (bool):
``False`` for supplementary messages if the source message required multiple raw
messages in order to represent it (e.g. messages with multiple attachments where
the underlying network doesn't support it), otherwise ``True``.
"""
def on_config_change(self, source):
"""
Handle a configuration change from another plug or hook.
Args:
source (.Configurable):
Source plug or hook that triggered the event.
"""
def __repr__(self):
return "<{}: {}>".format(self.__class__.__name__, self.name)
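# Minimal sketch of a concrete hook (illustrative only; it relies solely on the
# on_receive() contract documented above):
#
#   class EchoHook(Hook):
#       async def on_receive(self, sent, source, primary):
#           if primary:
#               print("Received:", sent)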
class ResourceHook(Hook):
"""
Variant of hooks that globally provide access to some resource.
Only one of each class may be loaded, which happens before regular hooks, and such hooks are
keyed by their class rather than a name, allowing for easier lookups.
"""
|
the-stack_0_528 | import enum
import platform
import typing
import math
from functools import lru_cache
from publicsuffix2 import get_sld, get_tld
import urwid
import urwid.util
from mitmproxy import flow
from mitmproxy.http import HTTPFlow
from mitmproxy.utils import human, emoji
from mitmproxy.tcp import TCPFlow
from mitmproxy import dns
from mitmproxy.dns import DNSFlow
# Detect Windows Subsystem for Linux and Windows
IS_WINDOWS_OR_WSL = "Microsoft" in platform.platform() or "Windows" in platform.platform()
def is_keypress(k):
"""
Is this input event a keypress?
"""
if isinstance(k, str):
return True
def highlight_key(str, key, textattr="text", keyattr="key"):
l = []
parts = str.split(key, 1)
if parts[0]:
l.append((textattr, parts[0]))
l.append((keyattr, key))
if parts[1]:
l.append((textattr, parts[1]))
return l
KEY_MAX = 30
def format_keyvals(
entries: typing.Iterable[typing.Tuple[str, typing.Union[None, str, urwid.Widget]]],
key_format: str = "key",
value_format: str = "text",
indent: int = 0
) -> typing.List[urwid.Columns]:
"""
Format a list of (key, value) tuples.
Args:
entries: The list to format. keys must be strings, values can also be None or urwid widgets.
The latter makes it possible to use the result of format_keyvals() as a value.
key_format: The display attribute for the key.
value_format: The display attribute for the value.
indent: Additional indent to apply.
"""
max_key_len = max((len(k) for k, v in entries if k is not None), default=0)
max_key_len = min(max_key_len, KEY_MAX)
if indent > 2:
indent -= 2 # We use dividechars=2 below, which already adds two empty spaces
ret = []
for k, v in entries:
if v is None:
v = urwid.Text("")
elif not isinstance(v, urwid.Widget):
v = urwid.Text([(value_format, v)])
ret.append(
urwid.Columns(
[
("fixed", indent, urwid.Text("")),
(
"fixed",
max_key_len,
urwid.Text([(key_format, k)])
),
v
],
dividechars=2
)
)
return ret
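# Illustrative example (added commentary): rendering two key/value rows, where the
# second value is itself an urwid widget.
#
#   rows = format_keyvals([
#       ("Host", "example.com"),
#       ("Headers", urwid.Text("Accept: */*")),
#   ], indent=4)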
def fcol(s: str, attr: str) -> typing.Tuple[str, int, urwid.Text]:
s = str(s)
return (
"fixed",
len(s),
urwid.Text(
[
(attr, s)
]
)
)
if urwid.util.detected_encoding:
SYMBOL_REPLAY = "\u21ba"
SYMBOL_RETURN = "\u2190"
SYMBOL_MARK = "\u25cf"
SYMBOL_UP = "\u21E7"
SYMBOL_DOWN = "\u21E9"
SYMBOL_ELLIPSIS = "\u2026"
SYMBOL_FROM_CLIENT = "\u21d2"
SYMBOL_TO_CLIENT = "\u21d0"
else:
SYMBOL_REPLAY = "[r]"
SYMBOL_RETURN = "<-"
SYMBOL_MARK = "#"
SYMBOL_UP = "^"
SYMBOL_DOWN = " "
SYMBOL_ELLIPSIS = "~"
SYMBOL_FROM_CLIENT = "->"
SYMBOL_TO_CLIENT = "<-"
SCHEME_STYLES = {
'http': 'scheme_http',
'https': 'scheme_https',
'ws': 'scheme_ws',
'wss': 'scheme_wss',
'tcp': 'scheme_tcp',
'dns': 'scheme_dns',
}
HTTP_REQUEST_METHOD_STYLES = {
'GET': 'method_get',
'POST': 'method_post',
'DELETE': 'method_delete',
'HEAD': 'method_head',
'PUT': 'method_put'
}
HTTP_RESPONSE_CODE_STYLE = {
2: "code_200",
3: "code_300",
4: "code_400",
5: "code_500",
}
class RenderMode(enum.Enum):
TABLE = 1
"""The flow list in table format, i.e. one row per flow."""
LIST = 2
"""The flow list in list format, i.e. potentially multiple rows per flow."""
DETAILVIEW = 3
"""The top lines in the detail view."""
def fixlen(s: str, maxlen: int) -> str:
if len(s) <= maxlen:
return s.ljust(maxlen)
else:
return s[0:maxlen - len(SYMBOL_ELLIPSIS)] + SYMBOL_ELLIPSIS
def fixlen_r(s: str, maxlen: int) -> str:
if len(s) <= maxlen:
return s.rjust(maxlen)
else:
return SYMBOL_ELLIPSIS + s[len(s) - maxlen + len(SYMBOL_ELLIPSIS):]
def render_marker(marker: str) -> str:
rendered = emoji.emoji.get(marker, SYMBOL_MARK)
# The marker can only be one glyph. Some emoji that use zero-width joiners (ZWJ)
# will not be rendered as a single glyph and instead will show
# multiple glyphs. Just use the first glyph as a fallback.
# https://emojipedia.org/emoji-zwj-sequence/
return rendered[0]
class TruncatedText(urwid.Widget):
def __init__(self, text, attr, align='left'):
self.text = text
self.attr = attr
self.align = align
super().__init__()
def pack(self, size, focus=False):
return (len(self.text), 1)
def rows(self, size, focus=False):
return 1
def render(self, size, focus=False):
text = self.text
attr = self.attr
if self.align == 'right':
text = text[::-1]
attr = attr[::-1]
text_len = urwid.util.calc_width(text, 0, len(text))
if size is not None and len(size) > 0:
width = size[0]
else:
width = text_len
if width >= text_len:
remaining = width - text_len
if remaining > 0:
c_text = text + ' ' * remaining
c_attr = attr + [('text', remaining)]
else:
c_text = text
c_attr = attr
else:
trim = urwid.util.calc_trim_text(text, 0, width - 1, 0, width - 1)
visible_text = text[0:trim[1]]
if trim[3] == 1:
visible_text += ' '
c_text = visible_text + SYMBOL_ELLIPSIS
c_attr = (urwid.util.rle_subseg(attr, 0, len(visible_text.encode())) +
[('focus', len(SYMBOL_ELLIPSIS.encode()))])
if self.align == 'right':
c_text = c_text[::-1]
c_attr = c_attr[::-1]
return urwid.TextCanvas([c_text.encode()], [c_attr], maxcol=width)
def truncated_plain(text, attr, align='left'):
return TruncatedText(text, [(attr, len(text.encode()))], align)
# Work around https://github.com/urwid/urwid/pull/330
def rle_append_beginning_modify(rle, a_r):
"""
Append (a, r) (unpacked from *a_r*) to BEGINNING of rle.
Merge with first run when possible
MODIFIES rle parameter contents. Returns None.
"""
a, r = a_r
if not rle:
rle[:] = [(a, r)]
else:
al, run = rle[0]
if a == al:
rle[0] = (a, run + r)
else:
rle[0:0] = [(a, r)]
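# Example of the merge behaviour (illustrative):
#   rle = [('text', 3)]; rle_append_beginning_modify(rle, ('text', 2))  # -> [('text', 5)]
#   rle = [('text', 3)]; rle_append_beginning_modify(rle, ('key', 2))   # -> [('key', 2), ('text', 3)]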
def colorize_host(host):
tld = get_tld(host)
sld = get_sld(host)
attr = []
tld_size = len(tld)
sld_size = len(sld) - tld_size
for letter in reversed(range(len(host))):
character = host[letter]
if tld_size > 0:
style = 'url_domain'
tld_size -= 1
elif tld_size == 0:
style = 'text'
tld_size -= 1
elif sld_size > 0:
sld_size -= 1
style = 'url_extension'
else:
style = 'text'
rle_append_beginning_modify(attr, (style, len(character.encode())))
return attr
def colorize_req(s):
path = s.split('?', 2)[0]
i_query = len(path)
i_last_slash = path.rfind('/')
i_ext = path[i_last_slash + 1:].rfind('.')
i_ext = i_last_slash + i_ext if i_ext >= 0 else len(s)
in_val = False
attr = []
for i in range(len(s)):
c = s[i]
if ((i < i_query and c == '/') or
(i < i_query and i > i_last_slash and c == '.') or
(i == i_query)):
a = 'url_punctuation'
elif i > i_query:
if in_val:
if c == '&':
in_val = False
a = 'url_punctuation'
else:
a = 'url_query_value'
else:
if c == '=':
in_val = True
a = 'url_punctuation'
else:
a = 'url_query_key'
elif i > i_ext:
a = 'url_extension'
elif i > i_last_slash:
a = 'url_filename'
else:
a = 'text'
urwid.util.rle_append_modify(attr, (a, len(c.encode())))
return attr
def colorize_url(url):
parts = url.split('/', 3)
if len(parts) < 4 or len(parts[1]) > 0 or parts[0][-1:] != ':':
return [('error', len(url))] # bad URL
return [
(SCHEME_STYLES.get(parts[0], "scheme_other"), len(parts[0]) - 1),
('url_punctuation', 3), # ://
] + colorize_host(parts[2]) + colorize_req('/' + parts[3])
def format_http_content_type(content_type: str) -> typing.Tuple[str, str]:
content_type = content_type.split(";")[0]
if content_type.endswith('/javascript'):
style = 'content_script'
elif content_type.startswith('text/'):
style = 'content_text'
elif (content_type.startswith('image/') or
content_type.startswith('video/') or
content_type.startswith('font/') or
"/x-font-" in content_type):
style = 'content_media'
elif content_type.endswith('/json') or content_type.endswith('/xml'):
style = 'content_data'
elif content_type.startswith('application/'):
style = 'content_raw'
else:
style = 'content_other'
return content_type, style
def format_duration(duration: float) -> typing.Tuple[str, str]:
pretty_duration = human.pretty_duration(duration)
style = 'gradient_%02d' % int(99 - 100 * min(math.log2(1 + 1000 * duration) / 12, 0.99))
return pretty_duration, style
def format_size(num_bytes: int) -> typing.Tuple[str, str]:
pretty_size = human.pretty_size(num_bytes)
style = 'gradient_%02d' % int(99 - 100 * min(math.log2(1 + num_bytes) / 20, 0.99))
return pretty_size, style
def format_left_indicators(
*,
focused: bool,
intercepted: bool,
timestamp: float
):
indicators: typing.List[typing.Union[str, typing.Tuple[str, str]]] = []
if focused:
indicators.append(("focus", ">>"))
else:
indicators.append(" ")
pretty_timestamp = human.format_timestamp(timestamp)[-8:]
if intercepted:
indicators.append(("intercept", pretty_timestamp))
else:
indicators.append(("text", pretty_timestamp))
return "fixed", 10, urwid.Text(indicators)
def format_right_indicators(
*,
replay: bool,
marked: str,
):
indicators: typing.List[typing.Union[str, typing.Tuple[str, str]]] = []
if replay:
indicators.append(("replay", SYMBOL_REPLAY))
else:
indicators.append(" ")
if bool(marked):
indicators.append(("mark", render_marker(marked)))
else:
indicators.append(" ")
return "fixed", 3, urwid.Text(indicators)
@lru_cache(maxsize=800)
def format_http_flow_list(
*,
render_mode: RenderMode,
focused: bool,
marked: str,
is_replay: bool,
request_method: str,
request_scheme: str,
request_host: str,
request_path: str,
request_url: str,
request_http_version: str,
request_timestamp: float,
request_is_push_promise: bool,
intercepted: bool,
response_code: typing.Optional[int],
response_reason: typing.Optional[str],
response_content_length: typing.Optional[int],
response_content_type: typing.Optional[str],
duration: typing.Optional[float],
error_message: typing.Optional[str],
) -> urwid.Widget:
req = []
if render_mode is RenderMode.DETAILVIEW:
req.append(fcol(human.format_timestamp(request_timestamp), "highlight"))
else:
if focused:
req.append(fcol(">>", "focus"))
else:
req.append(fcol(" ", "focus"))
method_style = HTTP_REQUEST_METHOD_STYLES.get(request_method, "method_other")
req.append(fcol(request_method, method_style))
if request_is_push_promise:
req.append(fcol('PUSH_PROMISE', 'method_http2_push'))
preamble_len = sum(x[1] for x in req) + len(req) - 1
if request_http_version not in ("HTTP/1.0", "HTTP/1.1"):
request_url += " " + request_http_version
if intercepted and not response_code:
url_style = "intercept"
elif response_code or error_message:
url_style = "text"
else:
url_style = "title"
if render_mode is RenderMode.DETAILVIEW:
req.append(
urwid.Text([(url_style, request_url)])
)
else:
req.append(truncated_plain(request_url, url_style))
req.append(format_right_indicators(replay=is_replay, marked=marked))
resp = [
("fixed", preamble_len, urwid.Text(""))
]
if response_code:
if intercepted:
style = "intercept"
else:
style = ""
status_style = style or HTTP_RESPONSE_CODE_STYLE.get(response_code // 100, "code_other")
resp.append(fcol(SYMBOL_RETURN, status_style))
resp.append(fcol(str(response_code), status_style))
if response_reason and render_mode is RenderMode.DETAILVIEW:
resp.append(fcol(response_reason, status_style))
if response_content_type:
ct, ct_style = format_http_content_type(response_content_type)
resp.append(fcol(ct, style or ct_style))
if response_content_length:
size, size_style = format_size(response_content_length)
elif response_content_length == 0:
size = "[no content]"
size_style = "text"
else:
size = "[content missing]"
size_style = "text"
resp.append(fcol(size, style or size_style))
if duration:
dur, dur_style = format_duration(duration)
resp.append(fcol(dur, style or dur_style))
elif error_message:
resp.append(fcol(SYMBOL_RETURN, "error"))
resp.append(urwid.Text([("error", error_message)]))
return urwid.Pile([
urwid.Columns(req, dividechars=1),
urwid.Columns(resp, dividechars=1)
])
@lru_cache(maxsize=800)
def format_http_flow_table(
*,
render_mode: RenderMode,
focused: bool,
marked: str,
is_replay: typing.Optional[str],
request_method: str,
request_scheme: str,
request_host: str,
request_path: str,
request_url: str,
request_http_version: str,
request_timestamp: float,
request_is_push_promise: bool,
intercepted: bool,
response_code: typing.Optional[int],
response_reason: typing.Optional[str],
response_content_length: typing.Optional[int],
response_content_type: typing.Optional[str],
duration: typing.Optional[float],
error_message: typing.Optional[str],
) -> urwid.Widget:
items = [
format_left_indicators(
focused=focused,
intercepted=intercepted,
timestamp=request_timestamp
)
]
if intercepted and not response_code:
request_style = "intercept"
else:
request_style = ""
scheme_style = request_style or SCHEME_STYLES.get(request_scheme, "scheme_other")
items.append(fcol(fixlen(request_scheme.upper(), 5), scheme_style))
if request_is_push_promise:
method_style = 'method_http2_push'
else:
method_style = request_style or HTTP_REQUEST_METHOD_STYLES.get(request_method, "method_other")
items.append(fcol(fixlen(request_method, 4), method_style))
items.append(('weight', 0.25, TruncatedText(request_host, colorize_host(request_host), 'right')))
items.append(('weight', 1.0, TruncatedText(request_path, colorize_req(request_path), 'left')))
if intercepted and response_code:
response_style = "intercept"
else:
response_style = ""
if response_code:
status = str(response_code)
status_style = response_style or HTTP_RESPONSE_CODE_STYLE.get(response_code // 100, "code_other")
if response_content_length and response_content_type:
content, content_style = format_http_content_type(response_content_type)
content_style = response_style or content_style
elif response_content_length:
content = ''
content_style = 'content_none'
elif response_content_length == 0:
content = "[no content]"
content_style = 'content_none'
else:
content = "[content missing]"
content_style = 'content_none'
elif error_message:
status = 'err'
status_style = 'error'
content = error_message
content_style = 'error'
else:
status = ''
status_style = 'text'
content = ''
content_style = ''
items.append(fcol(fixlen(status, 3), status_style))
items.append(('weight', 0.15, truncated_plain(content, content_style, 'right')))
if response_content_length:
size, size_style = format_size(response_content_length)
items.append(fcol(fixlen_r(size, 5), response_style or size_style))
else:
items.append(("fixed", 5, urwid.Text("")))
if duration:
duration_pretty, duration_style = format_duration(duration)
items.append(fcol(fixlen_r(duration_pretty, 5), response_style or duration_style))
else:
items.append(("fixed", 5, urwid.Text("")))
items.append(format_right_indicators(
replay=bool(is_replay),
marked=marked,
))
return urwid.Columns(items, dividechars=1, min_width=15)
@lru_cache(maxsize=800)
def format_tcp_flow(
*,
render_mode: RenderMode,
focused: bool,
timestamp_start: float,
marked: str,
client_address,
server_address,
total_size: int,
duration: typing.Optional[float],
error_message: typing.Optional[str],
):
conn = f"{human.format_address(client_address)} <-> {human.format_address(server_address)}"
items = []
if render_mode in (RenderMode.TABLE, RenderMode.DETAILVIEW):
items.append(
format_left_indicators(focused=focused, intercepted=False, timestamp=timestamp_start)
)
else:
if focused:
items.append(fcol(">>", "focus"))
else:
items.append(fcol(" ", "focus"))
if render_mode is RenderMode.TABLE:
items.append(fcol("TCP ", SCHEME_STYLES["tcp"]))
else:
items.append(fcol("TCP", SCHEME_STYLES["tcp"]))
items.append(('weight', 1.0, truncated_plain(conn, "text", 'left')))
if error_message:
items.append(('weight', 1.0, truncated_plain(error_message, "error", 'left')))
if total_size:
size, size_style = format_size(total_size)
items.append(fcol(fixlen_r(size, 5), size_style))
else:
items.append(("fixed", 5, urwid.Text("")))
if duration:
duration_pretty, duration_style = format_duration(duration)
items.append(fcol(fixlen_r(duration_pretty, 5), duration_style))
else:
items.append(("fixed", 5, urwid.Text("")))
items.append(format_right_indicators(replay=False, marked=marked))
return urwid.Pile([
urwid.Columns(items, dividechars=1, min_width=15)
])
@lru_cache(maxsize=800)
def format_dns_flow(
*,
render_mode: RenderMode,
focused: bool,
intercepted: bool,
marked: str,
is_replay: typing.Optional[str],
op_code: str,
request_timestamp: float,
domain: str,
type: str,
response_code: typing.Optional[str],
response_code_http_equiv: int,
answer: typing.Optional[str],
error_message: str,
duration: typing.Optional[float],
):
items = []
if render_mode in (RenderMode.TABLE, RenderMode.DETAILVIEW):
items.append(format_left_indicators(focused=focused, intercepted=intercepted, timestamp=request_timestamp))
else:
items.append(fcol(">>" if focused else " ", "focus"))
scheme_style = "intercepted" if intercepted else SCHEME_STYLES["dns"]
t = f"DNS {op_code}"
if render_mode is RenderMode.TABLE:
t = fixlen(t, 10)
items.append(fcol(t, scheme_style))
items.append(('weight', 0.5, TruncatedText(domain, colorize_host(domain), 'right')))
items.append(fcol("(" + fixlen(type, 5)[:len(type)] + ") =", "text"))
items.append(("weight", 1, (
truncated_plain("..." if answer is None else "?" if not answer else answer, "text")
if error_message is None else
truncated_plain(error_message, "error")
)))
status_style = "intercepted" if intercepted else HTTP_RESPONSE_CODE_STYLE.get(response_code_http_equiv // 100, "code_other")
items.append(fcol(fixlen("" if response_code is None else response_code, 9), status_style))
if duration:
duration_pretty, duration_style = format_duration(duration)
items.append(fcol(fixlen_r(duration_pretty, 5), duration_style))
else:
items.append(("fixed", 5, urwid.Text("")))
items.append(format_right_indicators(
replay=bool(is_replay),
marked=marked,
))
return urwid.Pile([
urwid.Columns(items, dividechars=1, min_width=15)
])
def format_flow(
f: flow.Flow,
*,
render_mode: RenderMode,
hostheader: bool = False, # pass options directly if we need more stuff from them
focused: bool = True,
) -> urwid.Widget:
"""
This functions calls the proper renderer depending on the flow type.
We also want to cache the renderer output, so we extract all attributes
relevant for display and call the render with only that. This assures that rows
are updated if the flow is changed.
"""
duration: typing.Optional[float]
error_message: typing.Optional[str]
if f.error:
error_message = f.error.msg
else:
error_message = None
if isinstance(f, TCPFlow):
total_size = 0
for message in f.messages:
total_size += len(message.content)
if f.messages:
duration = f.messages[-1].timestamp - f.client_conn.timestamp_start
else:
duration = None
return format_tcp_flow(
render_mode=render_mode,
focused=focused,
timestamp_start=f.client_conn.timestamp_start,
marked=f.marked,
client_address=f.client_conn.peername,
server_address=f.server_conn.address,
total_size=total_size,
duration=duration,
error_message=error_message,
)
elif isinstance(f, DNSFlow):
if f.response:
duration = f.response.timestamp - f.request.timestamp
response_code_str: typing.Optional[str] = dns.response_codes.to_str(f.response.response_code)
response_code_http_equiv = dns.response_codes.http_equiv_status_code(f.response.response_code)
answer = ", ".join(str(x) for x in f.response.answers)
else:
duration = None
response_code_str = None
response_code_http_equiv = 0
answer = None
return format_dns_flow(
render_mode=render_mode,
focused=focused,
intercepted=f.intercepted,
marked=f.marked,
is_replay=f.is_replay,
op_code=dns.op_codes.to_str(f.request.op_code),
request_timestamp=f.request.timestamp,
domain=f.request.questions[0].name if f.request.questions else "",
type=dns.types.to_str(f.request.questions[0].type) if f.request.questions else "",
response_code=response_code_str,
response_code_http_equiv=response_code_http_equiv,
answer=answer,
error_message=error_message,
duration=duration,
)
elif isinstance(f, HTTPFlow):
intercepted = f.intercepted
response_content_length: typing.Optional[int]
if f.response:
if f.response.raw_content is not None:
response_content_length = len(f.response.raw_content)
else:
response_content_length = None
response_code: typing.Optional[int] = f.response.status_code
response_reason: typing.Optional[str] = f.response.reason
response_content_type = f.response.headers.get("content-type")
if f.response.timestamp_end:
duration = max([f.response.timestamp_end - f.request.timestamp_start, 0])
else:
duration = None
else:
response_content_length = None
response_code = None
response_reason = None
response_content_type = None
duration = None
scheme = f.request.scheme
if f.websocket is not None:
if scheme == "https":
scheme = "wss"
elif scheme == "http":
scheme = "ws"
if render_mode in (RenderMode.LIST, RenderMode.DETAILVIEW):
render_func = format_http_flow_list
else:
render_func = format_http_flow_table
return render_func(
render_mode=render_mode,
focused=focused,
marked=f.marked,
is_replay=f.is_replay,
request_method=f.request.method,
request_scheme=scheme,
request_host=f.request.pretty_host if hostheader else f.request.host,
request_path=f.request.path,
request_url=f.request.pretty_url if hostheader else f.request.url,
request_http_version=f.request.http_version,
request_timestamp=f.request.timestamp_start,
request_is_push_promise='h2-pushed-stream' in f.metadata,
intercepted=intercepted,
response_code=response_code,
response_reason=response_reason,
response_content_length=response_content_length,
response_content_type=response_content_type,
duration=duration,
error_message=error_message,
)
else:
raise NotImplementedError()
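# Usage sketch (illustrative; in mitmproxy's console the flow list builds one row per
# flow roughly like this):
#
#   row = format_flow(f, render_mode=RenderMode.TABLE, focused=False, hostheader=True)
#
# which returns an urwid widget ready to be placed in the flow list.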
|
the-stack_0_529 | """
anime.py contains the base classes required for other anime classes.
"""
import os
import logging
import copy
import importlib
from anime_downloader.sites.exceptions import AnimeDLError, NotFoundError
from anime_downloader import util
from anime_downloader.config import Config
from anime_downloader.extractors import get_extractor
from anime_downloader.downloader import get_downloader
logger = logging.getLogger(__name__)
class Anime:
"""
Base class for all anime classes.
Parameters
----------
url: string
URL of the anime.
quality: One of ['360p', '480p', '720p', '1080p']
Quality of episodes
fallback_qualities: list
The order of fallback.
Attributes
----------
sitename: str
name of the site
title: str
Title of the anime
meta: dict
metadata about the anime. [Can be empty]
QUALITIES: list
Possible qualities for the site
"""
sitename = ''
title = ''
meta = dict()
subclasses = {}
QUALITIES = ['360p', '480p', '720p', '1080p']
@classmethod
def search(cls, query):
"""
Search searches for the anime using the query given.
Parameters
----------
query: str
query is the query keyword to be searched.
Returns
-------
list
List of :py:class:`~anime_downloader.sites.anime.SearchResult`
"""
return
def __init__(self, url=None, quality='720p',
fallback_qualities=None,
_skip_online_data=False):
self.url = url
if fallback_qualities is None:
fallback_qualities = ['720p', '480p', '360p']
self._fallback_qualities = [
q for q in fallback_qualities if q in self.QUALITIES]
if quality in self.QUALITIES:
self.quality = quality
else:
raise AnimeDLError(
'Quality {0} not found in {1}'.format(quality, self.QUALITIES))
if not _skip_online_data:
logger.info('Extracting episode info from page')
self._episode_urls = self.get_data()
self._len = len(self._episode_urls)
@classmethod
def verify_url(cls, url):
if cls.sitename in url:
return True
return False
@property
def config(self):
return Config['siteconfig'][self.sitename]
def __init_subclass__(cls, sitename, **kwargs):
super().__init_subclass__(**kwargs)
cls.subclasses[sitename] = cls
@classmethod
def factory(cls, sitename: str):
"""
factory returns the appropriate subclass for the given site name.
Parameters
----------
sitename: str
sitename is the name of the site
Returns
-------
subclass of :py:class:`Anime`
Sub class of :py:class:`Anime`
"""
return cls.subclasses[sitename]
@classmethod
def new_anime(cls, sitename: str):
"""
new_anime is a factory which returns the anime class corresponding to
`sitename`
Returns
-------
subclass of Anime
"""
module = importlib.import_module(
'anime_downloader.sites.{}'.format(sitename)
)
for name in dir(module):
c = getattr(module, name)
if isinstance(c, type) and issubclass(c, cls) and c is not cls:
return c
raise ImportError("Cannot find subclass of {}".format(cls))
def get_data(self):
"""
get_data is called inside the :code:`__init__` of
:py:class:`~anime_downloader.sites.anime.BaseAnime`. It is used to get
the necessary data about the anime and its episodes.
This function calls
:py:meth:`~anime_downloader.sites.anime.BaseAnime._scrape_episodes`
and
:py:meth:`~anime_downloader.sites.anime.BaseAnime._scrape_metadata`
TODO: Refactor this so that classes which need not be soupified don't
have to overload this function.
Returns
-------
list
A list of tuples of episodes containing episode name and
episode url.
Ex::
[('1', 'https://9anime.is/.../...', ...)]
"""
self._episode_urls = []
try:
self._scrape_metadata()
except Exception as e:
logger.debug('Metadata scraping error: {}'.format(e))
self._episode_urls = self._scrape_episodes()
self._len = len(self._episode_urls)
logger.debug('EPISODE IDS: length: {}, ids: {}'.format(
self._len, self._episode_urls))
if not isinstance(self._episode_urls[0], tuple):
self._episode_urls = [(no+1, id) for no, id in
enumerate(self._episode_urls)]
return self._episode_urls
def __getitem__(self, index):
episode_class = AnimeEpisode.subclasses[self.sitename]
if isinstance(index, int):
try:
ep_id = self._episode_urls[index]
except IndexError as e:
raise RuntimeError("No episode found with index") from e
return episode_class(ep_id[1], parent=self,
ep_no=ep_id[0])
elif isinstance(index, slice):
anime = copy.deepcopy(self)
try:
anime._episode_urls = anime._episode_urls[index]
except IndexError as e:
raise RuntimeError("No episode found with index") from e
return anime
return None
def __iter__(self):
episode_class = AnimeEpisode.subclasses[self.sitename]
for ep_id in self._episode_urls:
yield episode_class(ep_id[1], parent=self, ep_no=ep_id[0])
def __repr__(self):
return '''
Site: {name}
Anime: {title}
Episode count: {length}
'''.format(name=self.sitename, title=self.title, length=len(self))
def __len__(self):
return self._len
def __str__(self):
return self.title
def _scrape_episodes(self):
"""
_scrape_episodes is the function which has to be overridden by the
derived classes to scrape the episode urls from the web page.
Parameters
----------
soup: `bs4.BeautifulSoup`
soup is the html of the anime url after passing through
BeautifulSoup.
Returns
-------
:code:`list` of :code:`str`
A list of episode urls.
"""
return
def _scrape_metadata(self):
"""
_scrape_metadata is the function which has to be overridden by the
derived classes to scrape the metadata of the anime from the web page.
Parameters
----------
soup: :py:class:`bs4.BeautifulSoup`
soup is the html of the anime url after passing through
BeautifulSoup.
"""
return
class AnimeEpisode:
"""
Base class for all Episode classes.
Parameters
----------
url: string
URL of the episode.
quality: One of ['360p', '480p', '720p', '1080p']
Quality of episode
fallback_qualities: list
The order of fallback.
Attributes
----------
sitename: str
name of the site
title: str
Title of the anime
meta: dict
metadata about the anime. [Can be empty]
ep_no: string
Episode number/title of the episode
pretty_title: string
Pretty title of episode in format <animename>-<ep_no>
"""
QUALITIES = []
title = ''
stream_url = ''
subclasses = {}
def __init__(self, url, parent: Anime = None, ep_no=None):
self.ep_no = ep_no
self.url = url
self.quality = parent.quality
self.QUALITIES = parent.QUALITIES
self._parent = parent
self._sources = None
self.pretty_title = '{}-{}'.format(self._parent.title, self.ep_no)
logger.debug("Extracting stream info of id: {}".format(self.url))
def try_data():
self.get_data()
# Just to verify the source is acquired
self.source().stream_url
try:
try_data()
except NotFoundError:
# Issue #28
qualities = copy.copy(self._parent._fallback_qualities)
try:
qualities.remove(self.quality)
except ValueError:
pass
for quality in qualities:
logger.warning('Quality {} not found. Trying {}.'.format(
self.quality, quality))
self.quality = quality
try:
try_data()
return
except NotFoundError:
pass
logger.warning(f'Skipping episode: {self.ep_no}')
def __init_subclass__(cls, sitename: str, **kwargs):
super().__init_subclass__(**kwargs)
cls.subclasses[sitename] = cls
cls.sitename = sitename
@classmethod
def factory(cls, sitename: str):
return cls.subclasses[sitename]
@property
def config(self):
return Config['siteconfig'][self.sitename]
def source(self, index=0):
"""
Get the source for episode
Returns
-------
`anime_downloader.extractors.base_extractor.BaseExtractor`
Extractor depending on the source.
"""
if not self._sources:
self.get_data()
try:
sitename, url = self._sources[index]
except TypeError:
return self._sources[index]
except IndexError:
raise NotFoundError("No episode sources found.")
ext = get_extractor(sitename)(url, quality=self.quality)
self._sources[index] = ext
return ext
def get_data(self):
self._sources = self._get_sources()
logger.debug('Sources : {}'.format(self._sources))
def _get_sources(self):
raise NotImplementedError
def sort_sources(self, data):
"""
Formatted data should look something like this
[
{'extractor': 'mp4upload', 'url': 'https://twist.moe/mp4upload/...', 'server': 'mp4upload', 'version': 'subbed'},
{'extractor': 'vidstream', 'url': 'https://twist.moe/vidstream/...', 'server': 'vidstream', 'version': 'dubbed'},
{'extractor': 'no_extractor', 'url': 'https://twist.moe/anime/...', 'server': 'default', 'version': 'subbed'}
]
extractor = the extractor the link should be passed to
url = url to be passed to the extractor
server = the server name used in config
version = subbed/dubbed
The config should consist of a list with servers in preferred order and a preferred language, eg
"servers":["vidstream","default","mp4upload"],
"version":"subbed"
Using the example above, this function will return: [('no_extractor', 'https://twist.moe/anime/...')]
as it prioritizes preferred language over preferred server
"""
version = self.config.get('version','subbed') #TODO add a flag for this
servers = self.config.get('servers',[''])
logger.debug('Data : {}'.format(data))
#Sorts the dicts by preferred server in config
sorted_by_server = sorted(data, key=lambda x: servers.index(x['server']) if x['server'] in servers else len(data))
#Sorts the above by preferred language
#resulting in a list with the dicts sorted by language and server
#with language being prioritized over server
sorted_by_lang = list(sorted(sorted_by_server, key=lambda x: x['version'] == version, reverse=True))
logger.debug('Sorted sources : {}'.format(sorted_by_lang))
return '' if not sorted_by_lang else [(sorted_by_lang[0]['extractor'],sorted_by_lang[0]['url'])]
def download(self, force=False, path=None,
format='{anime_title}_{ep_no}', range_size=None):
"""
Downloads episode. This might be removed in a future release.
Parameters
----------
force: bool
Whether to force download or not.
path: string
Path to the directory/file where the file should be downloaded to.
format: string
The format of the filename if not provided.
"""
# TODO: Remove this shit
logger.info('Downloading {}'.format(self.pretty_title))
if format:
file_name = util.format_filename(format, self)+'.mp4'
if path is None:
path = './' + file_name
if path.endswith('.mp4'):
path = path
else:
path = os.path.join(path, file_name)
Downloader = get_downloader('http')
downloader = Downloader(self.source(),
path, force, range_size=range_size)
downloader.download()
class SearchResult:
"""
SearchResult class holds the search result of a search done by an Anime
class
Parameters
----------
title: str
Title of the anime.
url: str
URL of the anime
poster: str
URL for the poster of the anime.
meta: dict
Additional metadata regarding the anime.
Attributes
----------
title: str
Title of the anime.
url: str
URL of the anime
poster: str
URL for the poster of the anime.
meta: dict
Additional metadata regarding the anime.
meta_info: dict
Metadata regarding the anime. Not shown in the results, used to match with MAL
"""
def __init__(self, title, url, poster='', meta='', meta_info={}):
self.title = title
self.url = url
self.poster = poster
self.meta = meta
self.meta_info = meta_info
def __repr__(self):
return '<SearchResult Title: {} URL: {}>'.format(self.title, self.url)
def __str__(self):
return self.title
@property
def pretty_metadata(self):
"""
pretty_metadata is the prettified version of metadata
"""
if self.meta:
return ' | '.join(val for _, val in self.meta.items())
return ''
|
the-stack_0_532 | # -*- coding: utf-8 -*-
"""
Fuel inventory library (UOX)
Script to run computations. It will produce a set of folders and outputfiles
and a csv file storing linking the output file paths to the BU, CT, IE values.
zsolt elter 2019
"""
import numpy as np
import os
import math
#import pandas as pd
#from PDfunctions import *
def fuelinput(wp):
"""
    Calculate the weight percentages of the MOX nuclides.
    Formulae from http://holbert.faculty.asu.edu/eee460/NumberDensity.pdf
Parameters
----------
wp : float
Plutonium content in percentage
Returns
-------
fuelstr : str
Serpent formatted material composition
Notes
-----
1, Right now the temperature is hard coded (ie ZAID ends with '.15c'), this can be modified.
2, Right now the density of fuel is hard coded, this can be modified
    3, The fuel string includes Cf nuclides with 0.0 w%. This forces Serpent2 to include these
    nuclides; they might be relevant in subsequent neutron-coincidence based
    calculations.
"""
u=1000*1.660539040e-27 #g
NA=6.0221409e23 ##/mol
M={'U235': 235.0439299*u*NA,
'U234': 234.0409521*u*NA,
'U238': 238.05078826*u*NA,
'Pu238': 238.0495599*u*NA,
'Pu239': 239.0521634*u*NA,
'Pu240': 240.0538135*u*NA,
'Pu241': 241.0568515*u*NA,
'Pu242': 242.0587426*u*NA}
Puvec={'Pu238':2.5/100,'Pu239':54.7/100,'Pu240':26.1/100,'Pu241':9.5/100,'Pu242':7.2/100}
Uvec={'U234':0.0012/100,'U235':0.25/100,'U238':99.7488/100} #czsolti 0.00119 rounded to get 1
MO16= 15.99491461956*u*NA
rhoMOX=10.5 #g/cm3 czsolti this density falls out from the equations
wp=wp/100
MU=1/sum([Uvec[iso]/M[iso] for iso in Uvec])
MPu=1/sum([Puvec[iso]/M[iso] for iso in Puvec])
MHM=(1-wp)*MU+wp*MPu
MMOX=MHM+2*MO16
rhoHM=rhoMOX*(MHM/MMOX)
rhoO=rhoMOX*(MO16/MMOX)
MVOL={}
for iso in Uvec:
MVOL[iso] = (1-wp)*Uvec[iso]*rhoHM
for iso in Puvec:
MVOL[iso] = wp*Puvec[iso]*rhoHM
M_O16=(rhoO*2)
M_TOT=sum(MVOL.values())+M_O16
fuelstr='mat MOX -10.5 burn 1'
fuelstr=fuelstr+'\n 92234.15c -%.8f'%(MVOL['U234']/M_TOT)
fuelstr=fuelstr+'\n 92235.15c -%.8f'%(MVOL['U235']/M_TOT)
fuelstr=fuelstr+'\n 92238.15c -%.8f'%(MVOL['U238']/M_TOT)
fuelstr=fuelstr+'\n 94238.15c -%.8f'%(MVOL['Pu238']/M_TOT)
fuelstr=fuelstr+'\n 94239.15c -%.8f'%(MVOL['Pu239']/M_TOT)
fuelstr=fuelstr+'\n 94240.15c -%.8f'%(MVOL['Pu240']/M_TOT)
fuelstr=fuelstr+'\n 94241.15c -%.8f'%(MVOL['Pu241']/M_TOT)
fuelstr=fuelstr+'\n 94242.15c -%.8f'%(MVOL['Pu242']/M_TOT)
fuelstr=fuelstr+'\n 8016.15c -%.8f'%(M_O16/M_TOT)
fuelstr=fuelstr+'\n 98249.15c -0.0'
fuelstr=fuelstr+'\n 98250.15c -0.0'
fuelstr=fuelstr+'\n 98251.15c -0.0'
fuelstr=fuelstr+'\n 98252.15c -0.0'
fuelstr=fuelstr+'\n 98253.15c -0.0'
fuelstr=fuelstr+'\n 98254.15c -0.0'
return fuelstr
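# Usage sketch (illustrative only): inspect the Serpent material card produced
# for a MOX fuel with 7 w% plutonium content.
#
#     print(fuelinput(7.0))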
### SCRIPT to run
###Init array for CTs-> can be modified if other CT values are preferred.
CT=0
CTs=[0]
decstep=[]
while CT<70*365:
if CT<10*365:
decstep.append(91.25)
CT=CT+91.25
CTs.append(CT)
elif CT<40*365:
decstep.append(2*91.25)
CT=CT+2*91.25
CTs.append(CT)
else:
decstep.append(4*91.25)
CT=CT+4*91.25
CTs.append(CT)
#csv header
csvstr=',BU,CT,IE,fuelType,reactorType,serpent\n'
#path to be updated
path=os.getcwd()+'/'
dataFrame='fuellog_strategicPWR_MOX.csv'
inputFileRun = open(dataFrame,'a')
inputFileRun.write(csvstr)
inputFileRun.close()
inputFileBU = open('MOX_manyBU')
inputFileBURefStr = inputFileBU.read()
inputFileBU.close()
inputFileCT = open('MOX_manyCT')
inputFileCTRefStr = inputFileCT.read()
inputFileCT.close()
IE=np.linspace(4,10,31)
idfuel=0
for ie in IE:
fstr=fuelinput(ie)
inputFileBUStr = inputFileBURefStr
inputFileBUStr = inputFileBUStr.replace('fuelstr', fstr)
sfile='sPWR_MOX_IE_%d'%(ie*10)
os.chdir(path+'serpent_files/')
os.system('mkdir IE%d'%(ie*10))
os.chdir(path+'serpent_files/IE%d/'%(ie*10))
inputFileRun = open(sfile,'w')
inputFileRun.write(inputFileBUStr)
inputFileRun.close()
#pathV=path+'serpent_filesPWR_BIC/'
#os.system('ssh '+node+' "nice sss2 '+pathV+sfile+' -omp 64"')
os.system('nice sss2 '+sfile+' -omp 64')
bu=5.0
for bui in range(10,147): #5-70 MWd/kgU
if bui not in [0,21,42,63,84,105,126]:#downtime
os.chdir(path+'serpent_files/IE%d/'%(ie*10))
spentmat = open(sfile+'.bumat'+str(bui)).read()
spentmat=spentmat.replace('MOXp1r1','MOX')
spentmat=spentmat.replace('\n 1001.15c',' burn 1\n 1001.15c')
inputFileCTStr = inputFileCTRefStr
inputFileCTStr = inputFileCTStr.replace('matstr', spentmat)
sfilect='sPWR_MOX_IE_%d_BU_%d'%(ie*10,bu*10)
os.system('mkdir BU%d'%(bu*10))
os.chdir(path+'serpent_files/IE%d/BU%d/'%(ie*10,bu*10))
inputFileRun = open(sfilect,'w')
inputFileRun.write(inputFileCTStr)
inputFileRun.close()
os.system('nice sss2 '+sfilect+' -omp 64')
for cti in range(131):
filepath=path+'serpent_files/IE%d/BU%d/'%(ie*10,bu*10)+sfilect+'.bumat'+str(cti)
csvstr='%d,%.2f,%.2f,%.2f,MOX,PWR,%s\n'%(idfuel,bu,CTs[cti],ie,filepath)
idfuel=idfuel+1
os.chdir(path)
inputFileRun = open(dataFrame,'a')
inputFileRun.write(csvstr)
inputFileRun.close()
bu=bu+0.5
|
the-stack_0_534 | from tensorflow.keras import layers, models, datasets, optimizers
import numpy as np
def neural_network_spatial():
input_ = layers.Input(shape=(32,32,3))
cnn = layers.Conv2D(16, (3,3), activation="relu") (input_)
cnn = layers.SpatialDropout2D(0.2) (cnn)
cnn = layers.MaxPooling2D() (cnn)
cnn = layers.Conv2D(32, (3,3), activation="relu") (cnn)
cnn = layers.SpatialDropout2D(0.5) (cnn)
cnn = layers.MaxPooling2D() (cnn)
flatten = layers.GlobalMaxPooling2D() (cnn)
dense = layers.Dense(32, activation="relu") (flatten)
dense = layers.Dropout(0.5) (dense)
dense = layers.Dense(16, activation="relu") (dense)
output = layers.Dense(10, activation="softmax") (dense)
opt = optimizers.Adam()
m= models.Model(input_, output)
m.compile(optimizer=opt,
loss='categorical_crossentropy',
metrics=['accuracy'])
return m
model = neural_network_spatial() # get model
print(model.summary()) |
the-stack_0_535 | import coloredlogs
import logging
import os
logging.basicConfig(
filename="plex_doctor.log",
level=logging.DEBUG,
format='%(levelname)s: "%(asctime)s - %(message)s',
)
log = logging.getLogger("PLEX-DOCTOR")
log.setLevel(logging.DEBUG)
LOGLEVEL = os.environ.get("LOGLEVEL", "INFO").upper()
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(
logging.Formatter('%(levelname)s: "%(asctime)s - %(message)s')
)
log.addHandler(stream_handler)
coloredlogs.install(LOGLEVEL, logger=log) |
the-stack_0_537 | from arm.logicnode.arm_nodes import *
class OnContactArrayNode(ArmLogicTreeNode):
"""Activates the output when the given rigid body make contact with other given rigid bodies."""
bl_idname = 'LNOnContactArrayNode'
bl_label = 'On Contact Array'
arm_section = 'contact'
arm_version = 1
property0: EnumProperty(
items = [('begin', 'Begin', 'The contact between the rigid bodies begins'),
('overlap', 'Overlap', 'The contact between the rigid bodies is happening'),
('end', 'End', 'The contact between the rigid bodies ends')],
name='', default='begin')
def init(self, context):
super(OnContactArrayNode, self).init(context)
self.add_input('ArmNodeSocketObject', 'RB')
self.add_input('ArmNodeSocketArray', 'RBs')
self.add_output('ArmNodeSocketAction', 'Out')
def draw_buttons(self, context, layout):
layout.prop(self, 'property0')
|
the-stack_0_538 | #!/usr/bin/env python
from load import ROOT as R
from gna.unittest import *
from gna.env import env
import gna.constructors as C
import numpy as N
from gna import context
import gna.bindings.arrayview
@floatcopy(globals(), True)
def test_vararray_preallocated_v01(function_name):
ns = env.globalns(function_name)
names = [ 'zero', 'one', 'two', 'three', 'four', 'five' ]
values = N.arange(len(names), dtype=context.current_precision_short())
variables = R.vector('variable<%s>'%context.current_precision())()
with context.allocator(100) as allocator:
for name, value in zip(names, values):
par = ns.defparameter(name, central=value, relsigma=0.1)
variables.push_back(par.getVariable())
with ns:
vsum = C.VarSum(names, 'sum', ns=ns)
vsum_var=ns['sum'].get()
variables.push_back(vsum_var.getVariable())
vprod = C.VarProduct(names, 'product', ns=ns)
vprod_var=ns['product'].get()
variables.push_back(vprod_var.getVariable())
va = C.VarArrayPreallocated(variables)
pool=allocator.view()
res=va.vararray.points.data()
values_all = N.zeros(shape=values.size+2, dtype=values.dtype)
values_all[:-2]=values
values_all[-2]=values_all[:-2].sum()
values_all[-1]=values_all[:-2].prod()
print('Python array:', values_all)
print('VarArray (preallocated):', res)
print('Pool:', pool)
assert (values_all==res).all()
assert (values_all==pool).all()
assert (res==pool).all()
for i, (val, name) in enumerate(enumerate(names, 2)):
ns[name].set(val)
values_all[i]=val
values_all[-2]=values_all[:-2].sum()
values_all[-1]=values_all[:-2].prod()
res=va.vararray.points.data()
print('Iteration', i)
print(' Python array:', values_all)
print(' VarArray (preallocated):', res)
assert (values_all==res).all()
assert (values_all==pool).all()
assert (res==pool).all()
if __name__ == '__main__':
run_unittests(globals())
|
the-stack_0_541 | ##############################################################################
#
# Copyright (c) 2019 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
TPC protocol state management.
The various states in which a storage instance can find itself during
two-phase commit are complicated. This package presents a set of
objects that encapsulate various possibilities. In this way we can
test independent states...independently, and the state transitions are
explicit.
"""
from __future__ import absolute_import
from __future__ import print_function
import logging
from transaction.interfaces import NoTransaction
from transaction._transaction import rm_key
from transaction import get as get_thread_local_transaction
from perfmetrics import statsd_client
from zope.interface import implementer
from ZODB.POSException import ReadOnlyError
from ZODB.POSException import StorageTransactionError
from ..interfaces import ITPCStateNotInTransaction
from ..interfaces import ITPCStateDatabaseAvailable
from ...adapters.connections import ClosedConnection
from ..._util import Lazy as BaseLazy
from ..._util import get_boolean_from_environ
from .temporary_storage import TemporaryStorage
logger = logging.getLogger(__name__)
_CLOSED_CONNECTION = ClosedConnection()
#: Set the ``RELSTORAGE_LOCK_EARLY`` environment variable if you
#: experience deadlocks or failures to commit (``tpc_finish``). This
#: will cause the commit lock to be taken as part of ``tpc_vote``
#: (similar to RelStorage 2.x) instead of deferring it until
#: ``tpc_finish``.
#:
#: If this is necessary, this is probably a bug in RelStorage; please report
#: it.
LOCK_EARLY = get_boolean_from_environ(
'RELSTORAGE_LOCK_EARLY',
False,
logger=logger,
)
class _LazyResource(BaseLazy):
# If not None, a callable ``(storage, resource, force)``
# that aborts the *resource*, possibly forcefully (*force*).
# The return value will be the new value in the object
# instance.
abort_function = None
# If not None, a callable ``(storage, resource)`` to clean up
# any use of the *resource* after success.
release_function = None
def _stored_value_for_name_in_inst(self, value, name, inst):
# type: (Any, str, SharedTPCState) -> None
if name == 'store_connection':
# Try to do this first
inst._used_resources.insert(0, self)
else:
inst._used_resources.append(self)
def aborter(self, func):
assert not isinstance(func, _LazyResource)
self.abort_function = func
return self
def releaser(self, func):
assert not isinstance(func, _LazyResource)
self.release_function = func
return self
def cleaner(self, func):
self.abort_function = self.release_function = func
return self
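# Sketch of how the _LazyResource descriptor is used below (the names in this
# example are illustrative, not part of the real API):
#
#     class SomeState(object):
#         @_LazyResource
#         def thing(self):
#             return open_thing()          # created lazily on first access
#
#         @thing.releaser
#         def thing(self, storage, thing):
#             thing.close()                # called by release() after success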
class SharedTPCState(object):
"""
Contains attributes marking resources that *might* be used during the commit
process. If any of them are, then the `abort` method takes care of cleaning them up.
Accessing a resource implicitly begins it, if needed.
"""
# pylint:disable=method-hidden
prepared_txn = None
transaction = None
not_in_transaction_state = None
read_only = False # Or we wouldn't allocate this object.
def __init__(self, initial_state, storage, transaction):
self.initial_state = initial_state
self._storage = storage
self.transaction = transaction
self._used_resources = []
@_LazyResource
def local_client(self):
return self._storage._cache.local_client
@_LazyResource
def store_connection(self):
conn = self._storage._store_connection_pool.borrow()
# Report on the connection we will use.
# https://github.com/zodb/relstorage/issues/460
logger.info("Using store connection %s", conn)
return conn
@store_connection.aborter
def store_connection(self, storage, store_connection, force):
try:
adapter = storage._adapter
if store_connection:
# It's possible that this connection/cursor was
# already closed if an error happened (which would
# release the locks). Don't try to re-open it.
adapter.locker.release_commit_lock(store_connection.cursor)
# Though, this might re-open it.
adapter.txncontrol.abort(
store_connection,
self.prepared_txn)
if force:
store_connection.drop()
finally:
storage._store_connection_pool.replace(store_connection)
return _CLOSED_CONNECTION
@store_connection.releaser
def store_connection(self, storage, store_connection):
storage._store_connection_pool.replace(store_connection)
return _CLOSED_CONNECTION
@_LazyResource
def load_connection(self):
return self._storage._load_connection
@load_connection.aborter
def load_connection(self, _storage, load_connection, force):
if force:
load_connection.drop()
else:
load_connection.rollback_quietly()
load_connection.exit_critical_phase()
return _CLOSED_CONNECTION
@load_connection.releaser
def load_connection(self, _storage, load_connection):
load_connection.rollback_quietly()
load_connection.exit_critical_phase()
return _CLOSED_CONNECTION
@_LazyResource
def blobhelper(self):
blobhelper = self._storage.blobhelper
blobhelper.begin()
return blobhelper
@blobhelper.aborter
def blobhelper(self, _storage, blobhelper, _force):
blobhelper.abort()
@blobhelper.releaser
def blobhelper(self, _storage, blobhelper):
blobhelper.clear_temp()
def has_blobs(self):
# pylint:disable=no-member
return (
'blobhelper' in self.__dict__
and self.blobhelper is not None
and self.blobhelper.txn_has_blobs
)
@BaseLazy
def cache(self):
return self._storage._cache
@BaseLazy
def adapter(self):
return self._storage._adapter
@_LazyResource
def temp_storage(self):
return TemporaryStorage()
@temp_storage.cleaner
def temp_storage(self, _storage, temp_storage, _force=None):
temp_storage.close()
def has_temp_data(self):
return 'temp_storage' in self.__dict__ and self.temp_storage
@_LazyResource
def _statsd_buf(self):
return []
@_statsd_buf.cleaner
    def _statsd_buf(self, _storage, buf, _force=None):
client = statsd_client()
if client is not None and buf:
client.sendbuf(buf)
def stat_timing(self, stat, value, rate=1):
"""
Record a timing value.
For compatibility with the default settings of ``perfmetrics``,
the stat name should end in ``.t``
The *value* should be a floating point difference of seconds
(eg, ``time.time() - time.time()``). This will be converted to an integer
number of milliseconds (again for consistency with ``perfmetrics``).
"""
client = statsd_client()
if client is not None:
# scale from float seconds to milliseconds
value = int(value * 1000.0)
client.timing(stat, value, rate, self._statsd_buf)
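    # Usage sketch (the stat name below is illustrative): record how long a
    # phase took; the float seconds are converted to integer milliseconds.
    #
    #     begin = time.time()
    #     ...  # do the work
    #     shared_state.stat_timing('relstorage.tpc_vote.t', time.time() - begin)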
def stat_count(self, stat, value, rate=1):
client = statsd_client()
if client is not None:
client.incr(stat, value, rate, self._statsd_buf)
def __cleanup(self, method_name, method_args):
storage = self._storage
resources = self._used_resources
self._used_resources = () # No more opening resources.
exceptions = []
for resource in resources:
assert resource.__name__ in vars(self)
cleaner = getattr(resource, method_name)
if not cleaner:
setattr(self, resource.__name__, None)
continue
value = getattr(self, resource.__name__)
new_value = None
try:
new_value = cleaner(self, storage, value, *method_args)
except Exception as ex: # pylint:disable=broad-except
exceptions.append(ex)
setattr(self, resource.__name__, new_value)
if exceptions: # pragma: no cover
# This usually indicates a bug in RelStorage that should be fixed.
raise Exception("Failed to close one or more resources: %s" % (exceptions,))
def abort(self, force=False):
self.__cleanup('abort_function', (force,))
def release(self):
self.__cleanup('release_function', ())
@implementer(ITPCStateDatabaseAvailable)
class AbstractTPCStateDatabaseAvailable(object):
__slots__ = (
'shared_state',
)
# - store
# - restore/restoreBlob
# - deleteObject
# - undo
# should raise ReadOnlyError if the storage is read only.
# - tpc_vote should raise StorageTransactionError
# Because entering tpc_begin wasn't allowed if the storage was
# read only, this needs to happen in the "not in transaction"
# state.
def __init__(self, shared_state):
self.shared_state = shared_state # type: SharedTPCState
@property
def transaction(self):
return self.shared_state.transaction
@property
def initial_state(self):
return self.shared_state.initial_state
@property
def store_connection(self):
return self.shared_state.store_connection
def __repr__(self):
result = "<%s at 0x%x stored_count=%s %s" % (
type(self).__name__,
id(self),
len(getattr(self, 'temp_storage', ()) or ()),
self._tpc_state_transaction_data(),
)
extra = self._tpc_state_extra_repr_info()
for k, v in extra.items():
result += ' %s=%r' % (k, v)
result += '>'
return result
def _tpc_state_extra_repr_info(self):
return {}
def _tpc_state_transaction_data(self):
# Grovels around in the transaction object and tries to find interesting
# things to include.
# The ZODB Connection passes us an internal TransactionMetaData
# object; the real transaction object stores a reference to that in its data,
# keyed off the connection.
# We may or may not be able to get the real transaction using transaction.get(),
# depending on if we are using the global (thread local) transaction manager or not.
try:
global_tx = get_thread_local_transaction()
except NoTransaction:
# It's in explicit mode and we're not using it.
return "<no global transaction> tx=%r" % (self.transaction,)
tx_data = getattr(global_tx, '_data', None)
if not tx_data:
# No data stored on the transaction (or the implementation changed!)
return "<no transaction data> tx=%r" % (self.transaction,)
for v in tx_data.values():
if v is self.transaction:
# Yes, we found the metadata that ZODB uses, so we are
# joined to this transaction.
break
else:
return "<no transaction meta %r> tx=%r" % (tx_data, self.transaction,)
resources = sorted(global_tx._resources, key=rm_key)
return "transaction=%r resources=%r" % (global_tx, resources)
def tpc_finish(self, storage, transaction, f=None, _time=None): # pylint:disable=unused-argument
# For the sake of some ZODB tests, we need to implement this everywhere,
# even if it's not actually usable, and the first thing it needs to
# do is check the transaction.
if transaction is not self.transaction:
raise StorageTransactionError('tpc_finish called with wrong transaction')
raise NotImplementedError("tpc_finish not allowed in this state.")
def tpc_begin(self, _storage, transaction):
# Ditto as for tpc_finish
raise StorageTransactionError('tpc_begin not allowed in this state', type(self))
def tpc_abort(self, transaction, force=False):
if not force:
if transaction is not self.transaction:
return self
self.shared_state.abort(force)
return self.initial_state
def no_longer_stale(self):
return self
def stale(self, e):
return Stale(self, e)
def close(self):
if self.shared_state is not None:
self.tpc_abort(None, True)
self.shared_state = None
@implementer(ITPCStateNotInTransaction)
class NotInTransaction(object):
# The default state, when the storage is not attached to a
# transaction.
__slots__ = (
'last_committed_tid_int',
'read_only',
'begin_factory',
)
transaction = None
def __init__(self, begin_factory, read_only, committed_tid_int=0):
self.begin_factory = begin_factory
self.read_only = read_only
self.last_committed_tid_int = committed_tid_int
def with_committed_tid_int(self, committed_tid_int):
return NotInTransaction(
self.begin_factory,
self.read_only,
committed_tid_int
)
def tpc_abort(self, *args, **kwargs): # pylint:disable=arguments-differ,unused-argument,signature-differs
# Nothing to do
return self
def _no_transaction(self, *args, **kwargs):
raise StorageTransactionError("No transaction in progress")
tpc_finish = tpc_vote = _no_transaction
checkCurrentSerialInTransaction = _no_transaction
def store(self, *_args, **_kwargs):
if self.read_only:
raise ReadOnlyError()
self._no_transaction()
restore = deleteObject = undo = restoreBlob = store
def tpc_begin(self, storage, transaction): # XXX: Signature needs to change.
if self.read_only:
raise ReadOnlyError()
if transaction is self.transaction: # Also handles None.
raise StorageTransactionError("Duplicate tpc_begin calls for same transaction.")
state = SharedTPCState(self, storage, transaction)
try:
return self.begin_factory(state)
except:
state.abort()
raise
@property
def initial_state(self):
return self
# This object appears to be false.
def __bool__(self):
return False
__nonzero__ = __bool__
def close(self):
pass
@implementer(ITPCStateNotInTransaction)
class Stale(object):
"""
An error that lets us know we are stale
was encountered.
Just about all accesses to this object result in
re-raising that error.
"""
transaction = None
last_committed_tid_int = 0
def __init__(self, previous_state, stale_error):
self.previous_state = previous_state
self.stale_error = stale_error
def _stale(self, *args, **kwargs):
raise self.stale_error
store = restore = checkCurrentSerialInTransaction = _stale
undo = deleteObject = restoreBlob = _stale
tpc_begin = tpc_finish = tpc_vote = _stale
def tpc_abort(self, *args, **kwargs):
return self.previous_state.tpc_abort(*args, **kwargs)
@property
def initial_state(self):
return self.previous_state.initial_state
def no_longer_stale(self):
return self.previous_state
def stale(self, _e):
return self
def __bool__(self):
return False
__nonzero__ = __bool__
|
the-stack_0_543 |
IGNORED = None
ACTION_PENDING = 1
# Bigger than necessary
_MAX_VK_KEY = 0x200
_VK_KEY_MASK = 0x1ff
_CURRENT_KEY_STATE = [False] * _MAX_VK_KEY
_MODIFIERS = set()
def on_key_hook(vk_code, is_down, special_modifier_state = None):
"""
    Update the module-wide record of the current key state.
:param vk_code:
:param is_down:
:param special_modifier_state: map of vcodes to the up/down state
(True == is_down, False == !is_down). This is part of the
windows key state / locked desktop work-around.
:return: True if it's a recognized key, False if it isn't known.
"""
    if special_modifier_state is not None:
        for k, v in special_modifier_state.items():
            if k != vk_code and k in _MODIFIER_KEYS:
                if _CURRENT_KEY_STATE[k] != v:
                    print("DEBUG modifier {0} does not match inner state.".format(k))
                # Track the modifier according to its reported state (v); use
                # discard() so a stray key-up cannot raise a KeyError.
                if v:
                    _MODIFIERS.add(k)
                else:
                    _MODIFIERS.discard(k)
                _CURRENT_KEY_STATE[k] = v
    # _CURRENT_KEY_STATE has _MAX_VK_KEY entries, so the index must stay
    # strictly below _MAX_VK_KEY.
    if 0 <= vk_code < _MAX_VK_KEY:
        _CURRENT_KEY_STATE[vk_code] = is_down
        if vk_code in _MODIFIER_KEYS:
            if is_down:
                _MODIFIERS.add(vk_code)
            else:
                _MODIFIERS.discard(vk_code)
return True
return False
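# Usage sketch (hypothetical key presses): feeding a left-shift key-down,
# an 'a' key-down and then a left-shift key-up updates the module state.
#
#     on_key_hook(STR_VK_MAP["lshift"], True)   # lshift tracked in _MODIFIERS
#     on_key_hook(STR_VK_MAP["a"], True)        # _CURRENT_KEY_STATE[0x41] is True
#     on_key_hook(STR_VK_MAP["lshift"], False)  # lshift removed from _MODIFIERS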
class KeyOverride(object):
"""
Captures all key presses. Certain keys map to actions.
All keys are simple (straight up keys; modifiers are considered keys).
One key per action.
"""
def __init__(self, key_commands=None):
self.__keys = {}
if key_commands is not None:
self.set_key_actions(key_commands)
def set_key_actions(self, actions):
assert isinstance(actions, dict)
# FIXME use a dict instead
# TODO in the future we may allow "shift+left" type keys here.
# The implementation in key_action would just check the _MODIFIERS
# state.
new_key_actions = {}
for key, action in actions.items():
assert isinstance(action, list) or isinstance(action, tuple)
action = tuple(action)
key = key.strip().lower()
if key in VK_ALIASES:
for k in VK_ALIASES[key]:
if k in MODIFIERS:
# TODO better error / warning
# Note use of user's value "key", rather than internal "k"
print("CONFIG ERROR: Simple keys are not allowed to be modifiers: {0}".format(key))
elif k in STR_VK_MAP:
# print("DEBUG KeyOverride: assigning {0} = `{1}`".format(hex(STR_VK_MAP[k]), action))
new_key_actions[STR_VK_MAP[k]] = action
else:
# TODO better error / warning
print("ERROR IN SETUP: alias {0} not in vk map".format(k))
elif key in MODIFIERS:
# TODO better error / warning
print("CONFIG ERROR: Simple keys are not allowed to be modifiers: {0}".format(key))
elif key in STR_VK_MAP:
new_key_actions[STR_VK_MAP[key]] = action
else:
# TODO better error / warning
print("CONFIG ERROR: Simple key not a known key: {0}".format(key))
self.__keys = new_key_actions
def reset(self):
pass
def key_action(self, vk_code, is_down):
if vk_code in _MODIFIER_KEYS:
# Ignore all modifier keys, so the "release" from a mode switch works right.
# This ties in with modifiers not allowed as simple keys.
return IGNORED
if not is_down and vk_code in self.__keys:
return self.__keys[vk_code]
# Prevent all other keys from working
return ACTION_PENDING
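# Sketch of intended use (the action payloads below are made up):
#
#     override = KeyOverride({"esc": ["exit-mode"], "f1": ["show-help"]})
#     override.key_action(STR_VK_MAP["esc"], is_down=False)  # -> ("exit-mode",)
#     override.key_action(STR_VK_MAP["a"], is_down=True)     # -> ACTION_PENDING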
class HotKeyChain(object):
"""
    Takes key presses and manages the state of the keys.
    It stores a list of key-chain-to-action pairs.
There should be one of these per system "mode".
"""
def __init__(self, chain_commands=None):
self.__combos = []
# The modifiers which are down and associated with the active combos
self.__active_modifiers = []
# The previous key in the combo chain; we're waiting for it to be off.
self.__active_key = None
# The active combo chains. Index 0 in each item is the remaining list
# of key down actions to look for ([0] meaning the next one). Index 1
# in each item is the command to return.
self.__active_combos = []
# Set to True to prevent the OS shell from using the "windows" key.
self.block_win_key = False
if chain_commands is not None:
self.set_key_chains(chain_commands)
def set_key_chains(self, chain_commands):
assert isinstance(chain_commands, dict)
combos = []
for key_chain, command in chain_commands.items():
assert isinstance(command, list) or isinstance(command, tuple)
keys = parse_combo_str(key_chain)
if len(keys) > 0:
# We store modifiers a little differently.
# Rather than having a list of lists, which must be
# carefully examined, we instead construct the
# permutations of the keys, and store each of those as
# their own combo.
permutation_keys = []
_key_permutations(keys[0], 0, [], permutation_keys)
for perm in permutation_keys:
# print("DEBUG Combo {0} + {1} => {2}".format(perm, keys[1:], command))
combos.append((perm, keys[1:], tuple(command)))
# Change the variable in a single command.
self.__combos = combos
self.reset()
def reset(self):
self.__active_combos = []
self.__active_modifiers = []
self.__active_key = None
def key_action(self, vk_code, is_down):
"""
:param is_down:
:param vk_code:
:return: IGNORED if the key should be passed through,
ACTION_PENDING if the key should be blocked from passing to
another application, but does not complete an action, or
a list of the action to run.
"""
if _MODIFIERS == self.__active_modifiers:
if self.__active_key is None or not _CURRENT_KEY_STATE[self.__active_key]:
# The previous key is no longer down.
self.__active_key = None
next_combos = []
for ac in self.__active_combos:
if vk_code in ac[0][0]:
ac[0].pop(0)
if len(ac[0]) <= 0:
# We have our key
command = ac[1]
self.reset()
# print("DEBUG keys generated command {0}".format(command))
return command
next_combos.append(ac)
if len(next_combos) > 0:
self.__active_key = vk_code
self.__active_combos = next_combos
return ACTION_PENDING
elif is_down:
# A new key was pressed, which isn't a key in a pending
# combo. Reset our hot keys, and return an ignored.
self.reset()
# else, the previous active key is still down; wait for it
# to come up.
else:
# Discover which combo matches the modifiers.
self.reset()
new_active = []
for combo in self.__combos:
if combo[0] == _MODIFIERS:
new_active.append((list(combo[1]), combo[2]))
if len(new_active) > 0:
self.__active_key = None
self.__active_combos = new_active
self.__active_modifiers = set(_MODIFIERS)
# We still pass on the modifiers to the OS, just in case it's not
# a match.
if self.block_win_key and vk_code in _WIN_KEYS:
return ACTION_PENDING
return IGNORED
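# Sketch of intended use (the chain string and command are made up):
#
#     hotkeys = HotKeyChain({"win + shift + z, x": ["maximize"]})
#     # Hold win+shift, tap 'z', then press 'x'; key_action() returns the
#     # ("maximize",) command on the final key-down and ACTION_PENDING for the
#     # intermediate keys.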
def parse_combo_str(chain_description):
"""
    Parse the compact combo string. For each part of the key combo, we
    collect the VK codes that must be "down" in order to trigger the next
    part of the chain.
The format is "primary + primary + ... + key, key, key, ..."
:param chain_description:
:return: list of aliased combo lists. So, the return will be
[primary, key1, key2, ...], where "primary" are the primary
keys that must be pressed through the whole action. Key1 and
key2 (and so on) are the keys that must be pressed and released
in order (the last key will respond on key down). Each key
in the list is itself a list of alternate keys.
"""
assert isinstance(chain_description, str)
key_parts = chain_description.split(",")
# Parse the primary first. These are separated by "+".
# The last key in the list is the "non-always-down" key,
# meaning it's the first in the key chain.
primary_list = []
primary_keys = key_parts[0].split("+")
secondary_keys = [primary_keys[-1]]
secondary_keys.extend(key_parts[1:])
for key_text in primary_keys[:-1]:
primary_key = []
key_text = key_text.strip().lower()
if key_text in VK_ALIASES:
for k in VK_ALIASES[key_text]:
if k in STR_VK_MAP:
if k in MODIFIERS:
primary_key.append(STR_VK_MAP[k])
else:
# TODO better error / warning
print("CONFIG ERROR: Primary key not a modifier {0}".format(k))
else:
print("ERROR IN SETUP: alias {0} not in vk map".format(k))
elif key_text in STR_VK_MAP:
if key_text in MODIFIERS:
primary_key.append(STR_VK_MAP[key_text])
else:
# TODO better error / warning
print("CONFIG ERROR: Primary key not a modifier {0}".format(key_text))
else:
# TODO better error / warning
print("CONFIG ERROR: unknown key code [{0}]".format(key_text))
if len(primary_key) > 0:
primary_list.append(primary_key)
chain = [primary_list]
for key_text in secondary_keys:
key = []
key_text = key_text.strip().lower()
if key_text in VK_ALIASES:
for k in VK_ALIASES[key_text]:
if k in STR_VK_MAP:
if k in MODIFIERS:
# TODO better error / warning
print("CONFIG ERROR: secondary key is a modifier {0}".format(k))
else:
key.append(STR_VK_MAP[k])
else:
print("ERROR IN SETUP: alias {0} not in vk map".format(k))
elif key_text in STR_VK_MAP:
if key_text in MODIFIERS:
# TODO better error / warning
print("CONFIG ERROR: secondary key is a modifier {0}".format(key_text))
else:
key.append(STR_VK_MAP[key_text])
else:
# TODO better error / warning
print("CONFIG ERROR: unknown key code {0}".format(key_text))
if len(key) > 0:
chain.append(key)
return chain
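# Example (illustrative): "lwin + x, a" parses to [[[0x5B]], [0x58], [0x41]],
# i.e. the lwin modifier that stays held, followed by the ordered tap keys
# 'x' and 'a'.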
def _key_permutations(key_alt_list, alt_index, current_list, final_list):
"""
Takes a list of key alternates ([ [k1a, k1b, ...], [k2a, k2b, ...], ...])
    and expands it into every combination that picks one key from each
    position, appending each combination to *final_list* as a set.
    :param key_alt_list: list of alternate-key lists, one per chain position
    :return: None; results are accumulated in *final_list*
"""
for key in key_alt_list[alt_index]:
next_list = list(current_list)
next_list.append(key)
if alt_index + 1 >= len(key_alt_list):
final_list.append(set(next_list))
else:
_key_permutations(key_alt_list, alt_index + 1, next_list, final_list)
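# Example (illustrative): alternates [[1, 2], [3]] expand into two sets.
#
#     final = []
#     _key_permutations([[1, 2], [3]], 0, [], final)
#     # final == [{1, 3}, {2, 3}]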
def vk_to_names(vk):
maps = []
for vk_str, code in STR_VK_MAP.items():
# There are multiple mappings; return them all.
if code == vk:
maps.append(vk_str)
if len(maps) <= 0:
maps.append("#{0}".format(hex(vk)))
return maps
def is_vk_modifier(vk):
return vk in _MODIFIER_KEYS
# Built-in alias VK keys for user-specified keys
VK_ALIASES = {
"win": ["lwin", "rwin"],
"shift": ["lshift", "rshift"],
"control": ["lcontrol", "rcontrol"],
"alt": ["lalt", "ralt"],
"menu": ["lmenu", "rmenu"],
}
# Set of all recognized modifiers
MODIFIERS = {
"shift",
"lshift",
"rshift",
"control",
"ctrl",
"lcontrol",
"lctrl",
"rcontrol",
"rctrl",
"alt",
"lalt",
"ralt",
"lwin",
"rwin",
"lmenu",
"rmenu",
"apps",
"caps-lock",
}
# https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx
STR_VK_MAP = {
"lmb": 0x01, # VK_LBUTTON Left mouse button
"rmb": 0x02, # VK_RBUTTON Right mouse button
"break": 0x03, # VK_CANCEL Control-break processing
"mmb": 0x04, # VK_MBUTTON Middle mouse button (three-button mouse)
"x1mb": 0x05, # VK_XBUTTON1 X1 mouse button
"x2mb": 0x06, # VK_XBUTTON2 X2 mouse button
"x3mb": 0x07, # - Undefined
"back": 0x08, # VK_BACK BACKSPACE key
"backspace": 0x08, # VK_BACK BACKSPACE key
"tab": 0x09, # VK_TAB TAB key
# - 0x0A-0B Reserved
"clear": 0x0C, # VK_CLEAR CLEAR key
"return": 0x0D, # VK_RETURN ENTER key
"enter": 0x0D, # VK_RETURN ENTER key
"cr": 0x0D, # VK_RETURN ENTER key
"lf": 0x0D, # VK_RETURN ENTER key
# - 0x0E-0F Undefined
# These VK keys don't seem to get generated by the global key handler;
# instead, the more low-level (lcontrol, rcontrol, etc) ones are.
"shift": 0x10, # VK_SHIFT SHIFT key
"sft": 0x10, # VK_SHIFT SHIFT key
"control": 0x11, # VK_CONTROL CTRL key
"ctrl": 0x11, # VK_CONTROL CTRL key
"menu": 0x12, # VK_MENU ALT key
"alt": 0x12, # VK_MENU ALT key
"pause": 0x13, # VK_PAUSE PAUSE key
"caps-lock": 0x14, # VK_CAPITAL CAPS LOCK key
"kana": 0x15, # VK_KANA IME Kana mode
"hanguel": 0x15, # VK_HANGUEL IME Hanguel mode (maintained for compatibility; use VK_HANGUL)
"hangul": 0x15, # VK_HANGUL IME Hangul mode
# - 0x16 Undefined
"junja": 0x17, # VK_JUNJA IME Junja mode
"final": 0x18, # VK_FINAL IME final mode
"hanja": 0x19, # VK_HANJA IME Hanja mode
"kanji": 0x19, # VK_KANJI IME Kanji mode
# 0x1A - Undefined
"escape": 0x1B, # VK_ESCAPE ESC key
"esc": 0x1B, # VK_ESCAPE ESC key
"convert": 0x1C, # VK_CONVERT IME convert
"nonconvert": 0x1D, # VK_NONCONVERT IME nonconvert
"accept": 0x1E, # VK_ACCEPT IME accept
"modechange": 0x1F, # VK_MODECHANGE IME mode change request
"space": 0x20, # VK_SPACE SPACEBAR
"prior": 0x21, # VK_PRIOR PAGE UP key
"pgup": 0x21, # VK_PRIOR PAGE UP key
"pageup": 0x21, # VK_PRIOR PAGE UP key
"next": 0x22, # VK_NEXT PAGE DOWN key
"pgdn": 0x22, # VK_NEXT PAGE DOWN key
"pagedown": 0x22, # VK_NEXT PAGE DOWN key
"end": 0x23, # VK_END END key
"home": 0x24, # VK_HOME HOME key
"left": 0x25, # VK_LEFT LEFT ARROW key
"up": 0x26, # VK_UP UP ARROW key
"right": 0x27, # VK_RIGHT RIGHT ARROW key
"down": 0x28, # VK_DOWN DOWN ARROW key
"select": 0x29, # VK_SELECT SELECT key
"print": 0x2A, # VK_PRINT PRINT key
"execute": 0x2B, # VK_EXECUTE EXECUTE key
"snapshot": 0x2C, # VK_SNAPSHOT PRINT SCREEN key
"insert": 0x2D, # VK_INSERT INS key
"delete": 0x2E, # VK_DELETE DEL key
"del": 0x2E, # VK_DELETE DEL key
"help": 0x2F, # VK_HELP HELP key
"lwin": 0x5B, # VK_LWIN Left Windows key (Natural keyboard)
"rwin": 0x5C, # VK_RWIN Right Windows key (Natural keyboard)
"apps": 0x5D, # VK_APPS Applications key (Natural keyboard)
# 0x5E - Reserved
"sleep": 0x5F, # VK_SLEEP Computer Sleep key
"numpad0": 0x60, # VK_NUMPAD0 Numeric keypad 0 key
"numpad1": 0x61, # VK_NUMPAD1 Numeric keypad 1 key
"numpad2": 0x62, # VK_NUMPAD2 Numeric keypad 2 key
"numpad3": 0x63, # VK_NUMPAD3 Numeric keypad 3 key
"numpad4": 0x64, # VK_NUMPAD4 Numeric keypad 4 key
"numpad5": 0x65, # VK_NUMPAD5 Numeric keypad 5 key
"numpad6": 0x66, # VK_NUMPAD6 Numeric keypad 6 key
"numpad7": 0x67, # VK_NUMPAD7 Numeric keypad 7 key
"numpad8": 0x68, # VK_NUMPAD8 Numeric keypad 8 key
"numpad9": 0x69, # VK_NUMPAD9 Numeric keypad 9 key
"multiply": 0x6A, # VK_MULTIPLY Multiply key
"add": 0x6B, # VK_ADD Add key
"separator": 0x6C, # VK_SEPARATOR Separator key
"subtract": 0x6D, # VK_SUBTRACT Subtract key
"decimal": 0x6E, # VK_DECIMAL Decimal key
"divide": 0x6F, # VK_DIVIDE Divide key
"f1": 0x70, # VK_F1 F1 key
"f2": 0x71, # VK_F2 F2 key
"f3": 0x72, # VK_F3 F3 key
"f4": 0x73, # VK_F4 F4 key
"f5": 0x74, # VK_F5 F5 key
"f6": 0x75, # VK_F6 F6 key
"f7": 0x76, # VK_F7 F7 key
"f8": 0x77, # VK_F8 F8 key
"f9": 0x78, # VK_F9 F9 key
"f10": 0x79, # VK_F10 F10 key
"f11": 0x7A, # VK_F11 F11 key
"f12": 0x7B, # VK_F12 F12 key
"f13": 0x7C, # VK_F13 F13 key
"f14": 0x7D, # VK_F14 F14 key
"f15": 0x7E, # VK_F15 F15 key
"f16": 0x7F, # VK_F16 F16 key
"f17": 0x80, # VK_F17 F17 key
"f18": 0x81, # VK_F18 F18 key
"f19": 0x82, # VK_F19 F19 key
"f20": 0x83, # VK_F20 F20 key
"f21": 0x84, # VK_F21 F21 key
"f22": 0x85, # VK_F22 F22 key
"f23": 0x86, # VK_F23 F23 key
"f24": 0x87, # VK_F24 F24 key
# 0x88-8F - Unassigned
"numlock": 0x90, # VK_NUMLOCK NUM LOCK key
"scroll": 0x91, # VK_SCROLL SCROLL LOCK key
# 0x92-96 - OEM specific
# 0x97-9F - Unassigned
"lshift": 0xA0, # VK_LSHIFT Left SHIFT key
"rshift": 0xA1, # VK_RSHIFT Right SHIFT key
"lcontrol": 0xA2, # VK_LCONTROL Left CONTROL key
"lctrl": 0xA2, # VK_LCONTROL Left CONTROL key
"rcontrol": 0xA3, # VK_RCONTROL Right CONTROL key
"rctrl": 0xA3, # VK_RCONTROL Right CONTROL key
"lmenu": 0xA4, # VK_LMENU Left MENU key
"lalt": 0xA4, # VK_LMENU Left MENU key
"rmenu": 0xA5, # VK_RMENU Right MENU key
"ralt": 0xA5, # VK_RMENU Right MENU key
"browser-back": 0xA6, # VK_BROWSER_BACK Browser Back key
"browser-forward": 0xA7, # VK_BROWSER_FORWARD Browser Forward key
"browser-refresh": 0xA8, # VK_BROWSER_REFRESH Browser Refresh key
"browser-stop": 0xA9, # VK_BROWSER_STOP Browser Stop key
"browser-search": 0xAA, # VK_BROWSER_SEARCH Browser Search key
"browser-favorites": 0xAB, # VK_BROWSER_FAVORITES Browser Favorites key
"browser-home": 0xAC, # VK_BROWSER_HOME Browser Start and Home key
"volume-mute": 0xAD, # VK_VOLUME_MUTE Volume Mute key
"volume-down": 0xAE, # VK_VOLUME_DOWN Volume Down key
"volume-up": 0xAF, # VK_VOLUME_UP Volume Up key
"media-next-track": 0xB0, # VK_MEDIA_NEXT_TRACK Next Track key
"media-prev-track": 0xB1, # VK_MEDIA_PREV_TRACK Previous Track key
"media-stop": 0xB2, # VK_MEDIA_STOP Stop Media key
"media-play-pause": 0xB3, # VK_MEDIA_PLAY_PAUSE Play/Pause Media key
"launch-mail": 0xB4, # VK_LAUNCH_MAIL Start Mail key
"launch-media-select": 0xB5, # VK_LAUNCH_MEDIA_SELECT Select Media key
"launch-app1": 0xB6, # VK_LAUNCH_APP1 Start Application 1 key
"launch-app2": 0xB7, # VK_LAUNCH_APP2 Start Application 2 key
# 0xB8-B9 - Reserved
"oem_1": 0xBA, # VK_OEM_1 Used for miscellaneous characters;
# it can vary by keyboard. For the US standard keyboard, the ';:' key
":": 0xBA,
";": 0xBA,
"colon": 0xBA,
"oem_plus": 0xBB, # VK_OEM_PLUS For any country/region, the '+' key
"plus": 0xBB,
"oem_comma": 0xBC, # VK_OEM_COMMA For any country/region, the ',' key
"comma": 0xBC,
",": 0xBC,
"<": 0xBC,
"oem_minus": 0xBD, # VK_OEM_MINUS For any country/region, the '-' key
"minus": 0xBD,
"oem_period": 0xBE, # VK_OEM_PERIOD For any country/region, the '.' key
".": 0xBE,
"period": 0xBE,
">": 0xBE,
"oem_2": 0xBF, # VK_OEM_2 Used for miscellaneous characters;
# it can vary by keyboard. For the US standard keyboard, the '/?' key
"/": 0xBF,
"slash": 0xBF,
"?": 0xBF,
"question": 0xBF,
"question-mark": 0xBF,
"oem2": 0xBF,
"oem_3": 0xC0, # VK_OEM_3 Used for miscellaneous characters;
# it can vary by keyboard. For the US standard keyboard, the '`~' key
"oem3": 0xC0,
"~": 0xC0,
"tilde": 0xC0,
"twiddle": 0xC0,
"`": 0xC0,
"back-tick": 0xC0,
# 0xC1-D7 - Reserved
# 0xD8-DA - Unassigned
"oem_4": 0xDB, # VK_OEM_4 Used for miscellaneous characters;
# it can vary by keyboard. For the US standard keyboard, the '[{' key
"oem4": 0xDB,
"[": 0xDB,
"{": 0xDB,
"left-bracket": 0xDB,
"oem_5": 0xDC, # VK_OEM_5 Used for miscellaneous characters;
# it can vary by keyboard. For the US standard keyboard, the '\|' key
"oem5": 0xDC,
"|": 0xDC,
"\\": 0xDC,
"pipe": 0xDC,
"backslash": 0xDC,
"oem_6": 0xDD, # VK_OEM_6 Used for miscellaneous characters;
# it can vary by keyboard. For the US standard keyboard, the ']}' key
"oem6": 0xDD,
"]": 0xDD,
"}": 0xDD,
"right-bracket": 0xDD,
"oem_7": 0xDE, # VK_OEM_7 Used for miscellaneous characters;
# it can vary by keyboard. For the US standard keyboard,
# the 'single-quote/double-quote' key
"oem7": 0xDE,
'"': 0xDE,
"'": 0xDE,
"quote": 0xDE,
"tick": 0xDE,
"oem_8": 0xDF, # VK_OEM_8 Used for miscellaneous characters; it can vary by keyboard.
"oem8": 0xDF,
# 0xE0 - Reserved
# 0xE1 - OEM specific
"oem_102": 0xE2, # VK_OEM_102 Either the angle bracket key or the backslash key on
# the RT 102-key keyboard
"oem102": 0xE2,
# 0xE3-E4 - OEM specific
"processkey": 0xE5, # VK_PROCESSKEY IME PROCESS key
# 0xE6 - OEM specific
"packet": 0xE7, # VK_PACKET Used to pass Unicode characters as if they were
# keystrokes. The VK_PACKET key is the low word of a 32-bit Virtual
# Key value used for non-keyboard input methods. For more
# information, see Remark in KEYBDINPUT, SendInput, WM_KEYDOWN, and WM_KEYUP
# 0xE8 - Unassigned
# 0xE9-F5 - OEM specific
"attn": 0xF6, # VK_ATTN Attn key
"crsel": 0xF7, # VK_CRSEL CrSel key
"exsel": 0xF8, # VK_EXSEL ExSel key
"ereof": 0xF9, # VK_EREOF Erase EOF key
"play": 0xFA, # VK_PLAY Play key
"zoom": 0xFB, # VK_ZOOM Zoom key
"noname": 0xFC, # VK_NONAME Reserved
"pa1": 0xFD, # VK_PA1 PA1 key
"oem_clear": 0xFE, # VK_OEM_CLEAR Clear key
# 0x3A-40 - Undefined
"0": 0x30, # 0 key
"1": 0x31, # 1 key
"2": 0x32, # 2 key
"3": 0x33, # 3 key
"4": 0x34, # 4 key
"5": 0x35, # 5 key
"6": 0x36, # 6 key
"7": 0x37, # 7 key
"8": 0x38, # 8 key
"9": 0x39, # 9 key
"a": 0x41, # A key
"b": 0x42, # B key
"c": 0x43, # C key
"d": 0x44, # D key
"e": 0x45, # E key
"f": 0x46, # F key
"g": 0x47, # G key
"h": 0x48, # H key
"i": 0x49, # I key
"j": 0x4A, # J key
"k": 0x4B, # K key
"l": 0x4C, # L key
"m": 0x4D, # M key
"n": 0x4E, # N key
"o": 0x4F, # O key
"p": 0x50, # P key
"q": 0x51, # Q key
"r": 0x52, # R key
"s": 0x53, # S key
"t": 0x54, # T key
"u": 0x55, # U key
"v": 0x56, # V key
"w": 0x57, # W key
"x": 0x58, # X key
"y": 0x59, # Y key
"z": 0x5A, # Z key
}
_MODIFIER_KEYS = set()
for __k in MODIFIERS:
_MODIFIER_KEYS.add(STR_VK_MAP[__k])
_WIN_KEYS = [STR_VK_MAP['lwin'], STR_VK_MAP['rwin']]
SPECIAL_MODIFIER_CHECK_VKEY_CODES = (
STR_VK_MAP['lwin'], STR_VK_MAP['rwin']
)
|
the-stack_0_547 | from data.cifar import Cifar
from utility.step_lr import StepLR
from utility.initialize import initialize
from utility.log import Log
from utility.lognolr import LogNoLR
from model import *
import time
from model.preact_resnet import *
from model.smooth_cross_entropy import smooth_crossentropy
from model.wideresnet import WideResNet
from model.resnet import *
from model.vgg import *
from sam import SAM
import argparse
import torch
import sys
import os
import torchvision
import torchvision.transforms as transforms
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import CosineAnnealingLR, CosineAnnealingWarmRestarts
from utility.cosine_annealing_with_warmup_lr import CosineAnnealingWarmUpRestarts
import tensorboard
from utils import progress_bar
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--adaptive", default=True, type=bool,
help="True if you want to use the Adaptive SAM.")
parser.add_argument("--batch_size", default=128, type=int,
help="Batch size used in the training and validation loop.")
parser.add_argument("--depth", default=16, type=int,
help="Number of layers.")
parser.add_argument("--dropout", default=0.0,
type=float, help="Dropout rate.")
parser.add_argument("--epochs", default=150, type=int,
help="Total number of epochs.")
parser.add_argument("--label_smoothing", default=0.1,
type=float, help="Use 0.0 for no label smoothing.")
parser.add_argument("--learning_rate", default=0.1, type=float,
help="Base learning rate at the start of the training.")
parser.add_argument("--momentum", default=0.9,
type=float, help="SGD Momentum.")
parser.add_argument("--threads", default=2, type=int,
help="Number of CPU threads for dataloaders.")
parser.add_argument("--rho", default=0.5, type=int,
help="Rho parameter for SAM.")
parser.add_argument("--weight_decay", default=0.0005,
type=float, help="L2 weight decay.")
parser.add_argument("--width_factor", default=8, type=int,
help="In case WideResNet, how many times wider compared to normal ResNet.")
parser.add_argument("--SAM", default=False, type=bool,
help="Use SAM optimizer or SGD.")
parser.add_argument('--resume', '-r', action='store_true',
help='resume from checkpoint')
args = parser.parse_args()
initialize(args, seed=42)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Data
print('==> Preparing data..')
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465),
(0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465),
(0.2023, 0.1994, 0.2010)),
])
trainset = torchvision.datasets.CIFAR10(
root='./data', train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=128, shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(
root='./data', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(
testset, batch_size=100, shuffle=False, num_workers=2)
classes = ('plane', 'car', 'bird', 'cat', 'deer',
'dog', 'frog', 'horse', 'ship', 'truck')
# dataset = Cifar(args.batch_size, args.threads)
# Logger
# log = Log(log_each=10)
log = LogNoLR(log_each=10)
# which model to use (VGG, Preactivation ResNet,)
# model = WideResNet(args.depth, 10, args.width_factor,
# dropRate=args.dropout).to(device)
model = VGG16().to(device)
if device == 'cuda':
model = torch.nn.DataParallel(model)
best_acc = 0 # best test accuracy
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
hermite_bias_list = []
hermite_weight_list = []
for name, param in model.named_parameters():
if 'bias' in name:
hermite_bias_list.append(name)
if 'wts' in name:
hermite_weight_list.append(name)
hermite_list = hermite_bias_list + hermite_weight_list
params1 = list(map(lambda x: x[1], list(
filter(lambda kv: kv[0] in hermite_bias_list, model.named_parameters()))))
params2 = list(map(lambda x: x[1], list(
filter(lambda kv: kv[0] in hermite_weight_list, model.named_parameters()))))
# params3 = list(map(lambda x: x[1], list(
# filter(lambda kv: kv[0] in hermite_weight2_list, model.named_parameters()))))
# params3 = list(map(lambda x: x[1], list(
# filter(lambda kv: kv[0] in w3, model.named_parameters()))))
# params4 = list(map(lambda x: x[1], list(
# filter(lambda kv: kv[0] in w4, model.named_parameters()))))
base_params = list(map(lambda x: x[1], list(
filter(lambda kv: kv[0] not in hermite_list, model.named_parameters()))))
if args.resume:
# Load checkpoint.
print('==> Resuming from checkpoint..')
assert os.path.isdir(
'checkpoint'), 'Error: no checkpoint directory found!'
checkpoint = torch.load('./checkpoint/ckpt.pth')
model.load_state_dict(checkpoint['net'])
best_acc = checkpoint['acc']
start_epoch = checkpoint['epoch']
# Optimizer (SGD or SAM): SAM shows slightly better accuracy compared to SGD
if args.SAM is True:
base_optimizer = torch.optim.SGD
optimizer = SAM(
[{'params': base_params},
{'params': params1, 'weight_decay': 0, 'lr': args.learning_rate},
{'params': params2, 'weight_decay': args.weight_decay /
2, 'lr': args.learning_rate},
# {'params': params3, 'weight_decay': args.weight_decay /
# 2, 'lr': args.learning_rate},
# {'params': params4, 'weight_decay': args.weight_decay /
# 2, 'lr': args.learning_rate}
],
base_optimizer, rho=args.rho, adaptive=args.adaptive, lr=args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)
else:
optimizer = torch.optim.SGD(
[{'params': base_params},
{'params': params1, 'weight_decay': args.weight_decay /
2, 'lr': args.learning_rate},
{'params': params2, 'weight_decay': args.weight_decay /
2, 'lr': args.learning_rate},
# {'params': params3, 'weight_decay': args.weight_decay /
# 2, 'lr': args.learning_rate},
# {'params': params4, 'weight_decay': args.weight_decay/2, 'lr': args.learning_rate}
],
lr=args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)
scheduler = StepLR(optimizer, args.learning_rate, args.epochs)
print(args.epochs, " epochs")
if args.SAM is True:
print("SAM optimizer")
else:
print("SGD optimizer")
# print(list(model.parameters()))
def train(epoch):
print('\nEpoch: %d' % epoch)
model.train()
train_loss = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(trainloader):
inputs, targets = inputs.to(device), targets.to(device)
if args.SAM is False:
optimizer.zero_grad()
outputs = model(inputs)
loss = smooth_crossentropy(outputs, targets)
loss.mean().backward()
if args.SAM is True:
optimizer.first_step(zero_grad=True)
smooth_crossentropy(model(inputs), targets).mean().backward()
optimizer.second_step(zero_grad=True)
else:
optimizer.step()
train_loss += loss.mean().item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
def test(epoch):
global best_acc
model.eval()
test_loss = 0
correct = 0
total = 0
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(testloader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = model(inputs)
loss = smooth_crossentropy(outputs, targets)
test_loss += loss.mean().item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
acc = 100.*correct/total
if acc > best_acc:
best_acc = acc
# Save checkpoint.
print('Saving checkpoint..')
state = {
'net': model.state_dict(),
'acc': acc,
'epoch': epoch,
}
if not os.path.isdir('checkpoint'):
os.mkdir('checkpoint')
torch.save(state, './checkpoint/ckpt.pth')
# # Save torchscript
with torch.no_grad():
print('Saving Torch Script..')
if not os.path.isdir('torchscript'):
os.mkdir('torchscript')
example = torch.rand(1, 3, 32, 32).to(device)
traced_script_module = torch.jit.trace(model, example)
traced_script_module.save("./torchscript/model.pt")
for epoch in range(start_epoch, start_epoch+200):
train(epoch)
test(epoch)
scheduler(epoch)
##############################################################################################
##############################################################################################
##############################################################################################
##############################################################################################
##############################################################################################
##############################################################################################
##############################################################################################
##############################################################################################
##############################################################################################
##############################################################################################
# for epoch in range(args.epochs):
# model.train()
# log.train(len_dataset=len(dataset.train))
# for batch in dataset.train:
# inputs, targets = (b.to(device) for b in batch)
# if args.SAM is False:
# optimizer.zero_grad()
# predictions = model(inputs)
# loss = smooth_crossentropy(predictions, targets)
# loss.mean().backward()
# if args.SAM is True:
# optimizer.first_step(zero_grad=True)
# smooth_crossentropy(model(inputs), targets).mean().backward()
# optimizer.second_step(zero_grad=True)
# else:
# optimizer.step()
# with torch.no_grad():
# correct = torch.argmax(predictions.data, 1) == targets
# # log(model, loss.cpu(), correct.cpu(), scheduler.lr())
# # check which log function to use at line 61
# log(model, loss.cpu(), correct.cpu())
# scheduler(epoch)
# model.eval()
# log.eval(len_dataset=len(dataset.test))
# with torch.no_grad():
# for batch in dataset.test:
# inputs, targets = (b.to(device) for b in batch)
# predictions = model(inputs)
# loss = smooth_crossentropy(predictions, targets)
# correct = torch.argmax(predictions, 1) == targets
# log(model, loss.cpu(), correct.cpu())
# log.flush()
|
the-stack_0_550 | # https://www.kaggle.com/c/amazon-employee-access-challenge/forums/t/4838/python-code-to-achieve-0-90-auc-with-logistic-regression
__author__ = 'Miroslaw Horbal'
__email__ = '[email protected]'
__date__ = '14-06-2013'
import json
import pymongo as pymongo
from numpy import array
from sklearn import metrics, linear_model
from sklearn.model_selection import train_test_split
from scipy import sparse
from itertools import combinations
import numpy as np
import pandas as pd
SEED = 25
def group_data(data, degree=3, hash=hash):
"""
numpy.array -> numpy.array
    Groups the columns of data into all combinations of `degree` columns
    (default: triples), hashing each combined tuple into a single value.
"""
new_data = []
m, n = data.shape
    for indices in combinations(range(n), degree):
        new_data.append([hash(tuple(v)) for v in data[:, indices]])
return array(new_data).T
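# Example (illustrative): with degree=2 a (n_rows, 3) matrix yields a
# (n_rows, 3) matrix whose columns are hashes of the column pairs
# (0, 1), (0, 2) and (1, 2).
#
#     grouped = group_data(array([[1, 2, 3], [1, 2, 4]]), degree=2)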
def OneHotEncoder(data, keymap=None):
"""
OneHotEncoder takes data matrix with categorical columns and
converts it to a sparse binary matrix.
Returns sparse binary matrix and keymap mapping categories to indicies.
If a keymap is supplied on input it will be used instead of creating one
and any categories appearing in the data that are not in the keymap are
ignored
"""
if keymap is None:
keymap = []
for col in data.T:
uniques = set(list(col))
keymap.append(dict((key, i) for i, key in enumerate(uniques)))
total_pts = data.shape[0]
outdat = []
for i, col in enumerate(data.T):
km = keymap[i]
num_labels = len(km)
spmat = sparse.lil_matrix((total_pts, num_labels))
for j, val in enumerate(col):
if val in km:
spmat[j, km[val]] = 1
outdat.append(spmat)
outdat = sparse.hstack(outdat).tocsr()
return outdat, keymap
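# Usage sketch (illustrative): encode the training rows first, then reuse the
# returned keymap so unseen categories in the test rows are ignored instead of
# being given new columns.
#
#     X_train_ohe, keymap = OneHotEncoder(X_train_cat)
#     X_test_ohe, _ = OneHotEncoder(X_test_cat, keymap)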
def create_test_submission(filename, prediction):
content = []
for i, p in enumerate(prediction):
content.append({
'id': '%i' % (i + 1),
'ACTION': '%f' % p
})
f = open(filename, 'w')
json.dump(content, f)
f.close()
print('Saved')
# This loop essentially from Paul's starter code
def cv_loop(X, y, model, N):
mean_auc = 0.
for i in range(N):
X_train, X_cv, y_train, y_cv = train_test_split(
X, y, test_size=.20,
random_state=i * SEED)
model.fit(X_train, y_train)
preds = model.predict_proba(X_cv)[:, 1]
auc = metrics.roc_auc_score(y_cv, preds)
print("AUC (fold %d/%d): %f" % (i + 1, N, auc))
mean_auc += auc
return mean_auc / N
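# Usage sketch (the design matrix Xt below is a placeholder): average AUC over
# 10 random 80/20 splits for a logistic regression.
#
#     model = linear_model.LogisticRegression()
#     mean_auc = cv_loop(Xt, y, model, 10)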
def main(user, password):
print("Reading dataset...")
client = pymongo.MongoClient("mongodb://%s:%s@businessdb:27017" % (user, password))
train_data = pd.read_json(json.dumps(list(client.test.train.find({}, {'_id': 0}))), orient='records')
test_data = pd.read_json(json.dumps(list(client.test.test.find({}, {'_id': 0}))), orient='records')
all_data = np.vstack((train_data.iloc[:, 1:], test_data.iloc[:, :]))
num_train = np.shape(train_data)[0]
# Transform data
print("Transforming data...")
dp = group_data(all_data, degree=2)
dt = group_data(all_data, degree=3)
y = array(train_data.iloc[:, 0])
X = all_data[:num_train]
X_2 = dp[:num_train]
X_3 = dt[:num_train]
X_test = all_data[num_train:]
X_test_2 = dp[num_train:]
X_test_3 = dt[num_train:]
X_train_all = np.hstack((X, X_2, X_3))
X_test_all = np.hstack((X_test, X_test_2, X_test_3))
num_features = X_train_all.shape[1]
model = linear_model.LogisticRegression()
# Xts holds one hot encodings for each individual feature in memory
# speeding up feature selection
Xts = [OneHotEncoder(X_train_all[:, [i]])[0] for i in range(num_features)]
print("Performing greedy feature selection...")
score_hist = []
N = 10
good_features = set([])
# Greedy feature selection loop
while len(score_hist) < 2 or score_hist[-1][0] > score_hist[-2][0]:
scores = []
for f in range(len(Xts)):
if f not in good_features:
feats = list(good_features) + [f]
Xt = sparse.hstack([Xts[j] for j in feats]).tocsr()
score = cv_loop(Xt, y, model, N)
scores.append((score, f))
print("Feature: %i Mean AUC: %f" % (f, score))
good_features.add(sorted(scores)[-1][1])
score_hist.append(sorted(scores)[-1])
print("Current features: %s" % sorted(list(good_features)))
# Remove last added feature from good_features
good_features.remove(score_hist[-1][1])
good_features = sorted(list(good_features))
print("Selected features %s" % good_features)
print("Performing hyperparameter selection...")
# Hyperparameter selection loop
score_hist = []
Xt = sparse.hstack([Xts[j] for j in good_features]).tocsr()
Cvals = np.logspace(-4, 4, 15, base=2)
for C in Cvals:
model.C = C
score = cv_loop(Xt, y, model, N)
score_hist.append((score, C))
print("C: %f Mean AUC: %f" % (C, score))
bestC = sorted(score_hist)[-1][1]
print("Best C value: %f" % (bestC))
print("Performing One Hot Encoding on entire dataset...")
Xt = np.vstack((X_train_all[:, good_features], X_test_all[:, good_features]))
Xt, keymap = OneHotEncoder(Xt)
X_train = Xt[:num_train]
X_test = Xt[num_train:]
print("Training full model...")
model.fit(X_train, y)
print("Making prediction and saving results...")
preds = model.predict_proba(X_test)[:, 1]
create_test_submission('results.json', preds)
if __name__ == "__main__":
main('admin', 'toor')
|
the-stack_0_551 | import json
import logging
from datetime import date, datetime
from gzip import GzipFile
from io import BytesIO
from typing import Any, Optional, Union
import requests
from dateutil.tz import tzutc
from posthog.utils import remove_trailing_slash
from posthog.version import VERSION
_session = requests.sessions.Session()
DEFAULT_HOST = "https://app.posthog.com"
USER_AGENT = "posthog-python/" + VERSION
def post(
api_key: str, host: Optional[str] = None, path=None, gzip: bool = False, timeout: int = 15, **kwargs
) -> requests.Response:
"""Post the `kwargs` to the API"""
log = logging.getLogger("posthog")
body = kwargs
body["sentAt"] = datetime.utcnow().replace(tzinfo=tzutc()).isoformat()
url = remove_trailing_slash(host or DEFAULT_HOST) + path
body["api_key"] = api_key
data = json.dumps(body, cls=DatetimeSerializer)
log.debug("making request: %s", data)
headers = {"Content-Type": "application/json", "User-Agent": USER_AGENT}
if gzip:
headers["Content-Encoding"] = "gzip"
buf = BytesIO()
with GzipFile(fileobj=buf, mode="w") as gz:
# 'data' was produced by json.dumps(),
# whose default encoding is utf-8.
gz.write(data.encode("utf-8"))
data = buf.getvalue()
res = _session.post(url, data=data, headers=headers, timeout=timeout)
if res.status_code == 200:
log.debug("data uploaded successfully")
return res
def _process_response(
res: requests.Response, success_message: str, *, return_json: bool = True
) -> Union[requests.Response, Any]:
log = logging.getLogger("posthog")
if not res:
raise APIError(
"N/A",
"Error when fetching PostHog API, please make sure you are using your public project token/key and not a private API key.",
)
if res.status_code == 200:
log.debug(success_message)
return res.json() if return_json else res
try:
payload = res.json()
log.debug("received response: %s", payload)
raise APIError(res.status_code, payload["detail"])
except ValueError:
raise APIError(res.status_code, res.text)
def decide(api_key: str, host: Optional[str] = None, gzip: bool = False, timeout: int = 15, **kwargs) -> Any:
"""Post the `kwargs to the decide API endpoint"""
res = post(api_key, host, "/decide/", gzip, timeout, **kwargs)
return _process_response(res, success_message="Feature flags decided successfully")
def batch_post(
api_key: str, host: Optional[str] = None, gzip: bool = False, timeout: int = 15, **kwargs
) -> requests.Response:
"""Post the `kwargs` to the batch API endpoint for events"""
res = post(api_key, host, "/batch/", gzip, timeout, **kwargs)
return _process_response(res, success_message="data uploaded successfully", return_json=False)
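# Illustrative usage sketch (added; not part of this module). The key and event
# payload below are placeholders -- batch_post simply forwards its kwargs as the
# JSON body to /batch/:
#
#   batch_post("phc_project_api_key", batch=[{"event": "sign_up", "distinct_id": "user-1"}], gzip=True)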
def get(api_key: str, url: str, host: Optional[str] = None, timeout: Optional[int] = None) -> requests.Response:
url = remove_trailing_slash(host or DEFAULT_HOST) + url
res = requests.get(url, headers={"Authorization": "Bearer %s" % api_key, "User-Agent": USER_AGENT}, timeout=timeout)
return _process_response(res, success_message=f"GET {url} completed successfully")
def shutdown():
# Avoid logs with
# sys:1: ResourceWarning: unclosed
# <ssl.SSLSocket fd=10, family=AddressFamily.AF_INET, type=SocketKind.SOCK_STREAM, proto=0,
# laddr=('x.x.x.x', y), raddr=('x.x.x.x', 443)>
    # Should only be called once; it renders `_session` unusable
_session.close()
class APIError(Exception):
def __init__(self, status: Union[int, str], message: str):
self.message = message
self.status = status
def __str__(self):
msg = "[PostHog] {0} ({1})"
return msg.format(self.message, self.status)
class DatetimeSerializer(json.JSONEncoder):
def default(self, obj: Any):
if isinstance(obj, (date, datetime)):
return obj.isoformat()
return json.JSONEncoder.default(self, obj)
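# Usage sketch (added for illustration): DatetimeSerializer lets json.dumps
# handle date/datetime values directly, exactly as post() does above, e.g.
#   json.dumps({"sentAt": datetime.utcnow()}, cls=DatetimeSerializer)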
|
the-stack_0_555 | from threading import Thread
import pyrealtime as prt
class SubprocessLayer(prt.TransformMixin, prt.ThreadLayer):
def __init__(self, port_in, cmd, *args, encoder=None, decoder=None, **kwargs):
super().__init__(port_in, *args, **kwargs)
self.cmd = cmd
self.proc = None
self.read_thread = None
self._encode = encoder if encoder is not None else self.encode
self._decode = decoder if decoder is not None else self.decode
def encode(self, data):
return data + "\n"
def decode(self, data):
return data.rstrip().decode('utf-8')
def initialize(self):
try:
import pexpect.popen_spawn
except ImportError:
raise ModuleNotFoundError("pexpect required to use subprocess layers")
self.proc = pexpect.popen_spawn.PopenSpawn(self.cmd)
self.read_thread = Thread(target=self.read_loop)
self.read_thread.start()
def read_loop(self):
import pexpect
while True:
try:
index = self.proc.expect(".*\n")
data = self.proc.match[index]
self.handle_output(self._decode(data))
except pexpect.exceptions.EOF:
print("end of file")
return prt.LayerSignal.STOP
def transform(self, data):
self.proc.write(self._encode(data))
return None
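# Hedged usage sketch (added; not part of the original module). The command and
# wiring below are assumptions for illustration only -- the layer writes each
# incoming item to the child's stdin (via encode) and emits each line the child
# prints (via decode) as its own output:
#
#   cmd_layer = SubprocessLayer(upstream_port, "python -u child.py")
#   # ...then run the pyrealtime session as usual for your pipeline.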
|
the-stack_0_557 | """
Code originally developed for pyEcholab
(https://github.com/CI-CMG/pyEcholab)
by Rick Towler <[email protected]> at NOAA AFSC.
The code has been modified to handle split-beam data and
channel-transducer structure from different EK80 setups.
"""
import logging
import re
import struct
import sys
import xml.etree.ElementTree as ET
from collections import Counter
import numpy as np
from .ek_date_conversion import nt_to_unix
TCVR_CH_NUM_MATCHER = re.compile(r"\d{6}-\w{1,2}|\w{12}-\w{1,2}")
__all__ = [
"SimradNMEAParser",
"SimradDepthParser",
"SimradBottomParser",
"SimradAnnotationParser",
"SimradConfigParser",
"SimradRawParser",
]
log = logging.getLogger(__name__)
class _SimradDatagramParser(object):
""""""
def __init__(self, header_type, header_formats):
self._id = header_type
self._headers = header_formats
self._versions = list(header_formats.keys())
def header_fmt(self, version=0):
return "=" + "".join([x[1] for x in self._headers[version]])
def header_size(self, version=0):
return struct.calcsize(self.header_fmt(version))
def header_fields(self, version=0):
return [x[0] for x in self._headers[version]]
def header(self, version=0):
return self._headers[version][:]
def validate_data_header(self, data):
if isinstance(data, dict):
type_ = data["type"][:3]
version = int(data["type"][3])
elif isinstance(data, str):
type_ = data[:3]
version = int(data[3])
else:
raise TypeError("Expected a dict or str")
if type_ != self._id:
raise ValueError("Expected data of type %s, not %s" % (self._id, type_))
if version not in self._versions:
raise ValueError(
"No parser available for type %s version %d" % (self._id, version)
)
return type_, version
def from_string(self, raw_string, bytes_read):
header = raw_string[:4]
if sys.version_info.major > 2:
header = header.decode()
id_, version = self.validate_data_header(header)
return self._unpack_contents(raw_string, bytes_read, version=version)
def to_string(self, data={}):
id_, version = self.validate_data_header(data)
datagram_content_str = self._pack_contents(data, version=version)
return self.finalize_datagram(datagram_content_str)
def _unpack_contents(self, raw_string="", version=0):
raise NotImplementedError
def _pack_contents(self, data={}, version=0):
raise NotImplementedError
@classmethod
def finalize_datagram(cls, datagram_content_str):
datagram_size = len(datagram_content_str)
final_fmt = "=l%dsl" % (datagram_size)
return struct.pack(
final_fmt, datagram_size, datagram_content_str, datagram_size
)
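    # (added note) finalize_datagram frames the packed contents the way the .raw
    # file stores every datagram: a leading 4-byte length, the payload, then the
    # same 4-byte length repeated, e.g. a 100-byte payload is packed as "=l100sl".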
class SimradDepthParser(_SimradDatagramParser):
"""
ER60 Depth Detection datagram (from .bot files) contain the following keys:
type: string == 'DEP0'
low_date: long uint representing LSBytes of 64bit NT date
high_date: long uint representing MSBytes of 64bit NT date
timestamp: datetime.datetime object of NT date, assumed to be UTC
    transceiver_count: [long uint] with number of transceivers
depth: [float], one value for each active channel
reflectivity: [float], one value for each active channel
unused: [float], unused value for each active channel
The following methods are defined:
from_string(str): parse a raw ER60 Depth datagram
(with leading/trailing datagram size stripped)
to_string(): Returns the datagram as a raw string
(including leading/trailing size fields)
ready for writing to disk
"""
def __init__(self):
headers = {
0: [
("type", "4s"),
("low_date", "L"),
("high_date", "L"),
("transceiver_count", "L"),
]
}
_SimradDatagramParser.__init__(self, "DEP", headers)
def _unpack_contents(self, raw_string, bytes_read, version):
""""""
header_values = struct.unpack(
self.header_fmt(version), raw_string[: self.header_size(version)]
)
data = {}
for indx, field in enumerate(self.header_fields(version)):
data[field] = header_values[indx]
if isinstance(data[field], bytes):
data[field] = data[field].decode()
data["timestamp"] = nt_to_unix((data["low_date"], data["high_date"]))
data["bytes_read"] = bytes_read
if version == 0:
data_fmt = "=3f"
data_size = struct.calcsize(data_fmt)
data["depth"] = np.zeros((data["transceiver_count"],))
data["reflectivity"] = np.zeros((data["transceiver_count"],))
data["unused"] = np.zeros((data["transceiver_count"],))
buf_indx = self.header_size(version)
for indx in range(data["transceiver_count"]):
d, r, u = struct.unpack(
data_fmt, raw_string[buf_indx : buf_indx + data_size] # noqa
)
data["depth"][indx] = d
data["reflectivity"][indx] = r
data["unused"][indx] = u
buf_indx += data_size
return data
def _pack_contents(self, data, version):
datagram_fmt = self.header_fmt(version)
datagram_contents = []
if version == 0:
lengths = [
len(data["depth"]),
len(data["reflectivity"]),
len(data["unused"]),
data["transceiver_count"],
]
if len(set(lengths)) != 1:
min_indx = min(lengths)
log.warning(
"Data lengths mismatched: d:%d, r:%d, u:%d, t:%d", *lengths
)
log.warning(" Using minimum value: %d", min_indx)
data["transceiver_count"] = min_indx
else:
min_indx = data["transceiver_count"]
for field in self.header_fields(version):
datagram_contents.append(data[field])
datagram_fmt += "%df" % (3 * data["transceiver_count"])
for indx in range(data["transceiver_count"]):
datagram_contents.extend(
[
data["depth"][indx],
data["reflectivity"][indx],
data["unused"][indx],
]
)
return struct.pack(datagram_fmt, *datagram_contents)
class SimradBottomParser(_SimradDatagramParser):
"""
Bottom Detection datagram contains the following keys:
type: string == 'BOT0'
low_date: long uint representing LSBytes of 64bit NT date
high_date: long uint representing MSBytes of 64bit NT date
datetime: datetime.datetime object of NT date converted to UTC
    transceiver_count: long uint with number of transceivers
depth: [float], one value for each active channel
The following methods are defined:
from_string(str): parse a raw ER60 Bottom datagram
(with leading/trailing datagram size stripped)
to_string(): Returns the datagram as a raw string
(including leading/trailing size fields)
ready for writing to disk
"""
def __init__(self):
headers = {
0: [
("type", "4s"),
("low_date", "L"),
("high_date", "L"),
("transceiver_count", "L"),
]
}
_SimradDatagramParser.__init__(self, "BOT", headers)
def _unpack_contents(self, raw_string, bytes_read, version):
""""""
header_values = struct.unpack(
self.header_fmt(version), raw_string[: self.header_size(version)]
)
data = {}
for indx, field in enumerate(self.header_fields(version)):
data[field] = header_values[indx]
if isinstance(data[field], bytes):
data[field] = data[field].decode()
data["timestamp"] = nt_to_unix((data["low_date"], data["high_date"]))
data["bytes_read"] = bytes_read
if version == 0:
depth_fmt = "=%dd" % (data["transceiver_count"],)
depth_size = struct.calcsize(depth_fmt)
buf_indx = self.header_size(version)
data["depth"] = np.fromiter(
struct.unpack(
depth_fmt, raw_string[buf_indx : buf_indx + depth_size]
), # noqa
"float",
)
return data
def _pack_contents(self, data, version):
datagram_fmt = self.header_fmt(version)
datagram_contents = []
if version == 0:
if len(data["depth"]) != data["transceiver_count"]:
log.warning(
"# of depth values %d does not match transceiver count %d",
len(data["depth"]),
data["transceiver_count"],
)
data["transceiver_count"] = len(data["depth"])
for field in self.header_fields(version):
datagram_contents.append(data[field])
datagram_fmt += "%dd" % (data["transceiver_count"])
datagram_contents.extend(data["depth"])
return struct.pack(datagram_fmt, *datagram_contents)
class SimradAnnotationParser(_SimradDatagramParser):
"""
ER60 Annotation datagram contains the following keys:
type: string == 'TAG0'
low_date: long uint representing LSBytes of 64bit NT date
high_date: long uint representing MSBytes of 64bit NT date
timestamp: datetime.datetime object of NT date, assumed to be UTC
text: Annotation
The following methods are defined:
from_string(str): parse a raw ER60 Annotation datagram
(with leading/trailing datagram size stripped)
to_string(): Returns the datagram as a raw string
(including leading/trailing size fields)
ready for writing to disk
"""
def __init__(self):
headers = {0: [("type", "4s"), ("low_date", "L"), ("high_date", "L")]}
_SimradDatagramParser.__init__(self, "TAG", headers)
def _unpack_contents(self, raw_string, bytes_read, version):
""""""
header_values = struct.unpack(
self.header_fmt(version), raw_string[: self.header_size(version)]
)
data = {}
for indx, field in enumerate(self.header_fields(version)):
data[field] = header_values[indx]
if isinstance(data[field], bytes):
data[field] = data[field].decode()
data["timestamp"] = nt_to_unix((data["low_date"], data["high_date"]))
data["bytes_read"] = bytes_read
# if version == 0:
# data['text'] = raw_string[self.header_size(version):].strip('\x00')
# if isinstance(data['text'], bytes):
# data['text'] = data['text'].decode()
if version == 0:
if sys.version_info.major > 2:
data["text"] = str(
raw_string[self.header_size(version) :].strip(b"\x00"),
"ascii",
errors="replace",
)
else:
data["text"] = unicode( # noqa
raw_string[self.header_size(version) :].strip("\x00"),
"ascii",
errors="replace",
)
return data
def _pack_contents(self, data, version):
datagram_fmt = self.header_fmt(version)
datagram_contents = []
if version == 0:
for field in self.header_fields(version):
datagram_contents.append(data[field])
if data["text"][-1] != "\x00":
tmp_string = data["text"] + "\x00"
else:
tmp_string = data["text"]
            # Pad with more nulls to 4-byte word boundary if necessary
if len(tmp_string) % 4:
tmp_string += "\x00" * (4 - (len(tmp_string) % 4))
datagram_fmt += "%ds" % (len(tmp_string))
datagram_contents.append(tmp_string)
return struct.pack(datagram_fmt, *datagram_contents)
class SimradNMEAParser(_SimradDatagramParser):
"""
ER60 NMEA datagram contains the following keys:
type: string == 'NME0'
low_date: long uint representing LSBytes of 64bit NT date
high_date: long uint representing MSBytes of 64bit NT date
timestamp: datetime.datetime object of NT date, assumed to be UTC
nmea_string: full (original) NMEA string
The following methods are defined:
from_string(str): parse a raw ER60 NMEA datagram
(with leading/trailing datagram size stripped)
to_string(): Returns the datagram as a raw string
(including leading/trailing size fields)
ready for writing to disk
"""
nmea_head_re = re.compile(r"\$[A-Za-z]{5},") # noqa
def __init__(self):
headers = {
0: [("type", "4s"), ("low_date", "L"), ("high_date", "L")],
1: [("type", "4s"), ("low_date", "L"), ("high_date", "L"), ("port", "32s")],
}
_SimradDatagramParser.__init__(self, "NME", headers)
def _unpack_contents(self, raw_string, bytes_read, version):
"""
        Parses the NMEA datagram provided in raw_string
        :param raw_string: raw NME0 datagram (header plus NMEA sentence, e.g. '$GPZDA,160012.71,11,03,2004,-1,00*7D')
        :type raw_string: str
        :returns: dict of parsed datagram fields
"""
header_values = struct.unpack(
self.header_fmt(version), raw_string[: self.header_size(version)]
)
data = {}
for indx, field in enumerate(self.header_fields(version)):
data[field] = header_values[indx]
if isinstance(data[field], bytes):
data[field] = data[field].decode()
data["timestamp"] = nt_to_unix((data["low_date"], data["high_date"]))
data["bytes_read"] = bytes_read
# Remove trailing \x00 from the PORT field for NME1, rest of the datagram identical to NME0
if version == 1:
data["port"] = data["port"].strip("\x00")
if version == 0 or version == 1:
if sys.version_info.major > 2:
data["nmea_string"] = str(
raw_string[self.header_size(version) :].strip(b"\x00"),
"ascii",
errors="replace",
)
else:
data["nmea_string"] = unicode( # noqa
raw_string[self.header_size(version) :].strip("\x00"),
"ascii",
errors="replace",
)
if self.nmea_head_re.match(data["nmea_string"][:7]) is not None:
data["nmea_talker"] = data["nmea_string"][1:3]
data["nmea_type"] = data["nmea_string"][3:6]
else:
data["nmea_talker"] = ""
data["nmea_type"] = "UNKNOWN"
return data
def _pack_contents(self, data, version):
datagram_fmt = self.header_fmt(version)
datagram_contents = []
if version == 0:
for field in self.header_fields(version):
datagram_contents.append(data[field])
if data["nmea_string"][-1] != "\x00":
tmp_string = data["nmea_string"] + "\x00"
else:
tmp_string = data["nmea_string"]
            # Pad with more nulls to 4-byte word boundary if necessary
if len(tmp_string) % 4:
tmp_string += "\x00" * (4 - (len(tmp_string) % 4))
datagram_fmt += "%ds" % (len(tmp_string))
# Convert to python string if needed
if isinstance(tmp_string, str):
tmp_string = tmp_string.encode("ascii", errors="replace")
datagram_contents.append(tmp_string)
return struct.pack(datagram_fmt, *datagram_contents)
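# (added note) A successfully parsed NME0 record therefore yields 'nmea_string'
# plus 'nmea_talker' (e.g. "GP") and 'nmea_type' (e.g. "GGA"), sliced from the
# first characters of the sentence in _unpack_contents above.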
class SimradMRUParser(_SimradDatagramParser):
"""
EK80 MRU datagram contains the following keys:
type: string == 'MRU0'
low_date: long uint representing LSBytes of 64bit NT date
high_date: long uint representing MSBytes of 64bit NT date
timestamp: datetime.datetime object of NT date, assumed to be UTC
heave: float
roll : float
pitch: float
heading: float
The following methods are defined:
from_string(str): parse a raw ER60 NMEA datagram
(with leading/trailing datagram size stripped)
to_string(): Returns the datagram as a raw string
(including leading/trailing size fields)
ready for writing to disk
"""
def __init__(self):
headers = {
0: [
("type", "4s"),
("low_date", "L"),
("high_date", "L"),
("heave", "f"),
("roll", "f"),
("pitch", "f"),
("heading", "f"),
]
}
_SimradDatagramParser.__init__(self, "MRU", headers)
def _unpack_contents(self, raw_string, bytes_read, version):
"""
        Unpacks the data in raw_string into a dictionary containing MRU data
        :param raw_string: raw MRU0 datagram
        :type raw_string: str
        :returns: dict of parsed datagram fields
"""
header_values = struct.unpack(
self.header_fmt(version), raw_string[: self.header_size(version)]
)
data = {}
for indx, field in enumerate(self.header_fields(version)):
data[field] = header_values[indx]
if isinstance(data[field], bytes):
data[field] = data[field].decode()
data["timestamp"] = nt_to_unix((data["low_date"], data["high_date"]))
data["bytes_read"] = bytes_read
return data
    def _pack_contents(self, data, version):
        datagram_fmt = self.header_fmt(version)
        datagram_contents = []
        if version == 0:
            # MRU0 has no payload beyond the header fields
            # (heave, roll, pitch and heading are part of the header itself)
            for field in self.header_fields(version):
                datagram_contents.append(data[field])
        return struct.pack(datagram_fmt, *datagram_contents)
class SimradXMLParser(_SimradDatagramParser):
"""
EK80 XML datagram contains the following keys:
type: string == 'XML0'
low_date: long uint representing LSBytes of 64bit NT date
high_date: long uint representing MSBytes of 64bit NT date
timestamp: datetime.datetime object of NT date, assumed to be UTC
subtype: string representing Simrad XML datagram type:
configuration, environment, or parameter
[subtype]: dict containing the data specific to the XML subtype.
The following methods are defined:
from_string(str): parse a raw EK80 XML datagram
(with leading/trailing datagram size stripped)
to_string(): Returns the datagram as a raw string
(including leading/trailing size fields)
ready for writing to disk
"""
# define the XML parsing options - here we define dictionaries for various xml datagram
# types. When parsing that xml datagram, these dictionaries are used to inform the parser about
# type conversion, name wrangling, and delimiter. If a field is missing, the parser
# assumes no conversion: type will be string, default mangling, and that there is only 1
# element.
#
# the dicts are in the form:
# 'XMLParamName':[converted type,'fieldname', 'parse char']
#
# For example: 'PulseDurationFM':[float,'pulse_duration_fm',';']
#
# will result in a return dictionary field named 'pulse_duration_fm' that contains a list
# of float values parsed from a string that uses ';' to separate values. Empty strings
# for fieldname and/or parse char result in the default action for those parsing steps.
channel_parsing_options = {
"MaxTxPowerTransceiver": [int, "", ""],
"PulseDuration": [float, "", ";"],
"PulseDurationFM": [float, "pulse_duration_fm", ";"],
"SampleInterval": [float, "", ";"],
"ChannelID": [str, "channel_id", ""],
"HWChannelConfiguration": [str, "hw_channel_configuration", ""],
}
transceiver_parsing_options = {
"TransceiverNumber": [int, "", ""],
"Version": [str, "transceiver_version", ""],
"IPAddress": [str, "ip_address", ""],
"Impedance": [int, "", ""],
}
transducer_parsing_options = {
"SerialNumber": [str, "transducer_serial_number", ""],
"Frequency": [float, "transducer_frequency", ""],
"FrequencyMinimum": [float, "transducer_frequency_minimum", ""],
"FrequencyMaximum": [float, "transducer_frequency_maximum", ""],
"BeamType": [int, "transducer_beam_type", ""],
"Gain": [float, "", ";"],
"SaCorrection": [float, "", ";"],
"MaxTxPowerTransducer": [float, "", ""],
"EquivalentBeamAngle": [float, "", ""],
"BeamWidthAlongship": [float, "", ""],
"BeamWidthAthwartship": [float, "", ""],
"AngleSensitivityAlongship": [float, "", ""],
"AngleSensitivityAthwartship": [float, "", ""],
"AngleOffsetAlongship": [float, "", ""],
"AngleOffsetAthwartship": [float, "", ""],
"DirectivityDropAt2XBeamWidth": [
float,
"directivity_drop_at_2x_beam_width",
"",
],
"TransducerOffsetX": [float, "", ""],
"TransducerOffsetY": [float, "", ""],
"TransducerOffsetZ": [float, "", ""],
"TransducerAlphaX": [float, "", ""],
"TransducerAlphaY": [float, "", ""],
"TransducerAlphaZ": [float, "", ""],
}
header_parsing_options = {"Version": [str, "application_version", ""]}
envxdcr_parsing_options = {"SoundSpeed": [float, "transducer_sound_speed", ""]}
environment_parsing_options = {
"Depth": [float, "", ""],
"Acidity": [float, "", ""],
"Salinity": [float, "", ""],
"SoundSpeed": [float, "", ""],
"Temperature": [float, "", ""],
"Latitude": [float, "", ""],
"SoundVelocityProfile": [float, "", ";"],
"DropKeelOffset": [float, "", ""],
"DropKeelOffsetIsManual": [int, "", ""],
"WaterLevelDraft": [float, "", ""],
"WaterLevelDraftIsManual": [int, "", ""],
}
parameter_parsing_options = {
"ChannelID": [str, "channel_id", ""],
"ChannelMode": [int, "", ""],
"PulseForm": [int, "", ""],
"Frequency": [float, "", ""],
"PulseDuration": [float, "", ""],
"SampleInterval": [float, "", ""],
"TransmitPower": [float, "", ""],
"Slope": [float, "", ""],
}
def __init__(self):
headers = {0: [("type", "4s"), ("low_date", "L"), ("high_date", "L")]}
_SimradDatagramParser.__init__(self, "XML", headers)
def _unpack_contents(self, raw_string, bytes_read, version):
"""
        Parses the XML string provided in raw_string
        :param raw_string: raw XML0 datagram (header plus XML payload)
        :type raw_string: str
        :returns: dict of parsed datagram fields
"""
def from_CamelCase(xml_param):
"""
convert name from CamelCase to fit with existing naming convention by
inserting an underscore before each capital and then lowering the caps
e.g. CamelCase becomes camel_case.
"""
idx = list(reversed([i for i, c in enumerate(xml_param) if c.isupper()]))
param_len = len(xml_param)
for i in idx:
# check if we should insert an underscore
if i > 0 and i < param_len:
xml_param = xml_param[:i] + "_" + xml_param[i:]
xml_param = xml_param.lower()
return xml_param
def dict_to_dict(xml_dict, data_dict, parse_opts):
"""
dict_to_dict appends the ETree xml value dicts to a provided dictionary
and along the way converts the key name to conform to the project's
naming convention and optionally parses and or converts values as
specified in the parse_opts dictionary.
"""
for k in xml_dict:
# check if we're parsing this key/value
if k in parse_opts:
# try to parse the string
if parse_opts[k][2]:
try:
data = xml_dict[k].split(parse_opts[k][2])
except:
                            # bad or empty parse character(s) provided
data = xml_dict[k]
else:
# no parse char provided - nothing to parse
data = xml_dict[k]
# try to convert to specified type
if isinstance(data, list):
for i in range(len(data)):
try:
data[i] = parse_opts[k][0](data[i])
except:
pass
else:
data = parse_opts[k][0](data)
# and add the value to the provided dict
if parse_opts[k][1]:
# add using the specified key name
data_dict[parse_opts[k][1]] = data
else:
# add using the default key name wrangling
data_dict[from_CamelCase(k)] = data
else:
# nothing to do with the value string
data = xml_dict[k]
# add the parameter to the provided dictionary
data_dict[from_CamelCase(k)] = data
header_values = struct.unpack(
self.header_fmt(version), raw_string[: self.header_size(version)]
)
data = {}
for indx, field in enumerate(self.header_fields(version)):
data[field] = header_values[indx]
if isinstance(data[field], bytes):
data[field] = data[field].decode()
data["timestamp"] = nt_to_unix((data["low_date"], data["high_date"]))
data["bytes_read"] = bytes_read
if version == 0:
if sys.version_info.major > 2:
xml_string = str(
raw_string[self.header_size(version) :].strip(b"\x00"),
"ascii",
errors="replace",
)
else:
xml_string = unicode( # noqa
raw_string[self.header_size(version) :].strip("\x00"),
"ascii",
errors="replace",
)
# get the ElementTree element
root = ET.fromstring(xml_string)
# get the XML message type
data["subtype"] = root.tag.lower()
# create the dictionary that contains the message data
data[data["subtype"]] = {}
# parse it
if data["subtype"] == "configuration":
# parse the Transceiver section
for tcvr in root.iter("Transceiver"):
# parse the Transceiver section
tcvr_xml = tcvr.attrib
# parse the Channel section -- this works with multiple channels
# under 1 transceiver
for tcvr_ch in tcvr.iter("Channel"):
tcvr_ch_xml = tcvr_ch.attrib
channel_id = tcvr_ch_xml["ChannelID"]
# create the configuration dict for this channel
data["configuration"][channel_id] = {}
# add the transceiver data to the config dict (this is
# replicated for all channels)
dict_to_dict(
tcvr_xml,
data["configuration"][channel_id],
self.transceiver_parsing_options,
)
# add the general channel data to the config dict
dict_to_dict(
tcvr_ch_xml,
data["configuration"][channel_id],
self.channel_parsing_options,
)
# check if there are >1 transducer under a single transceiver channel
if len(list(tcvr_ch)) > 1:
                            raise ValueError(
                                "Found >1 transducer under a single transceiver channel!"
                            )
else: # should only have 1 transducer
tcvr_ch_xducer = tcvr_ch.find(
"Transducer"
) # get Element of this xducer
f_par = tcvr_ch_xducer.findall("FrequencyPar")
# Save calibration parameters
if f_par:
cal_par = {
"frequency": np.array(
[int(f.attrib["Frequency"]) for f in f_par]
),
"gain": np.array(
[float(f.attrib["Gain"]) for f in f_par]
),
"impedance": np.array(
[int(f.attrib["Impedance"]) for f in f_par]
),
"phase": np.array(
[float(f.attrib["Phase"]) for f in f_par]
),
"beamwidth_alongship": np.array(
[
float(f.attrib["BeamWidthAlongship"])
for f in f_par
]
),
"beamwidth_athwartship": np.array(
[
float(f.attrib["BeamWidthAthwartship"])
for f in f_par
]
),
"angle_offset_alongship": np.array(
[
float(f.attrib["AngleOffsetAlongship"])
for f in f_par
]
),
"angle_offset_athwartship": np.array(
[
float(f.attrib["AngleOffsetAthwartship"])
for f in f_par
]
),
}
data["configuration"][channel_id][
"calibration"
] = cal_par
# add the transducer data to the config dict
dict_to_dict(
tcvr_ch_xducer.attrib,
data["configuration"][channel_id],
self.transducer_parsing_options,
)
# get unique transceiver channel number stored in channel_id
tcvr_ch_num = TCVR_CH_NUM_MATCHER.search(channel_id)[0]
# parse the Transducers section from the root
                            # TODO: skip the Transducers handling below if the section doesn't exist
xducer = root.find("Transducers")
if xducer is not None:
                                # build an occurrence lookup table for transducer names
xducer_name_list = []
for xducer_ch in xducer.iter("Transducer"):
xducer_name_list.append(
xducer_ch.attrib["TransducerName"]
)
# find matching transducer for this channel_id
match_found = False
for xducer_ch in xducer.iter("Transducer"):
if not match_found:
xducer_ch_xml = xducer_ch.attrib
match_name = (
xducer_ch.attrib["TransducerName"]
== tcvr_ch_xducer.attrib["TransducerName"]
)
if xducer_ch.attrib["TransducerSerialNumber"] == "":
match_sn = False
else:
match_sn = (
xducer_ch.attrib["TransducerSerialNumber"]
== tcvr_ch_xducer.attrib["SerialNumber"]
)
match_tcvr = (
tcvr_ch_num
in xducer_ch.attrib["TransducerCustomName"]
)
# if find match add the transducer mounting details
if (
Counter(xducer_name_list)[
xducer_ch.attrib["TransducerName"]
]
> 1
):
# if more than one transducer has the same name
# only check sn and transceiver unique number
match_found = match_sn or match_tcvr
else:
match_found = (
match_name or match_sn or match_tcvr
)
# add transducer mounting details
if match_found:
dict_to_dict(
xducer_ch_xml,
data["configuration"][channel_id],
self.transducer_parsing_options,
)
# add the header data to the config dict
h = root.find("Header")
dict_to_dict(
h.attrib,
data["configuration"][channel_id],
self.header_parsing_options,
)
elif data["subtype"] == "parameter":
# parse the parameter XML datagram
for h in root.iter("Channel"):
parm_xml = h.attrib
# add the data to the environment dict
dict_to_dict(
parm_xml, data["parameter"], self.parameter_parsing_options
)
elif data["subtype"] == "environment":
# parse the environment XML datagram
for h in root.iter("Environment"):
env_xml = h.attrib
# add the data to the environment dict
dict_to_dict(
env_xml, data["environment"], self.environment_parsing_options
)
for h in root.iter("Transducer"):
transducer_xml = h.attrib
# add the data to the environment dict
dict_to_dict(
transducer_xml,
data["environment"],
self.envxdcr_parsing_options,
)
data["xml"] = xml_string
return data
    def _pack_contents(self, data, version):
        def to_CamelCase(xml_param):
            """
            convert a name from the project's snake_case convention back to
            CamelCase to match Kongsberg's XML attribute naming.
            """
            return "".join(part.capitalize() for part in xml_param.split("_"))
        datagram_fmt = self.header_fmt(version)
        datagram_contents = []
        if version == 0:
            for field in self.header_fields(version):
                datagram_contents.append(data[field])
            # re-serialize the raw XML string stored by _unpack_contents
            if data["xml"][-1] != "\x00":
                tmp_string = data["xml"] + "\x00"
            else:
                tmp_string = data["xml"]
            # Pad with more nulls to 4-byte word boundary if necessary
            if len(tmp_string) % 4:
                tmp_string += "\x00" * (4 - (len(tmp_string) % 4))
            datagram_fmt += "%ds" % (len(tmp_string))
            # Convert to python string if needed
            if isinstance(tmp_string, str):
                tmp_string = tmp_string.encode("ascii", errors="replace")
            datagram_contents.append(tmp_string)
        return struct.pack(datagram_fmt, *datagram_contents)
class SimradFILParser(_SimradDatagramParser):
"""
EK80 FIL datagram contains the following keys:
type: string == 'FIL1'
low_date: long uint representing LSBytes of 64bit NT date
high_date: long uint representing MSBytes of 64bit NT date
timestamp: datetime.datetime object of NT date, assumed to be UTC
stage: int
channel_id: string
n_coefficients: int
decimation_factor: int
coefficients: np.complex64
The following methods are defined:
from_string(str): parse a raw EK80 FIL datagram
(with leading/trailing datagram size stripped)
to_string(): Returns the datagram as a raw string
(including leading/trailing size fields)
ready for writing to disk
"""
def __init__(self):
headers = {
1: [
("type", "4s"),
("low_date", "L"),
("high_date", "L"),
("stage", "h"),
("spare", "2s"),
("channel_id", "128s"),
("n_coefficients", "h"),
("decimation_factor", "h"),
]
}
_SimradDatagramParser.__init__(self, "FIL", headers)
def _unpack_contents(self, raw_string, bytes_read, version):
data = {}
header_values = struct.unpack(
self.header_fmt(version), raw_string[: self.header_size(version)]
)
for indx, field in enumerate(self.header_fields(version)):
data[field] = header_values[indx]
# handle Python 3 strings
if (sys.version_info.major > 2) and isinstance(data[field], bytes):
data[field] = data[field].decode("latin_1")
data["timestamp"] = nt_to_unix((data["low_date"], data["high_date"]))
data["bytes_read"] = bytes_read
if version == 1:
# clean up the channel ID
data["channel_id"] = data["channel_id"].strip("\x00")
# unpack the coefficients
indx = self.header_size(version)
block_size = data["n_coefficients"] * 8
data["coefficients"] = np.frombuffer(
raw_string[indx : indx + block_size], dtype="complex64" # noqa
)
return data
def _pack_contents(self, data, version):
datagram_fmt = self.header_fmt(version)
datagram_contents = []
if version == 0:
pass
elif version == 1:
for field in self.header_fields(version):
datagram_contents.append(data[field])
datagram_fmt += "%ds" % (len(data["beam_config"]))
datagram_contents.append(data["beam_config"])
return struct.pack(datagram_fmt, *datagram_contents)
class SimradConfigParser(_SimradDatagramParser):
"""
Simrad Configuration Datagram parser operates on dictionaries with the following keys:
type: string == 'CON0'
low_date: long uint representing LSBytes of 64bit NT date
high_date: long uint representing MSBytes of 64bit NT date
timestamp: datetime.datetime object of NT date, assumed to be UTC
survey_name [str]
transect_name [str]
sounder_name [str]
version [str]
spare0 [str]
transceiver_count [long]
transceivers [list] List of dicts representing Transducer Configs:
ME70 Data contains the following additional values (data contained w/in first 14
bytes of the spare0 field)
multiplexing [short] Always 0
time_bias [long] difference between UTC and local time in min.
sound_velocity_avg [float] [m/s]
sound_velocity_transducer [float] [m/s]
beam_config [str] Raw XML string containing beam config. info
Transducer Config Keys (ER60/ES60/ES70 sounders):
channel_id [str] channel ident string
beam_type [long] Type of channel (0 = Single, 1 = Split)
frequency [float] channel frequency
equivalent_beam_angle [float] dB
beamwidth_alongship [float]
beamwidth_athwartship [float]
angle_sensitivity_alongship [float]
angle_sensitivity_athwartship [float]
angle_offset_alongship [float]
angle_offset_athwartship [float]
pos_x [float]
pos_y [float]
pos_z [float]
dir_x [float]
dir_y [float]
dir_z [float]
pulse_length_table [float[5]]
spare1 [str]
gain_table [float[5]]
spare2 [str]
sa_correction_table [float[5]]
spare3 [str]
gpt_software_version [str]
spare4 [str]
Transducer Config Keys (ME70 sounders):
channel_id [str] channel ident string
beam_type [long] Type of channel (0 = Single, 1 = Split)
reserved1 [float] channel frequency
equivalent_beam_angle [float] dB
beamwidth_alongship [float]
beamwidth_athwartship [float]
angle_sensitivity_alongship [float]
angle_sensitivity_athwartship [float]
angle_offset_alongship [float]
angle_offset_athwartship [float]
pos_x [float]
pos_y [float]
pos_z [float]
beam_steering_angle_alongship [float]
beam_steering_angle_athwartship [float]
beam_steering_angle_unused [float]
pulse_length [float]
reserved2 [float]
spare1 [str]
gain [float]
reserved3 [float]
spare2 [str]
sa_correction [float]
reserved4 [float]
spare3 [str]
gpt_software_version [str]
spare4 [str]
from_string(str): parse a raw config datagram
(with leading/trailing datagram size stripped)
to_string(dict): Returns raw string (including leading/trailing size fields)
ready for writing to disk
"""
COMMON_KEYS = [
("channel_id", "128s"),
("beam_type", "l"),
("frequency", "f"),
("gain", "f"),
("equivalent_beam_angle", "f"),
("beamwidth_alongship", "f"),
("beamwidth_athwartship", "f"),
("angle_sensitivity_alongship", "f"),
("angle_sensitivity_athwartship", "f"),
("angle_offset_alongship", "f"),
("angle_offset_athwartship", "f"),
("pos_x", "f"),
("pos_y", "f"),
("pos_z", "f"),
("dir_x", "f"),
("dir_y", "f"),
("dir_z", "f"),
("pulse_length_table", "5f"),
("spare1", "8s"),
("gain_table", "5f"),
("spare2", "8s"),
("sa_correction_table", "5f"),
("spare3", "8s"),
("gpt_software_version", "16s"),
("spare4", "28s"),
]
def __init__(self):
headers = {
0: [
("type", "4s"),
("low_date", "L"),
("high_date", "L"),
("survey_name", "128s"),
("transect_name", "128s"),
("sounder_name", "128s"),
("version", "30s"),
("spare0", "98s"),
("transceiver_count", "l"),
],
1: [("type", "4s"), ("low_date", "L"), ("high_date", "L")],
}
_SimradDatagramParser.__init__(self, "CON", headers)
self._transducer_headers = {
"ER60": self.COMMON_KEYS,
"ES60": self.COMMON_KEYS,
"ES70": self.COMMON_KEYS,
"MBES": [
("channel_id", "128s"),
("beam_type", "l"),
("frequency", "f"),
("reserved1", "f"),
("equivalent_beam_angle", "f"),
("beamwidth_alongship", "f"),
("beamwidth_athwartship", "f"),
("angle_sensitivity_alongship", "f"),
("angle_sensitivity_athwartship", "f"),
("angle_offset_alongship", "f"),
("angle_offset_athwartship", "f"),
("pos_x", "f"),
("pos_y", "f"),
("pos_z", "f"),
("beam_steering_angle_alongship", "f"),
("beam_steering_angle_athwartship", "f"),
("beam_steering_angle_unused", "f"),
("pulse_length", "f"),
("reserved2", "f"),
("spare1", "20s"),
("gain", "f"),
("reserved3", "f"),
("spare2", "20s"),
("sa_correction", "f"),
("reserved4", "f"),
("spare3", "20s"),
("gpt_software_version", "16s"),
("spare4", "28s"),
],
}
def _unpack_contents(self, raw_string, bytes_read, version):
data = {}
round6 = lambda x: round(x, ndigits=6) # noqa
header_values = struct.unpack(
self.header_fmt(version), raw_string[: self.header_size(version)]
)
for indx, field in enumerate(self.header_fields(version)):
data[field] = header_values[indx]
# handle Python 3 strings
if (sys.version_info.major > 2) and isinstance(data[field], bytes):
data[field] = data[field].decode("latin_1")
data["timestamp"] = nt_to_unix((data["low_date"], data["high_date"]))
data["bytes_read"] = bytes_read
if version == 0:
data["transceivers"] = {}
for field in ["transect_name", "version", "survey_name", "sounder_name"]:
data[field] = data[field].strip("\x00")
sounder_name = data["sounder_name"]
if sounder_name == "MBES":
_me70_extra_values = struct.unpack("=hLff", data["spare0"][:14])
data["multiplexing"] = _me70_extra_values[0]
data["time_bias"] = _me70_extra_values[1]
data["sound_velocity_avg"] = _me70_extra_values[2]
data["sound_velocity_transducer"] = _me70_extra_values[3]
data["spare0"] = data["spare0"][:14] + data["spare0"][14:].strip("\x00")
else:
data["spare0"] = data["spare0"].strip("\x00")
buf_indx = self.header_size(version)
try:
transducer_header = self._transducer_headers[sounder_name]
_sounder_name_used = sounder_name
except KeyError:
log.warning(
"Unknown sounder_name: %s, (no one of %s)",
sounder_name,
list(self._transducer_headers.keys()),
)
log.warning("Will use ER60 transducer config fields as default")
transducer_header = self._transducer_headers["ER60"]
_sounder_name_used = "ER60"
txcvr_header_fields = [x[0] for x in transducer_header]
txcvr_header_fmt = "=" + "".join([x[1] for x in transducer_header])
txcvr_header_size = struct.calcsize(txcvr_header_fmt)
for txcvr_indx in range(1, data["transceiver_count"] + 1):
txcvr_header_values_encoded = struct.unpack(
txcvr_header_fmt,
raw_string[buf_indx : buf_indx + txcvr_header_size], # noqa
)
txcvr_header_values = list(txcvr_header_values_encoded)
for tx_idx, tx_val in enumerate(txcvr_header_values_encoded):
if isinstance(tx_val, bytes):
txcvr_header_values[tx_idx] = tx_val.decode("latin_1")
txcvr = data["transceivers"].setdefault(txcvr_indx, {})
if _sounder_name_used in ["ER60", "ES60", "ES70"]:
for txcvr_field_indx, field in enumerate(txcvr_header_fields[:17]):
txcvr[field] = txcvr_header_values[txcvr_field_indx]
txcvr["pulse_length_table"] = np.fromiter(
list(map(round6, txcvr_header_values[17:22])), "float"
)
txcvr["spare1"] = txcvr_header_values[22]
txcvr["gain_table"] = np.fromiter(
list(map(round6, txcvr_header_values[23:28])), "float"
)
txcvr["spare2"] = txcvr_header_values[28]
txcvr["sa_correction_table"] = np.fromiter(
list(map(round6, txcvr_header_values[29:34])), "float"
)
txcvr["spare3"] = txcvr_header_values[34]
txcvr["gpt_software_version"] = txcvr_header_values[35]
txcvr["spare4"] = txcvr_header_values[36]
elif _sounder_name_used == "MBES":
for txcvr_field_indx, field in enumerate(txcvr_header_fields):
txcvr[field] = txcvr_header_values[txcvr_field_indx]
else:
raise RuntimeError(
"Unknown _sounder_name_used (Should not happen, this is a bug!)"
)
txcvr["channel_id"] = txcvr["channel_id"].strip("\x00")
txcvr["spare1"] = txcvr["spare1"].strip("\x00")
txcvr["spare2"] = txcvr["spare2"].strip("\x00")
txcvr["spare3"] = txcvr["spare3"].strip("\x00")
txcvr["spare4"] = txcvr["spare4"].strip("\x00")
txcvr["gpt_software_version"] = txcvr["gpt_software_version"].strip(
"\x00"
)
buf_indx += txcvr_header_size
elif version == 1:
# CON1 only has a single data field: beam_config, holding an xml string
data["beam_config"] = raw_string[self.header_size(version) :].strip("\x00")
return data
def _pack_contents(self, data, version):
datagram_fmt = self.header_fmt(version)
datagram_contents = []
if version == 0:
if data["transceiver_count"] != len(data["transceivers"]):
log.warning(
"Mismatch between 'transceiver_count' and actual # of transceivers"
)
data["transceiver_count"] = len(data["transceivers"])
sounder_name = data["sounder_name"]
if sounder_name == "MBES":
_packed_me70_values = struct.pack(
"=hLff",
data["multiplexing"],
data["time_bias"],
data["sound_velocity_avg"],
data["sound_velocity_transducer"],
)
data["spare0"] = _packed_me70_values + data["spare0"][14:]
for field in self.header_fields(version):
datagram_contents.append(data[field])
try:
transducer_header = self._transducer_headers[sounder_name]
_sounder_name_used = sounder_name
except KeyError:
log.warning(
"Unknown sounder_name: %s, (no one of %s)",
sounder_name,
list(self._transducer_headers.keys()),
)
log.warning("Will use ER60 transducer config fields as default")
transducer_header = self._transducer_headers["ER60"]
_sounder_name_used = "ER60"
txcvr_header_fields = [x[0] for x in transducer_header]
txcvr_header_fmt = "=" + "".join([x[1] for x in transducer_header])
txcvr_header_size = struct.calcsize(txcvr_header_fmt) # noqa
for txcvr_indx, txcvr in list(data["transceivers"].items()):
txcvr_contents = []
if _sounder_name_used in ["ER60", "ES60", "ES70"]:
for field in txcvr_header_fields[:17]:
txcvr_contents.append(txcvr[field])
txcvr_contents.extend(txcvr["pulse_length_table"])
txcvr_contents.append(txcvr["spare1"])
txcvr_contents.extend(txcvr["gain_table"])
txcvr_contents.append(txcvr["spare2"])
txcvr_contents.extend(txcvr["sa_correction_table"])
txcvr_contents.append(txcvr["spare3"])
txcvr_contents.extend(
[txcvr["gpt_software_version"], txcvr["spare4"]]
)
txcvr_contents_str = struct.pack(txcvr_header_fmt, *txcvr_contents)
elif _sounder_name_used == "MBES":
for field in txcvr_header_fields:
txcvr_contents.append(txcvr[field])
txcvr_contents_str = struct.pack(txcvr_header_fmt, *txcvr_contents)
else:
raise RuntimeError(
"Unknown _sounder_name_used (Should not happen, this is a bug!)"
)
datagram_fmt += "%ds" % (len(txcvr_contents_str))
datagram_contents.append(txcvr_contents_str)
elif version == 1:
for field in self.header_fields(version):
datagram_contents.append(data[field])
datagram_fmt += "%ds" % (len(data["beam_config"]))
datagram_contents.append(data["beam_config"])
return struct.pack(datagram_fmt, *datagram_contents)
class SimradRawParser(_SimradDatagramParser):
"""
    Sample Data Datagram parser operates on dictionaries with the following keys:
type: string == 'RAW0'
low_date: long uint representing LSBytes of 64bit NT date
high_date: long uint representing MSBytes of 64bit NT date
timestamp: datetime.datetime object of NT date, assumed to be UTC
channel [short] Channel number
mode [short] 1 = Power only, 2 = Angle only 3 = Power & Angle
transducer_depth [float]
frequency [float]
transmit_power [float]
pulse_length [float]
bandwidth [float]
sample_interval [float]
sound_velocity [float]
absorption_coefficient [float]
heave [float]
roll [float]
pitch [float]
temperature [float]
heading [float]
transmit_mode [short] 0 = Active, 1 = Passive, 2 = Test, -1 = Unknown
spare0 [str]
offset [long]
count [long]
power [numpy array] Unconverted power values (if present)
angle [numpy array] Unconverted angle values (if present)
from_string(str): parse a raw sample datagram
(with leading/trailing datagram size stripped)
to_string(dict): Returns raw string (including leading/trailing size fields)
ready for writing to disk
"""
def __init__(self):
headers = {
0: [
("type", "4s"),
("low_date", "L"),
("high_date", "L"),
("channel", "h"),
("mode", "h"),
("transducer_depth", "f"),
("frequency", "f"),
("transmit_power", "f"),
("pulse_length", "f"),
("bandwidth", "f"),
("sample_interval", "f"),
("sound_velocity", "f"),
("absorption_coefficient", "f"),
("heave", "f"),
("roll", "f"),
("pitch", "f"),
("temperature", "f"),
("heading", "f"),
("transmit_mode", "h"),
("spare0", "6s"),
("offset", "l"),
("count", "l"),
],
3: [
("type", "4s"),
("low_date", "L"),
("high_date", "L"),
("channel_id", "128s"),
("data_type", "h"),
("spare", "2s"),
("offset", "l"),
("count", "l"),
],
}
_SimradDatagramParser.__init__(self, "RAW", headers)
def _unpack_contents(self, raw_string, bytes_read, version):
header_values = struct.unpack(
self.header_fmt(version), raw_string[: self.header_size(version)]
)
data = {}
for indx, field in enumerate(self.header_fields(version)):
data[field] = header_values[indx]
if isinstance(data[field], bytes):
data[field] = data[field].decode()
data["timestamp"] = nt_to_unix((data["low_date"], data["high_date"]))
data["bytes_read"] = bytes_read
if version == 0:
if data["count"] > 0:
block_size = data["count"] * 2
indx = self.header_size(version)
if int(data["mode"]) & 0x1:
data["power"] = np.frombuffer(
raw_string[indx : indx + block_size], dtype="int16" # noqa
)
indx += block_size
else:
data["power"] = None
if int(data["mode"]) & 0x2:
data["angle"] = np.frombuffer(
raw_string[indx : indx + block_size], dtype="int8" # noqa
)
data["angle"] = data["angle"].reshape((-1, 2))
else:
data["angle"] = None
else:
data["power"] = np.empty((0,), dtype="int16")
data["angle"] = np.empty((0, 2), dtype="int8")
elif version == 3:
# result = 1j*Data[...,1]; result += Data[...,0]
# clean up the channel ID
data["channel_id"] = data["channel_id"].strip("\x00")
if data["count"] > 0:
# set the initial block size and indx value.
block_size = data["count"] * 2
indx = self.header_size(version)
if data["data_type"] & 0b1:
data["power"] = np.frombuffer(
raw_string[indx : indx + block_size], dtype="int16" # noqa
)
indx += block_size
else:
data["power"] = None
if data["data_type"] & 0b10:
data["angle"] = np.frombuffer(
raw_string[indx : indx + block_size], dtype="int8" # noqa
)
data["angle"] = data["angle"].reshape((-1, 2))
indx += block_size
else:
data["angle"] = None
# determine the complex sample data type - this is contained in bits 2 and 3
# of the datatype <short> value. I'm assuming the types are exclusive...
data["complex_dtype"] = np.float16
type_bytes = 2
if data["data_type"] & 0b1000:
data["complex_dtype"] = np.float32
type_bytes = 8
# determine the number of complex samples
data["n_complex"] = data["data_type"] >> 8
# unpack the complex samples
if data["n_complex"] > 0:
# determine the block size
block_size = data["count"] * data["n_complex"] * type_bytes
data["complex"] = np.frombuffer(
raw_string[indx : indx + block_size], # noqa
dtype=data["complex_dtype"],
)
data["complex"].dtype = np.complex64
else:
data["complex"] = None
else:
data["power"] = np.empty((0,), dtype="int16")
data["angle"] = np.empty((0,), dtype="int8")
data["complex"] = np.empty((0,), dtype="complex64")
data["n_complex"] = 0
return data
def _pack_contents(self, data, version):
datagram_fmt = self.header_fmt(version)
datagram_contents = []
if version == 0:
if data["count"] > 0:
if (int(data["mode"]) & 0x1) and (
len(data.get("power", [])) != data["count"]
):
                    log.warning(
                        "Data 'count' = %d, but contains %d power samples. Ignoring power.",
                        data["count"],
                        len(data.get("power", [])),
                    )
data["mode"] &= ~(1 << 0)
if (int(data["mode"]) & 0x2) and (
len(data.get("angle", [])) != data["count"]
):
                    log.warning(
                        "Data 'count' = %d, but contains %d angle samples. Ignoring angle.",
                        data["count"],
                        len(data.get("angle", [])),
                    )
data["mode"] &= ~(1 << 1)
if data["mode"] == 0:
log.warning(
"Data 'count' = %d, but mode == 0. Setting count to 0",
data["count"],
)
data["count"] = 0
for field in self.header_fields(version):
datagram_contents.append(data[field])
if data["count"] > 0:
if int(data["mode"]) & 0x1:
datagram_fmt += "%dh" % (data["count"])
datagram_contents.extend(data["power"])
if int(data["mode"]) & 0x2:
datagram_fmt += "%dH" % (data["count"])
datagram_contents.extend(data["angle"])
return struct.pack(datagram_fmt, *datagram_contents)
|
the-stack_0_558 | import os
import tkinter as tk
from tkinter import ttk
from tkinter import filedialog
from tkinter import PhotoImage
from tkinter import messagebox
import pafy
import youtube_dl
# if you get api limit exceeded error, get an api key and paste
# here as a string value
# pafy.set_api_key(key)
# sample video url
# https://www.youtube.com/watch?v=CjeYOtL6ORE
cwd = os.getcwd()
class CustomEntry(tk.Entry):
def __init__(self, parent, *args, **kwargs):
tk.Entry.__init__(self, parent, *args, **kwargs)
self.parent = parent
self.bind('<FocusOut>', self.add_placeholder)
self.bind('<FocusIn>', self.clear_placeholder)
self.configure(fg="gray70")
self.insert(0, 'Enter Video URL')
def add_placeholder(self, event=None):
if not self.get():
self.configure(fg="gray70")
self.insert(0, 'Enter Video URL')
def clear_placeholder(self, event):
if event and self.get() == 'Enter Video URL':
self.delete('0', 'end')
self.configure(fg="black")
# Application Class -----------------------------------------------
class Application(tk.Frame):
def __init__(self, master=None):
super().__init__(master=master)
self.master = master
self.master.focus_set()
self.pack()
self.url = ''
self.video_quality = tk.StringVar()
self.filesize = 0
self.is_video_downloading = False
self.is_audio_downloading = False
self.draw_title_frame()
self.draw_main_frame()
self.bind('<Return>', self.search_video)
def draw_title_frame(self):
self.title_frame = tk.Frame(self, bg='red', width=440, height=60)
self.title_frame.grid(row=0, column=0, columnspan=5, pady=5)
self.title_frame.grid_propagate(False)
self.title = tk.Label(self.title_frame, text=' SaveFromYT - Youtube Audio/Video Downloader',
fg='white', bg='red', font=('Times', 14),
width=450, height=50, image=youtube_icon, compound=tk.LEFT,
anchor = 'w')
self.title.grid(row=0, column=0, padx=5, ipadx=20)
def draw_main_frame(self):
self.main_frame = tk.Frame(self, width=440, height=240, highlightthickness=1,
highlightbackground='red')
self.main_frame.grid(row=1, column=0, columnspan=5, pady=5, rowspan=3)
self.main_frame.grid_propagate(False)
self.entry = CustomEntry(self.main_frame, width=52)
self.entry.grid(row=0, column=0, columnspan=3, pady=100, padx=(20,10))
self.entry.bind('<Return>', self.search_video)
self.search = tk.Button(self.main_frame, image=search_icon,
fg='white', cursor='hand2', command=self.search_video,
relief=tk.FLAT)
self.search.grid(row=0, column=4, pady=100, padx=(30,10))
def draw_download_frame(self):
self.main_frame.destroy()
self.info_frame = tk.Frame(self, width=150, height=173, highlightthickness=1,
highlightbackground='red')
self.info_frame.grid(row=1, column=0, columnspan=2)
self.info_frame.grid_propagate(False)
self.video_frame = tk.Frame(self, width=290, height=173, highlightthickness=1,
highlightbackground='red')
self.video_frame.grid(row=1, column=2, columnspan=3)
self.video_frame.grid_propagate(False)
self.audio_frame = tk.Frame(self, width=370, height=67, highlightthickness=1,
highlightbackground='red')
self.audio_frame.grid(row=2, column=0, columnspan=4)
self.audio_frame.grid_propagate(False)
self.back_frame = tk.Frame(self, width=70, height=67, highlightthickness=1,
highlightbackground='red')
self.back_frame.grid(row=2, column=4)
self.back_frame.grid_propagate(False)
def draw_download_widgets(self):
# self.info_frame
self.title = tk.Label(self.info_frame, width=20, height=3, bg='red',
wraplength=120, fg='white')
self.title.grid(row=0, column=0, padx=1, pady=2)
self.views = tk.Label(self.info_frame, width=20, height=2, bg='red',
fg='white')
self.views.grid(row=1, column=0, padx=1, pady=1)
self.duration = tk.Label(self.info_frame, width=20, height=2, bg='red',
fg='white')
self.duration.grid(row=2, column=0, padx=1, pady=1)
self.published = tk.Label(self.info_frame, width=20, height=2, bg='red',
fg='white')
self.published.grid(row=3, column=0, padx=1, pady=1)
# self.video_frame
self.video_quality.set(self.option_streams[0])
self.options = tk.OptionMenu(self.video_frame, self.video_quality,
*self.option_streams)
self.options.config(bg='red', fg='white')
self.options['menu'].config(bg='red', fg='white')
self.options.grid(row=0, column=0, padx=50, pady=20, columnspan=5)
self.video_dwn = tk.Button(self.video_frame, text='Download MP4',
command=self.download_video, bg='red', fg='white',
width=15, cursor='hand2')
self.video_dwn.grid(row=1, column=0, padx=50, pady=10, columnspan=5)
# self.audio_frame
self.audio_dwn = tk.Button(self.audio_frame, text='Download MP3',
command=self.download_mp3, bg='red', fg='white',
width=15, cursor='hand2')
self.audio_dwn.grid(row=0, column=0, padx=20, pady=20)
# self.back_frame
self.back = tk.Button(self.back_frame, text='back', image=back_icon,
command=self.go_back, relief=tk.FLAT)
self.back.grid(row=0, column=0, pady=10, padx=10)
def cease_buttons(self):
if self.is_video_downloading:
self.video_dwn['text'] = 'downloading'
if self.is_audio_downloading:
self.audio_dwn['text'] = 'downloading'
self.video_dwn.config(state='disabled')
self.audio_dwn.config(state='disabled')
def release_buttons(self):
self.video_dwn.config(state='normal')
self.audio_dwn.config(state='normal')
if not self.is_video_downloading:
self.video_dwn['text'] = 'Download MP4'
if not self.is_audio_downloading:
self.audio_dwn['text'] = 'Download MP3'
def search_video(self, event=None):
self.url = self.entry.get()
self.master.focus_set()
if self.url and ' ' not in self.url:
try:
video = pafy.new(self.url)
self.video_title = video.title
duration = video.duration
views = video.viewcount
published = video.published
thumbnail = video.thumb
self.streams = video.streams
self.option_streams = self.streams[::-1]
self.draw_download_frame()
self.draw_download_widgets()
self.title['text'] = self.video_title[:50]
self.views['text'] = f'Views : {views:,}'
self.duration['text'] = f'Length : {duration}'
self.published['text'] = f'Pub : {published[:10]}'
except OSError:
messagebox.showerror('SaveFromYT', 'Cannot extract data')
except ValueError:
messagebox.showerror('SaveFromYT', 'Invalid URL')
except:
messagebox.showerror('SaveFromYT', 'Cannot connect with internet')
def download_video(self):
filetypes = [('MP4', '.mp4')]
filepath = filedialog.asksaveasfilename(initialdir=cwd,
initialfile=self.video_title[:25]+'.mp4',
filetypes=filetypes)
if filepath:
self.is_video_downloading = True
self.cease_buttons()
vq = self.video_quality.get()
l = len(self.streams)
opts = [str(stream) for stream in self.option_streams]
stream = self.streams[opts.index(vq) - l + 1]
self.filesize = stream.get_filesize()
self.sizelabel = tk.Label(self.video_frame, bg='red', fg='white',
text=f'Filesize : {self.filesize/(1024*1024):.2f} Mb')
self.sizelabel.grid(row=2, column=0, pady=5)
self.pb = ttk.Progressbar(self.video_frame, orient=tk.HORIZONTAL,
mode='determinate', length=100)
self.pb.grid(row=2, column=2, columnspan=3, pady=5)
try:
stream.download(quiet=True, callback=self.download_callback,
filepath=filepath)
messagebox.showinfo('SaveFromYT', 'Video Downloaded Successfully')
except:
messagebox.showerror('SaveFromYT', 'Cannot connect with internet')
self.pb.destroy()
self.sizelabel.destroy()
self.is_video_downloading = False
self.release_buttons()
def download_callback(self, total, recvd, ratio, rate, eta):
perc = (recvd / total) * 100
self.pb['value'] = int(perc)
self.update()
def download_mp3(self):
filetypes = [('MP3', '.mp3')]
filepath = filedialog.asksaveasfilename(initialdir=cwd,
                        initialfile=self.video_title[:25]+'.mp3',
                        filetypes=filetypes)
if filepath:
ydl_opts = {
'format': 'bestaudio/best',
'outtmpl' : filepath,
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '192'
}],
'postprocessor_args': [
'-ar', '16000'
],
'prefer_ffmpeg': True,
'keepvideo': True,
'progress_hooks': [self.download_hook]
}
self.is_audio_downloading = True
self.cease_buttons()
try:
self.pb = ttk.Progressbar(self.audio_frame, orient=tk.HORIZONTAL,
mode='determinate', length=100)
self.pb.grid(row=0, column=2, pady=20, padx=20)
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
ydl.download([self.url])
for file in os.listdir():
if file.endswith('.webm'):
os.remove(file)
self.pb.destroy()
messagebox.showinfo('SaveFromYT', 'Successfully Downloaded Mp3')
except:
messagebox.showerror('SaveFromYT', "Can't connect with internet")
self.is_audio_downloading = False
self.release_buttons()
def download_hook(self, d):
if d['status'] == 'downloading':
p = d['_percent_str']
p = float(p.replace('%','').replace(' ',''))
self.pb['value'] = round(p)
self.update()
def go_back(self):
self.info_frame.destroy()
self.video_frame.destroy()
self.audio_frame.destroy()
self.back_frame.destroy()
self.draw_main_frame()
if __name__ == '__main__':
root = tk.Tk()
root.geometry('450x320')
root.title('SaveFromYT')
root.resizable(0,0)
youtube_icon = PhotoImage(file='icons/youtube.png')
back_icon = PhotoImage(file='icons/back.png')
search_icon = PhotoImage(file='icons/search.png')
app = Application(master=root)
app.mainloop() |
the-stack_0_561 | #!/usr/bin/python
# -*- coding: utf8 -*-
import os
import logging
import sys
import argparse
sys.path.append("../core")
from qgis_project_substitute import substitute_project
from processor import Processor
def argparser_prepare():
class PrettyFormatter(argparse.ArgumentDefaultsHelpFormatter,
argparse.RawDescriptionHelpFormatter):
max_help_position = 35
parser = argparse.ArgumentParser(description='OSMTram process',
formatter_class=PrettyFormatter)
parser.add_argument('--prune',dest='prune', required=False, action='store_true', help='Clear temporary folder')
parser.add_argument('--skip-osmupdate',dest='skip_osmupdate', required=False, action='store_true')
parser.add_argument('--workdir',dest='WORKDIR', required=True)
parser.epilog = \
'''Samples:
%(prog)s
''' \
% {'prog': parser.prog}
return parser
dump_url = 'http://download.geofabrik.de/europe/latvia-latest.osm.pbf'
parser = argparser_prepare()
args = parser.parse_args()
WORKDIR=args.WORKDIR
logging.basicConfig(level=logging.DEBUG,format='%(asctime)s %(levelname)-8s %(message)s',datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger(__name__)
logger.info('Start')
processor = Processor()
processor.process_sheets('latvia.geojson',WORKDIR,dump_url,dump_name='latvia')
#,attribute_filter='''"name_ru"= 'Лиепая' and "type"='tram' '''
|
the-stack_0_562 | import numpy as np
from src.util import Util, Article
class Answer:
"""Answer questions based on the initialized article."""
def __init__(self, article):
"""
Create a new instance of the Answer class.
Args:
article: An instance of the Article class
"""
self.article = article
def answer(self, question, return_score=False):
"""
Answer the given question.
Args:
question: Question string
return_score: If True, also return the similarity score
Returns:
Answer to question as string (optionally paired with its score)
"""
u = Util()
question_embedding = u.embeddings([question])[0]
sentences_list = []
for paragraph in self.article.sentences:
paragraph_text = [s.text for s in paragraph]
sentences_list += paragraph_text
sentences_embeddings = u.embeddings(sentences_list)
distances = []
for i, embedding in enumerate(sentences_embeddings):
diffs = np.inner(question_embedding, embedding)
dist = diffs
distances.append((dist, sentences_list[i]))
distances.sort(key=lambda x: x[0], reverse=True)
most_similar_sentence = distances[0][1]
most_similar_score = distances[0][0]
if return_score:
return (most_similar_sentence, most_similar_score)
return most_similar_sentence
if __name__ == "__main__":
u = Util()
art = Article(u.load_txt_article("../articles/Development_data/set4/set4/a1.txt"))
a = Answer(art)
q = "Who studied the stars of the southern hemisphere from 1750 until 1754 from Cape of Good Hope?"
print(a.answer(q))
# Who is a product of a revision of the Old Babylonian system in later Neo-Babylonian astronomy 6th century BC?
# Who interpreted the creatures appearing in the books of Ezekiel (and thence in Revelation) as the middle signs of the four quarters of the Zodiac?
# Who studied the stars of the southern hemisphere from 1750 until 1754 from Cape of Good Hope?
# Who aided the IAU (International Astronomical Union) in dividing the celestial sphere into 88 official constellations?
|
the-stack_0_563 | from math import ceil
from hashlib import md5
from pecan import expose, request, abort, response, redirect
from pecan.secure import secure
from pecan.ext.wtforms import with_form
from sqlalchemy import select, and_, or_, asc, desc, func, case, literal
from draughtcraft import model
from draughtcraft.lib.beerxml import export
from draughtcraft.lib.forms.recipes.browse import RecipeSearchForm
from create import RecipeCreationController
from builder import RecipeBuilderController
class SlugController(object):
def __init__(self, slug):
self.slug = slug
# Make sure the provided slug is valid
if not slug:
redirect(request.context['recipe'].slugs[0].slug)
if slug not in [slug.slug for slug in request.context['recipe'].slugs]:
abort(404)
@expose('recipes/builder/index.html')
@expose('json', content_type='application/json')
def index(self):
recipe = request.context['recipe']
if recipe.state == "DRAFT":
if recipe.author and recipe.author != request.context['user']:
abort(404)
if not recipe.author and recipe != request.context['trial_recipe']:
abort(404)
# Log a view for the recipe (if the viewer *is not* the author)
if recipe.author != request.context['user'] and \
request.pecan.get('content_type') == 'application/json':
model.RecipeView(recipe=recipe)
return dict(
recipe=recipe,
editable=False
)
@expose(content_type='application/xml')
def xml(self):
recipe = request.context['recipe']
if recipe.state == "DRAFT":
if recipe.author and recipe.author != request.context['user']:
abort(404)
response.headers['Content-Disposition'] = \
'attachment; filename="%s.xml"' % self.slug
return export.to_xml([request.context['recipe']])
@expose(generic=True)
def draft(self):
abort(405)
@draft.when(method="POST")
def do_draft(self):
source = request.context['recipe']
if source.author is None or source.author != request.context['user']:
abort(401)
if source.state != "PUBLISHED":
abort(401)
draft = source.draft()
draft.flush()
redirect("%sbuilder" % draft.url())
@expose(generic=True)
def copy(self):
abort(405)
@copy.when(method="POST")
def do_copy(self):
source = request.context['recipe']
if request.context['user'] is None:
redirect("/signup")
if source.author is None:
abort(401)
diff_user = source.author != request.context['user']
name = source.name if diff_user else "%s (Duplicate)" % source.name
copy = source.duplicate({
'name': name,
'author': request.context['user']
})
if diff_user:
copy.copied_from = source
redirect("/")
@expose(generic=True)
def delete(self):
abort(405)
@delete.when(method="POST")
def do_delete(self):
source = request.context['recipe']
if source.author is None or source.author != request.context['user']:
abort(401)
source.delete()
redirect("/")
builder = secure(
RecipeBuilderController(),
RecipeBuilderController.check_permissions
)
class RecipeController(object):
@expose()
def _lookup(self, slug, *remainder):
return SlugController(slug), remainder
def __init__(self, recipeID):
try:
primary_key = int(str(recipeID), 16)
except ValueError:
abort(404)
recipe = model.Recipe.get(primary_key)
if recipe is None:
abort(404)
request.context['recipe'] = recipe
class RecipesController(object):
@expose()
def _lookup(self, recipeID, *remainder):
return RecipeController(recipeID), remainder
@expose('recipes/browse/index.html')
def index(self):
return dict(
styles=model.Style.query.order_by(model.Style.name).all()
)
@expose(template='recipes/browse/list.html')
@with_form(RecipeSearchForm, validate_safe=True)
def recipes(self, **kw):
if request.pecan['form'].errors:
abort(400)
perpage = 25.0
offset = int(perpage * (kw['page'] - 1))
views = func.count(model.RecipeView.id).label('views')
username = func.lower(model.User.username).label('username')
sortable_type = case([
(model.Recipe.type == 'MASH', literal('All Grain')),
(model.Recipe.type == 'EXTRACT', literal('Extract')),
(
model.Recipe.type == 'EXTRACTSTEEP',
literal('Extract w/ Steeped Grains')
),
(model.Recipe.type == 'MINIMASH', literal('Mini-Mash')),
]).label('type')
# map of columns
column_map = dict(
type=(sortable_type,),
srm=(model.Recipe._srm,),
name=(model.Recipe.name,),
author=(username,),
style=(model.Style.name,),
last_updated=(model.Recipe.last_updated,),
views=(views,)
)
# determine the sorting direction and column
order_column = column_map.get(kw['order_by'])
order_direction = dict(
ASC=asc,
DESC=desc
).get(kw['direction'])
where = [
model.Recipe.state == 'PUBLISHED'
]
# If applicable, filter by style
if kw['style']:
where.append(model.Recipe.style == kw['style'])
# If applicable, filter by type (MASH, etc...)
where.append(or_(
model.Recipe.id == None,  # SQL 'IS NULL' check; always false, keeps the or_ well-formed
model.Recipe.type == 'MASH' if kw['mash'] else None,
model.Recipe.type == 'MINIMASH' if kw['minimash'] else None,
model.Recipe.type.in_(('EXTRACTSTEEP', 'EXTRACT'))
if kw['extract'] else None,
))
# If applicable, filter by color
if kw['color']:
start, end = {
'light': (0, 8),
'amber': (8, 18),
'brown': (16, 25),
'dark': (25, 5000)
}.get(kw['color'])
where.append(and_(
model.Recipe._srm >= start,
model.Recipe._srm <= end,
))
# Join the `recipe`, `recipeview`, `user`, and `style` tables
from_obj = model.Recipe.table.outerjoin(
model.RecipeView.table,
onclause=model.RecipeView.recipe_id == model.Recipe.id
).outerjoin(
model.Style.table,
onclause=model.Recipe.style_id == model.Style.id
).join(
model.User.table,
onclause=model.Recipe.author_id == model.User.id
)
username_full = model.User.username.label('username')
email = model.User.email.label('email')
style_name = model.Style.name.label('style_name')
style_url = model.Style.url.label('style_url')
query = select(
[
model.Recipe.id,
model.Recipe.name,
model.Recipe._srm,
username_full,
email,
sortable_type,
style_name,
style_url,
model.Recipe.last_updated,
views
],
and_(*where),
from_obj=[from_obj],
group_by=model.Recipe.id
)
total = select(
[func.count(model.Recipe.id)],
and_(*where)
).execute().fetchone()[0]
if views not in order_column:
query = query.group_by(*order_column)
query = query.group_by(username_full)
query = query.group_by(email)
query = query.group_by(style_name)
query = query.group_by(style_url)
recipes = query.order_by(
*[order_direction(column) for column in order_column]
).offset(
offset
).limit(
perpage
).execute().fetchall()
class RecipeProxy(object):
def __init__(self, recipe):
(self.id, self.name, self._srm, self.username, self.email,
 self.printable_type, self.style_name, self.style_url,
 self.last_updated, self.views) = recipe
@property
def metric_unit(self):
return 'EBC' if request.context['metric'] is True else 'SRM'
@property
def color(self):
if self.metric_unit == 'SRM':
return self._srm
return round(self._srm * 1.97, 1)
@property
def gravatar(self):
return 'https://www.gravatar.com/avatar/%s?d=https://draughtcraft.com/images/glass-square.png' % (
md5(self.email.strip().lower()).hexdigest()
)
@property
def url(self):
return '/recipes/%s/' % (('%x' % self.id).lower())
return dict(
pages=max(1, int(ceil(total / perpage))),
current_page=kw['page'],
offset=offset,
perpage=perpage,
total=total,
order_by=kw['order_by'],
direction=kw['direction'],
recipes=map(RecipeProxy, recipes)
)
create = RecipeCreationController()
|
the-stack_0_566 | from .probe import Probe
from .utils import most_frequent, process_dict_list, merge_dicts
"""
Analyses a group of clips.
"""
class Analysis:
def __init__(self, clips=[]):
self.clips = clips
def summary(self):
file_summary = []
for clip in self.clips:
summary = Probe(clip).run().extract_summary()
file_summary.append(summary)
final_list = None
for item in file_summary:
if final_list is None:
final_list = item
else:
final_list = merge_dicts(final_list, item)
return process_dict_list(final_list, most_frequent)
|
the-stack_0_569 | import re
from os.path import *
import cv2
import numpy as np
import torch.nn.functional as F
from PIL import Image
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
TAG_CHAR = np.array([202021.25], np.float32)
def read_flow_middlebury(fn):
"""
Read .flo file in Middlebury format
Parameters
-----------
fn : str
Absolute path to flow file
Returns
--------
flow : np.ndarray
Optical flow map
"""
# Code adapted from:
# http://stackoverflow.com/questions/28013200/reading-middlebury-flow-files-with-python-bytes-array-numpy
# WARNING: this will work on little-endian architectures (eg Intel x86) only!
# print 'fn = %s'%(fn)
with open(fn, "rb") as f:
magic = np.fromfile(f, np.float32, count=1)
if 202021.25 != magic:
print("Magic number incorrect. Invalid .flo file")
return None
else:
w = np.fromfile(f, np.int32, count=1)
h = np.fromfile(f, np.int32, count=1)
# print 'Reading %d x %d flo file\n' % (w, h)
data = np.fromfile(f, np.float32, count=2 * int(w) * int(h))
# Reshape data into 3D array (banda, columns, rows)
return np.resize(data, (int(h), int(w), 2))
def read_flow_pfm(file):
"""
Read optical flow from a .pfm file
Parameters
-----------
file : str
Path to flow file
Returns
--------
flow : np.ndarray
Optical flow map
"""
file = open(file, "rb")
color = None
width = None
height = None
scale = None
endian = None
header = file.readline().rstrip()
if header == b"PF":
color = True
elif header == b"Pf":
color = False
else:
raise Exception("Not a PFM file.")
dim_match = re.match(rb"^(\d+)\s(\d+)\s$", file.readline())
if dim_match:
width, height = map(int, dim_match.groups())
else:
raise Exception("Malformed PFM header.")
scale = float(file.readline().rstrip())
if scale < 0: # little-endian
endian = "<"
scale = -scale
else:
endian = ">" # big-endian
data = np.fromfile(file, endian + "f")
shape = (height, width, 3) if color else (height, width)
data = np.reshape(data, shape)
data = np.flipud(data)
return data
def read_flow_png(filename):
"""
Read optical flow from a png file.
Parameters
-----------
filename : str
Path to flow file
Returns
--------
flow : np.ndarray
Optical flow map
valid : np.ndarray
Valid flow map
"""
flow = cv2.imread(filename, cv2.IMREAD_ANYDEPTH | cv2.IMREAD_COLOR)
flow = flow[:, :, ::-1].astype(np.float32)
flow, valid = flow[:, :, :2], flow[:, :, 2]
flow = (flow - 2**15) / 64.0
return flow, valid
def write_flow(filename, uv, v=None):
"""Write optical flow to file.
If v is None, uv is assumed to contain both u and v channels,
stacked in depth.
Parameters
----------
filename : str
Path to file
uv : np.ndarray
Optical flow
v : np.ndarray, optional
Optional second channel
"""
# Original code by Deqing Sun, adapted from Daniel Scharstein.
n_bands = 2
if v is None:
assert uv.ndim == 3
assert uv.shape[2] == 2
u = uv[:, :, 0]
v = uv[:, :, 1]
else:
u = uv
assert u.shape == v.shape
height, width = u.shape
f = open(filename, "wb")
# write the header
f.write(TAG_CHAR)
np.array(width).astype(np.int32).tofile(f)
np.array(height).astype(np.int32).tofile(f)
# arrange into matrix form
tmp = np.zeros((height, width * n_bands))
tmp[:, np.arange(width) * 2] = u
tmp[:, np.arange(width) * 2 + 1] = v
tmp.astype(np.float32).tofile(f)
f.close()
def read_image(file_name):
"""
Read images from a variety of file formats
Parameters
-----------
file_name : str
Path to image file
Returns
--------
img : PIL.Image or np.ndarray
Image data (PIL.Image for standard formats, np.ndarray for .bin/.raw)
"""
ext = splitext(file_name)[-1]
if ext == ".png" or ext == ".jpeg" or ext == ".ppm" or ext == ".jpg":
return Image.open(file_name)
elif ext == ".bin" or ext == ".raw":
return np.load(file_name)
return []
def read_flow(file_name):
"""
Read ground truth flow from a variety of file formats
Parameters
-----------
file_name : str
Path to flow file
Returns
--------
flow : np.ndarray
Optical flow map
valid : None if .flo and .pfm files else np.ndarray
Valid flow map
"""
ext = splitext(file_name)[-1]
if ext == ".flo":
flow = read_flow_middlebury(file_name).astype(np.float32)
return flow, None
elif ext == ".pfm":
flow = read_flow_pfm(file_name).astype(np.float32)
if len(flow.shape) == 2:
return flow, None
else:
return flow[:, :, :-1], None
elif ext == ".png":
return read_flow_png(file_name)
return []
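# Round-trip sketch for the writers/readers above (illustrative only;
# "example.flo" is a hypothetical path):
#
#   flow = np.random.rand(240, 320, 2).astype(np.float32)
#   write_flow("example.flo", flow)
#   flow_back, valid = read_flow("example.flo")  # valid is None for .flo
#   assert np.allclose(flow, flow_back)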
class InputPadder:
"""
Class to pad / unpad the input to a network with a given padding
Parameters
-----------
dims : tuple
Dimensions of the input
divisor : int
Divisor to make the input evenly divisible by
mode : str
Padding mode
"""
def __init__(self, dims, divisor=8, mode="sintel"):
self.ht, self.wd = dims[-2:]
pad_ht = (((self.ht // divisor) + 1) * divisor - self.ht) % divisor
pad_wd = (((self.wd // divisor) + 1) * divisor - self.wd) % divisor
if mode == "sintel":
self._pad = [
pad_wd // 2,
pad_wd - pad_wd // 2,
pad_ht // 2,
pad_ht - pad_ht // 2,
]
else:
self._pad = [pad_wd // 2, pad_wd - pad_wd // 2, 0, pad_ht]
def pad(self, *inputs):
"""
Pad the input
Parameters
-----------
inputs : list
List of inputs to pad
Returns
--------
list
Padded inputs
"""
return [F.pad(x, self._pad, mode="replicate") for x in inputs]
def unpad(self, x):
"""
Unpad the input
Parameters
-----------
x : torch.Tensor
Input to unpad
Returns
--------
torch.Tensor
Unpadded input
"""
ht, wd = x.shape[-2:]
c = [self._pad[2], ht - self._pad[3], self._pad[0], wd - self._pad[1]]
return x[..., c[0] : c[1], c[2] : c[3]]
|
the-stack_0_570 | """Norwegian-specific Form helpers."""
from __future__ import unicode_literals
import datetime
import re
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import CharField, Field, RegexField, Select
from django.utils.translation import ugettext_lazy as _
from localflavor.generic.forms import DeprecatedPhoneNumberFormFieldMixin
from .no_municipalities import MUNICIPALITY_CHOICES
class NOZipCodeField(RegexField):
"""
A form field that validates input as a Norwegian zip code.
Valid codes have four digits.
"""
default_error_messages = {
'invalid': _('Enter a zip code in the format XXXX.'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(NOZipCodeField, self).__init__(r'^\d{4}$',
max_length, min_length, *args, **kwargs)
class NOMunicipalitySelect(Select):
"""A Select widget that uses a list of Norwegian municipalities (fylker) as its choices."""
def __init__(self, attrs=None):
super(NOMunicipalitySelect, self).__init__(attrs, choices=MUNICIPALITY_CHOICES)
class NOSocialSecurityNumber(Field):
"""Algorithm is documented at http://no.wikipedia.org/wiki/Personnummer."""
default_error_messages = {
'invalid': _('Enter a valid Norwegian social security number.'),
}
def clean(self, value):
super(NOSocialSecurityNumber, self).clean(value)
if value in EMPTY_VALUES:
return ''
if not re.match(r'^\d{11}$', value):
raise ValidationError(self.error_messages['invalid'])
self.birthday = self._get_birthday(value)
self.gender = self._get_gender(value)
digits = map(int, list(value))
weight_1 = [3, 7, 6, 1, 8, 9, 4, 5, 2, 1, 0]
weight_2 = [5, 4, 3, 2, 7, 6, 5, 4, 3, 2, 1]
def multiply_reduce(aval, bval):
return sum([(a * b) for (a, b) in zip(aval, bval)])
if multiply_reduce(digits, weight_1) % 11 != 0:
raise ValidationError(self.error_messages['invalid'])
if multiply_reduce(digits, weight_2) % 11 != 0:
raise ValidationError(self.error_messages['invalid'])
return value
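# The two weighted sums above implement the standard MOD11 control-digit
# checks for digits d1..d11: the number is accepted only if both
#   (3*d1 + 7*d2 + 6*d3 + 1*d4 + 8*d5 + 9*d6 + 4*d7 + 5*d8 + 2*d9 + 1*d10) % 11 == 0
#   (5*d1 + 4*d2 + 3*d3 + 2*d4 + 7*d5 + 6*d6 + 5*d7 + 4*d8 + 3*d9 + 2*d10 + 1*d11) % 11 == 0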
def _get_gender(self, value):
sexnum = int(value[8])
if sexnum % 2 == 0:
gender = 'F'
else:
gender = 'M'
return gender
def _get_birthday(self, value):
birthday = None
day = int(value[:2])
month = int(value[2:4])
year2 = int(value[4:6])
inum = int(value[6:9])
try:
if 000 <= inum < 500:
birthday = datetime.date(1900 + year2, month, day)
if 500 <= inum < 750 and year2 > 54:
birthday = datetime.date(1800 + year2, month, day)
if 500 <= inum < 1000 and year2 < 40:
birthday = datetime.date(2000 + year2, month, day)
if 900 <= inum < 1000 and year2 > 39:
birthday = datetime.date(1900 + year2, month, day)
except ValueError:
raise ValidationError(self.error_messages['invalid'])
return birthday
class NOBankAccountNumber(CharField):
"""
A form field for Norwegian bank account numbers.
Performs MOD11 with the custom weights for the Norwegian bank account numbers,
including a check for a remainder of 0, in which event the checksum is also 0.
Usually their string representation is along the lines of ZZZZ.YY.XXXXX, where the last X is the check digit.
They're always a total of 11 digits long, with 10 out of these 11 being the actual account number itself.
* Accepts, and strips, account numbers with extra spaces.
* Accepts, and strips, account numbers provided in form of XXXX.YY.XXXXX.
.. note:: No consideration is taken for banking clearing numbers as of yet, seeing as these are only used between
banks themselves.
.. versionadded:: 1.5
"""
default_error_messages = {
'invalid': _('Enter a valid Norwegian bank account number.'),
'invalid_checksum': _('Invalid control digit. Enter a valid Norwegian bank account number.'),
'invalid_length': _('Invalid length. Norwegian bank account numbers are 11 digits long.'),
}
def validate(self, value):
super(NOBankAccountNumber, self).validate(value)
if value == '':
# It's alright to be empty.
return
elif not value.isdigit():
# You must only contain decimals.
raise ValidationError(self.error_messages['invalid'])
elif len(value) != 11:
# Norwegian account numbers have exactly one valid length: 10 digits,
# always stored with the check digit included, i.e. 11 in total.
raise ValidationError(self.error_messages['invalid_length'])
# The control/check digit is the last digit
check_digit = int(value[-1])
bank_number = value[:-1]
# These are the weights by which we multiply to get our checksum digit
weights = [5, 4, 3, 2, 7, 6, 5, 4, 3, 2]
result = sum(w * (int(x)) for w, x in zip(weights, bank_number))
remainder = result % 11
# The checksum is 0 in the event there's no remainder, seeing as we cannot have a checksum of 11
# when 11 is one digit longer than we've got room for
checksum = 0 if remainder == 0 else 11 - remainder
if checksum != check_digit:
raise ValidationError(self.error_messages['invalid_checksum'])
def to_python(self, value):
value = super(NOBankAccountNumber, self).to_python(value)
return value.replace('.', '').replace(' ', '')
def prepare_value(self, value):
if value in EMPTY_VALUES:
return value
return '{}.{}.{}'.format(value[0:4], value[4:6], value[6:11])
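# Worked checksum sketch (digits are symbolic, not a real account number):
# for a candidate number d1..d10 with check digit c, validate() computes
#   remainder = (5*d1 + 4*d2 + 3*d3 + 2*d4 + 7*d5 + 6*d6 + 5*d7 + 4*d8 + 3*d9 + 2*d10) % 11
# and accepts the value only when c == (0 if remainder == 0 else 11 - remainder).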
class NOPhoneNumberField(RegexField, DeprecatedPhoneNumberFormFieldMixin):
"""
Field with phonenumber validation.
Requires a phone number with 8 digits and optional country code
"""
default_error_messages = {
'invalid': _('A phone number must be 8 digits and may have country code'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(NOPhoneNumberField, self).__init__(
r'^(?:\+47)? ?(\d{3}\s?\d{2}\s?\d{3}|\d{2}\s?\d{2}\s?\d{2}\s?\d{2})$',
max_length, min_length, *args, **kwargs)
|
the-stack_0_571 | from __future__ import division, print_function
import numpy as np
from librmm_cffi import librmm as rmm
import cudf._lib as libcudf
from cudf.core import Series
from cudf.core.column import column
def test_gather_single_col():
col = column.as_column(np.arange(100), dtype=np.int32)
gather_map = np.array([0, 1, 2, 3, 5, 8, 13, 21], dtype=np.int32)
device_gather_map = rmm.to_device(gather_map)
out = libcudf.copying.gather(col, device_gather_map)
np.testing.assert_array_equal(out.to_array(), gather_map)
def test_gather_cols():
cols = [
column.as_column(np.arange(10), dtype=np.int32),
column.as_column(np.arange(0.0, 2.0, 0.2), dtype=np.float32),
]
gather_map = np.array([0, 1, 2, 3, 5, 8], dtype=np.int32)
expected = np.array(gather_map * 0.2, dtype=np.float32)
device_gather_map = rmm.to_device(gather_map)
out = libcudf.copying.gather(cols, device_gather_map)
np.testing.assert_array_equal(out[0].to_array(), gather_map)
np.testing.assert_array_almost_equal(out[1].to_array(), expected)
def test_gather_string_col():
col = column.as_column(["a", "b", "c", "d"])
gather_map = column.as_column([0, 2, 3], dtype="int32").data.mem
result = libcudf.copying.gather(col, gather_map)
assert result.data.to_host() == ["a", "c", "d"]
col = column.as_column(["a", "b", None, "d"])
gather_map = column.as_column([0, 2, 3], dtype="int32").data.mem
result = libcudf.copying.gather(col, gather_map)
assert result.data.to_host() == ["a", None, "d"]
def test_null_copy():
col = Series(np.arange(2049))
col[:] = None
assert len(col) == 2049
|
the-stack_0_574 | from .Function_Module import Function_Module
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
import selenium
from geopy.geocoders import Nominatim
import time
import os
import pathlib
class get_gps_location(Function_Module):
name = "get_gps_location"
help_description = "Current location"
time_sleep = 2
error = "Sir, I'm sorry I can't get my location."
chrome_not_found_error = "Sir, I can't find the Chrome binaries. Make sure that C:\Program Files(x86)\Google\Chrome\Application\chrome.exe is present!"
def respond(self, entities):
try:
coordinates = self.getLocation(self)
return self.convert_to_location(self, coordinates)
except selenium.common.exceptions.WebDriverException:
return self.chrome_not_found_error
except:
return self.error
def getLocation(self):
chrome_options = Options()
chrome_options.add_argument("--use-fake-ui-for-media-stream")
# Ideally the browser window would be hidden, but headless mode cannot be
# used here: Chrome does not activate geolocation when running headless.
# chrome_options.add_argument("headless")
timeout = 20
# The Chrome driver (required for Selenium) is expected in the same directory as this script
chrome_driver_path = str( str( pathlib.Path(__file__).parent.absolute() ) + r"\chromedriver.exe" )
print("Chrome-Driver Path: ", chrome_driver_path)
driver = webdriver.Chrome(executable_path=chrome_driver_path, chrome_options=chrome_options)
driver.get("https://mycurrentlocation.net/")
wait = WebDriverWait(driver, timeout)
time.sleep(self.time_sleep)
longitude = driver.find_elements_by_xpath('//*[@id="longitude"]')
longitude = [x.text for x in longitude]
longitude = str(longitude[0])
latitude = driver.find_elements_by_xpath('//*[@id="latitude"]')
latitude = [x.text for x in latitude]
latitude = str(latitude[0])
driver.quit()
coordinates = [latitude, longitude]
return coordinates
def convert_to_location(self, coordinates):
geolocator = Nominatim(user_agent="F.R.I.D.A.Y")
location = geolocator.reverse(coordinates[0] + ',' + coordinates[1])
print(location.raw)
# Compose an answer text from the address
address = location.raw['address']
# Use 'the' before road names that contain 'street' or 'road'
if("street" in address['road'] or "road" in address['road']):
result = "According to GPS, you are currently in the "+ address['road'] + ', ' + address['town'] + ', ' + address['state'] + ', ' + address['country'] + '.'
else:
result = "According to GPS, you are currently in "+ address['road'] + ', ' + address['town'] + ', ' + address['state'] + ', ' + address['country'] + '.'
return result |
the-stack_0_575 | # -*- coding: utf-8 -*-
# =============================================================================
# Copyright (c) 2012, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
# Written by Joel Bernier <[email protected]> and others.
# LLNL-CODE-529294.
# All rights reserved.
#
# This file is part of HEXRD. For details on dowloading the source,
# see the file COPYING.
#
# Please also see the file LICENSE.
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License (as published by the Free
# Software Foundation) version 2.1 dated February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program (see file LICENSE); if not, write to
# the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA or visit <http://www.gnu.org/licenses/>.
# =============================================================================
"""
Created on Fri Dec 9 13:05:27 2016
@author: bernier2
"""
import copy
import os
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from functools import partial
import yaml
import h5py
import numpy as np
from io import IOBase
from scipy import ndimage
from scipy.linalg.matfuncs import logm
from hexrd import constants
from hexrd.gridutil import cellConnectivity, cellIndices, make_tolerance_grid
from hexrd import matrixutil as mutil
from hexrd.transforms.xfcapi import \
anglesToGVec, \
angularDifference, \
detectorXYToGvec, \
gvecToDetectorXY, \
makeOscillRotMat, \
makeRotMatOfExpMap, \
mapAngle, \
oscillAnglesOfHKLs, \
rowNorm, \
unitRowVector
from hexrd import xrdutil
from hexrd.crystallography import PlaneData
from hexrd import constants as ct
from hexrd.rotations import angleAxisOfRotMat, RotMatEuler
from hexrd import distortion as distortion_pkg
from hexrd.utils.compatibility import h5py_read_string
from hexrd.utils.concurrent import distribute_tasks
from hexrd.utils.decorators import memoize
from hexrd.valunits import valWUnit
from hexrd.wppf import LeBail
from skimage.draw import polygon
from skimage.util import random_noise
from hexrd.wppf import wppfsupport
try:
from fast_histogram import histogram1d
fast_histogram = True
except(ImportError):
from numpy import histogram as histogram1d
fast_histogram = False
if ct.USE_NUMBA:
import numba
# =============================================================================
# PARAMETERS
# =============================================================================
instrument_name_DFLT = 'instrument'
beam_energy_DFLT = 65.351
beam_vec_DFLT = ct.beam_vec
eta_vec_DFLT = ct.eta_vec
panel_id_DFLT = 'generic'
nrows_DFLT = 2048
ncols_DFLT = 2048
pixel_size_DFLT = (0.2, 0.2)
tilt_params_DFLT = np.zeros(3)
t_vec_d_DFLT = np.r_[0., 0., -1000.]
chi_DFLT = 0.
t_vec_s_DFLT = np.zeros(3)
max_workers_DFLT = max(1, os.cpu_count() - 1)
"""
Calibration parameter flags
for instrument level, len is 7
[beam energy,
beam azimuth,
beam elevation,
chi,
tvec[0],
tvec[1],
tvec[2],
]
"""
instr_calibration_flags_DFLT = np.zeros(7, dtype=bool)
"""
for each panel, order is:
[tilt[0],
tilt[1],
tilt[2],
tvec[0],
tvec[1],
tvec[2],
<dparams>,
]
len is 6 + len(dparams) for each panel
by default, dparams are not set for refinement
"""
panel_calibration_flags_DFLT = np.array(
[1, 1, 1, 1, 1, 1],
dtype=bool
)
buffer_key = 'buffer'
distortion_key = 'distortion'
# =============================================================================
# UTILITY METHODS
# =============================================================================
def _fix_indices(idx, lo, hi):
nidx = np.array(idx)
off_lo = nidx < lo
off_hi = nidx > hi
nidx[off_lo] = lo
nidx[off_hi] = hi
return nidx
def calc_beam_vec(azim, pola):
"""
Calculate unit beam propagation vector from
spherical coordinate spec in DEGREES.
...MAY CHANGE; THIS IS ALSO LOCATED IN XRDUTIL!
"""
tht = np.radians(azim)
phi = np.radians(pola)
bv = np.r_[
np.sin(phi)*np.cos(tht),
np.cos(phi),
np.sin(phi)*np.sin(tht)]
return -bv
def calc_angles_from_beam_vec(bvec):
"""
Return the azimuth and polar angle from a beam
vector
"""
bvec = np.atleast_1d(bvec).flatten()
nvec = unitRowVector(-bvec)
azim = float(
np.degrees(np.arctan2(nvec[2], nvec[0]))
)
pola = float(np.degrees(np.arccos(nvec[1])))
return azim, pola
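# Round-trip sketch for the two helpers above (angles in DEGREES):
#
#   calc_beam_vec(90., 90.)                  # -> array([ 0., 0., -1.])
#   calc_angles_from_beam_vec([0., 0., -1.]) # -> (90.0, 90.0)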
def migrate_instrument_config(instrument_config):
"""utility function to generate old instrument config dictionary"""
cfg_list = []
for detector_id in instrument_config['detectors']:
cfg_list.append(
dict(
detector=instrument_config['detectors'][detector_id],
oscillation_stage=instrument_config['oscillation_stage'],
)
)
return cfg_list
def angle_in_range(angle, ranges, ccw=True, units='degrees'):
"""
Return the index of the first wedge the angle is found in
WARNING: always clockwise; assumes wedges are not overlapping
"""
tau = 360.
if units.lower() == 'radians':
tau = 2*np.pi
w = np.nan
for i, wedge in enumerate(ranges):
amin = wedge[0]
amax = wedge[1]
check = amin + np.mod(angle - amin, tau)
if check < amax:
w = i
break
return w
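# e.g. angle_in_range(370., [(0., 90.), (270., 360.)]) returns 0: 370 deg wraps
# to 10 deg past the first wedge's start, which lies inside (0, 90).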
# ???: move to gridutil?
def centers_of_edge_vec(edges):
assert np.r_[edges].ndim == 1, "edges must be 1-d"
return np.average(np.vstack([edges[:-1], edges[1:]]), axis=0)
def max_tth(instr):
"""
Return the maximum Bragg angle (in radians) subtended by the instrument.
Parameters
----------
instr : hexrd.instrument.HEDMInstrument instance
the instrument class to evaluate.
Returns
-------
tth_max : float
The maximum observable Bragg angle by the instrument in radians.
"""
tth_max = 0.
for det in instr.detectors.values():
ptth, peta = det.pixel_angles()
tth_max = max(np.max(ptth), tth_max)
return tth_max
def pixel_resolution(instr):
"""
Return the minimum, median, and maximum angular
resolution of the instrument.
Parameters
----------
instr : HEDMInstrument instance
An instrument.
Returns
-------
tth_stats : tuple of float
min/median/max tth resolution in radians.
eta_stats : tuple of float
min/median/max eta resolution in radians.
"""
max_tth = np.inf
max_eta = np.inf
min_tth = -np.inf
min_eta = -np.inf
ang_ps_full = []
for panel in instr.detectors.values():
angps = panel.angularPixelSize(
np.stack(
panel.pixel_coords,
axis=0
).reshape(2, np.cumprod(panel.shape)[-1]).T
)
ang_ps_full.append(angps)
max_tth = min(max_tth, np.min(angps[:, 0]))
max_eta = min(max_eta, np.min(angps[:, 1]))
min_tth = max(min_tth, np.max(angps[:, 0]))
min_eta = max(min_eta, np.max(angps[:, 1]))
pass
med_tth, med_eta = np.median(np.vstack(ang_ps_full), axis=0).flatten()
return (min_tth, med_tth, max_tth), (min_eta, med_eta, max_eta)
def max_resolution(instr):
"""
Return the maximum angular resolution of the instrument.
Parameters
----------
instr : HEDMInstrument instance
An instrument.
Returns
-------
max_tth : float
Maximum tth resolution in radians.
max_eta : float
Maximum eta resolution in radians.
"""
max_tth = np.inf
max_eta = np.inf
for panel in instr.detectors.values():
angps = panel.angularPixelSize(
np.stack(
panel.pixel_coords,
axis=0
).reshape(2, np.cumprod(panel.shape)[-1]).T
)
max_tth = min(max_tth, np.min(angps[:, 0]))
max_eta = min(max_eta, np.min(angps[:, 1]))
return max_tth, max_eta
def _gaussian_dist(x, cen, fwhm):
sigm = fwhm/(2*np.sqrt(2*np.log(2)))
return np.exp(-0.5*(x - cen)**2/sigm**2)
def _sigma_to_fwhm(sigm):
return sigm*ct.sigma_to_fwhm
def _fwhm_to_sigma(fwhm):
return fwhm/ct.sigma_to_fwhm
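# Both helpers use the Gaussian relation FWHM = 2*sqrt(2*ln(2))*sigma,
# i.e. ct.sigma_to_fwhm ~= 2.3548.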
# FIXME find a better place for this, and maybe include loop over pixels
if ct.USE_NUMBA:
@numba.njit(nogil=True, cache=True)
def _solid_angle_of_triangle(vtx_list):
norms = np.sqrt(np.sum(vtx_list*vtx_list, axis=1))
norms_prod = norms[0] * norms[1] * norms[2]
scalar_triple_product = np.dot(vtx_list[0],
np.cross(vtx_list[2], vtx_list[1]))
denominator = norms_prod \
+ norms[0]*np.dot(vtx_list[1], vtx_list[2]) \
+ norms[1]*np.dot(vtx_list[2], vtx_list[0]) \
+ norms[2]*np.dot(vtx_list[0], vtx_list[1])
return 2.*np.arctan2(scalar_triple_product, denominator)
else:
def _solid_angle_of_triangle(vtx_list):
norms = rowNorm(vtx_list)
norms_prod = np.cumprod(norms)[-1]
scalar_triple_product = np.dot(vtx_list[0],
np.cross(vtx_list[2], vtx_list[1]))
denominator = norms_prod \
+ norms[0]*np.dot(vtx_list[1], vtx_list[2]) \
+ norms[1]*np.dot(vtx_list[2], vtx_list[0]) \
+ norms[2]*np.dot(vtx_list[0], vtx_list[1])
return 2.*np.arctan2(scalar_triple_product, denominator)
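# Sanity check (illustrative): for unit vectors along +x, +y and +z the
# triangle spans one octant of the sphere, so the magnitude of the returned
# value is 4*pi/8 = pi/2; the sign depends on the vertex ordering.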
# =============================================================================
# CLASSES
# =============================================================================
class HEDMInstrument(object):
"""
Abstraction of XRD instrument.
* Distortion needs to be moved to a class with registry; tuple unworkable
* where should reference eta be defined? currently set to default config
"""
def __init__(self, instrument_config=None,
image_series=None, eta_vector=None,
instrument_name=None, tilt_calibration_mapping=None,
max_workers=max_workers_DFLT):
self._id = instrument_name_DFLT
if eta_vector is None:
self._eta_vector = eta_vec_DFLT
else:
self._eta_vector = eta_vector
self.max_workers = max_workers
if instrument_config is None:
if instrument_name is not None:
self._id = instrument_name
self._num_panels = 1
self._beam_energy = beam_energy_DFLT
self._beam_vector = beam_vec_DFLT
self._detectors = dict(
panel_id_DFLT=PlanarDetector(
rows=nrows_DFLT, cols=ncols_DFLT,
pixel_size=pixel_size_DFLT,
tvec=t_vec_d_DFLT,
tilt=tilt_params_DFLT,
bvec=self._beam_vector,
evec=self._eta_vector,
distortion=None,
max_workers=self.max_workers),
)
self._tvec = t_vec_s_DFLT
self._chi = chi_DFLT
else:
if isinstance(instrument_config, h5py.File):
tmp = {}
unwrap_h5_to_dict(instrument_config, tmp)
instrument_config.close()
instrument_config = tmp['instrument']
elif not isinstance(instrument_config, dict):
raise RuntimeError(
"instrument_config must be either an HDF5 file object"
+ "or a dictionary. You gave a %s"
% type(instrument_config)
)
if instrument_name is None:
if 'id' in instrument_config:
self._id = instrument_config['id']
else:
self._id = instrument_name
self._num_panels = len(instrument_config['detectors'])
self._beam_energy = instrument_config['beam']['energy'] # keV
self._beam_vector = calc_beam_vec(
instrument_config['beam']['vector']['azimuth'],
instrument_config['beam']['vector']['polar_angle'],
)
# now build detector dict
detectors_config = instrument_config['detectors']
det_dict = dict.fromkeys(detectors_config)
for det_id, det_info in detectors_config.items():
pixel_info = det_info['pixels']
affine_info = det_info['transform']
try:
saturation_level = det_info['saturation_level']
except(KeyError):
saturation_level = 2**16
shape = (pixel_info['rows'], pixel_info['columns'])
panel_buffer = None
if buffer_key in det_info:
det_buffer = det_info[buffer_key]
if det_buffer is not None:
if isinstance(det_buffer, np.ndarray):
if det_buffer.ndim == 2:
assert det_buffer.shape == shape, \
"buffer shape must match detector"
else:
assert len(det_buffer) == 2
panel_buffer = det_buffer
elif isinstance(det_buffer, list):
panel_buffer = np.asarray(det_buffer)
elif np.isscalar(det_buffer):
panel_buffer = det_buffer*np.ones(2)
else:
raise RuntimeError(
"panel buffer spec invalid for %s" % det_id
)
# handle distortion
distortion = None
if distortion_key in det_info:
distortion_cfg = det_info[distortion_key]
if distortion_cfg is not None:
try:
func_name = distortion_cfg['function_name']
dparams = distortion_cfg['parameters']
distortion = distortion_pkg.get_mapping(
func_name, dparams
)
except(KeyError):
raise RuntimeError(
"problem with distortion specification"
)
det_dict[det_id] = PlanarDetector(
name=det_id,
rows=pixel_info['rows'],
cols=pixel_info['columns'],
pixel_size=pixel_info['size'],
panel_buffer=panel_buffer,
saturation_level=saturation_level,
tvec=affine_info['translation'],
tilt=affine_info['tilt'],
bvec=self._beam_vector,
evec=self._eta_vector,
distortion=distortion,
max_workers=self.max_workers)
self._detectors = det_dict
self._tvec = np.r_[
instrument_config['oscillation_stage']['translation']
]
self._chi = instrument_config['oscillation_stage']['chi']
#
# set up calibration parameter list and refinement flags
#
# first, grab the mapping function for tilt parameters if specified
if tilt_calibration_mapping is not None:
if not isinstance(tilt_calibration_mapping, RotMatEuler):
raise RuntimeError(
"tilt mapping must be a 'RotMatEuler' instance"
)
self._tilt_calibration_mapping = tilt_calibration_mapping
# grab angles from beam vec
# !!! these are in DEGREES!
azim, pola = calc_angles_from_beam_vec(self._beam_vector)
# stack instrument level parameters
# units: keV, degrees, mm
self._calibration_parameters = [
self._beam_energy,
azim,
pola,
np.degrees(self._chi),
*self._tvec,
]
self._calibration_flags = instr_calibration_flags_DFLT
# collect info from panels and append
det_params = []
det_flags = []
for detector in self._detectors.values():
this_det_params = detector.calibration_parameters
if self._tilt_calibration_mapping is not None:
rmat = makeRotMatOfExpMap(detector.tilt)
self._tilt_calibration_mapping.rmat = rmat
tilt = np.degrees(self._tilt_calibration_mapping.angles)
this_det_params[:3] = tilt
det_params.append(this_det_params)
det_flags.append(detector.calibration_flags)
det_params = np.hstack(det_params)
det_flags = np.hstack(det_flags)
# !!! hstack here assumes that calib params will be float and
# !!! flags will all be bool
self._calibration_parameters = np.hstack(
[self._calibration_parameters,
det_params]
).flatten()
self._calibration_flags = np.hstack(
[self._calibration_flags,
det_flags]
)
return
# properties for physical size of rectangular detector
@property
def id(self):
return self._id
@property
def num_panels(self):
return self._num_panels
@property
def detectors(self):
return self._detectors
@property
def detector_parameters(self):
pdict = {}
for key, panel in self.detectors.items():
pdict[key] = panel.config_dict(
self.chi, self.tvec,
beam_energy=self.beam_energy,
beam_vector=self.beam_vector
)
return pdict
@property
def tvec(self):
return self._tvec
@tvec.setter
def tvec(self, x):
x = np.array(x).flatten()
assert len(x) == 3, 'input must have length = 3'
self._tvec = x
@property
def chi(self):
return self._chi
@chi.setter
def chi(self, x):
self._chi = float(x)
@property
def beam_energy(self):
return self._beam_energy
@beam_energy.setter
def beam_energy(self, x):
self._beam_energy = float(x)
@property
def beam_wavelength(self):
return ct.keVToAngstrom(self.beam_energy)
@property
def beam_vector(self):
return self._beam_vector
@beam_vector.setter
def beam_vector(self, x):
x = np.array(x).flatten()
if len(x) == 3:
assert sum(x*x) > 1-ct.sqrt_epsf, \
'input must have length = 3 and have unit magnitude'
self._beam_vector = x
elif len(x) == 2:
self._beam_vector = calc_beam_vec(*x)
else:
raise RuntimeError("input must be a unit vector or angle pair")
# ...maybe change dictionary item behavior for 3.x compatibility?
for detector_id in self.detectors:
panel = self.detectors[detector_id]
panel.bvec = self._beam_vector
@property
def eta_vector(self):
return self._eta_vector
@eta_vector.setter
def eta_vector(self, x):
x = np.array(x).flatten()
assert len(x) == 3 and sum(x*x) > 1-ct.sqrt_epsf, \
'input must have length = 3 and have unit magnitude'
self._eta_vector = x
# ...maybe change dictionary item behavior for 3.x compatibility?
for detector_id in self.detectors:
panel = self.detectors[detector_id]
panel.evec = self._eta_vector
@property
def tilt_calibration_mapping(self):
return self._tilt_calibration_mapping
@tilt_calibration_mapping.setter
def tilt_calibration_mapping(self, x):
if not isinstance(x, RotMatEuler) and x is not None:
raise RuntimeError(
"tilt mapping must be None or a 'RotMatEuler' instance"
)
self._tilt_calibration_mapping = x
@property
def calibration_parameters(self):
"""
Yields concatenated list of instrument parameters.
Returns
-------
array_like
concatenated list of instrument parameters.
"""
# grab angles from beam vec
# !!! these are in DEGREES!
azim, pola = calc_angles_from_beam_vec(self.beam_vector)
# stack instrument level parameters
# units: keV, degrees, mm
calibration_parameters = [
self.beam_energy,
azim,
pola,
np.degrees(self.chi),
*self.tvec,
]
# collect info from panels and append
det_params = []
det_flags = []
for detector in self.detectors.values():
this_det_params = detector.calibration_parameters
if self.tilt_calibration_mapping is not None:
rmat = makeRotMatOfExpMap(detector.tilt)
self.tilt_calibration_mapping.rmat = rmat
tilt = np.degrees(self.tilt_calibration_mapping.angles)
this_det_params[:3] = tilt
det_params.append(this_det_params)
det_flags.append(detector.calibration_flags)
det_params = np.hstack(det_params)
det_flags = np.hstack(det_flags)
# !!! hstack here assumes that calib params will be float and
# !!! flags will all be bool
calibration_parameters = np.hstack(
[calibration_parameters,
det_params]
).flatten()
self._calibration_parameters = calibration_parameters
return self._calibration_parameters
@property
def calibration_flags(self):
return self._calibration_flags
@calibration_flags.setter
def calibration_flags(self, x):
x = np.array(x, dtype=bool).flatten()
if len(x) != len(self._calibration_flags):
raise RuntimeError(
"length of parameter list must be %d; you gave %d"
% (len(self._calibration_flags), len(x))
)
ii = 7
for panel in self.detectors.values():
npp = 6
if panel.distortion is not None:
npp += len(panel.distortion.params)
panel.calibration_flags = x[ii:ii + npp]
ii += npp
self._calibration_flags = x
# =========================================================================
# METHODS
# =========================================================================
def write_config(self, filename=None, style='yaml', calibration_dict={}):
""" WRITE OUT YAML FILE """
# initialize output dictionary
assert style.lower() in ['yaml', 'hdf5'], \
"style must be either 'yaml', or 'hdf5'; you gave '%s'" % style
par_dict = {}
par_dict['id'] = self.id
azim, pola = calc_angles_from_beam_vec(self.beam_vector)
beam = dict(
energy=self.beam_energy,
vector=dict(
azimuth=azim,
polar_angle=pola,
)
)
par_dict['beam'] = beam
if calibration_dict:
par_dict['calibration_crystal'] = calibration_dict
ostage = dict(
chi=self.chi,
translation=self.tvec.tolist()
)
par_dict['oscillation_stage'] = ostage
det_dict = dict.fromkeys(self.detectors)
for det_name, detector in self.detectors.items():
# grab panel config
# !!! don't need beam or tvec
# !!! have vetted style
pdict = detector.config_dict(chi=self.chi, tvec=self.tvec,
beam_energy=self.beam_energy,
beam_vector=self.beam_vector,
style=style)
det_dict[det_name] = pdict['detector']
par_dict['detectors'] = det_dict
# handle output file if requested
if filename is not None:
if style.lower() == 'yaml':
with open(filename, 'w') as f:
yaml.dump(par_dict, stream=f)
else:
# hdf5
with h5py.File(filename, 'w') as f:
instr_grp = f.create_group('instrument')
unwrap_dict_to_h5(instr_grp, par_dict, asattr=False)
return par_dict
def update_from_parameter_list(self, p):
"""
Update the instrument class from a parameter list.
Utility function to update instrument parameters from a 1-d master
parameter list (e.g. as used in calibration)
!!! Note that angles are reported in DEGREES!
"""
self.beam_energy = p[0]
self.beam_vector = calc_beam_vec(p[1], p[2])
self.chi = np.radians(p[3])
self.tvec = np.r_[p[4:7]]
ii = 7
for det_name, detector in self.detectors.items():
this_det_params = detector.calibration_parameters
npd = len(this_det_params) # total number of params
dpnp = npd - 6 # number of distortion params
# first do tilt
tilt = np.r_[p[ii:ii + 3]]
if self.tilt_calibration_mapping is not None:
self.tilt_calibration_mapping.angles = np.radians(tilt)
rmat = self.tilt_calibration_mapping.rmat
phi, n = angleAxisOfRotMat(rmat)
tilt = phi*n.flatten()
detector.tilt = tilt
# then do translation
ii += 3
detector.tvec = np.r_[p[ii:ii + 3]]
# then do distortion (if necessary)
# FIXME will need to update this with distortion fix
ii += 3
if dpnp > 0:
if detector.distortion is None:
raise RuntimeError(
"distortion discrepancy for '%s'!"
% det_name
)
else:
try:
detector.distortion.params = p[ii:ii + dpnp]
except(AssertionError):
raise RuntimeError(
"distortion for '%s' " % det_name
+ "expects %d params but got %d"
% (len(detector.distortion.params), dpnp)
)
ii += dpnp
return
def extract_polar_maps(self, plane_data, imgser_dict,
active_hkls=None, threshold=None,
tth_tol=None, eta_tol=0.25):
"""
Extract eta-omega maps from an imageseries.
Quick and dirty way to histogram angular patch data for make
pole figures suitable for fiber generation
TODO: streamline projection code
TODO: normalization
!!!: images must be non-negative!
"""
if tth_tol is not None:
plane_data.tThWidth = np.radians(tth_tol)
else:
tth_tol = np.degrees(plane_data.tThWidth)
tth_ranges = plane_data.getTThRanges()
if active_hkls is not None:
assert hasattr(active_hkls, '__len__'), \
"active_hkls must be an iterable with __len__"
tth_ranges = tth_ranges[active_hkls]
# # need this for making eta ranges
# eta_tol_vec = 0.5*np.radians([-eta_tol, eta_tol])
# make rings clipped to panel
# !!! eta_idx has the same length as plane_data.exclusions
# each entry are the integer indices into the bins
# !!! eta_edges is the list of eta bin EDGES
# We can use the same eta_edge for all detectors, so calculate it once
pow_angs, pow_xys, eta_idx, eta_edges = list(
self.detectors.values()
)[0].make_powder_rings(plane_data,
merge_hkls=False, delta_eta=eta_tol,
full_output=True)
delta_eta = eta_edges[1] - eta_edges[0]
ncols_eta = len(eta_edges) - 1
ring_maps_panel = dict.fromkeys(self.detectors)
for i_d, det_key in enumerate(self.detectors):
print("working on detector '%s'..." % det_key)
# grab panel
panel = self.detectors[det_key]
# native_area = panel.pixel_area # pixel ref area
# pixel angular coords for the detector panel
ptth, peta = panel.pixel_angles()
# grab omegas from imageseries and squawk if missing
try:
omegas = imgser_dict[det_key].metadata['omega']
except(KeyError):
msg = "imageseries for '%s' has no omega info" % det_key
raise RuntimeError(msg)
# initialize maps and assign by row (omega/frame)
nrows_ome = len(omegas)
# init map with NaNs
shape = (len(tth_ranges), nrows_ome, ncols_eta)
ring_maps = np.full(shape, np.nan)
# Generate ring parameters once, and re-use them for each image
ring_params = []
for tthr in tth_ranges:
kwargs = {
'tthr': tthr,
'ptth': ptth,
'peta': peta,
'eta_edges': eta_edges,
'delta_eta': delta_eta,
}
ring_params.append(_generate_ring_params(**kwargs))
# Divide up the images among processes
ims = imgser_dict[det_key]
tasks = distribute_tasks(len(ims), self.max_workers)
func = partial(_run_histograms, ims=ims, tth_ranges=tth_ranges,
ring_maps=ring_maps, ring_params=ring_params,
threshold=threshold)
with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
executor.map(func, tasks)
ring_maps_panel[det_key] = ring_maps
return ring_maps_panel, eta_edges
def extract_line_positions(self, plane_data, imgser_dict,
tth_tol=None, eta_tol=1., npdiv=2,
eta_centers=None,
collapse_eta=True, collapse_tth=False,
do_interpolation=True):
"""
Perform annular interpolation on diffraction images.
Provides data for extracting the line positions from powder diffraction
images, pole figure patches from imageseries, or Bragg peaks from
Laue diffraction images.
Parameters
----------
plane_data : hexrd.crystallography.PlaneData object or array_like
Object determining the 2theta positions for the integration
sectors. If PlaneData, this will be all non-excluded reflections,
subject to merging within PlaneData.tThWidth. If array_like,
interpreted as a list of 2theta angles IN RADIAN (this may change).
imgser_dict : dict
Dictionary of powder diffraction images, one for each detector.
tth_tol : scalar, optional
The radial (i.e. 2theta) width of the integration sectors
IN DEGREES. This arg is required if plane_data is array_like.
The default is None.
eta_tol : scalar, optional
The azimuthal (i.e. eta) width of the integration sectors
IN DEGREES. The default is 1.
npdiv : int, optional
The number of oversampling pixel subdivision (see notes).
The default is 2.
eta_centers : array_like, optional
The desired azimuthal sector centers. The default is None. If
None, then bins are distributed sequentially from (-180, 180).
collapse_eta : bool, optional
Flag for summing sectors in eta. The default is True.
collapse_tth : bool, optional
Flag for summing sectors in 2theta. The default is False.
do_interpolation : bool, optional
If True, perform bilinear interpolation. The default is True.
Raises
------
RuntimeError
DESCRIPTION.
Returns
-------
panel_data : dict
Dictionary over the detectors with the following structure:
[list over (merged) 2theta ranges]
[list over valid eta sectors]
[angle data <input dependent>,
bin intensities <input dependent>]
Notes
-----
TODO: May change the array_like input units to degrees.
TODO: rename function.
"""
if not hasattr(plane_data, '__len__'):
plane_data = plane_data.makeNew() # make local copy to munge
if tth_tol is not None:
plane_data.tThWidth = np.radians(tth_tol)
tth_ranges = np.degrees(plane_data.getMergedRanges()[1])
tth_tols = np.hstack([i[1] - i[0] for i in tth_ranges])
else:
tth_tols = np.ones(len(plane_data))*tth_tol
# =====================================================================
# LOOP OVER DETECTORS
# =====================================================================
panel_data = dict.fromkeys(self.detectors)
for i_det, detector_id in enumerate(self.detectors):
print("working on detector '%s'..." % detector_id)
# pbar.update(i_det + 1)
# grab panel
panel = self.detectors[detector_id]
instr_cfg = panel.config_dict(
chi=self.chi, tvec=self.tvec,
beam_energy=self.beam_energy,
beam_vector=self.beam_vector
)
native_area = panel.pixel_area # pixel ref area
images = imgser_dict[detector_id]
if images.ndim == 2:
n_images = 1
images = np.tile(images, (1, 1, 1))
elif images.ndim == 3:
n_images = len(images)
else:
raise RuntimeError("images must be 2- or 3-d")
# make rings
pow_angs, pow_xys = panel.make_powder_rings(
plane_data, merge_hkls=True,
delta_tth=tth_tol, delta_eta=eta_tol,
eta_list=eta_centers)
# =================================================================
# LOOP OVER RING SETS
# =================================================================
ring_data = []
for i_ring, these_data in enumerate(zip(pow_angs, pow_xys)):
print("interpolating 2theta bin %d..." % i_ring)
# points are already checked to fall on detector
angs = these_data[0]
xys = these_data[1]
# make the tth,eta patches for interpolation
patches = xrdutil.make_reflection_patches(
instr_cfg, angs, panel.angularPixelSize(xys),
tth_tol=tth_tols[i_ring], eta_tol=eta_tol,
npdiv=npdiv, quiet=True)
# loop over patches
# FIXME: fix initialization
if collapse_tth:
patch_data = np.zeros((len(angs), n_images))
else:
patch_data = []
for i_p, patch in enumerate(patches):
# strip relevant objects out of current patch
vtx_angs, vtx_xys, conn, areas, xys_eval, ijs = patch
# need to reshape eval pts for interpolation
xy_eval = np.vstack([
xys_eval[0].flatten(),
xys_eval[1].flatten()]).T
_, on_panel = panel.clip_to_panel(xy_eval)
if np.any(~on_panel):
continue
if collapse_tth:
ang_data = (vtx_angs[0][0, [0, -1]],
vtx_angs[1][[0, -1], 0])
elif collapse_eta:
# !!! yield the tth bin centers
tth_centers = np.average(
np.vstack([vtx_angs[0][0, :-1], vtx_angs[0][0, 1:]]),
axis=0
)
ang_data = (tth_centers,
angs[i_p][-1])
else:
ang_data = vtx_angs
prows, pcols = areas.shape
area_fac = areas/float(native_area)
# interpolate
if not collapse_tth:
ims_data = []
for j_p in np.arange(len(images)):
# catch interpolation type
image = images[j_p]
if do_interpolation:
tmp = panel.interpolate_bilinear(
xy_eval,
image,
).reshape(prows, pcols)*area_fac
else:
tmp = image[ijs[0], ijs[1]]*area_fac
# catch collapsing options
if collapse_tth:
patch_data[i_p, j_p] = np.average(tmp)
# ims_data.append(np.sum(tmp))
else:
if collapse_eta:
ims_data.append(np.average(tmp, axis=0))
else:
ims_data.append(tmp)
pass # close image loop
if not collapse_tth:
patch_data.append((ang_data, ims_data))
pass # close patch loop
ring_data.append(patch_data)
pass # close ring loop
panel_data[detector_id] = ring_data
pass # close panel loop
# pbar.finish()
return panel_data
def simulate_powder_pattern(self,
mat_list,
params=None,
bkgmethod=None,
origin=None,
noise=None):
"""
Generate powder diffraction iamges from specified materials.
Parameters
----------
mat_list : array_like (n, )
List of Material classes.
params : dict, optional
Dictionary of LeBail parameters (see Notes). The default is None.
bkgmethod : dict, optional
Background function specification. The default is None.
origin : array_like (3,), optional
            Vector describing the origin of the diffraction volume.
            The default is None, which is equivalent to [0, 0, 0].
noise : str, optional
Flag describing type of noise to be applied. The default is None.
Returns
-------
img_dict : dict
            Dictionary of diffraction images over the detectors.
Notes
-----
TODO: add more controls for noise function.
TODO: modify hooks to LeBail parameters.
TODO: add optional volume fraction weights for phases in mat_list
"""
"""
        >> @AUTHOR: Saransh Singh, Lawrence Livermore National Lab,
[email protected]
>> @DATE: 01/22/2021 SS 1.0 original
>> @DETAILS: adding hook to WPPF class. this changes the input list
significantly
"""
if origin is None:
origin = self.tvec
origin = np.asarray(origin).squeeze()
assert len(origin) == 3, \
"origin must be a 3-element sequence"
'''
        If params is None, fill in some sane default values.
only the first value is used. the rest of the values are
the upper, lower bounds and vary flag for refinement which
are not used but required for interfacing with WPPF
zero_error : zero shift error
U, V, W : Cagliotti parameters
P, X, Y : Lorentzian parameters
eta1, eta2, eta3 : Mixing parameters
'''
if(params is None):
# params = {'zero_error': [0.0, -1., 1., True],
# 'U': [2e-1, -1., 1., True],
# 'V': [2e-2, -1., 1., True],
# 'W': [2e-2, -1., 1., True],
# 'X': [2e-1, -1., 1., True],
# 'Y': [2e-1, -1., 1., True]
# }
params = wppfsupport._generate_default_parameters_LeBail(
mat_list,
1)
'''
        Use the material list to obtain the dictionary of initial intensities.
        We need to make sure that the intensities are properly scaled by the
        Lorentz polarization factor; since the calculation is done in the
        LeBail class, all that means is that the initial intensity needs that
        factor included.
'''
img_dict = dict.fromkeys(self.detectors)
# find min and max tth over all panels
tth_mi = np.inf
tth_ma = 0.
ptth_dict = dict.fromkeys(self.detectors)
for det_key, panel in self.detectors.items():
ptth, peta = panel.pixel_angles(origin=origin)
tth_mi = min(tth_mi, ptth.min())
tth_ma = max(tth_ma, ptth.max())
ptth_dict[det_key] = ptth
'''
        Now make a list of two-theta values and dummy ones for the
        experimental spectrum; this is never really used, so any values
        should be okay. We could also pass the integrated detector image if
        we would like to simulate a realistic background, but that's for
        another day.
'''
        # convert angles to degrees because that's what the WPPF expects
tth_mi = np.degrees(tth_mi)
tth_ma = np.degrees(tth_ma)
# get tth angular resolution for instrument
ang_res = max_resolution(self)
# !!! calc nsteps by oversampling
nsteps = int(np.ceil(2*(tth_ma - tth_mi)/np.degrees(ang_res[0])))
        # evaluation vector for LeBail
tth = np.linspace(tth_mi, tth_ma, nsteps)
expt = np.vstack([tth, np.ones_like(tth)]).T
wavelength = [
valWUnit('lp', 'length', self.beam_wavelength, 'angstrom'),
1.
]
'''
now go through the material list and get the intensity dictionary
'''
intensity = {}
for mat in mat_list:
multiplicity = mat.planeData.getMultiplicity()
tth = mat.planeData.getTTh()
LP = (1 + np.cos(tth)**2) / \
np.cos(0.5*tth)/np.sin(0.5*tth)**2
intensity[mat.name] = {}
intensity[mat.name]['synchrotron'] = \
mat.planeData.get_structFact() * LP * multiplicity
kwargs = {
'expt_spectrum': expt,
'params': params,
'phases': mat_list,
'wavelength': {
'synchrotron': wavelength
},
'bkgmethod': bkgmethod,
'intensity_init': intensity,
'peakshape': 'pvtch'
}
self.WPPFclass = LeBail(**kwargs)
self.simulated_spectrum = self.WPPFclass.spectrum_sim
self.background = self.WPPFclass.background
'''
        Now that we have the simulated intensities, it's time to get the
        two theta for the detector pixels and interpolate what the intensity
        for each pixel should be.
'''
img_dict = dict.fromkeys(self.detectors)
for det_key, panel in self.detectors.items():
ptth = ptth_dict[det_key]
img = np.interp(np.degrees(ptth),
self.simulated_spectrum.x,
self.simulated_spectrum.y + self.background.y)
# normalize everything to 0-1
mi = img.min()
ma = img.max()
if(ma > mi):
img = (img - mi) / (ma - mi)
if(noise is None):
img_dict[det_key] = img
else:
if(noise.lower() == 'poisson'):
im_noise = random_noise(img,
mode='poisson',
clip=True)
mi = im_noise.min()
ma = im_noise.max()
if(ma > mi):
im_noise = (im_noise - mi)/(ma - mi)
img_dict[det_key] = im_noise
elif(noise.lower() == 'gaussian'):
img_dict[det_key] = random_noise(img,
mode='gaussian',
clip=True)
elif(noise.lower() == 'salt'):
img_dict[det_key] = random_noise(img, mode='salt')
elif(noise.lower() == 'pepper'):
img_dict[det_key] = random_noise(img, mode='pepper')
elif(noise.lower() == 's&p'):
img_dict[det_key] = random_noise(img, mode='s&p')
elif(noise.lower() == 'speckle'):
img_dict[det_key] = random_noise(img,
mode='speckle',
clip=True)
return img_dict
def simulate_laue_pattern(self, crystal_data,
minEnergy=5., maxEnergy=35.,
rmat_s=None, grain_params=None):
"""
Simulate Laue diffraction over the instrument.
Parameters
----------
crystal_data : TYPE
DESCRIPTION.
minEnergy : TYPE, optional
DESCRIPTION. The default is 5..
maxEnergy : TYPE, optional
DESCRIPTION. The default is 35..
rmat_s : TYPE, optional
DESCRIPTION. The default is None.
grain_params : TYPE, optional
DESCRIPTION. The default is None.
Returns
-------
results : TYPE
DESCRIPTION.
TODO: revisit output; dict, or concatenated list?
"""
results = dict.fromkeys(self.detectors)
for det_key, panel in self.detectors.items():
results[det_key] = panel.simulate_laue_pattern(
crystal_data,
minEnergy=minEnergy, maxEnergy=maxEnergy,
rmat_s=rmat_s, tvec_s=self.tvec,
grain_params=grain_params,
beam_vec=self.beam_vector)
return results
def simulate_rotation_series(self, plane_data, grain_param_list,
eta_ranges=[(-np.pi, np.pi), ],
ome_ranges=[(-np.pi, np.pi), ],
ome_period=(-np.pi, np.pi),
wavelength=None):
"""
Simulate a monochromatic rotation series over the instrument.
Parameters
----------
plane_data : TYPE
DESCRIPTION.
grain_param_list : TYPE
DESCRIPTION.
eta_ranges : TYPE, optional
DESCRIPTION. The default is [(-np.pi, np.pi), ].
ome_ranges : TYPE, optional
DESCRIPTION. The default is [(-np.pi, np.pi), ].
ome_period : TYPE, optional
DESCRIPTION. The default is (-np.pi, np.pi).
wavelength : TYPE, optional
DESCRIPTION. The default is None.
Returns
-------
results : TYPE
DESCRIPTION.
TODO: revisit output; dict, or concatenated list?
"""
results = dict.fromkeys(self.detectors)
for det_key, panel in self.detectors.items():
results[det_key] = panel.simulate_rotation_series(
plane_data, grain_param_list,
eta_ranges=eta_ranges,
ome_ranges=ome_ranges,
ome_period=ome_period,
chi=self.chi, tVec_s=self.tvec,
wavelength=wavelength)
return results
def pull_spots(self, plane_data, grain_params,
imgser_dict,
tth_tol=0.25, eta_tol=1., ome_tol=1.,
npdiv=2, threshold=10,
eta_ranges=[(-np.pi, np.pi), ],
ome_period=(-np.pi, np.pi),
dirname='results', filename=None, output_format='text',
return_spot_list=False,
quiet=True, check_only=False,
interp='nearest'):
"""
        Extract reflection info from a rotation series.
Input must be encoded as an OmegaImageseries object.
Parameters
----------
plane_data : TYPE
DESCRIPTION.
grain_params : TYPE
DESCRIPTION.
imgser_dict : TYPE
DESCRIPTION.
tth_tol : TYPE, optional
DESCRIPTION. The default is 0.25.
eta_tol : TYPE, optional
DESCRIPTION. The default is 1..
ome_tol : TYPE, optional
DESCRIPTION. The default is 1..
npdiv : TYPE, optional
DESCRIPTION. The default is 2.
threshold : TYPE, optional
DESCRIPTION. The default is 10.
eta_ranges : TYPE, optional
DESCRIPTION. The default is [(-np.pi, np.pi), ].
ome_period : TYPE, optional
DESCRIPTION. The default is (-np.pi, np.pi).
dirname : TYPE, optional
DESCRIPTION. The default is 'results'.
filename : TYPE, optional
DESCRIPTION. The default is None.
output_format : TYPE, optional
DESCRIPTION. The default is 'text'.
return_spot_list : TYPE, optional
DESCRIPTION. The default is False.
quiet : TYPE, optional
DESCRIPTION. The default is True.
check_only : TYPE, optional
DESCRIPTION. The default is False.
interp : TYPE, optional
DESCRIPTION. The default is 'nearest'.
Returns
-------
compl : TYPE
DESCRIPTION.
output : TYPE
DESCRIPTION.
"""
# grain parameters
rMat_c = makeRotMatOfExpMap(grain_params[:3])
tVec_c = grain_params[3:6]
# grab omega ranges from first imageseries
#
# WARNING: all imageseries AND all wedges within are assumed to have
# the same omega values; put in a check that they are all the same???
oims0 = next(iter(imgser_dict.values()))
ome_ranges = [np.radians([i['ostart'], i['ostop']])
for i in oims0.omegawedges.wedges]
# delta omega in DEGREES grabbed from first imageseries in the dict
delta_ome = oims0.omega[0, 1] - oims0.omega[0, 0]
# make omega grid for frame expansion around reference frame
# in DEGREES
ndiv_ome, ome_del = make_tolerance_grid(
delta_ome, ome_tol, 1, adjust_window=True,
)
# generate structuring element for connected component labeling
if ndiv_ome == 1:
label_struct = ndimage.generate_binary_structure(2, 2)
else:
label_struct = ndimage.generate_binary_structure(3, 3)
# simulate rotation series
sim_results = self.simulate_rotation_series(
plane_data, [grain_params, ],
eta_ranges=eta_ranges,
ome_ranges=ome_ranges,
ome_period=ome_period)
# patch vertex generator (global for instrument)
tol_vec = 0.5*np.radians(
[-tth_tol, -eta_tol,
-tth_tol, eta_tol,
tth_tol, eta_tol,
tth_tol, -eta_tol])
# prepare output if requested
if filename is not None and output_format.lower() == 'hdf5':
this_filename = os.path.join(dirname, filename)
writer = GrainDataWriter_h5(
os.path.join(dirname, filename),
self.write_config(), grain_params)
# =====================================================================
# LOOP OVER PANELS
# =====================================================================
iRefl = 0
compl = []
output = dict.fromkeys(self.detectors)
for detector_id in self.detectors:
# initialize text-based output writer
if filename is not None and output_format.lower() == 'text':
output_dir = os.path.join(
dirname, detector_id
)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
this_filename = os.path.join(
output_dir, filename
)
writer = PatchDataWriter(this_filename)
# grab panel
panel = self.detectors[detector_id]
instr_cfg = panel.config_dict(
self.chi, self.tvec,
beam_energy=self.beam_energy,
beam_vector=self.beam_vector
)
native_area = panel.pixel_area # pixel ref area
# pull out the OmegaImageSeries for this panel from input dict
ome_imgser = imgser_dict[detector_id]
# extract simulation results
sim_results_p = sim_results[detector_id]
hkl_ids = sim_results_p[0][0]
hkls_p = sim_results_p[1][0]
ang_centers = sim_results_p[2][0]
xy_centers = sim_results_p[3][0]
ang_pixel_size = sim_results_p[4][0]
# now verify that full patch falls on detector...
# ???: strictly necessary?
#
# patch vertex array from sim
nangs = len(ang_centers)
patch_vertices = (
np.tile(ang_centers[:, :2], (1, 4)) +
np.tile(tol_vec, (nangs, 1))
).reshape(4*nangs, 2)
ome_dupl = np.tile(
ang_centers[:, 2], (4, 1)
).T.reshape(len(patch_vertices), 1)
# find vertices that all fall on the panel
det_xy, rmats_s, on_plane = xrdutil._project_on_detector_plane(
np.hstack([patch_vertices, ome_dupl]),
panel.rmat, rMat_c, self.chi,
panel.tvec, tVec_c, self.tvec,
panel.distortion)
_, on_panel = panel.clip_to_panel(det_xy, buffer_edges=True)
# all vertices must be on...
patch_is_on = np.all(on_panel.reshape(nangs, 4), axis=1)
patch_xys = det_xy.reshape(nangs, 4, 2)[patch_is_on]
# re-filter...
hkl_ids = hkl_ids[patch_is_on]
hkls_p = hkls_p[patch_is_on, :]
ang_centers = ang_centers[patch_is_on, :]
xy_centers = xy_centers[patch_is_on, :]
ang_pixel_size = ang_pixel_size[patch_is_on, :]
# TODO: add polygon testing right here!
# done <JVB 06/21/16>
if check_only:
patch_output = []
for i_pt, angs in enumerate(ang_centers):
# the evaluation omegas;
# expand about the central value using tol vector
ome_eval = np.degrees(angs[2]) + ome_del
# ...vectorize the omega_to_frame function to avoid loop?
frame_indices = [
ome_imgser.omega_to_frame(ome)[0] for ome in ome_eval
]
if -1 in frame_indices:
if not quiet:
msg = """
window for (%d%d%d) falls outside omega range
""" % tuple(hkls_p[i_pt, :])
print(msg)
continue
else:
these_vertices = patch_xys[i_pt]
ijs = panel.cartToPixel(these_vertices)
ii, jj = polygon(ijs[:, 0], ijs[:, 1])
contains_signal = False
for i_frame in frame_indices:
contains_signal = contains_signal or np.any(
ome_imgser[i_frame][ii, jj] > threshold
)
compl.append(contains_signal)
patch_output.append((ii, jj, frame_indices))
else:
# make the tth,eta patches for interpolation
patches = xrdutil.make_reflection_patches(
instr_cfg,
ang_centers[:, :2], ang_pixel_size,
omega=ang_centers[:, 2],
tth_tol=tth_tol, eta_tol=eta_tol,
rmat_c=rMat_c, tvec_c=tVec_c,
npdiv=npdiv, quiet=True)
# GRAND LOOP over reflections for this panel
patch_output = []
for i_pt, patch in enumerate(patches):
# strip relevant objects out of current patch
vtx_angs, vtx_xy, conn, areas, xy_eval, ijs = patch
prows, pcols = areas.shape
nrm_fac = areas/float(native_area)
nrm_fac = nrm_fac / np.min(nrm_fac)
# grab hkl info
hkl = hkls_p[i_pt, :]
hkl_id = hkl_ids[i_pt]
# edge arrays
tth_edges = vtx_angs[0][0, :]
delta_tth = tth_edges[1] - tth_edges[0]
eta_edges = vtx_angs[1][:, 0]
delta_eta = eta_edges[1] - eta_edges[0]
# need to reshape eval pts for interpolation
xy_eval = np.vstack([xy_eval[0].flatten(),
xy_eval[1].flatten()]).T
# the evaluation omegas;
# expand about the central value using tol vector
ome_eval = np.degrees(ang_centers[i_pt, 2]) + ome_del
# ???: vectorize the omega_to_frame function to avoid loop?
frame_indices = [
ome_imgser.omega_to_frame(ome)[0] for ome in ome_eval
]
if -1 in frame_indices:
if not quiet:
msg = """
window for (%d%d%d) falls outside omega range
""" % tuple(hkl)
print(msg)
continue
else:
# initialize spot data parameters
                        # !!! maybe change these to nan so as not to break
                        #     the writer
peak_id = -999
sum_int = np.nan
max_int = np.nan
meas_angs = np.nan*np.ones(3)
meas_xy = np.nan*np.ones(2)
# quick check for intensity
contains_signal = False
patch_data_raw = []
for i_frame in frame_indices:
tmp = ome_imgser[i_frame][ijs[0], ijs[1]]
contains_signal = contains_signal or np.any(
tmp > threshold
)
patch_data_raw.append(tmp)
pass
patch_data_raw = np.stack(patch_data_raw, axis=0)
compl.append(contains_signal)
if contains_signal:
# initialize patch data array for intensities
if interp.lower() == 'bilinear':
patch_data = np.zeros(
(len(frame_indices), prows, pcols))
for i, i_frame in enumerate(frame_indices):
patch_data[i] = \
panel.interpolate_bilinear(
xy_eval,
ome_imgser[i_frame],
pad_with_nans=False
).reshape(prows, pcols) # * nrm_fac
elif interp.lower() == 'nearest':
patch_data = patch_data_raw # * nrm_fac
else:
msg = "interpolation option " + \
"'%s' not understood"
                                raise RuntimeError(msg % interp)
# now have interpolated patch data...
labels, num_peaks = ndimage.label(
patch_data > threshold, structure=label_struct
)
slabels = np.arange(1, num_peaks + 1)
if num_peaks > 0:
peak_id = iRefl
coms = np.array(
ndimage.center_of_mass(
patch_data,
labels=labels,
index=slabels
)
)
if num_peaks > 1:
center = np.r_[patch_data.shape]*0.5
center_t = np.tile(center, (num_peaks, 1))
com_diff = coms - center_t
closest_peak_idx = np.argmin(
np.sum(com_diff**2, axis=1)
)
else:
closest_peak_idx = 0
pass # end multipeak conditional
coms = coms[closest_peak_idx]
# meas_omes = \
# ome_edges[0] + (0.5 + coms[0])*delta_ome
meas_omes = \
ome_eval[0] + coms[0]*delta_ome
meas_angs = np.hstack(
[tth_edges[0] + (0.5 + coms[2])*delta_tth,
eta_edges[0] + (0.5 + coms[1])*delta_eta,
mapAngle(
np.radians(meas_omes), ome_period
)
]
)
# intensities
# - summed is 'integrated' over interpolated
# data
# - max is max of raw input data
sum_int = np.sum(
patch_data[
labels == slabels[closest_peak_idx]
]
)
max_int = np.max(
patch_data_raw[
labels == slabels[closest_peak_idx]
]
)
# ???: Should this only use labeled pixels?
# Those are segmented from interpolated data,
# not raw; likely ok in most cases.
# need MEASURED xy coords
gvec_c = anglesToGVec(
meas_angs,
chi=self.chi,
rMat_c=rMat_c,
bHat_l=self.beam_vector)
rMat_s = makeOscillRotMat(
[self.chi, meas_angs[2]]
)
meas_xy = gvecToDetectorXY(
gvec_c,
panel.rmat, rMat_s, rMat_c,
panel.tvec, self.tvec, tVec_c,
beamVec=self.beam_vector)
if panel.distortion is not None:
meas_xy = panel.distortion.apply_inverse(
np.atleast_2d(meas_xy)
).flatten()
pass
# FIXME: why is this suddenly necessary???
meas_xy = meas_xy.squeeze()
pass # end num_peaks > 0
else:
patch_data = patch_data_raw
pass # end contains_signal
# write output
if filename is not None:
if output_format.lower() == 'text':
writer.dump_patch(
peak_id, hkl_id, hkl, sum_int, max_int,
ang_centers[i_pt], meas_angs,
xy_centers[i_pt], meas_xy)
elif output_format.lower() == 'hdf5':
xyc_arr = xy_eval.reshape(
prows, pcols, 2
).transpose(2, 0, 1)
writer.dump_patch(
detector_id, iRefl, peak_id, hkl_id, hkl,
tth_edges, eta_edges, np.radians(ome_eval),
xyc_arr, ijs, frame_indices, patch_data,
ang_centers[i_pt], xy_centers[i_pt],
meas_angs, meas_xy)
pass # end conditional on write output
pass # end conditional on check only
if return_spot_list:
# Full output
xyc_arr = xy_eval.reshape(
prows, pcols, 2
).transpose(2, 0, 1)
_patch_output = [
detector_id, iRefl, peak_id, hkl_id, hkl,
tth_edges, eta_edges, np.radians(ome_eval),
xyc_arr, ijs, frame_indices, patch_data,
ang_centers[i_pt], xy_centers[i_pt],
meas_angs, meas_xy
]
else:
# Trimmed output
_patch_output = [
peak_id, hkl_id, hkl, sum_int, max_int,
ang_centers[i_pt], meas_angs, meas_xy
]
patch_output.append(_patch_output)
iRefl += 1
pass # end patch conditional
pass # end patch loop
output[detector_id] = patch_output
if filename is not None and output_format.lower() == 'text':
writer.close()
pass # end detector loop
if filename is not None and output_format.lower() == 'hdf5':
writer.close()
return compl, output
"""def fit_grain(self, grain_params, data_dir='results'):"""
pass # end class: HEDMInstrument
class PlanarDetector(object):
"""Base class for 2D planar, rectangular row-column detector"""
__pixelPitchUnit = 'mm'
def __init__(self,
rows=2048, cols=2048,
pixel_size=(0.2, 0.2),
tvec=np.r_[0., 0., -1000.],
tilt=ct.zeros_3,
name='default',
bvec=ct.beam_vec,
evec=ct.eta_vec,
saturation_level=None,
panel_buffer=None,
roi=None,
distortion=None,
max_workers=max_workers_DFLT):
"""
Instantiate a PlanarDetector object.
Parameters
----------
rows : TYPE, optional
DESCRIPTION. The default is 2048.
cols : TYPE, optional
DESCRIPTION. The default is 2048.
pixel_size : TYPE, optional
DESCRIPTION. The default is (0.2, 0.2).
tvec : TYPE, optional
DESCRIPTION. The default is np.r_[0., 0., -1000.].
tilt : TYPE, optional
DESCRIPTION. The default is ct.zeros_3.
name : TYPE, optional
DESCRIPTION. The default is 'default'.
bvec : TYPE, optional
DESCRIPTION. The default is ct.beam_vec.
evec : TYPE, optional
DESCRIPTION. The default is ct.eta_vec.
saturation_level : TYPE, optional
DESCRIPTION. The default is None.
panel_buffer : TYPE, optional
If a scalar or len(2) array_like, the interpretation is a border
in mm. If an array with shape (nrows, ncols), interpretation is a
boolean with True marking valid pixels. The default is None.
roi : TYPE, optional
DESCRIPTION. The default is None.
distortion : TYPE, optional
DESCRIPTION. The default is None.
Returns
-------
None.
"""
self._name = name
self._rows = rows
self._cols = cols
self._pixel_size_row = pixel_size[0]
self._pixel_size_col = pixel_size[1]
self._saturation_level = saturation_level
self._panel_buffer = panel_buffer
self._roi = roi
self._tvec = np.array(tvec).flatten()
self._tilt = np.array(tilt).flatten()
self._bvec = np.array(bvec).flatten()
self._evec = np.array(evec).flatten()
self._distortion = distortion
self.max_workers = max_workers
#
# set up calibration parameter list and refinement flags
#
# order for a single detector will be
#
# [tilt, translation, <distortion>]
dparams = []
if self._distortion is not None:
dparams = self._distortion.params
self._calibration_parameters = np.hstack(
[self._tilt, self._tvec, dparams]
)
self._calibration_flags = np.hstack(
[panel_calibration_flags_DFLT,
np.zeros(len(dparams), dtype=bool)]
)
return
# detector ID
@property
def name(self):
return self._name
@name.setter
def name(self, s):
assert isinstance(s, str), "requires string input"
self._name = s
# properties for physical size of rectangular detector
@property
def rows(self):
return self._rows
@rows.setter
def rows(self, x):
assert isinstance(x, int)
self._rows = x
@property
def cols(self):
return self._cols
@cols.setter
def cols(self, x):
assert isinstance(x, int)
self._cols = x
@property
def pixel_size_row(self):
return self._pixel_size_row
@pixel_size_row.setter
def pixel_size_row(self, x):
self._pixel_size_row = float(x)
@property
def pixel_size_col(self):
return self._pixel_size_col
@pixel_size_col.setter
def pixel_size_col(self, x):
self._pixel_size_col = float(x)
@property
def pixel_area(self):
return self.pixel_size_row * self.pixel_size_col
@property
def saturation_level(self):
return self._saturation_level
@saturation_level.setter
def saturation_level(self, x):
if x is not None:
assert np.isreal(x)
self._saturation_level = x
@property
def panel_buffer(self):
return self._panel_buffer
@panel_buffer.setter
def panel_buffer(self, x):
"""if not None, a buffer in mm (x, y)"""
if x is not None:
assert len(x) == 2 or x.ndim == 2
self._panel_buffer = x
@property
def roi(self):
return self._roi
@roi.setter
def roi(self, vertex_array):
"""
vertex array must be
[[r0, c0], [r1, c1], ..., [rn, cn]]
        and have len >= 3;
        it does NOT need to repeat the start vertex for closure
"""
if vertex_array is not None:
assert len(vertex_array) >= 3
self._roi = vertex_array
@property
def row_dim(self):
return self.rows * self.pixel_size_row
@property
def col_dim(self):
return self.cols * self.pixel_size_col
@property
def row_pixel_vec(self):
return self.pixel_size_row*(0.5*(self.rows-1)-np.arange(self.rows))
@property
def row_edge_vec(self):
return _row_edge_vec(self.rows, self.pixel_size_row)
@property
def col_pixel_vec(self):
return self.pixel_size_col*(np.arange(self.cols)-0.5*(self.cols-1))
@property
def col_edge_vec(self):
return _col_edge_vec(self.cols, self.pixel_size_col)
@property
def corner_ul(self):
return np.r_[-0.5 * self.col_dim, 0.5 * self.row_dim]
@property
def corner_ll(self):
return np.r_[-0.5 * self.col_dim, -0.5 * self.row_dim]
@property
def corner_lr(self):
return np.r_[0.5 * self.col_dim, -0.5 * self.row_dim]
@property
def corner_ur(self):
return np.r_[0.5 * self.col_dim, 0.5 * self.row_dim]
@property
def shape(self):
return (self.rows, self.cols)
@property
def tvec(self):
return self._tvec
@tvec.setter
def tvec(self, x):
x = np.array(x).flatten()
assert len(x) == 3, 'input must have length = 3'
self._tvec = x
@property
def tilt(self):
return self._tilt
@tilt.setter
def tilt(self, x):
assert len(x) == 3, 'input must have length = 3'
self._tilt = np.array(x).squeeze()
@property
def bvec(self):
return self._bvec
@bvec.setter
def bvec(self, x):
x = np.array(x).flatten()
assert len(x) == 3 and sum(x*x) > 1-ct.sqrt_epsf, \
'input must have length = 3 and have unit magnitude'
self._bvec = x
@property
def evec(self):
return self._evec
@evec.setter
def evec(self, x):
x = np.array(x).flatten()
assert len(x) == 3 and sum(x*x) > 1-ct.sqrt_epsf, \
'input must have length = 3 and have unit magnitude'
self._evec = x
@property
def distortion(self):
return self._distortion
@distortion.setter
def distortion(self, x):
        # FIXME: need to reconcile check with new class type!
assert len(x) == 2 and hasattr(x[0], '__call__'), \
'distortion must be a tuple: (<func>, params)'
self._distortion = x
@property
def rmat(self):
return makeRotMatOfExpMap(self.tilt)
@property
def normal(self):
return self.rmat[:, 2]
@property
def beam_position(self):
"""
returns the coordinates of the beam in the cartesian detector
frame {Xd, Yd, Zd}. NaNs if no intersection.
"""
output = np.nan * np.ones(2)
b_dot_n = np.dot(self.bvec, self.normal)
if np.logical_and(
abs(b_dot_n) > ct.sqrt_epsf,
np.sign(b_dot_n) == -1
):
u = np.dot(self.normal, self.tvec) / b_dot_n
p2_l = u*self.bvec
p2_d = np.dot(self.rmat.T, p2_l - self.tvec)
output = p2_d[:2]
return output
# ...memoize???
@property
def pixel_coords(self):
pix_i, pix_j = np.meshgrid(
self.row_pixel_vec, self.col_pixel_vec,
indexing='ij')
return pix_i, pix_j
@property
def pixel_solid_angles(self):
kwargs = {
'rows': self.rows,
'cols': self.cols,
'pixel_size_row': self.pixel_size_row,
'pixel_size_col': self.pixel_size_col,
'rmat': self.rmat,
'tvec': self.tvec,
'max_workers': self.max_workers,
}
return _pixel_solid_angles(**kwargs)
@property
def calibration_parameters(self):
#
# set up calibration parameter list and refinement flags
#
# order for a single detector will be
#
# [tilt, translation, <distortion>]
dparams = []
if self.distortion is not None:
dparams = self.distortion.params
self._calibration_parameters = np.hstack(
[self.tilt, self.tvec, dparams]
)
return self._calibration_parameters
@property
def calibration_flags(self):
return self._calibration_flags
@calibration_flags.setter
def calibration_flags(self, x):
x = np.array(x, dtype=bool).flatten()
if len(x) != len(self._calibration_flags):
raise RuntimeError(
"length of parameter list must be %d; you gave %d"
% (len(self._calibration_flags), len(x))
)
self._calibration_flags = x
# =========================================================================
# METHODS
# =========================================================================
def lorentz_polarization_factor(self, f_hor, f_vert):
"""
        Calculate the Lorentz polarization factor for every pixel.
Parameters
----------
f_hor : float
the fraction of horizontal polarization. for XFELs
this is close to 1.
        f_vert : float
the fraction of vertical polarization, which is ~0 for XFELs.
Raises
------
RuntimeError
DESCRIPTION.
Returns
-------
TYPE
DESCRIPTION.
"""
s = f_hor + f_vert
if np.abs(s - 1) > constants.sqrt_epsf:
msg = ("sum of fraction of "
"horizontal and vertical polarizations "
"must be equal to 1.")
raise RuntimeError(msg)
if f_hor < 0 or f_vert < 0:
msg = ("fraction of polarization in horizontal "
"or vertical directions can't be negative.")
raise RuntimeError(msg)
tth, eta = self.pixel_angles()
args = (tth, eta, f_hor, f_vert)
return _lorentz_polarization_factor(*args)
def config_dict(self, chi=0, tvec=ct.zeros_3,
beam_energy=beam_energy_DFLT, beam_vector=ct.beam_vec,
sat_level=None, panel_buffer=None, style='yaml'):
"""
Return a dictionary of detector parameters.
Optional instrument level parameters. This is a convenience function
to work with the APIs in several functions in xrdutil.
Parameters
----------
chi : float, optional
DESCRIPTION. The default is 0.
tvec : array_like (3,), optional
DESCRIPTION. The default is ct.zeros_3.
beam_energy : float, optional
DESCRIPTION. The default is beam_energy_DFLT.
        beam_vector : array_like (3,), optional
DESCRIPTION. The default is ct.beam_vec.
sat_level : scalar, optional
DESCRIPTION. The default is None.
panel_buffer : scalar, array_like (2,), optional
DESCRIPTION. The default is None.
Returns
-------
config_dict : dict
DESCRIPTION.
"""
assert style.lower() in ['yaml', 'hdf5'], \
"style must be either 'yaml', or 'hdf5'; you gave '%s'" % style
config_dict = {}
# =====================================================================
# DETECTOR PARAMETERS
# =====================================================================
# transform and pixels
#
# assign local vars; listify if necessary
tilt = self.tilt
translation = self.tvec
if style.lower() == 'yaml':
tilt = tilt.tolist()
translation = translation.tolist()
tvec = tvec.tolist()
det_dict = dict(
transform=dict(
tilt=tilt,
translation=translation,
),
pixels=dict(
rows=self.rows,
columns=self.cols,
size=[self.pixel_size_row, self.pixel_size_col],
)
)
# distortion
if self.distortion is not None:
dparams = self.distortion.params
if style.lower() == 'yaml':
dparams = dparams.tolist()
dist_d = dict(
function_name=self.distortion.maptype,
parameters=dparams
)
det_dict['distortion'] = dist_d
# saturation level
if sat_level is None:
sat_level = self.saturation_level
det_dict['saturation_level'] = sat_level
# panel buffer
if panel_buffer is None:
            # could be None, a 2-element list, or a 2-d array (rows, cols)
panel_buffer = copy.deepcopy(self.panel_buffer)
# !!! now we have to do some style-dependent munging of panel_buffer
if isinstance(panel_buffer, np.ndarray):
if panel_buffer.ndim == 1:
assert len(panel_buffer) == 2, \
"length of 1-d buffer must be 2"
# if here is a 2-element array
if style.lower() == 'yaml':
panel_buffer = panel_buffer.tolist()
elif panel_buffer.ndim == 2:
if style.lower() == 'yaml':
# !!! can't practically write array-like buffers to YAML
# so forced to clobber
print("clobbering panel buffer array in yaml-ready output")
panel_buffer = [0., 0.]
else:
raise RuntimeError(
"panel buffer ndim must be 1 or 2; you specified %d"
                    % panel_buffer.ndim
)
elif panel_buffer is None:
# still None on self
if style.lower() == 'hdf5':
# !!! can't write None to hdf5; substitute with zeros
panel_buffer = np.r_[0., 0.]
det_dict['buffer'] = panel_buffer
# =====================================================================
# SAMPLE STAGE PARAMETERS
# =====================================================================
stage_dict = dict(
chi=chi,
translation=tvec
)
# =====================================================================
# BEAM PARAMETERS
# =====================================================================
# !!! make_reflection_patches is still using the vector
# azim, pola = calc_angles_from_beam_vec(beam_vector)
# beam_dict = dict(
# energy=beam_energy,
# vector=dict(
# azimuth=azim,
# polar_angle=pola
# )
# )
beam_dict = dict(
energy=beam_energy,
vector=beam_vector
)
config_dict['detector'] = det_dict
config_dict['oscillation_stage'] = stage_dict
config_dict['beam'] = beam_dict
return config_dict
def pixel_angles(self, origin=ct.zeros_3):
return _pixel_angles(origin, self.pixel_coords, self.distortion,
self.rmat, self.tvec, self.bvec, self.evec,
self.rows, self.cols)
def pixel_tth_gradient(self, origin=ct.zeros_3):
        assert len(origin) == 3, "origin must have 3 elements"
ptth, _ = self.pixel_angles(origin=origin)
return np.linalg.norm(np.stack(np.gradient(ptth)), axis=0)
def pixel_eta_gradient(self, origin=ct.zeros_3):
period = np.r_[0., 2*np.pi]
        assert len(origin) == 3, "origin must have 3 elements"
_, peta = self.pixel_angles(origin=origin)
# !!! handle cyclic nature of eta
rowmap = np.empty_like(peta)
for i in range(rowmap.shape[0]):
rowmap[i, :] = mapAngle(
peta[i, :], peta[i, 0] + period
)
colmap = np.empty_like(peta)
for i in range(colmap.shape[1]):
colmap[:, i] = mapAngle(
peta[:, i], peta[0, i] + period
)
peta_grad_row = np.gradient(rowmap)
peta_grad_col = np.gradient(colmap)
return np.linalg.norm(
np.stack([peta_grad_col[0], peta_grad_row[1]]),
axis=0
)
def cartToPixel(self, xy_det, pixels=False):
"""
Convert vstacked array or list of [x,y] points in the center-based
cartesian frame {Xd, Yd, Zd} to (i, j) edge-based indices
i is the row index, measured from the upper-left corner
j is the col index, measured from the upper-left corner
if pixels=True, then (i,j) are integer pixel indices.
else (i,j) are continuous coords
"""
xy_det = np.atleast_2d(xy_det)
npts = len(xy_det)
tmp_ji = xy_det - np.tile(self.corner_ul, (npts, 1))
i_pix = -tmp_ji[:, 1] / self.pixel_size_row - 0.5
j_pix = tmp_ji[:, 0] / self.pixel_size_col - 0.5
ij_det = np.vstack([i_pix, j_pix]).T
if pixels:
ij_det = np.array(np.round(ij_det), dtype=int)
return ij_det
def pixelToCart(self, ij_det):
"""
        Convert a vstacked array or list of [i, j] pixel indices
        (or UL corner-based points) to (x, y) in the
        cartesian frame {Xd, Yd, Zd}.
"""
ij_det = np.atleast_2d(ij_det)
x = (ij_det[:, 1] + 0.5)*self.pixel_size_col\
+ self.corner_ll[0]
y = (self.rows - ij_det[:, 0] - 0.5)*self.pixel_size_row\
+ self.corner_ll[1]
return np.vstack([x, y]).T
def angularPixelSize(self, xy, rMat_s=None, tVec_s=None, tVec_c=None):
"""
Wraps xrdutil.angularPixelSize
"""
# munge kwargs
if rMat_s is None:
rMat_s = ct.identity_3x3
if tVec_s is None:
tVec_s = ct.zeros_3x1
if tVec_c is None:
tVec_c = ct.zeros_3x1
# call function
ang_ps = xrdutil.angularPixelSize(
xy, (self.pixel_size_row, self.pixel_size_col),
self.rmat, rMat_s,
self.tvec, tVec_s, tVec_c,
distortion=self.distortion,
beamVec=self.bvec, etaVec=self.evec)
return ang_ps
def clip_to_panel(self, xy, buffer_edges=True):
"""
if self.roi is not None, uses it by default
TODO: check if need shape kwarg
TODO: optimize ROI search better than list comprehension below
TODO: panel_buffer can be a 2-d boolean mask, but needs testing
"""
xy = np.atleast_2d(xy)
if self.roi is not None:
ij_crds = self.cartToPixel(xy, pixels=True)
ii, jj = polygon(self.roi[:, 0], self.roi[:, 1],
shape=(self.rows, self.cols))
on_panel_rows = [i in ii for i in ij_crds[:, 0]]
on_panel_cols = [j in jj for j in ij_crds[:, 1]]
on_panel = np.logical_and(on_panel_rows, on_panel_cols)
else:
xlim = 0.5*self.col_dim
ylim = 0.5*self.row_dim
if buffer_edges and self.panel_buffer is not None:
if self.panel_buffer.ndim == 2:
pix = self.cartToPixel(xy, pixels=True)
roff = np.logical_or(pix[:, 0] < 0, pix[:, 0] >= self.rows)
coff = np.logical_or(pix[:, 1] < 0, pix[:, 1] >= self.cols)
idx = np.logical_or(roff, coff)
pix[idx, :] = 0
on_panel = self.panel_buffer[pix[:, 0], pix[:, 1]]
on_panel[idx] = False
else:
xlim -= self.panel_buffer[0]
ylim -= self.panel_buffer[1]
on_panel_x = np.logical_and(
xy[:, 0] >= -xlim, xy[:, 0] <= xlim
)
on_panel_y = np.logical_and(
xy[:, 1] >= -ylim, xy[:, 1] <= ylim
)
on_panel = np.logical_and(on_panel_x, on_panel_y)
elif not buffer_edges or self.panel_buffer is None:
on_panel_x = np.logical_and(
xy[:, 0] >= -xlim, xy[:, 0] <= xlim
)
on_panel_y = np.logical_and(
xy[:, 1] >= -ylim, xy[:, 1] <= ylim
)
on_panel = np.logical_and(on_panel_x, on_panel_y)
return xy[on_panel, :], on_panel
def cart_to_angles(self, xy_data, rmat_s=None, tvec_s=None, tvec_c=None):
"""
TODO: distortion
"""
if rmat_s is None:
rmat_s = ct.identity_3x3
if tvec_s is None:
tvec_s = ct.zeros_3
if tvec_c is None:
tvec_c = ct.zeros_3
angs, g_vec = detectorXYToGvec(
xy_data, self.rmat, rmat_s,
self.tvec, tvec_s, tvec_c,
beamVec=self.bvec, etaVec=self.evec)
tth_eta = np.vstack([angs[0], angs[1]]).T
return tth_eta, g_vec
def angles_to_cart(self, tth_eta,
rmat_s=None, tvec_s=None,
rmat_c=None, tvec_c=None):
"""
TODO: distortion
"""
if rmat_s is None:
rmat_s = ct.identity_3x3
if tvec_s is None:
tvec_s = ct.zeros_3
if rmat_c is None:
rmat_c = ct.identity_3x3
if tvec_c is None:
tvec_c = ct.zeros_3
# !!! warning, this assumes an rmat_s made from chi, ome pair
chi = np.arccos(rmat_s[1, 1])
ome = np.arccos(rmat_s[0, 0])
angs = np.hstack([tth_eta, np.tile(ome, (len(tth_eta), 1))])
xy_det = gvecToDetectorXY(
anglesToGVec(angs, bHat_l=self.bvec, eHat_l=self.evec, chi=chi),
self.rmat, rmat_s, rmat_c,
self.tvec, tvec_s, tvec_c,
beamVec=self.bvec)
return xy_det
def interpolate_nearest(self, xy, img, pad_with_nans=True):
"""
TODO: revisit normalization in here?
"""
is_2d = img.ndim == 2
right_shape = img.shape[0] == self.rows and img.shape[1] == self.cols
assert is_2d and right_shape,\
"input image must be 2-d with shape (%d, %d)"\
% (self.rows, self.cols)
# initialize output with nans
if pad_with_nans:
int_xy = np.nan*np.ones(len(xy))
else:
int_xy = np.zeros(len(xy))
# clip away points too close to or off the edges of the detector
xy_clip, on_panel = self.clip_to_panel(xy, buffer_edges=True)
# get pixel indices of clipped points
i_src = cellIndices(self.row_pixel_vec, xy_clip[:, 1])
j_src = cellIndices(self.col_pixel_vec, xy_clip[:, 0])
# next interpolate across cols
int_vals = img[i_src, j_src]
int_xy[on_panel] = int_vals
return int_xy
def interpolate_bilinear(self, xy, img, pad_with_nans=True):
"""
Interpolate an image array at the specified cartesian points.
Parameters
----------
xy : array_like, (n, 2)
Array of cartesian coordinates in the image plane at which
to evaluate intensity.
img : array_like
2-dimensional image array.
pad_with_nans : bool, optional
Toggle for assigning NaN to points that fall off the detector.
The default is True.
Returns
-------
int_xy : array_like, (n,)
The array of interpolated intensities at each of the n input
coordinates.
Notes
-----
TODO: revisit normalization in here?
"""
is_2d = img.ndim == 2
right_shape = img.shape[0] == self.rows and img.shape[1] == self.cols
assert is_2d and right_shape,\
"input image must be 2-d with shape (%d, %d)"\
% (self.rows, self.cols)
# initialize output with nans
if pad_with_nans:
int_xy = np.nan*np.ones(len(xy))
else:
int_xy = np.zeros(len(xy))
# clip away points too close to or off the edges of the detector
xy_clip, on_panel = self.clip_to_panel(xy, buffer_edges=True)
# grab fractional pixel indices of clipped points
ij_frac = self.cartToPixel(xy_clip)
# get floors/ceils from array of pixel _centers_
# and fix indices running off the pixel centers
# !!! notice we already clipped points to the panel!
i_floor = cellIndices(self.row_pixel_vec, xy_clip[:, 1])
i_floor_img = _fix_indices(i_floor, 0, self.rows - 1)
j_floor = cellIndices(self.col_pixel_vec, xy_clip[:, 0])
j_floor_img = _fix_indices(j_floor, 0, self.cols - 1)
# ceilings from floors
i_ceil = i_floor + 1
i_ceil_img = _fix_indices(i_ceil, 0, self.rows - 1)
j_ceil = j_floor + 1
j_ceil_img = _fix_indices(j_ceil, 0, self.cols - 1)
# first interpolate at top/bottom rows
row_floor_int = \
(j_ceil - ij_frac[:, 1])*img[i_floor_img, j_floor_img] \
+ (ij_frac[:, 1] - j_floor)*img[i_floor_img, j_ceil_img]
row_ceil_int = \
(j_ceil - ij_frac[:, 1])*img[i_ceil_img, j_floor_img] \
+ (ij_frac[:, 1] - j_floor)*img[i_ceil_img, j_ceil_img]
# next interpolate across cols
int_vals = \
(i_ceil - ij_frac[:, 0])*row_floor_int \
+ (ij_frac[:, 0] - i_floor)*row_ceil_int
int_xy[on_panel] = int_vals
return int_xy
def make_powder_rings(
self, pd, merge_hkls=False, delta_tth=None,
delta_eta=10., eta_period=None, eta_list=None,
rmat_s=ct.identity_3x3, tvec_s=ct.zeros_3,
tvec_c=ct.zeros_3, full_output=False):
"""
Generate points on Debye_Scherrer rings over the detector.
        !!! it is assuming that rmat_s is built from (chi, ome), as is the
        case for HEDM!
Parameters
----------
pd : TYPE
DESCRIPTION.
merge_hkls : TYPE, optional
DESCRIPTION. The default is False.
delta_tth : TYPE, optional
DESCRIPTION. The default is None.
delta_eta : TYPE, optional
DESCRIPTION. The default is 10..
eta_period : TYPE, optional
DESCRIPTION. The default is None.
eta_list : TYPE, optional
DESCRIPTION. The default is None.
rmat_s : TYPE, optional
DESCRIPTION. The default is ct.identity_3x3.
tvec_s : TYPE, optional
DESCRIPTION. The default is ct.zeros_3.
tvec_c : TYPE, optional
DESCRIPTION. The default is ct.zeros_3.
full_output : TYPE, optional
DESCRIPTION. The default is False.
Raises
------
RuntimeError
DESCRIPTION.
Returns
-------
TYPE
DESCRIPTION.
"""
# in case you want to give it tth angles directly
if hasattr(pd, '__len__'):
tth = np.array(pd).flatten()
if delta_tth is None:
raise RuntimeError(
"If supplying a 2theta list as first arg, "
+ "must supply a delta_tth")
sector_vertices = np.tile(
0.5*np.radians([-delta_tth, -delta_eta,
-delta_tth, delta_eta,
delta_tth, delta_eta,
delta_tth, -delta_eta,
0.0, 0.0]), (len(tth), 1)
)
# Convert to radians as is done below
del_eta = np.radians(delta_eta)
else:
# Okay, we have a PlaneData object
try:
pd = PlaneData.makeNew(pd) # make a copy to munge
except(TypeError):
# !!! have some other object here, likely a dummy plane data
# object of some sort...
pass
if delta_tth is not None:
pd.tThWidth = np.radians(delta_tth)
else:
delta_tth = np.degrees(pd.tThWidth)
# conversions, meh...
del_eta = np.radians(delta_eta)
# do merging if asked
if merge_hkls:
_, tth_ranges = pd.getMergedRanges(cullDupl=True)
tth = np.array([0.5*sum(i) for i in tth_ranges])
else:
tth_ranges = pd.getTThRanges()
tth = pd.getTTh()
tth_pm = tth_ranges - np.tile(tth, (2, 1)).T
sector_vertices = np.vstack(
[[i[0], -del_eta,
i[0], del_eta,
i[1], del_eta,
i[1], -del_eta,
0.0, 0.0]
for i in tth_pm])
# for generating rings, make eta vector in correct period
if eta_period is None:
eta_period = (-np.pi, np.pi)
if eta_list is None:
neta = int(360./float(delta_eta))
# this is the vector of ETA EDGES
eta_edges = mapAngle(
np.radians(
delta_eta*np.linspace(0., neta, num=neta + 1)
) + eta_period[0],
eta_period
)
# get eta bin centers from edges
"""
# !!! this way is probably overkill, since we have delta eta
eta_centers = np.average(
                np.vstack([eta_edges[:-1], eta_edges[1:]]),
axis=0)
"""
# !!! should be safe as eta_edges are monotonic
eta_centers = eta_edges[:-1] + 0.5*del_eta
else:
eta_centers = np.radians(eta_list).flatten()
neta = len(eta_centers)
eta_edges = (
np.tile(eta_centers, (2, 1)) +
np.tile(0.5*del_eta*np.r_[-1, 1], (neta, 1)).T
).T.flatten()
# get chi and ome from rmat_s
# ??? not needed chi = np.arctan2(rmat_s[2, 1], rmat_s[1, 1])
ome = np.arctan2(rmat_s[0, 2], rmat_s[0, 0])
# make list of angle tuples
angs = [
np.vstack(
[i*np.ones(neta), eta_centers, ome*np.ones(neta)]
) for i in tth
]
# need xy coords and pixel sizes
valid_ang = []
valid_xy = []
map_indices = []
npp = 5 # [ll, ul, ur, lr, center]
for i_ring in range(len(angs)):
# expand angles to patch vertices
these_angs = angs[i_ring].T
patch_vertices = (
np.tile(these_angs[:, :2], (1, npp))
+ np.tile(sector_vertices[i_ring], (neta, 1))
).reshape(npp*neta, 2)
# duplicate ome array
ome_dupl = np.tile(
these_angs[:, 2], (npp, 1)
).T.reshape(npp*neta, 1)
# find vertices that all fall on the panel
gVec_ring_l = anglesToGVec(
np.hstack([patch_vertices, ome_dupl]),
bHat_l=self.bvec)
all_xy = gvecToDetectorXY(
gVec_ring_l,
self.rmat, rmat_s, ct.identity_3x3,
self.tvec, tvec_s, tvec_c,
beamVec=self.bvec)
if self.distortion is not None:
all_xy = self.distortion.apply_inverse(all_xy)
_, on_panel = self.clip_to_panel(all_xy)
# all vertices must be on...
patch_is_on = np.all(on_panel.reshape(neta, npp), axis=1)
patch_xys = all_xy.reshape(neta, 5, 2)[patch_is_on]
# form output arrays
valid_ang.append(these_angs[patch_is_on, :2])
valid_xy.append(patch_xys[:, -1, :].squeeze())
map_indices.append(patch_is_on)
pass
# ??? is this option necessary?
if full_output:
return valid_ang, valid_xy, map_indices, eta_edges
else:
return valid_ang, valid_xy
def map_to_plane(self, pts, rmat, tvec):
"""
        Map detector points to the specified plane.
Parameters
----------
pts : TYPE
DESCRIPTION.
rmat : TYPE
DESCRIPTION.
tvec : TYPE
DESCRIPTION.
Returns
-------
TYPE
DESCRIPTION.
Notes
-----
by convention:
n * (u*pts_l - tvec) = 0
[pts]_l = rmat*[pts]_m + tvec
"""
# arg munging
pts = np.atleast_2d(pts)
npts = len(pts)
# map plane normal & translation vector, LAB FRAME
nvec_map_lab = rmat[:, 2].reshape(3, 1)
tvec_map_lab = np.atleast_2d(tvec).reshape(3, 1)
tvec_d_lab = np.atleast_2d(self.tvec).reshape(3, 1)
# put pts as 3-d in panel CS and transform to 3-d lab coords
pts_det = np.hstack([pts, np.zeros((npts, 1))])
pts_lab = np.dot(self.rmat, pts_det.T) + tvec_d_lab
# scaling along pts vectors to hit map plane
u = np.dot(nvec_map_lab.T, tvec_map_lab) \
/ np.dot(nvec_map_lab.T, pts_lab)
# pts on map plane, in LAB FRAME
pts_map_lab = np.tile(u, (3, 1)) * pts_lab
return np.dot(rmat.T, pts_map_lab - tvec_map_lab)[:2, :].T
def simulate_rotation_series(self, plane_data, grain_param_list,
eta_ranges=[(-np.pi, np.pi), ],
ome_ranges=[(-np.pi, np.pi), ],
ome_period=(-np.pi, np.pi),
chi=0., tVec_s=ct.zeros_3,
wavelength=None):
"""
Simulate a monochromatic rotation series for a list of grains.
Parameters
----------
plane_data : TYPE
DESCRIPTION.
grain_param_list : TYPE
DESCRIPTION.
eta_ranges : TYPE, optional
DESCRIPTION. The default is [(-np.pi, np.pi), ].
ome_ranges : TYPE, optional
DESCRIPTION. The default is [(-np.pi, np.pi), ].
ome_period : TYPE, optional
DESCRIPTION. The default is (-np.pi, np.pi).
chi : TYPE, optional
DESCRIPTION. The default is 0..
tVec_s : TYPE, optional
DESCRIPTION. The default is ct.zeros_3.
wavelength : TYPE, optional
DESCRIPTION. The default is None.
Returns
-------
valid_ids : TYPE
DESCRIPTION.
valid_hkls : TYPE
DESCRIPTION.
valid_angs : TYPE
DESCRIPTION.
valid_xys : TYPE
DESCRIPTION.
ang_pixel_size : TYPE
DESCRIPTION.
"""
# grab B-matrix from plane data
bMat = plane_data.latVecOps['B']
# reconcile wavelength
# * added sanity check on exclusions here; possible to
# * make some reflections invalid (NaN)
if wavelength is None:
wavelength = plane_data.wavelength
else:
if plane_data.wavelength != wavelength:
plane_data.wavelength = ct.keVToAngstrom(wavelength)
assert not np.any(np.isnan(plane_data.getTTh())),\
"plane data exclusions incompatible with wavelength"
# vstacked G-vector id, h, k, l
full_hkls = xrdutil._fetch_hkls_from_planedata(plane_data)
""" LOOP OVER GRAINS """
valid_ids = []
valid_hkls = []
valid_angs = []
valid_xys = []
ang_pixel_size = []
for gparm in grain_param_list:
# make useful parameters
rMat_c = makeRotMatOfExpMap(gparm[:3])
tVec_c = gparm[3:6]
vInv_s = gparm[6:]
# All possible bragg conditions as vstacked [tth, eta, ome]
# for each omega solution
angList = np.vstack(
oscillAnglesOfHKLs(
full_hkls[:, 1:], chi,
rMat_c, bMat, wavelength,
vInv=vInv_s,
)
)
# filter by eta and omega ranges
# ??? get eta range from detector?
allAngs, allHKLs = xrdutil._filter_hkls_eta_ome(
full_hkls, angList, eta_ranges, ome_ranges
)
allAngs[:, 2] = mapAngle(allAngs[:, 2], ome_period)
# find points that fall on the panel
det_xy, rMat_s, on_plane = xrdutil._project_on_detector_plane(
allAngs,
self.rmat, rMat_c, chi,
self.tvec, tVec_c, tVec_s,
self.distortion)
xys_p, on_panel = self.clip_to_panel(det_xy)
valid_xys.append(xys_p)
# filter angs and hkls that are on the detector plane
# !!! check this -- seems unnecessary but the results of
# _project_on_detector_plane() can have len < the input.
# the output of _project_on_detector_plane has been modified to
# hand back the index array to remedy this JVB 2020-05-27
filtered_angs = np.atleast_2d(allAngs[on_plane, :])
filtered_hkls = np.atleast_2d(allHKLs[on_plane, :])
# grab hkls and gvec ids for this panel
valid_hkls.append(filtered_hkls[on_panel, 1:])
valid_ids.append(filtered_hkls[on_panel, 0])
# reflection angles (voxel centers) and pixel size in (tth, eta)
valid_angs.append(filtered_angs[on_panel, :])
ang_pixel_size.append(self.angularPixelSize(xys_p))
return valid_ids, valid_hkls, valid_angs, valid_xys, ang_pixel_size
def simulate_laue_pattern(self, crystal_data,
minEnergy=5., maxEnergy=35.,
rmat_s=None, tvec_s=None,
grain_params=None,
beam_vec=None):
"""
"""
if isinstance(crystal_data, PlaneData):
plane_data = crystal_data
# grab the expanded list of hkls from plane_data
hkls = np.hstack(plane_data.getSymHKLs())
# and the unit plane normals (G-vectors) in CRYSTAL FRAME
gvec_c = np.dot(plane_data.latVecOps['B'], hkls)
elif len(crystal_data) == 2:
# !!! should clean this up
hkls = np.array(crystal_data[0])
bmat = crystal_data[1]
gvec_c = np.dot(bmat, hkls)
else:
            raise RuntimeError('argument list not understood')
nhkls_tot = hkls.shape[1]
# parse energy ranges
# TODO: allow for spectrum parsing
multipleEnergyRanges = False
if hasattr(maxEnergy, '__len__'):
assert len(maxEnergy) == len(minEnergy), \
'energy cutoff ranges must have the same length'
multipleEnergyRanges = True
lmin = []
lmax = []
for i in range(len(maxEnergy)):
lmin.append(ct.keVToAngstrom(maxEnergy[i]))
lmax.append(ct.keVToAngstrom(minEnergy[i]))
else:
lmin = ct.keVToAngstrom(maxEnergy)
lmax = ct.keVToAngstrom(minEnergy)
# parse grain parameters kwarg
if grain_params is None:
grain_params = np.atleast_2d(
np.hstack([np.zeros(6), ct.identity_6x1])
)
n_grains = len(grain_params)
# sample rotation
if rmat_s is None:
rmat_s = ct.identity_3x3
# dummy translation vector... make input
if tvec_s is None:
tvec_s = ct.zeros_3
# beam vector
if beam_vec is None:
beam_vec = ct.beam_vec
# =========================================================================
# LOOP OVER GRAINS
# =========================================================================
# pre-allocate output arrays
xy_det = np.nan*np.ones((n_grains, nhkls_tot, 2))
hkls_in = np.nan*np.ones((n_grains, 3, nhkls_tot))
angles = np.nan*np.ones((n_grains, nhkls_tot, 2))
dspacing = np.nan*np.ones((n_grains, nhkls_tot))
energy = np.nan*np.ones((n_grains, nhkls_tot))
for iG, gp in enumerate(grain_params):
rmat_c = makeRotMatOfExpMap(gp[:3])
tvec_c = gp[3:6].reshape(3, 1)
vInv_s = mutil.vecMVToSymm(gp[6:].reshape(6, 1))
# stretch them: V^(-1) * R * Gc
gvec_s_str = np.dot(vInv_s, np.dot(rmat_c, gvec_c))
ghat_c_str = mutil.unitVector(np.dot(rmat_c.T, gvec_s_str))
# project
dpts = gvecToDetectorXY(ghat_c_str.T,
self.rmat, rmat_s, rmat_c,
self.tvec, tvec_s, tvec_c,
beamVec=beam_vec)
# check intersections with detector plane
canIntersect = ~np.isnan(dpts[:, 0])
npts_in = sum(canIntersect)
if np.any(canIntersect):
dpts = dpts[canIntersect, :].reshape(npts_in, 2)
dhkl = hkls[:, canIntersect].reshape(3, npts_in)
# back to angles
tth_eta, gvec_l = detectorXYToGvec(
dpts,
self.rmat, rmat_s,
self.tvec, tvec_s, tvec_c,
beamVec=beam_vec)
tth_eta = np.vstack(tth_eta).T
# warp measured points
if self.distortion is not None:
dpts = self.distortion.apply_inverse(dpts)
# plane spacings and energies
dsp = 1. / rowNorm(gvec_s_str[:, canIntersect].T)
wlen = 2*dsp*np.sin(0.5*tth_eta[:, 0])
# clip to detector panel
_, on_panel = self.clip_to_panel(dpts, buffer_edges=True)
if multipleEnergyRanges:
validEnergy = np.zeros(len(wlen), dtype=bool)
for i in range(len(lmin)):
in_energy_range = np.logical_and(
wlen >= lmin[i],
wlen <= lmax[i])
validEnergy = validEnergy | in_energy_range
pass
else:
validEnergy = np.logical_and(wlen >= lmin, wlen <= lmax)
pass
# index for valid reflections
keepers = np.where(np.logical_and(on_panel, validEnergy))[0]
# assign output arrays
xy_det[iG][keepers, :] = dpts[keepers, :]
hkls_in[iG][:, keepers] = dhkl[:, keepers]
angles[iG][keepers, :] = tth_eta[keepers, :]
dspacing[iG, keepers] = dsp[keepers]
energy[iG, keepers] = ct.keVToAngstrom(wlen[keepers])
pass # close conditional on valids
pass # close loop on grains
return xy_det, hkls_in, angles, dspacing, energy
# =============================================================================
# UTILITIES
# =============================================================================
class PatchDataWriter(object):
"""Class for dumping Bragg reflection data."""
def __init__(self, filename):
self._delim = ' '
header_items = (
'# ID', 'PID',
'H', 'K', 'L',
'sum(int)', 'max(int)',
'pred tth', 'pred eta', 'pred ome',
'meas tth', 'meas eta', 'meas ome',
'pred X', 'pred Y',
'meas X', 'meas Y'
)
self._header = self._delim.join([
self._delim.join(np.tile('{:<6}', 5)).format(*header_items[:5]),
self._delim.join(np.tile('{:<12}', 2)).format(*header_items[5:7]),
self._delim.join(np.tile('{:<23}', 10)).format(*header_items[7:17])
])
if isinstance(filename, IOBase):
self.fid = filename
else:
self.fid = open(filename, 'w')
print(self._header, file=self.fid)
def __del__(self):
self.close()
def close(self):
self.fid.close()
def dump_patch(self, peak_id, hkl_id,
hkl, spot_int, max_int,
pangs, mangs, pxy, mxy):
"""
!!! maybe need to check that last four inputs are arrays
"""
if mangs is None:
spot_int = np.nan
max_int = np.nan
mangs = np.nan*np.ones(3)
mxy = np.nan*np.ones(2)
res = [int(peak_id), int(hkl_id)] \
+ np.array(hkl, dtype=int).tolist() \
+ [spot_int, max_int] \
+ pangs.tolist() \
+ mangs.tolist() \
+ pxy.tolist() \
+ mxy.tolist()
output_str = self._delim.join(
[self._delim.join(np.tile('{:<6d}', 5)).format(*res[:5]),
self._delim.join(np.tile('{:<12e}', 2)).format(*res[5:7]),
self._delim.join(np.tile('{:<23.16e}', 10)).format(*res[7:])]
)
print(output_str, file=self.fid)
return output_str
class GrainDataWriter(object):
"""Class for dumping grain data."""
def __init__(self, filename=None, array=None):
"""Writes to either file or np array
Array must be initialized with number of rows to be written.
"""
if filename is None and array is None:
raise RuntimeError(
'GrainDataWriter must be specified with filename or array')
self.array = None
self.fid = None
# array supersedes filename
if array is not None:
assert array.shape[1] == 21, \
                f'grain data table must have 21 columns, not {array.shape[1]}'
self.array = array
self._array_row = 0
return
self._delim = ' '
header_items = (
'# grain ID', 'completeness', 'chi^2',
'exp_map_c[0]', 'exp_map_c[1]', 'exp_map_c[2]',
't_vec_c[0]', 't_vec_c[1]', 't_vec_c[2]',
'inv(V_s)[0,0]', 'inv(V_s)[1,1]', 'inv(V_s)[2,2]',
'inv(V_s)[1,2]*sqrt(2)',
'inv(V_s)[0,2]*sqrt(2)',
'inv(V_s)[0,1]*sqrt(2)',
'ln(V_s)[0,0]', 'ln(V_s)[1,1]', 'ln(V_s)[2,2]',
'ln(V_s)[1,2]', 'ln(V_s)[0,2]', 'ln(V_s)[0,1]'
)
self._header = self._delim.join(
[self._delim.join(
np.tile('{:<12}', 3)
).format(*header_items[:3]),
self._delim.join(
np.tile('{:<23}', len(header_items) - 3)
).format(*header_items[3:])]
)
if isinstance(filename, IOBase):
self.fid = filename
else:
self.fid = open(filename, 'w')
print(self._header, file=self.fid)
def __del__(self):
self.close()
def close(self):
if self.fid is not None:
self.fid.close()
def dump_grain(self, grain_id, completeness, chisq,
grain_params):
assert len(grain_params) == 12, \
"len(grain_params) must be 12, not %d" % len(grain_params)
# extract strain
emat = logm(np.linalg.inv(mutil.vecMVToSymm(grain_params[6:])))
evec = mutil.symmToVecMV(emat, scale=False)
res = [int(grain_id), completeness, chisq] \
+ grain_params.tolist() \
+ evec.tolist()
if self.array is not None:
row = self._array_row
assert row < self.array.shape[0], \
f'invalid row {row} in array table'
self.array[row] = res
self._array_row += 1
return res
# (else) format and write to file
output_str = self._delim.join(
[self._delim.join(
['{:<12d}', '{:<12f}', '{:<12e}']
).format(*res[:3]),
self._delim.join(
np.tile('{:<23.16e}', len(res) - 3)
).format(*res[3:])]
)
print(output_str, file=self.fid)
return output_str
class GrainDataWriter_h5(object):
"""Class for dumping grain results to an HDF5 archive.
TODO: add material spec
"""
def __init__(self, filename, instr_cfg, grain_params, use_attr=False):
if isinstance(filename, h5py.File):
self.fid = filename
else:
self.fid = h5py.File(filename + ".hdf5", "w")
icfg = dict(instr_cfg)
# add instrument groups and attributes
self.instr_grp = self.fid.create_group('instrument')
unwrap_dict_to_h5(self.instr_grp, icfg, asattr=use_attr)
# add grain group
self.grain_grp = self.fid.create_group('grain')
rmat_c = makeRotMatOfExpMap(grain_params[:3])
tvec_c = np.array(grain_params[3:6]).flatten()
vinv_s = np.array(grain_params[6:]).flatten()
vmat_s = np.linalg.inv(mutil.vecMVToSymm(vinv_s))
if use_attr: # attribute version
self.grain_grp.attrs.create('rmat_c', rmat_c)
self.grain_grp.attrs.create('tvec_c', tvec_c)
self.grain_grp.attrs.create('inv(V)_s', vinv_s)
self.grain_grp.attrs.create('vmat_s', vmat_s)
else: # dataset version
self.grain_grp.create_dataset('rmat_c', data=rmat_c)
self.grain_grp.create_dataset('tvec_c', data=tvec_c)
self.grain_grp.create_dataset('inv(V)_s', data=vinv_s)
self.grain_grp.create_dataset('vmat_s', data=vmat_s)
data_key = 'reflection_data'
self.data_grp = self.fid.create_group(data_key)
for det_key in self.instr_grp['detectors'].keys():
self.data_grp.create_group(det_key)
# FIXME: throws exception when called after close method
# def __del__(self):
# self.close()
def close(self):
self.fid.close()
def dump_patch(self, panel_id,
i_refl, peak_id, hkl_id, hkl,
tth_edges, eta_edges, ome_centers,
xy_centers, ijs, frame_indices,
spot_data, pangs, pxy, mangs, mxy, gzip=1):
"""
to be called inside loop over patches
default GZIP level for data arrays is 1
"""
fi = np.array(frame_indices, dtype=int)
panel_grp = self.data_grp[panel_id]
spot_grp = panel_grp.create_group("spot_%05d" % i_refl)
spot_grp.attrs.create('peak_id', int(peak_id))
spot_grp.attrs.create('hkl_id', int(hkl_id))
spot_grp.attrs.create('hkl', np.array(hkl, dtype=int))
spot_grp.attrs.create('predicted_angles', pangs)
spot_grp.attrs.create('predicted_xy', pxy)
if mangs is None:
mangs = np.nan*np.ones(3)
spot_grp.attrs.create('measured_angles', mangs)
if mxy is None:
            mxy = np.nan*np.ones(2)
spot_grp.attrs.create('measured_xy', mxy)
# get centers crds from edge arrays
# FIXME: export full coordinate arrays, or just center vectors???
#
# ome_crd, eta_crd, tth_crd = np.meshgrid(
# ome_centers,
# centers_of_edge_vec(eta_edges),
# centers_of_edge_vec(tth_edges),
# indexing='ij')
#
# ome_dim, eta_dim, tth_dim = spot_data.shape
# !!! for now just exporting center vectors for spot_data
tth_crd = centers_of_edge_vec(tth_edges)
eta_crd = centers_of_edge_vec(eta_edges)
shuffle_data = True # reduces size by 20%
spot_grp.create_dataset('tth_crd', data=tth_crd,
compression="gzip", compression_opts=gzip,
shuffle=shuffle_data)
spot_grp.create_dataset('eta_crd', data=eta_crd,
compression="gzip", compression_opts=gzip,
shuffle=shuffle_data)
spot_grp.create_dataset('ome_crd', data=ome_centers,
compression="gzip", compression_opts=gzip,
shuffle=shuffle_data)
spot_grp.create_dataset('xy_centers', data=xy_centers,
compression="gzip", compression_opts=gzip,
shuffle=shuffle_data)
spot_grp.create_dataset('ij_centers', data=ijs,
compression="gzip", compression_opts=gzip,
shuffle=shuffle_data)
spot_grp.create_dataset('frame_indices', data=fi,
compression="gzip", compression_opts=gzip,
shuffle=shuffle_data)
spot_grp.create_dataset('intensities', data=spot_data,
compression="gzip", compression_opts=gzip,
shuffle=shuffle_data)
return
def unwrap_dict_to_h5(grp, d, asattr=False):
"""
Unwraps a dictionary to an HDF5 file of the same structure.
Parameters
----------
grp : HDF5 group object
The HDF5 group to recursively unwrap the dict into.
d : dict
Input dict (of dicts).
asattr : bool, optional
Flag to write end member in dictionary tree to an attribute. If False,
        it writes the object to a dataset using numpy. The default is False.
Returns
-------
None.
"""
while len(d) > 0:
key, item = d.popitem()
if isinstance(item, dict):
subgrp = grp.create_group(key)
unwrap_dict_to_h5(subgrp, item, asattr=asattr)
else:
if asattr:
grp.attrs.create(key, item)
else:
try:
grp.create_dataset(key, data=np.atleast_1d(item))
except(TypeError):
# probably a string badness
grp.create_dataset(key, data=item)
def unwrap_h5_to_dict(f, d):
"""
Unwraps a simple HDF5 file to a dictionary of the same structure.
Parameters
----------
f : HDF5 file (mode r)
The input HDF5 file object.
d : dict
dictionary object to update.
Returns
-------
None.
Notes
-----
As written, ignores attributes and uses numpy to cast HDF5 datasets to
dict entries. Checks for 'O' type arrays and casts to strings; also
converts single-element arrays to scalars.
"""
for key, val in f.items():
try:
d[key] = {}
unwrap_h5_to_dict(val, d[key])
except(AttributeError):
# reached a dataset
if np.dtype(val) == 'O':
d[key] = h5py_read_string(val)
else:
tmp = np.array(val)
if tmp.ndim == 1 and len(tmp) == 1:
d[key] = tmp[0]
else:
d[key] = tmp
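# --- Illustrative sketch (not part of the original module): a minimal round
# trip through unwrap_dict_to_h5/unwrap_h5_to_dict. The file name and the
# contents of `src` are made-up example values; h5py and numpy availability is
# assumed, as in the rest of this module. Note that unwrap_dict_to_h5 consumes
# the dict it is given (via popitem), so pass a copy if you still need it.
def _demo_unwrap_roundtrip(demo_path='unwrap_demo.hdf5'):
    src = {'scan': {'nframes': 120, 'exposure': 0.25}, 'omega_step': 0.25}
    with h5py.File(demo_path, 'w') as fid:
        # nested dicts become nested groups; leaves become 1-element datasets
        unwrap_dict_to_h5(fid, dict(src), asattr=False)
    recovered = {}
    with h5py.File(demo_path, 'r') as fid:
        # single-element datasets come back as scalars
        unwrap_h5_to_dict(fid, recovered)
    return recovered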
class GenerateEtaOmeMaps(object):
"""
eta-ome map class derived from new image_series and YAML config
...for now...
must provide:
self.dataStore
self.planeData
self.iHKLList
self.etaEdges # IN RADIANS
self.omeEdges # IN RADIANS
self.etas # IN RADIANS
self.omegas # IN RADIANS
"""
def __init__(self, image_series_dict, instrument, plane_data,
active_hkls=None, eta_step=0.25, threshold=None,
ome_period=(0, 360)):
"""
image_series must be OmegaImageSeries class
instrument_params must be a dict (loaded from yaml spec)
active_hkls must be a list (required for now)
"""
self._planeData = plane_data
# ???: change name of iHKLList?
# ???: can we change the behavior of iHKLList?
if active_hkls is None:
n_rings = len(plane_data.getTTh())
self._iHKLList = range(n_rings)
else:
self._iHKLList = active_hkls
n_rings = len(active_hkls)
# ???: need to pass a threshold?
eta_mapping, etas = instrument.extract_polar_maps(
plane_data, image_series_dict,
active_hkls=active_hkls, threshold=threshold,
tth_tol=None, eta_tol=eta_step)
# grab a det key
# WARNING: this process assumes that the imageseries for all panels
# have the same length and omegas
det_key = list(eta_mapping.keys())[0]
data_store = []
for i_ring in range(n_rings):
full_map = np.zeros_like(eta_mapping[det_key][i_ring])
nan_mask_full = np.zeros(
(len(eta_mapping), full_map.shape[0], full_map.shape[1])
)
i_p = 0
for det_key, eta_map in eta_mapping.items():
nan_mask = ~np.isnan(eta_map[i_ring])
nan_mask_full[i_p] = nan_mask
full_map[nan_mask] += eta_map[i_ring][nan_mask]
i_p += 1
re_nan_these = np.sum(nan_mask_full, axis=0) == 0
full_map[re_nan_these] = np.nan
data_store.append(full_map)
self._dataStore = data_store
# handle omegas
omegas_array = image_series_dict[det_key].metadata['omega']
self._omegas = mapAngle(
np.radians(np.average(omegas_array, axis=1)),
np.radians(ome_period)
)
self._omeEdges = mapAngle(
np.radians(np.r_[omegas_array[:, 0], omegas_array[-1, 1]]),
np.radians(ome_period)
)
# !!! must avoid the case where omeEdges[0] = omeEdges[-1] for the
# indexer to work properly
if abs(self._omeEdges[0] - self._omeEdges[-1]) <= ct.sqrt_epsf:
# !!! SIGNED delta ome
del_ome = np.radians(omegas_array[0, 1] - omegas_array[0, 0])
self._omeEdges[-1] = self._omeEdges[-2] + del_ome
# handle etas
        # WARNING: unlike the omegas in imageseries metadata,
# these are in RADIANS and represent bin centers
self._etaEdges = etas
self._etas = self._etaEdges[:-1] + 0.5*np.radians(eta_step)
@property
def dataStore(self):
return self._dataStore
@property
def planeData(self):
return self._planeData
@property
def iHKLList(self):
return np.atleast_1d(self._iHKLList).flatten()
@property
def etaEdges(self):
return self._etaEdges
@property
def omeEdges(self):
return self._omeEdges
@property
def etas(self):
return self._etas
@property
def omegas(self):
return self._omegas
def save(self, filename):
xrdutil.EtaOmeMaps.save_eta_ome_maps(self, filename)
pass # end of class: GenerateEtaOmeMaps
def _row_edge_vec(rows, pixel_size_row):
return pixel_size_row*(0.5*rows-np.arange(rows+1))
def _col_edge_vec(cols, pixel_size_col):
return pixel_size_col*(np.arange(cols+1)-0.5*cols)
def _generate_pixel_solid_angles(start_stop, rows, cols, pixel_size_row,
pixel_size_col, rmat, tvec):
start, stop = start_stop
row_edge_vec = _row_edge_vec(rows, pixel_size_row)
col_edge_vec = _col_edge_vec(cols, pixel_size_col)
nvtx = len(row_edge_vec) * len(col_edge_vec)
# pixel vertex coords
pvy, pvx = np.meshgrid(row_edge_vec, col_edge_vec, indexing='ij')
# add Z_d coord and transform to lab frame
pcrd_array_full = np.dot(
np.vstack([pvx.flatten(), pvy.flatten(), np.zeros(nvtx)]).T,
rmat.T
) + tvec
conn = cellConnectivity(rows, cols)
ret = np.empty(len(range(start, stop)), dtype=float)
for i, ipix in enumerate(range(start, stop)):
pix_conn = conn[ipix]
vtx_list = pcrd_array_full[pix_conn, :]
ret[i] = (_solid_angle_of_triangle(vtx_list[[0, 1, 2], :]) +
_solid_angle_of_triangle(vtx_list[[2, 3, 0], :]))
return ret
@memoize
def _pixel_angles(origin, pixel_coords, distortion, rmat, tvec, bvec, evec,
rows, cols):
assert len(origin) == 3, "origin must have 3 elements"
pix_i, pix_j = pixel_coords
xy = np.ascontiguousarray(
np.vstack([
pix_j.flatten(), pix_i.flatten()
]).T
)
if distortion is not None:
xy = distortion.apply(xy)
angs, g_vec = detectorXYToGvec(
xy, rmat, ct.identity_3x3,
tvec, ct.zeros_3, origin,
beamVec=bvec, etaVec=evec)
tth = angs[0].reshape(rows, cols)
eta = angs[1].reshape(rows, cols)
return tth, eta
@memoize
def _pixel_solid_angles(rows, cols, pixel_size_row, pixel_size_col,
rmat, tvec, max_workers):
# connectivity array for pixels
conn = cellConnectivity(rows, cols)
# result
solid_angs = np.empty(len(conn), dtype=float)
# Distribute tasks to each process
tasks = distribute_tasks(len(conn), max_workers)
kwargs = {
'rows': rows,
'cols': cols,
'pixel_size_row': pixel_size_row,
'pixel_size_col': pixel_size_col,
'rmat': rmat,
'tvec': tvec,
}
func = partial(_generate_pixel_solid_angles, **kwargs)
with ProcessPoolExecutor(max_workers=max_workers) as executor:
results = executor.map(func, tasks)
# Concatenate all the results together
solid_angs[:] = np.concatenate(list(results))
solid_angs = solid_angs.reshape(rows, cols)
mi = solid_angs.min()
if mi > 0.:
solid_angs = solid_angs/mi
return solid_angs
@memoize
def _lorentz_polarization_factor(tth, eta, f_hor, f_vert):
"""
06/14/2021 SS adding lorentz polarization factor computation
    to the detector so that it can be compensated for in the
intensity correction
parameters: tth two theta of every pixel in radians
eta azimuthal angle of every pixel
f_hor fraction of horizontal polarization
(~1 for XFELs)
f_vert fraction of vertical polarization
(~0 for XFELs)
notice f_hor + f_vert = 1
"""
theta = 0.5*tth
cth = np.cos(theta)
sth2 = np.sin(theta)**2
ctth2 = np.cos(tth)**2
seta2 = np.sin(eta)**2
ceta2 = np.cos(eta)**2
L = 1./(cth*sth2)
P = f_hor*(seta2 + ceta2*ctth2) + f_vert*(ceta2 + seta2*ctth2)
return L*P
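# --- Illustrative sketch (not part of the original module): evaluating the
# Lorentz-polarization correction for a single pixel. The angles are arbitrary
# example values; f_hor=1, f_vert=0 corresponds to a fully horizontally
# polarized beam (the XFEL-like case mentioned in the docstring). Passing
# plain floats through the memoize decorator is assumed to be supported.
def _demo_lp_factor():
    tth = np.radians(12.0)   # two-theta of the pixel, in radians
    eta = np.radians(30.0)   # azimuthal angle of the pixel, in radians
    return _lorentz_polarization_factor(tth, eta, 1.0, 0.0)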
def _generate_ring_params(tthr, ptth, peta, eta_edges, delta_eta):
# mark pixels in the spec'd tth range
pixels_in_tthr = np.logical_and(
ptth >= tthr[0], ptth <= tthr[1]
)
# catch case where ring isn't on detector
if not np.any(pixels_in_tthr):
return None
# ???: faster to index with bool or use np.where,
# or recode in numba?
rtth_idx = np.where(pixels_in_tthr)
# grab relevant eta coords using histogram
    # !!!: This allows us to calculate arc length and
# detect a branch cut. The histogram idx var
# is the left-hand edges...
retas = peta[rtth_idx]
if fast_histogram:
reta_hist = histogram1d(
retas,
len(eta_edges) - 1,
(eta_edges[0], eta_edges[-1])
)
else:
reta_hist, _ = histogram1d(retas, bins=eta_edges)
reta_idx = np.where(reta_hist)[0]
reta_bin_idx = np.hstack(
[reta_idx,
reta_idx[-1] + 1]
)
    # ring arc length on panel
arc_length = angularDifference(
eta_edges[reta_bin_idx[0]],
eta_edges[reta_bin_idx[-1]]
)
# Munge eta bins
# !!! need to work with the subset to preserve
# NaN values at panel extents!
#
# !!! MUST RE-MAP IF BRANCH CUT IS IN RANGE
#
# The logic below assumes that eta_edges span 2*pi to
# single precision
eta_bins = eta_edges[reta_bin_idx]
if arc_length < 1e-4:
# have branch cut in here
ring_gap = np.where(
reta_idx
- np.arange(len(reta_idx))
)[0]
if len(ring_gap) > 0:
# have incomplete ring
eta_stop_idx = ring_gap[0]
eta_stop = eta_edges[eta_stop_idx]
new_period = np.cumsum([eta_stop, 2*np.pi])
# remap
retas = mapAngle(retas, new_period)
tmp_bins = mapAngle(
eta_edges[reta_idx], new_period
)
tmp_idx = np.argsort(tmp_bins)
reta_idx = reta_idx[np.argsort(tmp_bins)]
eta_bins = np.hstack(
[tmp_bins[tmp_idx],
tmp_bins[tmp_idx][-1] + delta_eta]
)
return retas, eta_bins, rtth_idx, reta_idx
def _run_histograms(rows, ims, tth_ranges, ring_maps, ring_params, threshold):
for i_row in range(*rows):
image = ims[i_row]
# handle threshold if specified
if threshold is not None:
# !!! NaNs get preserved
image = np.array(image)
image[image < threshold] = 0.
for i_r, tthr in enumerate(tth_ranges):
this_map = ring_maps[i_r]
params = ring_params[i_r]
if not params:
# We are supposed to skip this ring...
continue
# Unpack the params
retas, eta_bins, rtth_idx, reta_idx = params
if fast_histogram:
result = histogram1d(retas, len(eta_bins) - 1,
(eta_bins[0], eta_bins[-1]),
weights=image[rtth_idx])
else:
result, _ = histogram1d(retas, bins=eta_bins,
weights=image[rtth_idx])
this_map[i_row, reta_idx] = result
|
the-stack_0_576 | #!/usr/bin/env python3
import pathlib
import fileinput
from ci.util import (
check_env,
existing_file,
)
repo_dir = check_env('REPO_DIR')
effective_version = check_env('EFFECTIVE_VERSION')
template_file = existing_file(pathlib.Path(repo_dir, 'concourse', 'resources', 'defaults.mako'))
lines_replaced = 0
string_to_match = 'tag = '
for line in fileinput.FileInput(str(template_file), inplace=True):
if string_to_match in line:
if lines_replaced != 0:
            raise RuntimeError('More than one image tag found in template file')
leading_spaces = line.index(string_to_match)
print(f'{leading_spaces * " "}{string_to_match}"{effective_version}"')
lines_replaced = 1
else:
print(line, end='')
|
the-stack_0_578 | # Copyright 2019 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from oslo_db import exception as db_exc
from cyborg.common import exception
from cyborg import objects
from cyborg.tests.unit.db.base import DbTestCase
from cyborg.tests.unit import fake_deployable
from cyborg.tests.unit import fake_device
from cyborg.tests.unit.objects import test_objects
class TestDeployableObject(DbTestCase):
@property
def fake_device(self):
db_device = fake_device.get_fake_devices_as_dict()[2]
return db_device
@property
def fake_deployable(self):
db_deploy = fake_deployable.fake_db_deployable(id=1)
return db_deploy
@property
def fake_deployable2(self):
db_deploy = fake_deployable.fake_db_deployable(id=2)
return db_deploy
def test_create(self):
db_device = self.fake_device
device = objects.Device(context=self.context,
**db_device)
device.create(self.context)
device_get = objects.Device.get(self.context, device.uuid)
db_dpl = self.fake_deployable
dpl = objects.Deployable(context=self.context,
**db_dpl)
dpl.device_id = device_get.id
dpl.create(self.context)
self.assertEqual(db_dpl['uuid'], dpl.uuid)
def test_get(self):
db_device = self.fake_device
device = objects.Device(context=self.context,
**db_device)
device.create(self.context)
device_get = objects.Device.get(self.context, device.uuid)
db_dpl = self.fake_deployable
dpl = objects.Deployable(context=self.context,
**db_dpl)
dpl.device_id = device_get.id
dpl.create(self.context)
dpl_get = objects.Deployable.get(self.context, dpl.uuid)
self.assertEqual(dpl_get.uuid, dpl.uuid)
def test_get_by_filter(self):
db_device = self.fake_device
device = objects.Device(context=self.context,
**db_device)
device.create(self.context)
device_get = objects.Device.get(self.context, device.uuid)
db_dpl = self.fake_deployable
dpl = objects.Deployable(context=self.context,
**db_dpl)
dpl.device_id = device_get.id
dpl.create(self.context)
query = {"uuid": dpl['uuid']}
dpl_get_list = objects.Deployable.get_by_filter(self.context, query)
self.assertEqual(dpl_get_list[0].uuid, dpl.uuid)
def test_save(self):
db_device = self.fake_device
device = objects.Device(context=self.context,
**db_device)
device.create(self.context)
device_get = objects.Device.get(self.context, device.uuid)
db_dpl = self.fake_deployable
dpl = objects.Deployable(context=self.context,
**db_dpl)
dpl.device_id = device_get.id
dpl.create(self.context)
dpl.num_accelerators = 8
dpl.save(self.context)
dpl_get = objects.Deployable.get(self.context, dpl.uuid)
self.assertEqual(dpl_get.num_accelerators, 8)
def test_destroy(self):
db_device = self.fake_device
device = objects.Device(context=self.context,
**db_device)
device.create(self.context)
device_get = objects.Device.get(self.context, device.uuid)
db_dpl = self.fake_deployable
dpl = objects.Deployable(context=self.context,
**db_dpl)
dpl.device_id = device_get.id
dpl.create(self.context)
self.assertEqual(db_dpl['uuid'], dpl.uuid)
dpl.destroy(self.context)
self.assertRaises(exception.ResourceNotFound,
objects.Deployable.get, self.context,
dpl.uuid)
class TestDeployableObject(test_objects._LocalTest,
TestDeployableObject):
def _test_save_objectfield_fk_constraint_fails(self, foreign_key,
expected_exception):
error = db_exc.DBReferenceError('table', 'constraint', foreign_key,
'key_table')
# Prevent lazy-loading any fields, results in InstanceNotFound
deployable = fake_deployable.fake_deployable_obj(self.context)
fields_with_save_methods = [field for field in deployable.fields
if hasattr(deployable, '_save_%s' % field)]
for field in fields_with_save_methods:
@mock.patch.object(deployable, '_save_%s' % field)
@mock.patch.object(deployable, 'obj_attr_is_set')
def _test(mock_is_set, mock_save_field):
mock_is_set.return_value = True
mock_save_field.side_effect = error
deployable.obj_reset_changes(fields=[field])
deployable._changed_fields.add(field)
self.assertRaises(expected_exception, deployable.save)
deployable.obj_reset_changes(fields=[field])
_test()
|
the-stack_0_580 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
from django.conf import settings
import model_utils.fields
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Dataset',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('dataset', models.FileField(upload_to=b'datasets')),
('dimensions', models.PositiveIntegerField(default=0)),
('length', models.PositiveIntegerField(default=0)),
('filesize', models.PositiveIntegerField(default=0)),
('signature', models.CharField(unique=True, max_length=44, blank=True)),
('datatype', models.CharField(default=b'csv', max_length=4, choices=[(b'csv', b'csv'), (b'json', b'json'), (b'xml', b'xml')])),
('delimiter', models.CharField(default=b',', max_length=1)),
('uploader', models.ForeignKey(related_name='datasets', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-created',),
'db_table': 'datasets',
'get_latest_by': 'created',
},
),
]
|
the-stack_0_582 | """ Setup remote debugger with Python Tools for Visual Studio (PTVSD)
"""
import os
from .celery_log_setup import get_task_logger
REMOTE_DEBUG_PORT = 3000
log = get_task_logger(__name__)
def setup_remote_debugging(force_enabled: bool = False, *, boot_mode=None) -> None:
""" Programaticaly enables remote debugging if SC_BOOT_MODE==debug-ptvsd
"""
if "SC_BOOT_MODE" not in os.environ:
log.warning("Remote debugging only available when running in a container")
return
boot_mode = boot_mode or os.environ.get("SC_BOOT_MODE")
if boot_mode == "debug-ptvsd" or force_enabled:
try:
log.debug("Enabling attach ptvsd ...")
#
# SEE https://github.com/microsoft/ptvsd#enabling-debugging
#
import ptvsd
ptvsd.enable_attach(
address=("0.0.0.0", REMOTE_DEBUG_PORT), redirect_output=True
) # nosec
except ImportError:
log.exception("Unable to use remote debugging. ptvsd is not installed")
else:
log.info("Remote debugging enabled: listening port %s", REMOTE_DEBUG_PORT)
else:
log.debug("Booting without remote debugging since SC_BOOT_MODE=%s", boot_mode)
__all__ = ["setup_remote_debugging"]
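# --- Illustrative sketch (not part of the original module): force-enable the
# debugger from a local test or entrypoint without relying on the container
# setting SC_BOOT_MODE=debug-ptvsd. The boot-mode value below is only an
# example placeholder.
def _demo_force_enable_debugging():
    os.environ.setdefault("SC_BOOT_MODE", "local-development")
    setup_remote_debugging(force_enabled=True)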
|
the-stack_0_583 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from slack_g_cal.parse import JSON, Datetime
class WitDatetimeContainer(JSON):
""" Container wrapping datetime values from the Wit API """
def __init__(self, **dt_json):
self.is_interval = dt_json['type'] == 'interval'
# Get rid of values; we don't need this parameter
dt_json.pop('values', None)
if self.is_interval:
from_, to_ = dt_json.pop('from'), dt_json.pop('to')
self.dt_from = WitDatetime(date_input=from_.value, grain=from_.grain)
self.dt_to = WitDatetime(date_input=to_.value, grain=to_.grain)
else:
self.date = WitDatetime(date_input=dt_json.pop('value'), grain=dt_json.pop('grain'))
super(WitDatetimeContainer, self).__init__(**dt_json)
class WitDatetime(Datetime):
def __init__(self, date_input, **dt_json):
self.grain = dt_json.pop('grain')
super(WitDatetime, self).__init__(date_input=date_input, **dt_json)
def adjust_grain_by(self, adj_val):
kwargs = {self.grain: getattr(self._datetime, self.grain) + adj_val}
self._datetime = self._datetime.replace(**kwargs)
|
the-stack_0_584 | # -*- coding: utf-8 -*-
"""IPython Test Suite Runner.
This module provides a main entry point to a user script to test IPython
itself from the command line. There are two ways of running this script:
1. With the syntax `iptest all`. This runs our entire test suite by
calling this script (with different arguments) recursively. This
causes modules and package to be tested in different processes, using nose
or trial where appropriate.
2. With the regular nose syntax, like `iptest -vvs IPython`. In this form
the script simply calls nose, but with special command line flags and
plugins loaded.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import glob
from io import BytesIO
import os
import os.path as path
import sys
from threading import Thread, Lock, Event
import warnings
import nose.plugins.builtin
from nose.plugins.xunit import Xunit
from nose import SkipTest
from nose.core import TestProgram
from nose.plugins import Plugin
from nose.util import safe_str
from IPython import version_info
from IPython.utils.py3compat import decode
from IPython.utils.importstring import import_item
from IPython.testing.plugin.ipdoctest import IPythonDoctest
from IPython.external.decorators import KnownFailure, knownfailureif
pjoin = path.join
# Enable printing all warnings raise by IPython's modules
warnings.filterwarnings('ignore', message='.*Matplotlib is building the font cache.*', category=UserWarning, module='.*')
warnings.filterwarnings('error', message='.*', category=ResourceWarning, module='.*')
warnings.filterwarnings('error', message=".*{'config': True}.*", category=DeprecationWarning, module='IPy.*')
warnings.filterwarnings('default', message='.*', category=Warning, module='IPy.*')
warnings.filterwarnings('error', message='.*apply_wrapper.*', category=DeprecationWarning, module='.*')
warnings.filterwarnings('error', message='.*make_label_dec', category=DeprecationWarning, module='.*')
warnings.filterwarnings('error', message='.*decorated_dummy.*', category=DeprecationWarning, module='.*')
warnings.filterwarnings('error', message='.*skip_file_no_x11.*', category=DeprecationWarning, module='.*')
warnings.filterwarnings('error', message='.*onlyif_any_cmd_exists.*', category=DeprecationWarning, module='.*')
warnings.filterwarnings('error', message='.*disable_gui.*', category=DeprecationWarning, module='.*')
warnings.filterwarnings('error', message='.*ExceptionColors global is deprecated.*', category=DeprecationWarning, module='.*')
# Jedi older versions
warnings.filterwarnings(
'error', message='.*elementwise != comparison failed and.*', category=FutureWarning, module='.*')
if version_info < (6,):
    # nose.tools renames all things from `camelCase` to `snake_case`, which raises a
    # warning with the runner; it also imports from the standard library. (as of Dec 2015)
# Ignore, let's revisit that in a couple of years for IPython 6.
warnings.filterwarnings(
'ignore', message='.*Please use assertEqual instead', category=Warning, module='IPython.*')
if version_info < (7,):
warnings.filterwarnings('ignore', message='.*Completer.complete.*',
category=PendingDeprecationWarning, module='.*')
else:
warnings.warn(
'Completer.complete was pending deprecation and should be changed to Deprecated', FutureWarning)
# ------------------------------------------------------------------------------
# Monkeypatch Xunit to count known failures as skipped.
# ------------------------------------------------------------------------------
def monkeypatch_xunit():
try:
knownfailureif(True)(lambda: None)()
except Exception as e:
KnownFailureTest = type(e)
def addError(self, test, err, capt=None):
if issubclass(err[0], KnownFailureTest):
err = (SkipTest,) + err[1:]
return self.orig_addError(test, err, capt)
Xunit.orig_addError = Xunit.addError
Xunit.addError = addError
#-----------------------------------------------------------------------------
# Check which dependencies are installed and greater than minimum version.
#-----------------------------------------------------------------------------
def extract_version(mod):
return mod.__version__
def test_for(item, min_version=None, callback=extract_version):
"""Test to see if item is importable, and optionally check against a minimum
version.
If min_version is given, the default behavior is to check against the
`__version__` attribute of the item, but specifying `callback` allows you to
extract the value you are interested in. e.g::
In [1]: import sys
In [2]: from IPython.testing.iptest import test_for
In [3]: test_for('sys', (2,6), callback=lambda sys: sys.version_info)
Out[3]: True
"""
try:
check = import_item(item)
except (ImportError, RuntimeError):
# GTK reports Runtime error if it can't be initialized even if it's
# importable.
return False
else:
if min_version:
if callback:
# extra processing step to get version to compare
check = callback(check)
return check >= min_version
else:
return True
# Global dict where we can store information on what we have and what we don't
# have available at test run time
have = {'matplotlib': test_for('matplotlib'),
'pygments': test_for('pygments'),
'sqlite3': test_for('sqlite3')}
#-----------------------------------------------------------------------------
# Test suite definitions
#-----------------------------------------------------------------------------
test_group_names = ['core',
'extensions', 'lib', 'terminal', 'testing', 'utils',
]
class TestSection(object):
def __init__(self, name, includes):
self.name = name
self.includes = includes
self.excludes = []
self.dependencies = []
self.enabled = True
def exclude(self, module):
if not module.startswith('IPython'):
module = self.includes[0] + "." + module
self.excludes.append(module.replace('.', os.sep))
def requires(self, *packages):
self.dependencies.extend(packages)
@property
def will_run(self):
return self.enabled and all(have[p] for p in self.dependencies)
# Name -> (include, exclude, dependencies_met)
test_sections = {n:TestSection(n, ['IPython.%s' % n]) for n in test_group_names}
# Exclusions and dependencies
# ---------------------------
# core:
sec = test_sections['core']
if not have['sqlite3']:
sec.exclude('tests.test_history')
sec.exclude('history')
if not have['matplotlib']:
    sec.exclude('pylabtools')
sec.exclude('tests.test_pylabtools')
# lib:
sec = test_sections['lib']
sec.exclude('kernel')
if not have['pygments']:
sec.exclude('tests.test_lexers')
# We do this unconditionally, so that the test suite doesn't import
# gtk, changing the default encoding and masking some unicode bugs.
sec.exclude('inputhookgtk')
# We also do this unconditionally, because wx can interfere with Unix signals.
# There are currently no tests for it anyway.
sec.exclude('inputhookwx')
# Testing inputhook will need a lot of thought, to figure out
# how to have tests that don't lock up with the gui event
# loops in the picture
sec.exclude('inputhook')
# testing:
sec = test_sections['testing']
# These have to be skipped on win32 because they use echo, rm, cd, etc.
# See ticket https://github.com/ipython/ipython/issues/87
if sys.platform == 'win32':
sec.exclude('plugin.test_exampleip')
sec.exclude('plugin.dtexample')
# don't run jupyter_console tests found via shim
test_sections['terminal'].exclude('console')
# extensions:
sec = test_sections['extensions']
# This is deprecated in favour of rpy2
sec.exclude('rmagic')
# autoreload does some strange stuff, so move it to its own test section
sec.exclude('autoreload')
sec.exclude('tests.test_autoreload')
test_sections['autoreload'] = TestSection('autoreload',
['IPython.extensions.autoreload', 'IPython.extensions.tests.test_autoreload'])
test_group_names.append('autoreload')
#-----------------------------------------------------------------------------
# Functions and classes
#-----------------------------------------------------------------------------
def check_exclusions_exist():
from IPython.paths import get_ipython_package_dir
from warnings import warn
parent = os.path.dirname(get_ipython_package_dir())
    for sec in test_sections.values():
        for pattern in sec.excludes:
fullpath = pjoin(parent, pattern)
if not os.path.exists(fullpath) and not glob.glob(fullpath + '.*'):
warn("Excluding nonexistent file: %r" % pattern)
class ExclusionPlugin(Plugin):
"""A nose plugin to effect our exclusions of files and directories.
"""
name = 'exclusions'
score = 3000 # Should come before any other plugins
def __init__(self, exclude_patterns=None):
"""
Parameters
----------
exclude_patterns : sequence of strings, optional
Filenames containing these patterns (as raw strings, not as regular
expressions) are excluded from the tests.
"""
self.exclude_patterns = exclude_patterns or []
super(ExclusionPlugin, self).__init__()
def options(self, parser, env=os.environ):
Plugin.options(self, parser, env)
def configure(self, options, config):
Plugin.configure(self, options, config)
# Override nose trying to disable plugin.
self.enabled = True
def wantFile(self, filename):
"""Return whether the given filename should be scanned for tests.
"""
if any(pat in filename for pat in self.exclude_patterns):
return False
return None
def wantDirectory(self, directory):
"""Return whether the given directory should be scanned for tests.
"""
if any(pat in directory for pat in self.exclude_patterns):
return False
return None
class StreamCapturer(Thread):
daemon = True # Don't hang if main thread crashes
started = False
def __init__(self, echo=False):
super(StreamCapturer, self).__init__()
self.echo = echo
self.streams = []
self.buffer = BytesIO()
self.readfd, self.writefd = os.pipe()
self.buffer_lock = Lock()
self.stop = Event()
def run(self):
self.started = True
while not self.stop.is_set():
chunk = os.read(self.readfd, 1024)
with self.buffer_lock:
self.buffer.write(chunk)
if self.echo:
sys.stdout.write(decode(chunk))
os.close(self.readfd)
os.close(self.writefd)
def reset_buffer(self):
with self.buffer_lock:
self.buffer.truncate(0)
self.buffer.seek(0)
def get_buffer(self):
with self.buffer_lock:
return self.buffer.getvalue()
def ensure_started(self):
if not self.started:
self.start()
def halt(self):
"""Safely stop the thread."""
if not self.started:
return
self.stop.set()
os.write(self.writefd, b'\0') # Ensure we're not locked in a read()
self.join()
class SubprocessStreamCapturePlugin(Plugin):
name='subprocstreams'
def __init__(self):
Plugin.__init__(self)
self.stream_capturer = StreamCapturer()
self.destination = os.environ.get('IPTEST_SUBPROC_STREAMS', 'capture')
# This is ugly, but distant parts of the test machinery need to be able
# to redirect streams, so we make the object globally accessible.
nose.iptest_stdstreams_fileno = self.get_write_fileno
def get_write_fileno(self):
if self.destination == 'capture':
self.stream_capturer.ensure_started()
return self.stream_capturer.writefd
elif self.destination == 'discard':
return os.open(os.devnull, os.O_WRONLY)
else:
return sys.__stdout__.fileno()
def configure(self, options, config):
Plugin.configure(self, options, config)
# Override nose trying to disable plugin.
if self.destination == 'capture':
self.enabled = True
def startTest(self, test):
# Reset log capture
self.stream_capturer.reset_buffer()
def formatFailure(self, test, err):
# Show output
ec, ev, tb = err
captured = self.stream_capturer.get_buffer().decode('utf-8', 'replace')
if captured.strip():
ev = safe_str(ev)
out = [ev, '>> begin captured subprocess output <<',
captured,
'>> end captured subprocess output <<']
return ec, '\n'.join(out), tb
return err
formatError = formatFailure
def finalize(self, result):
self.stream_capturer.halt()
def run_iptest():
"""Run the IPython test suite using nose.
This function is called when this script is **not** called with the form
`iptest all`. It simply calls nose with appropriate command line flags
and accepts all of the standard nose arguments.
"""
# Apply our monkeypatch to Xunit
if '--with-xunit' in sys.argv and not hasattr(Xunit, 'orig_addError'):
monkeypatch_xunit()
arg1 = sys.argv[1]
if arg1 in test_sections:
section = test_sections[arg1]
sys.argv[1:2] = section.includes
elif arg1.startswith('IPython.') and arg1[8:] in test_sections:
section = test_sections[arg1[8:]]
sys.argv[1:2] = section.includes
else:
section = TestSection(arg1, includes=[arg1])
argv = sys.argv + [ '--detailed-errors', # extra info in tracebacks
# We add --exe because of setuptools' imbecility (it
# blindly does chmod +x on ALL files). Nose does the
# right thing and it tries to avoid executables,
# setuptools unfortunately forces our hand here. This
# has been discussed on the distutils list and the
# setuptools devs refuse to fix this problem!
'--exe',
]
if '-a' not in argv and '-A' not in argv:
argv = argv + ['-a', '!crash']
if nose.__version__ >= '0.11':
# I don't fully understand why we need this one, but depending on what
# directory the test suite is run from, if we don't give it, 0 tests
# get run. Specifically, if the test suite is run from the source dir
# with an argument (like 'iptest.py IPython.core', 0 tests are run,
# even if the same call done in this directory works fine). It appears
# that if the requested package is in the current dir, nose bails early
# by default. Since it's otherwise harmless, leave it in by default
# for nose >= 0.11, though unfortunately nose 0.10 doesn't support it.
argv.append('--traverse-namespace')
plugins = [ ExclusionPlugin(section.excludes), KnownFailure(),
SubprocessStreamCapturePlugin() ]
# we still have some vestigial doctests in core
if (section.name.startswith(('core', 'IPython.core', 'IPython.utils'))):
plugins.append(IPythonDoctest())
argv.extend([
'--with-ipdoctest',
'--ipdoctest-tests',
'--ipdoctest-extension=txt',
])
# Use working directory set by parent process (see iptestcontroller)
if 'IPTEST_WORKING_DIR' in os.environ:
os.chdir(os.environ['IPTEST_WORKING_DIR'])
# We need a global ipython running in this process, but the special
# in-process group spawns its own IPython kernels, so for *that* group we
# must avoid also opening the global one (otherwise there's a conflict of
# singletons). Ultimately the solution to this problem is to refactor our
# assumptions about what needs to be a singleton and what doesn't (app
# objects should, individual shells shouldn't). But for now, this
# workaround allows the test suite for the inprocess module to complete.
if 'kernel.inprocess' not in section.name:
from IPython.testing import globalipapp
globalipapp.start_ipython()
# Now nose can run
TestProgram(argv=argv, addplugins=plugins)
if __name__ == '__main__':
run_iptest()
|
the-stack_0_586 | import os
from io import StringIO
from django.contrib.gis.geos import Point
from django.test import TestCase
from uk_geo_utils.models import Onspd
from uk_geo_utils.management.commands.import_onspd import Command
class OnsudImportTest(TestCase):
def test_import_onspd(self):
# check table is empty before we start
self.assertEqual(0, Onspd.objects.count())
# path to file we're going to import
csv_path = os.path.abspath(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'../fixtures/onspd'
)
)
cmd = Command()
# supress output
out = StringIO()
cmd.stdout = out
# import data
opts = {
'path': csv_path,
}
cmd.handle(**opts)
# ensure all our tasty data has been imported
self.assertEqual(4, Onspd.objects.count())
# row with valid grid ref should have valid Point() location
al11aa = Onspd.objects.filter(pcds="AL1 1AA")[0]
self.assertEqual(Point(-0.341337, 51.749084, srid=4326), al11aa.location)
# row with invalid grid ref should have NULL location
im11aa = Onspd.objects.filter(pcds="IM1 1AA")[0]
self.assertIsNone(im11aa.location)
|
the-stack_0_587 | #!/usr/bin/env python
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import argparse
import shutil
arg_parser = argparse.ArgumentParser(description="This is a script to convert coco anntations to voc-like annotations.")
arg_parser.add_argument('-ti', '--train_images', type=str, default="./coco2014/train2014", help='where to put coco2014 train images.')
arg_parser.add_argument('-vi', '--val_images', type=str, default='./coco2014/val2014', help='where to put coco2014 val images.')
arg_parser.add_argument('-ta', '--train_anno', type=str, default='./coco2014/instances_train2014.json', help='where to put cooc2014 train set annotations.')
arg_parser.add_argument('-va', '--val_anno', type=str, default='./coco2014/instances_val2014.json', help='where to put coco2014 val set annotations')
arg_parser.add_argument('-tlf', '--tran_list_file', type=str, default='./coco2014/train2014.txt', help='image list for training')
arg_parser.add_argument('-vlf', '--val_list_file', type=str, default='./coco2014/val2014.txt', help='image list for evalution.')
arg_parser.add_argument('-ai', '--all_images', type=str, default='./coco2014/Images', help='where to put all images.')
arg_parser.add_argument('-aa', '--all_anno', type=str, default='./coco2014/Annotations', help='where to put all annotations.')
args = arg_parser.parse_args()
'''How to organize coco dataset folder:
inputs:
coco2014/
|->train2014/
|->val2014/
|->instances_train2014.json
|->instances_val2014.json
outputs:
coco2014/
|->Annotations/
|->Images/
|->train2014.txt
|->val2014.txt
'''
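# Illustrative invocation only (the script name is hypothetical; the paths are
# simply the argparse defaults declared above):
#   python coco2voc_convert.py -ti ./coco2014/train2014 -vi ./coco2014/val2014 \
#       -ta ./coco2014/instances_train2014.json -va ./coco2014/instances_val2014.json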
def convert_images_coco2voc(args):
assert os.path.exists(args.train_images)
assert os.path.exists(args.val_images)
os.system('mv ' + args.train_images + ' ' + args.all_images)
imagename_list = os.listdir(args.val_images)
for imagename in imagename_list:
shutil.copy(os.path.join(args.val_images, imagename), args.all_images)
os.system('rm -r ' + args.val_images)
def generate_cid_name(json_object):
id2name_dict = {}
for ind, category_info in enumerate(json_object['categories']):
id2name_dict[category_info['id']] = category_info['name']
return id2name_dict
def generate_image_dict(json_object):
id2image_dict = {}
for ind, image_info in enumerate(json_object['images']):
id2image_dict[image_info['id']] = image_info['file_name']
return id2image_dict
def generate_annotation_files(json_object, annotation_path, id2image_dict, id2name, image_list_file):
if not os.path.exists(annotation_path):
os.mkdir(annotation_path)
f_image = open(image_list_file, 'w')
all_images_name = []
for ind, anno_info in enumerate(json_object['annotations']):
print('preprocess: {}'.format(ind))
category_id = anno_info['category_id']
cls_name = id2name[category_id]
if cls_name != "person":
continue
image_id = anno_info['image_id']
image_name = id2image_dict[image_id]
bbox = anno_info['bbox']
bbox[2] = bbox[0] + bbox[2]
bbox[3] = bbox[3] + bbox[1]
bbox_str = ' '.join([str(int(x)) for x in bbox])
with open(os.path.join(annotation_path, image_name.split('.')[0] + '.txt'), 'a') as f_anno:
f_anno.writelines(image_name.split('.')[0] + " " + cls_name + " " + bbox_str + "\n")
if image_name not in all_images_name:
all_images_name.append(image_name)
for image_name in all_images_name:
f_image.writelines(image_name.split('.')[0] + "\n")
f_image.close()
def convert_anno_coco2voc(coco_anno_file, image_list_file, all_anno_path):
with open(coco_anno_file, 'r') as f_ann:
line = f_ann.readlines()
json_object = json.loads(line[0])
id2name = generate_cid_name(json_object)
id2image_dict = generate_image_dict(json_object)
generate_annotation_files(json_object, all_anno_path, id2image_dict, id2name, image_list_file)
def convert_anno_all(args):
convert_anno_coco2voc(args.train_anno, args.tran_list_file, args.all_anno)
convert_anno_coco2voc(args.val_anno, args.val_list_file, args.all_anno)
if __name__ == "__main__":
convert_anno_all(args)
convert_images_coco2voc(args)
|
the-stack_0_588 | #!/usr/bin/env python
"""
models for the mailroom program.
This is where the program logic is.
This version has been made Object Oriented.
"""
# handy utility to make pretty printing easier
from textwrap import dedent
from pathlib import Path
import json_save.json_save_dec as js
import json
from . import data_dir
@js.json_save
class Donor:
"""
class to hold the information about a single donor
"""
name = js.String()
donations = js.List()
# reference to the DB its in -- this will be set in the instance
# when added to the DonorDB
_donor_db = None
def __init__(self, name, donations=None):
"""
create a new Donor object
:param name: the full name of the donor
:param donations=None: iterable of past donations
"""
self.norm_name = self.normalize_name(name)
self.name = name.strip()
if donations is None:
self.donations = []
else:
self.donations = list(donations)
def __str__(self):
msg = (f"Donor: {self.name}, with {self.num_donations:d} "
f"donations, totaling: ${self.total_donations:.2f}")
return msg
def mutating(method):
"""
Decorator that saves the DB when a change is made
It should be applied to all mutating methods, so the
data will be saved whenever it's been changed.
NOTE: This requires that the donor object is in a DonorDB.
"""
# note that this is expecting to decorate a method
# so self will be the first argument
def wrapped(self, *args, **kwargs):
print("wrapped method called")
print(self)
print(self._donor_db)
res = method(self, *args, **kwargs)
if self._donor_db is not None:
self._donor_db.save()
return res
return wrapped
@staticmethod
def normalize_name(name):
"""
return a normalized version of a name to use as a comparison key
simple enough to not be in a method now, but maybe you'd want to make it fancier later.
"""
return name.lower().strip()
@property
def last_donation(self):
"""
The most recent donation made
"""
try:
return self.donations[-1]
except IndexError:
return None
@property
def total_donations(self):
return sum(self.donations)
@property
def num_donations(self):
return len(self.donations)
@property
def average_donation(self):
return self.total_donations / self.num_donations
@mutating
def add_donation(self, amount):
"""
add a new donation
"""
print("add_donation called")
amount = float(amount)
if amount <= 0.0:
raise ValueError("Donation must be greater than zero")
self.donations.append(amount)
def gen_letter(self):
"""
Generate a thank you letter for the donor
:param: donor tuple
:returns: string with letter
note: This doesn't actually write to a file -- that's a separate
function. This makes it more flexible and easier to test.
"""
return dedent('''Dear {0:s},
Thank you for your very kind donation of ${1:.2f}.
It will be put to very good use.
Sincerely,
-The Team
'''.format(self.name, self.last_donation)
)
@js.json_save
class DonorDB:
"""
Encapsulation of the entire database of donors and data associated with them.
"""
# specify a json_save dict as the data structure for the data.
donor_data = js.Dict()
_frozen = False
def __init__(self, donors=None, db_file=None):
"""
Initialize a new donor database
:param donors=None: iterable of Donor objects
:param db_file=None: path to file to store the datbase in.
if None, the data will be stored in the
package data_dir
"""
if db_file is None:
self.db_file = data_dir / "mailroom_data.json"
else:
self.db_file = Path(db_file)
self.donor_data = {}
if donors is not None:
# you can set _frozen so that it won't save on every change.
self._frozen = True
for d in donors:
self.add_donor(d)
            self.save()  # save resets _frozen
def mutating(method):
"""
Decorator that saves the DB when a change is made
It should be applied to all mutating methods, so the
data will be saved whenever it's been changed.
NOTE: This is not very efficient -- it will re-write
the entire file each time.
"""
# note that this is expecting to decorate a method
# so self will be the first argument
def wrapped(self, *args, **kwargs):
res = method(self, *args, **kwargs)
if not self._frozen:
self.save()
return res
return wrapped
@classmethod
def load_from_file(cls, filename):
"""
loads a donor database from a raw json file
NOTE: This is not a json_save format file!
-- it is a simpler, less flexible format.
"""
with open(filename) as infile:
donors = json.load(infile)
db = cls([Donor(*d) for d in donors])
return db
@classmethod
def load(cls, filepath):
"""
loads a donor database from a json_save format file.
"""
with open(filepath) as jsfile:
db = js.from_json(jsfile)
        db.db_file = filepath
        return db
def save(self):
"""
Save the data to a json_save file
"""
# if explicitly called, you want to do it!
self._frozen = False
with open(self.db_file, 'w') as db_file:
self.to_json(db_file)
@property
def donors(self):
"""
an iterable of all the donors
"""
return self.donor_data.values()
def list_donors(self):
"""
creates a list of the donors as a string, so they can be printed
Not calling print from here makes it more flexible and easier to
test
"""
listing = ["Donor list:"]
for donor in self.donors:
listing.append(donor.name)
return "\n".join(listing)
def find_donor(self, name):
"""
find a donor in the donor db
:param: the name of the donor
:returns: The donor data structure -- None if not in the self.donor_data
"""
return self.donor_data.get(Donor.normalize_name(name))
@mutating
def add_donor(self, donor):
"""
Add a new donor to the donor db
:param donor: A Donor instance, or the name of the donor
:returns: The new or existing Donor object
"""
if not isinstance(donor, Donor):
donor = Donor(donor)
self.donor_data[donor.norm_name] = donor
donor._donor_db = self
return donor
@staticmethod
def sort_key(item):
# used to sort on name in self.donor_data
return item[1]
def generate_donor_report(self):
"""
Generate the report of the donors and amounts donated.
:returns: the donor report as a string.
"""
# First, reduce the raw data into a summary list view
report_rows = []
for donor in self.donor_data.values():
name = donor.name
gifts = donor.donations
total_gifts = donor.total_donations
num_gifts = len(gifts)
avg_gift = donor.average_donation
report_rows.append((name, total_gifts, num_gifts, avg_gift))
# sort the report data
report_rows.sort(key=self.sort_key)
report = []
report.append("{:25s} | {:11s} | {:9s} | {:12s}".format("Donor Name",
"Total Given",
"Num Gifts",
"Average Gift"))
report.append("-" * 66)
for row in report_rows:
report.append("{:25s} ${:10.2f} {:9d} ${:11.2f}".format(*row))
return "\n".join(report)
def save_letters_to_disk(self):
"""
make a letter for each donor, and save it to disk.
"""
print("Saving letters:")
for donor in self.donor_data.values():
print("donor:", donor.name)
letter = donor.gen_letter()
# I don't like spaces in filenames...
filename = donor.name.replace(" ", "_") + ".txt"
open(filename, 'w').write(letter)
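# --- Illustrative sketch (not part of the original module): build a small
# database, record donations, and render the report. The donor names, amounts,
# and the "demo_mailroom.json" path are made-up example values.
def _demo_mailroom_usage():
    demo_db = DonorDB(db_file="demo_mailroom.json")
    alice = demo_db.add_donor("Alice Adams")   # accepts a name or a Donor
    alice.add_donation(150.00)
    alice.add_donation(42.50)
    demo_db.add_donor(Donor("Bob Brown", donations=[10.0, 20.0]))
    print(demo_db.list_donors())
    print(demo_db.generate_donor_report())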
|
the-stack_0_589 | from toga import Key
from toga_cocoa.libs import (
NSEventModifierFlagCapsLock,
NSEventModifierFlagShift,
NSEventModifierFlagControl,
NSEventModifierFlagOption,
NSEventModifierFlagCommand,
)
######################################################################
# Utilities to convert Cocoa constants to Toga ones
######################################################################
def modified_key(key, shift=None):
def mod_fn(modifierFlags):
if modifierFlags & NSEventModifierFlagShift:
return shift
return key
return mod_fn
def toga_key(event):
"""Convert a Cocoa NSKeyEvent into a Toga event."""
key = {
0: Key.A,
1: Key.S,
2: Key.D,
3: Key.F,
4: Key.H,
5: Key.G,
6: Key.Z,
7: Key.X,
8: Key.C,
9: Key.V,
11: Key.B,
12: Key.Q,
13: Key.W,
14: Key.E,
15: Key.R,
16: Key.Y,
17: Key.T,
18: modified_key(Key._1, shift=Key.EXCLAMATION)(event.modifierFlags),
19: modified_key(Key._2, shift=Key.AT)(event.modifierFlags),
20: modified_key(Key._3, shift=Key.HASH)(event.modifierFlags),
21: modified_key(Key._4, shift=Key.DOLLAR)(event.modifierFlags),
22: modified_key(Key._6, shift=Key.CARET)(event.modifierFlags),
23: modified_key(Key._5, shift=Key.PERCENT)(event.modifierFlags),
24: modified_key(Key.PLUS, shift=Key.EQUAL)(event.modifierFlags),
25: modified_key(Key._9, shift=Key.OPEN_PARENTHESIS)(event.modifierFlags),
26: modified_key(Key._7, shift=Key.AND)(event.modifierFlags),
27: modified_key(Key.MINUS, shift=Key.UNDERSCORE)(event.modifierFlags),
28: modified_key(Key._8, shift=Key.ASTERISK)(event.modifierFlags),
29: modified_key(Key._0, shift=Key.CLOSE_PARENTHESIS)(event.modifierFlags),
30: Key.CLOSE_BRACKET,
31: Key.O,
32: Key.U,
33: Key.OPEN_BRACKET,
34: Key.I,
35: Key.P,
36: Key.ENTER,
37: Key.L,
38: Key.J,
39: modified_key(Key.QUOTE, shift=Key.DOUBLE_QUOTE)(event.modifierFlags),
40: Key.K,
41: modified_key(Key.COLON, shift=Key.SEMICOLON)(event.modifierFlags),
42: Key.BACKSLASH,
43: modified_key(Key.COMMA, shift=Key.LESS_THAN)(event.modifierFlags),
44: modified_key(Key.SLASH, shift=Key.QUESTION)(event.modifierFlags),
45: Key.N,
46: Key.M,
47: modified_key(Key.FULL_STOP, shift=Key.GREATER_THAN)(event.modifierFlags),
48: Key.TAB,
49: Key.SPACE,
50: modified_key(Key.BACK_QUOTE, shift=Key.TILDE)(event.modifierFlags),
51: Key.BACKSPACE,
53: Key.ESCAPE,
65: Key.NUMPAD_DECIMAL_POINT,
67: Key.NUMPAD_MULTIPLY,
69: Key.NUMPAD_PLUS,
71: Key.NUMPAD_CLEAR,
75: Key.NUMPAD_DIVIDE,
76: Key.NUMPAD_ENTER,
78: Key.NUMPAD_MINUS,
81: Key.NUMPAD_EQUAL,
82: Key.NUMPAD_0,
83: Key.NUMPAD_1,
84: Key.NUMPAD_2,
85: Key.NUMPAD_3,
86: Key.NUMPAD_4,
87: Key.NUMPAD_5,
88: Key.NUMPAD_6,
89: Key.NUMPAD_7,
91: Key.NUMPAD_8,
92: Key.NUMPAD_9,
# : Key.F4,
96: Key.F5,
97: Key.F7,
98: Key.F5,
99: Key.F3,
100: Key.F8,
101: Key.F9,
109: Key.F9,
115: Key.HOME,
116: Key.PAGE_UP,
117: Key.DELETE,
119: Key.END,
120: Key.F2,
121: Key.PAGE_DOWN,
122: Key.F1,
123: Key.LEFT,
124: Key.RIGHT,
125: Key.DOWN,
126: Key.UP,
}.get(event.keyCode, None)
modifiers = set()
if event.modifierFlags & NSEventModifierFlagCapsLock:
modifiers.add(Key.CAPSLOCK)
if event.modifierFlags & NSEventModifierFlagShift:
modifiers.add(Key.SHIFT)
if event.modifierFlags & NSEventModifierFlagControl:
modifiers.add(Key.CONTROL)
if event.modifierFlags & NSEventModifierFlagOption:
modifiers.add(Key.OPTION)
if event.modifierFlags & NSEventModifierFlagCommand:
modifiers.add(Key.COMMAND)
return {
'key': key,
'modifiers': modifiers
}
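# --- Illustrative sketch (not part of the original module): modified_key
# returns a resolver that picks the shifted variant only when the shift bit is
# set in the Cocoa modifier mask.
def _demo_modified_key():
    resolve = modified_key(Key._1, shift=Key.EXCLAMATION)
    assert resolve(0) == Key._1
    assert resolve(NSEventModifierFlagShift) == Key.EXCLAMATION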
|
the-stack_0_590 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import typing
# Use consistent types for marshal and unmarshal functions across
# both JSON and Binary format.
MarshallerType = typing.Optional[
typing.Callable[[typing.Any], typing.Union[bytes, str]]
]
UnmarshallerType = typing.Optional[
typing.Callable[[typing.Union[bytes, str]], typing.Any]
]
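# --- Illustrative sketch (not part of the original module): a JSON-based
# marshaller/unmarshaller pair that satisfies the aliases above.
import json
def _json_marshal(data: typing.Any) -> str:
    # str satisfies the Union[bytes, str] return contract
    return json.dumps(data)
def _json_unmarshal(raw: typing.Union[bytes, str]) -> typing.Any:
    return json.loads(raw)
_example_marshaller: MarshallerType = _json_marshal
_example_unmarshaller: UnmarshallerType = _json_unmarshal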
|
the-stack_0_593 | # Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""ml2_vnic_type
Revision ID: 27cc183af192
Revises: 4ca36cfc898c
Create Date: 2014-02-09 12:19:21.362967
"""
# revision identifiers, used by Alembic.
revision = '27cc183af192'
down_revision = '4ca36cfc898c'
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade():
if migration.schema_has_table('ml2_port_bindings'):
op.add_column('ml2_port_bindings',
sa.Column('vnic_type', sa.String(length=64),
nullable=False,
server_default='normal'))
|
the-stack_0_594 | # Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi ([email protected]), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class TestInput(object):
"""Groups information about a test for easy passing of data."""
def __init__(self, test_name, timeout=None, requires_lock=None, reference_files=None, should_run_pixel_tests=None, should_add_missing_baselines=True):
# TestInput objects are normally constructed by the manager and passed
# to the workers, but these some fields are set lazily in the workers where possible
# because they require us to look at the filesystem and we want to be able to do that in parallel.
self.test_name = test_name
self.timeout = timeout # in msecs; should rename this for consistency
self.requires_lock = requires_lock
self.reference_files = reference_files
self.should_run_pixel_tests = should_run_pixel_tests
self.should_add_missing_baselines = should_add_missing_baselines
def __repr__(self):
return "TestInput('%s', timeout=%s, requires_lock=%s, reference_files=%s, should_run_pixel_tests=%s, should_add_missing_baselines%s)" % (self.test_name, self.timeout, self.requires_lock, self.reference_files, self.should_run_pixel_tests, self.should_add_missing_baselines)
|
the-stack_0_596 | from students import views as students_views
from django.urls import path
from django.contrib.auth import views as auth_views
urlpatterns = [
path('login/', auth_views.LoginView.as_view(template_name='students/student/login.html'), name = 'login'),
path('logout/', auth_views.LogoutView.as_view(template_name='students/student/logout.html'), name = 'logout'),
path('register/',students_views.StudentRegistrationView.as_view(), name='student_registration'),
path('enroll-course/',students_views.StudentEnrollCourseView.as_view(), name='student_enroll_course'),
path('courses/', students_views.StudentCourseListView.as_view(), name='student_course_list'),
path('course/<pk>/', students_views.StudentCourseDetailView.as_view(), name='student_course_detail'),
path('course/<pk>/<module_id>/', students_views.StudentCourseDetailView.as_view(), name='student_course_detail_module'),
] |
the-stack_0_597 | import os
import unittest
from pathlib import Path
import paramak
import pytest
class test_object_properties(unittest.TestCase):
def test_shape_default_properties(self):
"""Creates a Shape object and checks that the points attribute has
a default of None."""
test_shape = paramak.Shape()
assert test_shape.points is None
def test_incorrect_workplane(self):
"""Creates Shape object with incorrect workplane and checks ValueError
is raised."""
test_shape = paramak.Shape()
def incorrect_workplane():
"""Creates Shape object with unacceptable workplane."""
test_shape.workplane = "ZY"
self.assertRaises(ValueError, incorrect_workplane)
def test_incorrect_points(self):
"""Creates Shape objects and checks errors are raised correctly when
specifying points."""
test_shape = paramak.Shape()
def incorrect_points_end_point_is_start_point():
"""Checks ValueError is raised when the start and end points are
the same."""
test_shape.points = [(0, 200), (200, 100), (0, 0), (0, 200)]
self.assertRaises(
ValueError,
incorrect_points_end_point_is_start_point)
def incorrect_points_missing_z_value():
"""Checks ValueError is raised when a point is missing a z
value."""
test_shape.points = [(0, 200), (200), (0, 0), (0, 50)]
self.assertRaises(ValueError, incorrect_points_missing_z_value)
def incorrect_points_not_a_list():
"""Checks ValueError is raised when the points are not a list."""
test_shape.points = (0, 0), (0, 20), (20, 20), (20, 0)
self.assertRaises(ValueError, incorrect_points_not_a_list)
def incorrect_points_wrong_number_of_entries():
"""Checks ValueError is raised when individual points dont have 2
or 3 entries."""
test_shape.points = [(0, 0), (0, 20), (20, 20, 20, 20)]
self.assertRaises(ValueError, incorrect_points_wrong_number_of_entries)
def incorrect_x_point_value_type():
"""Checks ValueError is raised when X point is not a number."""
test_shape.points = [("string", 0), (0, 20), (20, 20)]
self.assertRaises(ValueError, incorrect_x_point_value_type)
def incorrect_y_point_value_type():
"""Checks ValueError is raised when Y point is not a number."""
test_shape.points = [(0, "string"), (0, 20), (20, 20)]
self.assertRaises(ValueError, incorrect_y_point_value_type)
def test_create_limits(self):
"""Creates a Shape object and checks that the create_limits function
returns the expected values for x_min, x_max, z_min and z_max."""
test_shape = paramak.Shape()
test_shape.points = [
(0, 0),
(0, 10),
(0, 20),
(10, 20),
(20, 20),
(20, 10),
(20, 0),
(10, 0),
]
assert test_shape.create_limits() == (0.0, 20.0, 0.0, 20.0)
# test with a component which has a find_points method
test_shape2 = paramak.Plasma()
test_shape2.create_limits()
assert test_shape2.x_min is not None
def test_create_limits_error(self):
"""Checks error is raised when no points are given."""
test_shape = paramak.Shape()
def limits():
test_shape.create_limits()
self.assertRaises(ValueError, limits)
def test_export_2d_image(self):
"""Creates a Shape object and checks that a png file of the object with
the correct suffix can be exported using the export_2d_image method."""
test_shape = paramak.Shape()
test_shape.points = [(0, 0), (0, 20), (20, 20), (20, 0)]
os.system("rm filename.png")
test_shape.export_2d_image("filename")
assert Path("filename.png").exists() is True
os.system("rm filename.png")
test_shape.export_2d_image("filename.png")
assert Path("filename.png").exists() is True
os.system("rm filename.png")
def test_initial_solid_construction(self):
"""Creates a shape and checks that a cadquery solid with a unique hash
value is created when .solid is called."""
test_shape = paramak.RotateStraightShape(
points=[(0, 0), (0, 20), (20, 20), (20, 0)], rotation_angle=360
)
assert test_shape.hash_value is None
assert test_shape.solid is not None
assert type(test_shape.solid).__name__ == "Workplane"
assert test_shape.hash_value is not None
def test_solid_return(self):
"""Checks that the same cadquery solid with the same unique hash value
is returned when shape.solid is called again after no changes have been
made to the Shape."""
test_shape = paramak.RotateStraightShape(
points=[(0, 0), (0, 20), (20, 20), (20, 0)], rotation_angle=360
)
assert test_shape.solid is not None
initial_hash_value = test_shape.hash_value
assert test_shape.solid is not None
assert initial_hash_value == test_shape.hash_value
def test_conditional_solid_reconstruction(self):
"""Checks that a new cadquery solid with a new unique hash value is
constructed when shape.solid is called after changes to the Shape have
been made."""
test_shape = paramak.RotateStraightShape(
points=[(0, 0), (0, 20), (20, 20)], rotation_angle=360
)
assert test_shape.solid is not None
assert test_shape.hash_value is not None
initial_hash_value = test_shape.hash_value
test_shape.rotation_angle = 180
assert test_shape.solid is not None
assert test_shape.hash_value is not None
assert initial_hash_value != test_shape.hash_value
def test_hash_value_update(self):
"""Checks that the hash value of a Shape is not updated until a new
cadquery solid has been created."""
test_shape = paramak.RotateStraightShape(
points=[(0, 0), (0, 20), (20, 20)], rotation_angle=360
)
test_shape.solid
assert test_shape.hash_value is not None
initial_hash_value = test_shape.hash_value
test_shape.rotation_angle = 180
assert test_shape.hash_value == initial_hash_value
test_shape.solid
assert test_shape.hash_value != initial_hash_value
def test_material_tag_warning(self):
"""Checks that a warning is raised when a Shape has a material tag >
28 characters."""
test_shape = paramak.Shape()
def warning_material_tag():
test_shape.material_tag = "abcdefghijklmnopqrstuvwxyz12345"
self.assertWarns(UserWarning, warning_material_tag)
def test_invalid_material_tag(self):
"""Checks a ValueError is raised when a Shape has an invalid material
tag."""
test_shape = paramak.Shape()
def invalid_material_tag():
test_shape.material_tag = 123
self.assertRaises(ValueError, invalid_material_tag)
def test_export_html(self):
"""Checks a plotly figure of the Shape is exported by the export_html
method with the correct filename with RGB and RGBA colors."""
test_shape = paramak.RotateStraightShape(
points=[(0, 0), (0, 20), (20, 20), (20, 0)], rotation_angle=360
)
os.system("rm filename.html")
test_shape.export_html('filename')
assert Path("filename.html").exists() is True
os.system("rm filename.html")
test_shape.color = (1, 0, 0, 0.5)
test_shape.export_html('filename')
assert Path("filename.html").exists() is True
os.system("rm filename.html")
def test_export_html_with_points_None(self):
"""Checks that an error is raised when points is None and export_html
"""
test_shape = paramak.Shape()
def export():
test_shape.export_html("out.html")
self.assertRaises(ValueError, export)
def test_invalid_stp_filename(self):
"""Checks ValueError is raised when invalid stp filenames are used."""
def invalid_filename_suffix():
paramak.RotateStraightShape(
points=[(0, 0), (0, 20), (20, 20)],
stp_filename="filename.invalid_suffix"
)
self.assertRaises(ValueError, invalid_filename_suffix)
def invalid_filename_type():
paramak.RotateStraightShape(
points=[(0, 0), (0, 20), (20, 20)],
stp_filename=123456
)
self.assertRaises(ValueError, invalid_filename_type)
def test_invalid_stl_filename(self):
"""Checks ValueError is raised when invalid stl filenames are used."""
def invalid_filename_suffix():
paramak.RotateStraightShape(
points=[(0, 0), (0, 20), (20, 20)],
stl_filename="filename.invalid_suffix"
)
self.assertRaises(ValueError, invalid_filename_suffix)
def invalid_filename_type():
paramak.RotateStraightShape(
points=[(0, 0), (0, 20), (20, 20)],
stl_filename=123456
)
self.assertRaises(ValueError, invalid_filename_type)
def test_invalid_color(self):
"""Checks ValueError is raised when invalid colors are used."""
def invalid_color_type():
paramak.RotateStraightShape(
points=[(0, 0), (0, 20), (20, 20)],
color=255
)
self.assertRaises(ValueError, invalid_color_type)
def invalid_color_length():
paramak.RotateStraightShape(
points=[(0, 0), (0, 20), (20, 20)],
color=(255, 255, 255, 1, 1)
)
self.assertRaises(ValueError, invalid_color_length)
def test_volumes_add_up_to_total_volume_Compound(self):
"""Checks the volume and volumes attributes are correct types
        and that the volumes sum to equal the volume for a Compound."""
test_shape = paramak.PoloidalFieldCoilSet(
heights=[10, 10],
widths=[20, 20],
center_points=[(15, 15), (50, 50)]
)
assert isinstance(test_shape.volume, float)
assert isinstance(test_shape.volumes, list)
assert isinstance(test_shape.volumes[0], float)
assert isinstance(test_shape.volumes[1], float)
assert len(test_shape.volumes) == 2
assert sum(test_shape.volumes) == pytest.approx(test_shape.volume)
def test_volumes_add_up_to_total_volume(self):
"""Checks the volume and volumes attributes are correct types
        and that the volumes sum to equal the volume."""
test_shape = paramak.PoloidalFieldCoil(
center_point=(100, 100),
height=50,
width=50
)
assert isinstance(test_shape.volume, float)
assert isinstance(test_shape.volumes, list)
assert isinstance(test_shape.volumes[0], float)
assert len(test_shape.volumes) == 1
assert sum(test_shape.volumes) == pytest.approx(test_shape.volume)
def test_areas_add_up_to_total_area_Compound(self):
"""Checks the area and areas attributes are correct types
        and that the areas sum to equal the area for a Compound."""
test_shape = paramak.PoloidalFieldCoilSet(
heights=[10, 10],
widths=[20, 20],
center_points=[(15, 15), (50, 50)]
)
assert isinstance(test_shape.area, float)
assert isinstance(test_shape.areas, list)
assert isinstance(test_shape.areas[0], float)
assert isinstance(test_shape.areas[1], float)
assert isinstance(test_shape.areas[2], float)
assert isinstance(test_shape.areas[3], float)
assert isinstance(test_shape.areas[4], float)
assert isinstance(test_shape.areas[5], float)
assert isinstance(test_shape.areas[6], float)
assert isinstance(test_shape.areas[7], float)
assert len(test_shape.areas) == 8
assert sum(test_shape.areas) == pytest.approx(test_shape.area)
def test_areas_add_up_to_total_area(self):
"""Checks the area and areas attributes are correct types
        and that the areas sum to equal the area."""
test_shape = paramak.PoloidalFieldCoil(
center_point=(100, 100),
height=50,
width=50
)
assert isinstance(test_shape.area, float)
assert isinstance(test_shape.areas, list)
assert isinstance(test_shape.areas[0], float)
assert isinstance(test_shape.areas[1], float)
assert isinstance(test_shape.areas[2], float)
assert isinstance(test_shape.areas[3], float)
assert len(test_shape.areas) == 4
assert sum(test_shape.areas) == pytest.approx(test_shape.area)
def test_trace(self):
"""Test trace method is populated"""
test_shape = paramak.PoloidalFieldCoil(
center_point=(100, 100),
height=50,
width=50,
name="coucou"
)
assert test_shape._trace() is not None
def test_create_patch_error(self):
"""Checks _create_patch raises a ValueError when points is None."""
test_shape = paramak.Shape()
def patch():
test_shape._create_patch()
self.assertRaises(ValueError, patch)
def test_create_patch_alpha(self):
"""Checks _create_patch returns a patch when alpha is given."""
test_shape = paramak.PoloidalFieldCoil(
center_point=(100, 100),
height=50,
width=50,
color=(0.5, 0.5, 0.5, 0.1)
)
assert test_shape._create_patch() is not None
def test_azimuth_placement_angle_error(self):
"""Checks an error is raised when invalid value for
azimuth_placement_angle is set.
"""
test_shape = paramak.Shape()
def angle_str():
test_shape.azimuth_placement_angle = "coucou"
def angle_str_in_Iterable():
test_shape.azimuth_placement_angle = [0, "coucou"]
self.assertRaises(ValueError, angle_str)
self.assertRaises(ValueError, angle_str_in_Iterable)
def test_name_error(self):
"""Checks an error is raised when invalid value for name is set."""
test_shape = paramak.Shape()
def name_float():
test_shape.name = 2.0
def name_int():
test_shape.name = 1
def name_list():
test_shape.name = ['coucou']
self.assertRaises(ValueError, name_float)
self.assertRaises(ValueError, name_int)
self.assertRaises(ValueError, name_list)
def test_tet_mesh_error(self):
"""Checks an error is raised when invalid value for tet_mesh is set.
"""
test_shape = paramak.Shape()
def tet_mesh_float():
test_shape.tet_mesh = 2.0
def tet_mesh_int():
test_shape.tet_mesh = 1
def tet_mesh_list():
test_shape.tet_mesh = ['coucou']
self.assertRaises(ValueError, tet_mesh_float)
self.assertRaises(ValueError, tet_mesh_int)
self.assertRaises(ValueError, tet_mesh_list)
if __name__ == "__main__":
unittest.main()
|
the-stack_0_599 | """This file is part of Splitter which is released under MIT License.
agg.py defines aggregation functions
"""
from splitter.dataflow.validation import check_metrics_and_filters, countable
from splitter.struct import IteratorVideoStream
from splitter.dataflow.xform import Null
import logging
import time
import itertools
def count(stream, keys, stats=False):
"""Count counts the true hits of a defined event.
"""
#actual logic is here
counter = {}
frame_count = 0
now = time.time()
for frame in stream:
frame_count += 1
if frame_count == 1:
logging.info("Processing first frame of stream")
for key in keys:
if frame[key]:
subkey = key + '_' + str(frame[key])
counter[subkey] = counter.get(subkey,0) + 1
# profiling
for obj in stream.lineage():
if hasattr(obj, "time_elapsed"):
logging.info("%s: %s" % (type(obj).__name__, obj.time_elapsed))
else:
logging.info("%s time not measured" % type(obj).__name__)
if not stats:
return counter
else:
return counter, {'frames': frame_count, \
'elapsed': (time.time() - now)}
def counts(streams, keys, stats=False):
"""Count counts the true hits of a defined event.
"""
stream = IteratorVideoStream(itertools.chain(*streams), streams)
lineage = []
for s in streams:
lineage.extend(s.lineage())
stream.global_lineage = lineage
return count(stream, keys, stats)
def get(stream, key, frame_rate=-1):
if frame_rate == -1:
return [(v['frame'], v['data']) for v in stream if v[key]]
else:
        return [(int(v['frame'] / frame_rate), v['data']) for v in stream if v[key]]
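# Usage sketch (hedged): `pipeline` stands for any splitter stream, i.e. an
# iterable of frame dicts that also exposes lineage(), and 'label' is a
# hypothetical key produced by an upstream transform.
#
#     hits, stats = count(pipeline, ['label'], stats=True)
#     clips = get(pipeline, 'label', frame_rate=30)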
|
the-stack_0_601 | import os
import sys
import subprocess
import tempfile
from time import sleep
from os.path import exists, join, abspath
from shutil import rmtree, copytree
from tempfile import mkdtemp
import six
from twisted.trial import unittest
from twisted.internet import defer
import scrapy
from scrapy.utils.python import to_native_str
from scrapy.utils.python import retry_on_eintr
from scrapy.utils.test import get_testenv
from scrapy.utils.testsite import SiteTest
from scrapy.utils.testproc import ProcessTest
class ProjectTest(unittest.TestCase):
project_name = 'testproject'
def setUp(self):
self.temp_path = mkdtemp()
self.cwd = self.temp_path
self.proj_path = join(self.temp_path, self.project_name)
self.proj_mod_path = join(self.proj_path, self.project_name)
self.env = get_testenv()
def tearDown(self):
rmtree(self.temp_path)
def call(self, *new_args, **kwargs):
with tempfile.TemporaryFile() as out:
args = (sys.executable, '-m', 'scrapy.cmdline') + new_args
return subprocess.call(args, stdout=out, stderr=out, cwd=self.cwd,
env=self.env, **kwargs)
def proc(self, *new_args, **kwargs):
args = (sys.executable, '-m', 'scrapy.cmdline') + new_args
p = subprocess.Popen(args, cwd=self.cwd, env=self.env,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
**kwargs)
waited = 0
interval = 0.2
while p.poll() is None:
sleep(interval)
waited += interval
if waited > 15:
p.kill()
assert False, 'Command took too much time to complete'
return p
class StartprojectTest(ProjectTest):
def test_startproject(self):
self.assertEqual(0, self.call('startproject', self.project_name))
assert exists(join(self.proj_path, 'scrapy.cfg'))
assert exists(join(self.proj_path, 'testproject'))
assert exists(join(self.proj_mod_path, '__init__.py'))
assert exists(join(self.proj_mod_path, 'items.py'))
assert exists(join(self.proj_mod_path, 'pipelines.py'))
assert exists(join(self.proj_mod_path, 'settings.py'))
assert exists(join(self.proj_mod_path, 'spiders', '__init__.py'))
self.assertEqual(1, self.call('startproject', self.project_name))
self.assertEqual(1, self.call('startproject', 'wrong---project---name'))
self.assertEqual(1, self.call('startproject', 'sys'))
class StartprojectTemplatesTest(ProjectTest):
def setUp(self):
super(StartprojectTemplatesTest, self).setUp()
self.tmpl = join(self.temp_path, 'templates')
self.tmpl_proj = join(self.tmpl, 'project')
def test_startproject_template_override(self):
copytree(join(scrapy.__path__[0], 'templates'), self.tmpl)
with open(join(self.tmpl_proj, 'root_template'), 'w'):
pass
assert exists(join(self.tmpl_proj, 'root_template'))
args = ['--set', 'TEMPLATES_DIR=%s' % self.tmpl]
p = self.proc('startproject', self.project_name, *args)
out = to_native_str(retry_on_eintr(p.stdout.read))
self.assertIn("New Scrapy project %r, using template directory" % self.project_name, out)
self.assertIn(self.tmpl_proj, out)
assert exists(join(self.proj_path, 'root_template'))
class CommandTest(ProjectTest):
def setUp(self):
super(CommandTest, self).setUp()
self.call('startproject', self.project_name)
self.cwd = join(self.temp_path, self.project_name)
self.env['SCRAPY_SETTINGS_MODULE'] = '%s.settings' % self.project_name
class GenspiderCommandTest(CommandTest):
def test_arguments(self):
# only pass one argument. spider script shouldn't be created
self.assertEqual(2, self.call('genspider', 'test_name'))
assert not exists(join(self.proj_mod_path, 'spiders', 'test_name.py'))
# pass two arguments <name> <domain>. spider script should be created
self.assertEqual(0, self.call('genspider', 'test_name', 'test.com'))
assert exists(join(self.proj_mod_path, 'spiders', 'test_name.py'))
def test_template(self, tplname='crawl'):
args = ['--template=%s' % tplname] if tplname else []
spname = 'test_spider'
p = self.proc('genspider', spname, 'test.com', *args)
out = to_native_str(retry_on_eintr(p.stdout.read))
self.assertIn("Created spider %r using template %r in module" % (spname, tplname), out)
self.assertTrue(exists(join(self.proj_mod_path, 'spiders', 'test_spider.py')))
p = self.proc('genspider', spname, 'test.com', *args)
out = to_native_str(retry_on_eintr(p.stdout.read))
self.assertIn("Spider %r already exists in module" % spname, out)
def test_template_basic(self):
self.test_template('basic')
def test_template_csvfeed(self):
self.test_template('csvfeed')
def test_template_xmlfeed(self):
self.test_template('xmlfeed')
def test_list(self):
self.assertEqual(0, self.call('genspider', '--list'))
def test_dump(self):
self.assertEqual(0, self.call('genspider', '--dump=basic'))
self.assertEqual(0, self.call('genspider', '-d', 'basic'))
def test_same_name_as_project(self):
self.assertEqual(2, self.call('genspider', self.project_name))
assert not exists(join(self.proj_mod_path, 'spiders', '%s.py' % self.project_name))
class MiscCommandsTest(CommandTest):
def test_list(self):
self.assertEqual(0, self.call('list'))
class RunSpiderCommandTest(CommandTest):
def test_runspider(self):
tmpdir = self.mktemp()
os.mkdir(tmpdir)
fname = abspath(join(tmpdir, 'myspider.py'))
with open(fname, 'w') as f:
f.write("""
import scrapy
class MySpider(scrapy.Spider):
name = 'myspider'
def start_requests(self):
self.logger.debug("It Works!")
return []
""")
p = self.proc('runspider', fname)
log = to_native_str(p.stderr.read())
self.assertIn("DEBUG: It Works!", log)
self.assertIn("INFO: Spider opened", log)
self.assertIn("INFO: Closing spider (finished)", log)
self.assertIn("INFO: Spider closed (finished)", log)
def test_runspider_no_spider_found(self):
tmpdir = self.mktemp()
os.mkdir(tmpdir)
fname = abspath(join(tmpdir, 'myspider.py'))
with open(fname, 'w') as f:
f.write("""
from scrapy.spiders import Spider
""")
p = self.proc('runspider', fname)
log = to_native_str(p.stderr.read())
self.assertIn("No spider found in file", log)
def test_runspider_file_not_found(self):
p = self.proc('runspider', 'some_non_existent_file')
log = to_native_str(p.stderr.read())
self.assertIn("File not found: some_non_existent_file", log)
def test_runspider_unable_to_load(self):
tmpdir = self.mktemp()
os.mkdir(tmpdir)
fname = abspath(join(tmpdir, 'myspider.txt'))
with open(fname, 'w') as f:
f.write("")
p = self.proc('runspider', fname)
log = to_native_str(p.stderr.read())
self.assertIn("Unable to load", log)
class ParseCommandTest(ProcessTest, SiteTest, CommandTest):
command = 'parse'
def setUp(self):
super(ParseCommandTest, self).setUp()
self.spider_name = 'parse_spider'
fname = abspath(join(self.proj_mod_path, 'spiders', 'myspider.py'))
with open(fname, 'w') as f:
f.write("""
import scrapy
class MySpider(scrapy.Spider):
name = '{0}'
def parse(self, response):
if getattr(self, 'test_arg', None):
self.logger.debug('It Works!')
return [scrapy.Item(), dict(foo='bar')]
""".format(self.spider_name))
fname = abspath(join(self.proj_mod_path, 'pipelines.py'))
with open(fname, 'w') as f:
f.write("""
import logging
class MyPipeline(object):
component_name = 'my_pipeline'
def process_item(self, item, spider):
logging.info('It Works!')
return item
""")
fname = abspath(join(self.proj_mod_path, 'settings.py'))
with open(fname, 'a') as f:
f.write("""
ITEM_PIPELINES = {'%s.pipelines.MyPipeline': 1}
""" % self.project_name)
@defer.inlineCallbacks
def test_spider_arguments(self):
_, _, stderr = yield self.execute(['--spider', self.spider_name,
'-a', 'test_arg=1',
'-c', 'parse',
self.url('/html')])
self.assertIn("DEBUG: It Works!", to_native_str(stderr))
@defer.inlineCallbacks
def test_pipelines(self):
_, _, stderr = yield self.execute(['--spider', self.spider_name,
'--pipelines',
'-c', 'parse',
self.url('/html')])
self.assertIn("INFO: It Works!", to_native_str(stderr))
@defer.inlineCallbacks
def test_parse_items(self):
status, out, stderr = yield self.execute(
['--spider', self.spider_name, '-c', 'parse', self.url('/html')]
)
self.assertIn("""[{}, {'foo': 'bar'}]""", to_native_str(out))
class BenchCommandTest(CommandTest):
def test_run(self):
p = self.proc('bench', '-s', 'LOGSTATS_INTERVAL=0.001',
'-s', 'CLOSESPIDER_TIMEOUT=0.01')
log = to_native_str(p.stderr.read())
self.assertIn('INFO: Crawled', log)
self.assertNotIn('Unhandled Error', log)
|
the-stack_0_603 | # -*- coding: utf-8 -*-
# Copyright (C) 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Read and write MPEG-4 audio files with iTunes metadata.
This module will read MPEG-4 audio information and metadata,
as found in Apple's MP4 (aka M4A, M4B, M4P) files.
There is no official specification for this format. The source code
for TagLib, FAAD, and various MPEG specifications at
* http://developer.apple.com/documentation/QuickTime/QTFF/
* http://www.geocities.com/xhelmboyx/quicktime/formats/mp4-layout.txt
* http://standards.iso.org/ittf/PubliclyAvailableStandards/\
c041828_ISO_IEC_14496-12_2005(E).zip
* http://wiki.multimedia.cx/index.php?title=Apple_QuickTime
were all consulted.
"""
import struct
import sys
from mutagen import FileType, Metadata, StreamInfo
from mutagen._constants import GENRES
from mutagen._util import (cdata, insert_bytes, DictProxy, MutagenError,
hashable, enum)
from mutagen._compat import (reraise, PY2, string_types, text_type, chr_,
iteritems, PY3, cBytesIO)
from ._atom import Atoms, Atom, AtomError
from ._util import parse_full_atom
from ._as_entry import AudioSampleEntry, ASEntryError
class error(IOError, MutagenError):
pass
class MP4MetadataError(error):
pass
class MP4StreamInfoError(error):
pass
class MP4MetadataValueError(ValueError, MP4MetadataError):
pass
__all__ = ['MP4', 'Open', 'delete', 'MP4Cover', 'MP4FreeForm', 'AtomDataType',
           'MediaKind', 'HDVideo', 'ContentRating']
@enum
class AtomDataType(object):
"""Enum for `dataformat` attribute of MP4FreeForm.
.. versionadded:: 1.25
"""
IMPLICIT = 0
"""for use with tags for which no type needs to be indicated because
only one type is allowed"""
UTF8 = 1
"""without any count or null terminator"""
UTF16 = 2
"""also known as UTF-16BE"""
SJIS = 3
"""deprecated unless it is needed for special Japanese characters"""
HTML = 6
"""the HTML file header specifies which HTML version"""
XML = 7
"""the XML header must identify the DTD or schemas"""
UUID = 8
"""also known as GUID; stored as 16 bytes in binary (valid as an ID)"""
ISRC = 9
"""stored as UTF-8 text (valid as an ID)"""
MI3P = 10
"""stored as UTF-8 text (valid as an ID)"""
GIF = 12
"""(deprecated) a GIF image"""
JPEG = 13
"""a JPEG image"""
PNG = 14
"""PNG image"""
URL = 15
"""absolute, in UTF-8 characters"""
DURATION = 16
"""in milliseconds, 32-bit integer"""
DATETIME = 17
"""in UTC, counting seconds since midnight, January 1, 1904;
32 or 64-bits"""
GENRES = 18
"""a list of enumerated values"""
INTEGER = 21
"""a signed big-endian integer with length one of { 1,2,3,4,8 } bytes"""
RIAA_PA = 24
"""RIAA parental advisory; { -1=no, 1=yes, 0=unspecified },
8-bit ingteger"""
UPC = 25
"""Universal Product Code, in text UTF-8 format (valid as an ID)"""
BMP = 27
"""Windows bitmap image"""
@hashable
class MP4Cover(bytes):
"""A cover artwork.
Attributes:
* imageformat -- format of the image (either FORMAT_JPEG or FORMAT_PNG)
"""
FORMAT_JPEG = AtomDataType.JPEG
FORMAT_PNG = AtomDataType.PNG
def __new__(cls, data, *args, **kwargs):
return bytes.__new__(cls, data)
def __init__(self, data, imageformat=FORMAT_JPEG):
self.imageformat = imageformat
__hash__ = bytes.__hash__
def __eq__(self, other):
if not isinstance(other, MP4Cover):
return NotImplemented
if not bytes.__eq__(self, other):
return False
if self.imageformat != other.imageformat:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s(%r, %r)" % (
type(self).__name__, bytes(self),
AtomDataType(self.imageformat))
@hashable
class MP4FreeForm(bytes):
"""A freeform value.
Attributes:
* dataformat -- format of the data (see AtomDataType)
"""
FORMAT_DATA = AtomDataType.IMPLICIT # deprecated
FORMAT_TEXT = AtomDataType.UTF8 # deprecated
def __new__(cls, data, *args, **kwargs):
return bytes.__new__(cls, data)
def __init__(self, data, dataformat=AtomDataType.UTF8, version=0):
self.dataformat = dataformat
self.version = version
__hash__ = bytes.__hash__
def __eq__(self, other):
if not isinstance(other, MP4FreeForm):
return NotImplemented
if not bytes.__eq__(self, other):
return False
if self.dataformat != other.dataformat:
return False
if self.version != other.version:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s(%r, %r)" % (
type(self).__name__, bytes(self),
AtomDataType(self.dataformat))
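# Freeform tag sketch (hedged; the mean/name pair "com.apple.iTunes:MIXER" is
# only illustrative, and `tags` stands for an MP4 or MP4Tags instance):
#
#     value = MP4FreeForm(b"mixed by someone", dataformat=AtomDataType.UTF8)
#     tags["----:com.apple.iTunes:MIXER"] = [value]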
def _name2key(name):
if PY2:
return name
return name.decode("latin-1")
def _key2name(key):
if PY2:
return key
return key.encode("latin-1")
class MP4Tags(DictProxy, Metadata):
r"""Dictionary containing Apple iTunes metadata list key/values.
Keys are four byte identifiers, except for freeform ('----')
keys. Values are usually unicode strings, but some atoms have a
special structure:
Text values (multiple values per key are supported):
* '\\xa9nam' -- track title
* '\\xa9alb' -- album
* '\\xa9ART' -- artist
* 'aART' -- album artist
* '\\xa9wrt' -- composer
* '\\xa9day' -- year
* '\\xa9cmt' -- comment
* 'desc' -- description (usually used in podcasts)
* 'purd' -- purchase date
* '\\xa9grp' -- grouping
* '\\xa9gen' -- genre
* '\\xa9lyr' -- lyrics
* 'purl' -- podcast URL
* 'egid' -- podcast episode GUID
* 'catg' -- podcast category
* 'keyw' -- podcast keywords
* '\\xa9too' -- encoded by
* 'cprt' -- copyright
* 'soal' -- album sort order
* 'soaa' -- album artist sort order
* 'soar' -- artist sort order
* 'sonm' -- title sort order
* 'soco' -- composer sort order
* 'sosn' -- show sort order
* 'tvsh' -- show name
Boolean values:
* 'cpil' -- part of a compilation
* 'pgap' -- part of a gapless album
* 'pcst' -- podcast (iTunes reads this only on import)
Tuples of ints (multiple values per key are supported):
* 'trkn' -- track number, total tracks
* 'disk' -- disc number, total discs
Others:
* 'tmpo' -- tempo/BPM, 16 bit int
* 'covr' -- cover artwork, list of MP4Cover objects (which are
tagged strs)
* 'gnre' -- ID3v1 genre. Not supported, use '\\xa9gen' instead.
The freeform '----' frames use a key in the format '----:mean:name'
where 'mean' is usually 'com.apple.iTunes' and 'name' is a unique
identifier for this frame. The value is a str, but is probably
text that can be decoded as UTF-8. Multiple values per key are
supported.
MP4 tag data cannot exist outside of the structure of an MP4 file,
so this class should not be manually instantiated.
Unknown non-text tags and tags that failed to parse will be written
back as is.
"""
def __init__(self, *args, **kwargs):
self._failed_atoms = {}
super(MP4Tags, self).__init__(*args, **kwargs)
def load(self, atoms, fileobj):
try:
ilst = atoms[b"moov.udta.meta.ilst"]
except KeyError as key:
raise MP4MetadataError(key)
for atom in ilst.children:
ok, data = atom.read(fileobj)
if not ok:
raise MP4MetadataError("Not enough data")
try:
if atom.name in self.__atoms:
info = self.__atoms[atom.name]
info[0](self, atom, data)
else:
# unknown atom, try as text
self.__parse_text(atom, data, implicit=False)
except MP4MetadataError:
# parsing failed, save them so we can write them back
key = _name2key(atom.name)
self._failed_atoms.setdefault(key, []).append(data)
def __setitem__(self, key, value):
if not isinstance(key, str):
raise TypeError("key has to be str")
super(MP4Tags, self).__setitem__(key, value)
@classmethod
def _can_load(cls, atoms):
return b"moov.udta.meta.ilst" in atoms
@staticmethod
def _key_sort(item):
(key, v) = item
# iTunes always writes the tags in order of "relevance", try
# to copy it as closely as possible.
order = ["\xa9nam", "\xa9ART", "\xa9wrt", "\xa9alb",
"\xa9gen", "gnre", "trkn", "disk",
"\xa9day", "cpil", "pgap", "pcst", "tmpo",
"\xa9too", "----", "covr", "\xa9lyr", "stik",
"tvsh", "tven", "tvsn", "tves", "tvnn"]
order = dict(zip(order, range(len(order))))
last = len(order)
# If there's no key-based way to distinguish, order by length.
# If there's still no way, go by string comparison on the
        # values, so we at least have something deterministic.
return (order.get(key[:4], last), len(repr(v)), repr(v))
def save(self, filename):
"""Save the metadata to the given filename."""
values = []
items = sorted(self.items(), key=self._key_sort)
for key, value in items:
atom_name = _key2name(key)[:4]
if atom_name in self.__atoms:
render_func = self.__atoms[atom_name][1]
else:
render_func = type(self).__render_text
try:
if value:
values.append(render_func(self, key, value))
except (TypeError, ValueError) as s:
reraise(MP4MetadataValueError, s, sys.exc_info()[2])
for key, failed in iteritems(self._failed_atoms):
# don't write atoms back if we have added a new one with
# the same name, this excludes freeform which can have
# multiple atoms with the same key (most parsers seem to be able
# to handle that)
if key in self:
assert _key2name(key) != b"----"
continue
for data in failed:
values.append(Atom.render(_key2name(key), data))
data = Atom.render(b"ilst", b"".join(values))
# Find the old atoms.
with open(filename, "rb+") as fileobj:
try:
atoms = Atoms(fileobj)
except AtomError as err:
reraise(error, err, sys.exc_info()[2])
try:
path = atoms.path(b"moov", b"udta", b"meta", b"ilst")
except KeyError:
self.__save_new(fileobj, atoms, data)
else:
self.__save_existing(fileobj, atoms, path, data)
def __pad_ilst(self, data, length=None):
if length is None:
length = ((len(data) + 1023) & ~1023) - len(data)
return Atom.render(b"free", b"\x00" * length)
def __save_new(self, fileobj, atoms, ilst):
hdlr = Atom.render(b"hdlr", b"\x00" * 8 + b"mdirappl" + b"\x00" * 9)
meta = Atom.render(
b"meta", b"\x00\x00\x00\x00" + hdlr + ilst + self.__pad_ilst(ilst))
try:
path = atoms.path(b"moov", b"udta")
except KeyError:
# moov.udta not found -- create one
path = atoms.path(b"moov")
meta = Atom.render(b"udta", meta)
offset = path[-1].offset + 8
insert_bytes(fileobj, len(meta), offset)
fileobj.seek(offset)
fileobj.write(meta)
self.__update_parents(fileobj, path, len(meta))
self.__update_offsets(fileobj, atoms, len(meta), offset)
def __save_existing(self, fileobj, atoms, path, data):
# Replace the old ilst atom.
ilst = path.pop()
offset = ilst.offset
length = ilst.length
# Check for padding "free" atoms
meta = path[-1]
index = meta.children.index(ilst)
try:
prev = meta.children[index - 1]
if prev.name == b"free":
offset = prev.offset
length += prev.length
except IndexError:
pass
try:
next = meta.children[index + 1]
if next.name == b"free":
length += next.length
except IndexError:
pass
delta = len(data) - length
if delta > 0 or (delta < 0 and delta > -8):
data += self.__pad_ilst(data)
delta = len(data) - length
insert_bytes(fileobj, delta, offset)
elif delta < 0:
data += self.__pad_ilst(data, -delta - 8)
delta = 0
fileobj.seek(offset)
fileobj.write(data)
self.__update_parents(fileobj, path, delta)
self.__update_offsets(fileobj, atoms, delta, offset)
def __update_parents(self, fileobj, path, delta):
"""Update all parent atoms with the new size."""
for atom in path:
fileobj.seek(atom.offset)
size = cdata.uint_be(fileobj.read(4))
if size == 1: # 64bit
# skip name (4B) and read size (8B)
size = cdata.ulonglong_be(fileobj.read(12)[4:])
fileobj.seek(atom.offset + 8)
fileobj.write(cdata.to_ulonglong_be(size + delta))
else: # 32bit
fileobj.seek(atom.offset)
fileobj.write(cdata.to_uint_be(size + delta))
def __update_offset_table(self, fileobj, fmt, atom, delta, offset):
"""Update offset table in the specified atom."""
if atom.offset > offset:
atom.offset += delta
fileobj.seek(atom.offset + 12)
data = fileobj.read(atom.length - 12)
fmt = fmt % cdata.uint_be(data[:4])
offsets = struct.unpack(fmt, data[4:])
offsets = [o + (0, delta)[offset < o] for o in offsets]
fileobj.seek(atom.offset + 16)
fileobj.write(struct.pack(fmt, *offsets))
def __update_tfhd(self, fileobj, atom, delta, offset):
if atom.offset > offset:
atom.offset += delta
fileobj.seek(atom.offset + 9)
data = fileobj.read(atom.length - 9)
flags = cdata.uint_be(b"\x00" + data[:3])
if flags & 1:
o = cdata.ulonglong_be(data[7:15])
if o > offset:
o += delta
fileobj.seek(atom.offset + 16)
fileobj.write(cdata.to_ulonglong_be(o))
def __update_offsets(self, fileobj, atoms, delta, offset):
"""Update offset tables in all 'stco' and 'co64' atoms."""
if delta == 0:
return
moov = atoms[b"moov"]
for atom in moov.findall(b'stco', True):
self.__update_offset_table(fileobj, ">%dI", atom, delta, offset)
for atom in moov.findall(b'co64', True):
self.__update_offset_table(fileobj, ">%dQ", atom, delta, offset)
try:
for atom in atoms[b"moof"].findall(b'tfhd', True):
self.__update_tfhd(fileobj, atom, delta, offset)
except KeyError:
pass
def __parse_data(self, atom, data):
pos = 0
while pos < atom.length - 8:
head = data[pos:pos + 12]
if len(head) != 12:
raise MP4MetadataError("truncated atom % r" % atom.name)
length, name = struct.unpack(">I4s", head[:8])
version = ord(head[8:9])
flags = struct.unpack(">I", b"\x00" + head[9:12])[0]
if name != b"data":
raise MP4MetadataError(
"unexpected atom %r inside %r" % (name, atom.name))
chunk = data[pos + 16:pos + length]
if len(chunk) != length - 16:
raise MP4MetadataError("truncated atom % r" % atom.name)
yield version, flags, chunk
pos += length
def __add(self, key, value, single=False):
assert isinstance(key, str)
if single:
self[key] = value
else:
self.setdefault(key, []).extend(value)
def __render_data(self, key, version, flags, value):
return Atom.render(_key2name(key), b"".join([
Atom.render(
b"data", struct.pack(">2I", version << 24 | flags, 0) + data)
for data in value]))
def __parse_freeform(self, atom, data):
length = cdata.uint_be(data[:4])
mean = data[12:length]
pos = length
length = cdata.uint_be(data[pos:pos + 4])
name = data[pos + 12:pos + length]
pos += length
value = []
while pos < atom.length - 8:
length, atom_name = struct.unpack(">I4s", data[pos:pos + 8])
if atom_name != b"data":
raise MP4MetadataError(
"unexpected atom %r inside %r" % (atom_name, atom.name))
version = ord(data[pos + 8:pos + 8 + 1])
flags = struct.unpack(">I", b"\x00" + data[pos + 9:pos + 12])[0]
value.append(MP4FreeForm(data[pos + 16:pos + length],
dataformat=flags, version=version))
pos += length
key = _name2key(atom.name + b":" + mean + b":" + name)
self.__add(key, value)
def __render_freeform(self, key, value):
if isinstance(value, bytes):
value = [value]
dummy, mean, name = _key2name(key).split(b":", 2)
mean = struct.pack(">I4sI", len(mean) + 12, b"mean", 0) + mean
name = struct.pack(">I4sI", len(name) + 12, b"name", 0) + name
data = b""
for v in value:
flags = AtomDataType.UTF8
version = 0
if isinstance(v, MP4FreeForm):
flags = v.dataformat
version = v.version
data += struct.pack(
">I4s2I", len(v) + 16, b"data", version << 24 | flags, 0)
            data += v
return Atom.render(b"----", mean + name + data)
def __parse_pair(self, atom, data):
key = _name2key(atom.name)
values = [struct.unpack(">2H", d[2:6]) for
version, flags, d in self.__parse_data(atom, data)]
self.__add(key, values)
def __render_pair(self, key, value):
data = []
for (track, total) in value:
if 0 <= track < 1 << 16 and 0 <= total < 1 << 16:
data.append(struct.pack(">4H", 0, track, total, 0))
else:
raise MP4MetadataValueError(
"invalid numeric pair %r" % ((track, total),))
return self.__render_data(key, 0, AtomDataType.IMPLICIT, data)
def __render_pair_no_trailing(self, key, value):
data = []
for (track, total) in value:
if 0 <= track < 1 << 16 and 0 <= total < 1 << 16:
data.append(struct.pack(">3H", 0, track, total))
else:
raise MP4MetadataValueError(
"invalid numeric pair %r" % ((track, total),))
return self.__render_data(key, 0, AtomDataType.IMPLICIT, data)
def __parse_genre(self, atom, data):
values = []
for version, flags, data in self.__parse_data(atom, data):
# version = 0, flags = 0
if len(data) != 2:
raise MP4MetadataValueError("invalid genre")
genre = cdata.short_be(data)
# Translate to a freeform genre.
try:
genre = GENRES[genre - 1]
except IndexError:
# this will make us write it back at least
raise MP4MetadataValueError("unknown genre")
values.append(genre)
key = _name2key(b"\xa9gen")
self.__add(key, values)
def __parse_tempo(self, atom, data):
values = []
for version, flags, data in self.__parse_data(atom, data):
# version = 0, flags = 0 or 21
if len(data) != 2:
raise MP4MetadataValueError("invalid tempo")
values.append(cdata.ushort_be(data))
key = _name2key(atom.name)
self.__add(key, values)
def __render_tempo(self, key, value):
try:
if len(value) == 0:
return self.__render_data(key, 0, AtomDataType.INTEGER, b"")
if (min(value) < 0) or (max(value) >= 2 ** 16):
raise MP4MetadataValueError(
"invalid 16 bit integers: %r" % value)
except TypeError:
raise MP4MetadataValueError(
"tmpo must be a list of 16 bit integers")
values = [cdata.to_ushort_be(v) for v in value]
return self.__render_data(key, 0, AtomDataType.INTEGER, values)
def __parse_bool(self, atom, data):
for version, flags, data in self.__parse_data(atom, data):
if len(data) != 1:
raise MP4MetadataValueError("invalid bool")
value = bool(ord(data))
key = _name2key(atom.name)
self.__add(key, value, single=True)
def __render_bool(self, key, value):
return self.__render_data(
key, 0, AtomDataType.INTEGER, [chr_(bool(value))])
def __parse_cover(self, atom, data):
values = []
pos = 0
while pos < atom.length - 8:
length, name, imageformat = struct.unpack(">I4sI",
data[pos:pos + 12])
if name != b"data":
if name == b"name":
pos += length
continue
raise MP4MetadataError(
"unexpected atom %r inside 'covr'" % name)
if imageformat not in (MP4Cover.FORMAT_JPEG, MP4Cover.FORMAT_PNG):
# Sometimes AtomDataType.IMPLICIT or simply wrong.
# In all cases it was jpeg, so default to it
imageformat = MP4Cover.FORMAT_JPEG
cover = MP4Cover(data[pos + 16:pos + length], imageformat)
values.append(cover)
pos += length
key = _name2key(atom.name)
self.__add(key, values)
def __render_cover(self, key, value):
atom_data = []
for cover in value:
try:
imageformat = cover.imageformat
except AttributeError:
imageformat = MP4Cover.FORMAT_JPEG
atom_data.append(Atom.render(
b"data", struct.pack(">2I", imageformat, 0) + cover))
return Atom.render(_key2name(key), b"".join(atom_data))
def __parse_text(self, atom, data, implicit=True):
# implicit = False, for parsing unknown atoms only take utf8 ones.
# For known ones we can assume the implicit are utf8 too.
values = []
for version, flags, atom_data in self.__parse_data(atom, data):
if implicit:
if flags not in (AtomDataType.IMPLICIT, AtomDataType.UTF8):
raise MP4MetadataError(
"Unknown atom type %r for %r" % (flags, atom.name))
else:
if flags != AtomDataType.UTF8:
raise MP4MetadataError(
"%r is not text, ignore" % atom.name)
try:
text = atom_data.decode("utf-8")
except UnicodeDecodeError as e:
raise MP4MetadataError("%s: %s" % (_name2key(atom.name), e))
values.append(text)
key = _name2key(atom.name)
self.__add(key, values)
def __render_text(self, key, value, flags=AtomDataType.UTF8):
if isinstance(value, string_types):
value = [value]
encoded = []
for v in value:
if not isinstance(v, text_type):
if PY3:
raise TypeError("%r not str" % v)
v = v.decode("utf-8")
encoded.append(v.encode("utf-8"))
return self.__render_data(key, 0, flags, encoded)
def __render_8int(self, key, value):
try:
if len(value) == 0:
return self.__render_data(key, 0x07, b"")
if min(value) < 0 or max(value) >= 2 ** 8:
raise MP4MetadataValueError(
"invalid 8 bit integers: %r" % value)
except TypeError:
raise MP4MetadataValueError(
"%s must be a list of 8 bit integers" % (key))
values = list(map(cdata.to_uchar_be, value))
return self.__render_data(key, 0, 0x07, values)
def __render_32int(self, key, value):
try:
if len(value) == 0:
return self.__render_data(key, 0x31, b"")
if min(value) < 0 or max(value) >= 2 ** 32:
raise MP4MetadataValueError(
"invalid 32 bit integers: %r" % value)
except TypeError:
raise MP4MetadataValueError(
"%s must be a list of 32 bit integers" % (key))
values = list(map(cdata.to_uint_be, value))
return self.__render_data(key, 0, 0x31, values)
def delete(self, filename):
"""Remove the metadata from the given filename."""
self._failed_atoms.clear()
self.clear()
self.save(filename)
__atoms = {
b"----": (__parse_freeform, __render_freeform),
b"trkn": (__parse_pair, __render_pair),
b"disk": (__parse_pair, __render_pair_no_trailing),
b"gnre": (__parse_genre, None),
b"tmpo": (__parse_tempo, __render_tempo),
b"cpil": (__parse_bool, __render_bool),
b"pgap": (__parse_bool, __render_bool),
b"pcst": (__parse_bool, __render_bool),
b"covr": (__parse_cover, __render_cover),
b"purl": (__parse_text, __render_text),
b"egid": (__parse_text, __render_text),
b"hdvd": (__parse_text, __render_8int),
b"tves": (__parse_text, __render_32int),
b"tvsn": (__parse_text, __render_32int),
b"stik": (__parse_text, __render_8int),
b"rtng": (__parse_text, __render_8int),
}
# these allow implicit flags and parse as text
for name in [b"\xa9nam", b"\xa9alb", b"\xa9ART", b"aART", b"\xa9wrt",
b"\xa9day", b"\xa9cmt", b"desc", b"purd", b"\xa9grp",
b"\xa9gen", b"\xa9lyr", b"catg", b"keyw", b"\xa9too",
b"cprt", b"soal", b"soaa", b"soar", b"sonm", b"soco",
b"sosn", b"tvsh", b"tven", b"tvnn"]:
__atoms[name] = (__parse_text, __render_text)
def pprint(self):
values = []
for key, value in iteritems(self):
if not isinstance(key, text_type):
key = key.decode("latin-1")
if key == "covr":
values.append("%s=%s" % (key, ", ".join(
["[%d bytes of data]" % len(data) for data in value])))
elif isinstance(value, list):
for v in value:
values.append("%s=%r" % (key, v))
else:
values.append("%s=%r" % (key, value))
return "\n".join(values)
class MP4Info(StreamInfo):
"""MPEG-4 stream information.
Attributes:
* bitrate -- bitrate in bits per second, as an int
* length -- file length in seconds, as a float
* channels -- number of audio channels
* sample_rate -- audio sampling rate in Hz
* bits_per_sample -- bits per sample
* codec (string):
* if starting with ``"mp4a"`` uses an mp4a audio codec
(see the codec parameter in rfc6381 for details e.g. ``"mp4a.40.2"``)
* for everything else see a list of possible values at
http://www.mp4ra.org/codecs.html
e.g. ``"mp4a"``, ``"alac"``, ``"mp4a.40.2"``, ``"ac-3"`` etc.
* codec_description (string):
Name of the codec used (ALAC, AAC LC, AC-3...). Values might change in
the future, use for display purposes only.
"""
bitrate = 0
channels = 0
sample_rate = 0
bits_per_sample = 0
codec = u""
codec_name = u""
def __init__(self, atoms, fileobj):
try:
moov = atoms[b"moov"]
except KeyError:
raise MP4StreamInfoError("not a MP4 file")
for trak in moov.findall(b"trak"):
hdlr = trak[b"mdia", b"hdlr"]
ok, data = hdlr.read(fileobj)
if not ok:
raise MP4StreamInfoError("Not enough data")
if data[8:12] == b"soun":
break
else:
raise MP4StreamInfoError("track has no audio data")
mdhd = trak[b"mdia", b"mdhd"]
ok, data = mdhd.read(fileobj)
if not ok:
raise MP4StreamInfoError("Not enough data")
try:
version, flags, data = parse_full_atom(data)
except ValueError as e:
raise MP4StreamInfoError(e)
if version == 0:
offset = 8
fmt = ">2I"
elif version == 1:
offset = 16
fmt = ">IQ"
else:
raise MP4StreamInfoError("Unknown mdhd version %d" % version)
end = offset + struct.calcsize(fmt)
unit, length = struct.unpack(fmt, data[offset:end])
try:
self.length = float(length) / unit
except ZeroDivisionError:
self.length = 0
try:
atom = trak[b"mdia", b"minf", b"stbl", b"stsd"]
except KeyError:
pass
else:
self._parse_stsd(atom, fileobj)
def _parse_stsd(self, atom, fileobj):
"""Sets channels, bits_per_sample, sample_rate and optionally bitrate.
Can raise MP4StreamInfoError.
"""
assert atom.name == b"stsd"
ok, data = atom.read(fileobj)
if not ok:
raise MP4StreamInfoError("Invalid stsd")
try:
version, flags, data = parse_full_atom(data)
except ValueError as e:
raise MP4StreamInfoError(e)
if version != 0:
raise MP4StreamInfoError("Unsupported stsd version")
try:
num_entries, offset = cdata.uint32_be_from(data, 0)
except cdata.error as e:
raise MP4StreamInfoError(e)
if num_entries == 0:
return
# look at the first entry if there is one
entry_fileobj = cBytesIO(data[offset:])
try:
entry_atom = Atom(entry_fileobj)
except AtomError as e:
raise MP4StreamInfoError(e)
try:
entry = AudioSampleEntry(entry_atom, entry_fileobj)
except ASEntryError as e:
raise MP4StreamInfoError(e)
else:
self.channels = entry.channels
self.bits_per_sample = entry.sample_size
self.sample_rate = entry.sample_rate
self.bitrate = entry.bitrate
self.codec = entry.codec
self.codec_description = entry.codec_description
def pprint(self):
return "MPEG-4 audio (%s), %.2f seconds, %d bps" % (
self.codec_description, self.length, self.bitrate)
class MP4(FileType):
"""An MPEG-4 audio file, probably containing AAC.
If more than one track is present in the file, the first is used.
Only audio ('soun') tracks will be read.
:ivar info: :class:`MP4Info`
:ivar tags: :class:`MP4Tags`
"""
MP4Tags = MP4Tags
_mimes = ["audio/mp4", "audio/x-m4a", "audio/mpeg4", "audio/aac"]
def load(self, filename):
self.filename = filename
with open(filename, "rb") as fileobj:
try:
atoms = Atoms(fileobj)
except AtomError as err:
reraise(error, err, sys.exc_info()[2])
try:
self.info = MP4Info(atoms, fileobj)
except error:
raise
except Exception as err:
reraise(MP4StreamInfoError, err, sys.exc_info()[2])
if not MP4Tags._can_load(atoms):
self.tags = None
else:
try:
self.tags = self.MP4Tags(atoms, fileobj)
except error:
raise
except Exception as err:
reraise(MP4MetadataError, err, sys.exc_info()[2])
def add_tags(self):
if self.tags is None:
self.tags = self.MP4Tags()
else:
raise error("an MP4 tag already exists")
@staticmethod
def score(filename, fileobj, header_data):
return (b"ftyp" in header_data) + (b"mp4" in header_data)
Open = MP4
def delete(filename):
"""Remove tags from a file."""
MP4(filename).delete()
class MediaKind:
MUSIC = [1]
AUDIO_BOOK = [2]
MUSIC_VIDEO = [6]
MOVIE = [9]
TV_SHOW = [10]
BOOKLET = [11]
RINGTONE = [14]
class HDVideo:
STANDARD = [0]
P720 = [1]
P1080 = [2]
class ContentRating:
NONE = [0]
CLEAN = [2]
EXPLICIT = [4]
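# Usage sketch (hedged; "episode.m4a" is a hypothetical path): these constants
# are one-element lists that can be assigned directly to the matching atoms,
# which are rendered as 8-bit integers.
#
#     audio = MP4("episode.m4a")
#     audio["stik"] = MediaKind.TV_SHOW
#     audio["hdvd"] = HDVideo.P1080
#     audio["rtng"] = ContentRating.CLEAN
#     audio.save()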
|
the-stack_0_605 | # python3.7
"""Collects all available models together."""
from .model_zoo import MODEL_ZOO
from .pggan_generator import PGGANGenerator
from .pggan_discriminator import PGGANDiscriminator
from .stylegan_generator import StyleGANGenerator
from .stylegan_discriminator import StyleGANDiscriminator
from .stylegan2_generator import StyleGAN2Generator
from .stylegan2_discriminator import StyleGAN2Discriminator
from .stylegan2_gs_generator import StyleGAN2_GS_Generator
# from op import FusedLeakyReLU, fused_leaky_relu, upfirdn2d
__all__ = [
'MODEL_ZOO', 'PGGANGenerator', 'PGGANDiscriminator', 'StyleGANGenerator',
'StyleGANDiscriminator', 'StyleGAN2Generator', 'StyleGAN2Discriminator',
'StyleGAN2_GS_Generator', 'build_generator', 'build_discriminator', 'build_model',
]
_GAN_TYPES_ALLOWED = ['pggan', 'stylegan', 'stylegan2', 'stylegan2_gs']
_MODULES_ALLOWED = ['generator', 'discriminator']
def build_generator(gan_type, resolution, **kwargs):
"""Builds generator by GAN type.
Args:
gan_type: GAN type to which the generator belong.
resolution: Synthesis resolution.
**kwargs: Additional arguments to build the generator.
Raises:
ValueError: If the `gan_type` is not supported.
NotImplementedError: If the `gan_type` is not implemented.
"""
if gan_type not in _GAN_TYPES_ALLOWED:
raise ValueError(f'Invalid GAN type: `{gan_type}`!\n'
f'Types allowed: {_GAN_TYPES_ALLOWED}.')
if gan_type == 'pggan':
return PGGANGenerator(resolution, **kwargs)
if gan_type == 'stylegan':
return StyleGANGenerator(resolution, **kwargs)
if gan_type == 'stylegan2':
return StyleGAN2Generator(resolution, **kwargs)
if gan_type == 'stylegan2_gs':
return StyleGAN2_GS_Generator(resolution, **kwargs)
raise NotImplementedError(f'Unsupported GAN type `{gan_type}`!')
def build_discriminator(gan_type, resolution, **kwargs):
"""Builds discriminator by GAN type.
Args:
gan_type: GAN type to which the discriminator belong.
resolution: Synthesis resolution.
**kwargs: Additional arguments to build the discriminator.
Raises:
ValueError: If the `gan_type` is not supported.
NotImplementedError: If the `gan_type` is not implemented.
"""
if gan_type not in _GAN_TYPES_ALLOWED:
raise ValueError(f'Invalid GAN type: `{gan_type}`!\n'
f'Types allowed: {_GAN_TYPES_ALLOWED}.')
if gan_type == 'pggan':
return PGGANDiscriminator(resolution, **kwargs)
if gan_type == 'stylegan':
return StyleGANDiscriminator(resolution, **kwargs)
if gan_type == 'stylegan2':
return StyleGAN2Discriminator(resolution, **kwargs)
raise NotImplementedError(f'Unsupported GAN type `{gan_type}`!')
def build_model(gan_type, module, resolution, **kwargs):
"""Builds a GAN module (generator/discriminator/etc).
Args:
gan_type: GAN type to which the model belong.
module: GAN module to build, such as generator or discrimiantor.
resolution: Synthesis resolution.
**kwargs: Additional arguments to build the discriminator.
Raises:
ValueError: If the `module` is not supported.
NotImplementedError: If the `module` is not implemented.
"""
if module not in _MODULES_ALLOWED:
raise ValueError(f'Invalid module: `{module}`!\n'
f'Modules allowed: {_MODULES_ALLOWED}.')
if module == 'generator':
return build_generator(gan_type, resolution, **kwargs)
if module == 'discriminator':
return build_discriminator(gan_type, resolution, **kwargs)
raise NotImplementedError(f'Unsupported module `{module}`!')
def parse_gan_type(module):
"""Parses GAN type of a given module.
Args:
module: The module to parse GAN type from.
Returns:
A string, indicating the GAN type.
Raises:
ValueError: If the GAN type is unknown.
"""
if isinstance(module, (PGGANGenerator, PGGANDiscriminator)):
return 'pggan'
if isinstance(module, (StyleGANGenerator, StyleGANDiscriminator)):
return 'stylegan'
if isinstance(module, (StyleGAN2Generator, StyleGAN2Discriminator)):
return 'stylegan2'
if isinstance(module, (StyleGAN2_GS_Generator, StyleGAN2Discriminator)):
return 'stylegan2_gs'
raise ValueError(f'Unable to parse GAN type from type `{type(module)}`!')
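# Usage sketch (hedged; assumes the StyleGAN2 classes above import cleanly and
# that a 256x256 generator is wanted):
#
#     generator = build_model('stylegan2', 'generator', resolution=256)
#     assert parse_gan_type(generator) == 'stylegan2'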
|
the-stack_0_609 | # This file is part of the Blockchain Data Trading Simulator
# https://gitlab.com/MatthiasLohr/bdtsim
#
# Copyright 2021 Matthias Lohr <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import multiprocessing
import os
from multiprocessing.pool import ApplyResult
from typing import Any, Dict, Optional, Tuple
from queue import Queue
import yaml
from bdtsim.account import AccountFile
from bdtsim.data_provider import DataProviderManager
from bdtsim.environment import EnvironmentManager
from bdtsim.protocol import ProtocolManager, DEFAULT_ASSET_PRICE
from bdtsim.renderer import RendererManager
from bdtsim.simulation import Simulation
from bdtsim.simulation_result import SimulationResult, SimulationResultSerializer
from bdtsim.util.types import to_bool
from .command_manager import SubCommand
DEFAULT_ENVIRONMENT_CONFIGURATION: Dict[str, Any] = {'name': 'PyEVM'}
DEFAULT_DATA_PROVIDER_CONFIGURATION: Dict[str, Any] = {'name': 'RandomDataProvider'}
logger = logging.getLogger(__name__)
class BulkExecuteSubCommand(SubCommand):
help = 'bulk execute simulations and renderings'
def __init__(self, parser: argparse.ArgumentParser) -> None:
super(BulkExecuteSubCommand, self).__init__(parser)
parser.add_argument('bulk_configuration')
parser.add_argument('-p', '--processes', type=int, default=multiprocessing.cpu_count())
def __call__(self, args: argparse.Namespace) -> Optional[int]:
with open(args.bulk_configuration, 'r') as fp:
bulk_configuration = yaml.load(fp, Loader=yaml.SafeLoader)
logger.info('creating process pool with %i processes' % args.processes)
process_pool = multiprocessing.Pool(processes=args.processes)
processes: Queue[ApplyResult[Any]] = Queue()
simulation_configurations = bulk_configuration.get('simulations')
if not isinstance(simulation_configurations, list):
raise ValueError('simulations is not a list')
renderer_configurations = bulk_configuration.get('renderers')
if not isinstance(simulation_configurations, list):
raise ValueError('renderers is not a list')
target_directory = bulk_configuration.get('target_directory', 'bulk_output')
os.makedirs(target_directory, exist_ok=True)
def renderer_success_callback(params: Tuple[Dict[str, Any], Dict[str, Any], bytes]) -> None:
sim_conf, renderer_conf, result = params
logger.info('renderer succeeded (%s, %s)' % (str(sim_conf), str(renderer_conf)))
with open(os.path.join(
target_directory,
self.get_output_filename(sim_conf, renderer_conf, suffix=renderer_conf.get('suffix'))
), 'wb') as f:
f.write(result)
def renderer_error_callback(error: BaseException) -> None:
logger.warning('renderer error: %s' % str(error))
def simulation_success_callback(params: Tuple[Dict[str, Any], SimulationResult]) -> None:
local_simulation_configuration, result = params
logger.info('simulation succeeded (%s)' % str(local_simulation_configuration))
logger.debug('writing down result')
with open(os.path.join(
target_directory,
self.get_output_filename(local_simulation_configuration, suffix='result')
), 'wb') as f:
simulation_result_serializer = SimulationResultSerializer(
compression=to_bool(bulk_configuration.get('output_compression', True)),
b64encoding=to_bool(bulk_configuration.get('output_b64encoding', True))
)
f.write(simulation_result_serializer.serialize(result))
logger.debug('scheduling renderers')
for renderer_configuration in renderer_configurations:
processes.put(process_pool.apply_async(
func=self.run_renderer,
kwds={
'simulation_configuration': local_simulation_configuration,
'renderer_configuration': renderer_configuration,
'simulation_result': result
},
callback=renderer_success_callback,
error_callback=renderer_error_callback
))
def simulation_error_callback(error: BaseException) -> None:
logger.warning('simulation error callback called: %s' % str(error))
logger.debug('scheduling simulations')
for simulation_configuration in simulation_configurations:
processes.put(process_pool.apply_async(
func=self.run_simulation,
kwds={
'simulation_configuration': simulation_configuration
},
callback=simulation_success_callback,
error_callback=simulation_error_callback
))
while not processes.empty():
process = processes.get(block=True)
process.wait()
return 0
@staticmethod
def run_simulation(simulation_configuration: Dict[str, Any]) -> Tuple[Dict[str, Any], SimulationResult]:
protocol_configuration = simulation_configuration.get('protocol')
environment_configuration = simulation_configuration.get('environment')
data_provider_configuration = simulation_configuration.get('data_provider')
if protocol_configuration is None:
raise ValueError('missing protocol configuration')
if environment_configuration is None:
environment_configuration = DEFAULT_ENVIRONMENT_CONFIGURATION
if data_provider_configuration is None:
data_provider_configuration = DEFAULT_DATA_PROVIDER_CONFIGURATION
protocol = ProtocolManager.instantiate(
name=protocol_configuration.get('name', ''),
**protocol_configuration.get('parameters', {})
)
account_file = AccountFile(simulation_configuration.get('account_file'))
environment = EnvironmentManager.instantiate(
name=environment_configuration.get('name', ''),
operator=account_file.operator,
seller=account_file.seller,
buyer=account_file.buyer,
**environment_configuration.get('parameters', {})
)
data_provider = DataProviderManager.instantiate(
name=data_provider_configuration.get('name', ''),
**data_provider_configuration.get('parameters', {})
)
simulation = Simulation(
protocol=protocol,
environment=environment,
data_provider=data_provider,
operator=account_file.operator,
seller=account_file.seller,
buyer=account_file.buyer,
protocol_path_coercion=simulation_configuration.get('protocol_path'),
price=simulation_configuration.get('price', DEFAULT_ASSET_PRICE),
)
simulation_result = simulation.run()
return simulation_configuration, simulation_result
@staticmethod
def run_renderer(simulation_configuration: Dict[str, Any], renderer_configuration: Dict[str, Any],
simulation_result: SimulationResult) -> Tuple[Dict[str, Any], Dict[str, Any], bytes]:
renderer = RendererManager.instantiate(
name=renderer_configuration.get('name', ''),
**renderer_configuration.get('parameters', {})
)
result = renderer.render(simulation_result)
return simulation_configuration, renderer_configuration, result
@staticmethod
def get_output_filename(simulation_configuration: Dict[str, Any],
renderer_configuration: Optional[Dict[str, Any]] = None,
suffix: Optional[str] = None) -> str:
def component2str(component_config: Dict[str, Any]) -> str:
result = str(component_config.get('name'))
parameter_lines = []
for key, value in component_config.get('parameters', {}).items():
parameter_lines.append('%s=%s' % (key, value))
if len(parameter_lines):
result += '-%s' % '-'.join(parameter_lines)
return result
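        # e.g. component2str({'name': 'ping', 'parameters': {'n': 3}}) -> 'ping-n=3' (illustrative config)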
output = '_'.join([
component2str(simulation_configuration.get('protocol', {})),
component2str(simulation_configuration.get('environment', {})),
component2str(simulation_configuration.get('data_provider', DEFAULT_DATA_PROVIDER_CONFIGURATION))
])
if renderer_configuration is not None:
output += '_%s' % component2str(renderer_configuration)
if suffix is not None:
output += '.%s' % suffix
return output
|
the-stack_0_612 | import torch
import torch.nn as nn
class ACM(nn.Module):
# def __init__(self, in_channels, num_heads=32, orthogonal_loss=True):
def __init__(self, in_channels, num_heads=8, orthogonal_loss=True):
super(ACM, self).__init__()
assert in_channels % num_heads == 0
self.in_channels = in_channels
self.num_heads = num_heads
self.add_mod = AttendModule(self.in_channels, num_heads=num_heads)
self.sub_mod = AttendModule(self.in_channels, num_heads=num_heads)
self.mul_mod = ModulateModule(channel=self.in_channels, num_groups=num_heads, compressions=2)
self.orthogonal_loss = orthogonal_loss
self.init_parameters()
def init_parameters(self):
if self.add_mod is not None:
self.add_mod.init_parameters()
if self.sub_mod is not None:
self.sub_mod.init_parameters()
if self.mul_mod is not None:
self.mul_mod.init_parameters()
def forward(self, x):
mu = x.mean([2, 3], keepdim=True)
x_mu = x - mu
        # creates multiplying feature
mul_feature = self.mul_mod(mu) # P
        # creates add feature
add_feature = self.add_mod(x_mu) # K
        # creates sub feature
sub_feature = self.sub_mod(x_mu) # Q
y = (x + add_feature - sub_feature) * mul_feature
if self.orthogonal_loss:
dp = torch.mean(add_feature * sub_feature, dim=1, keepdim=True)
return y, dp
else:
return y
class AttendModule(nn.Module):
def __init__(self, in_channels, num_heads=4):
super(AttendModule, self).__init__()
self.num_heads = int(num_heads)
self.in_channels = in_channels
self.num_c_per_head = self.in_channels // self.num_heads
assert self.in_channels % self.num_heads == 0
self.map_gen = nn.Sequential(
nn.Conv2d(in_channels, num_heads, kernel_size=1, stride=1, padding=0, bias=True, groups=num_heads)
)
self.normalize = nn.Softmax(dim=2)
self.return_weight = False
def init_parameters(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
nn.init.constant_(m.bias, 0.0)
def batch_weighted_avg(self, xhats, weights):
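        # per-head weighted average over spatial positions:
        # xhats (b, c, h, w) with weights (b, num_heads, h, w) -> mus (b, c, 1, 1)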
b, c, h, w = xhats.shape
# xhat reshape
xhats_reshape = xhats.view(b * self.num_heads, self.num_c_per_head, h, w)
xhats_reshape = xhats_reshape.view(b * self.num_heads, self.num_c_per_head, h * w)
# weight reshape
weights_reshape = weights.view(b * self.num_heads, 1, h, w)
weights_reshape = weights_reshape.view(b * self.num_heads, 1, h * w)
weights_normalized = self.normalize(weights_reshape)
weights_normalized = weights_normalized.transpose(1, 2)
mus = torch.bmm(xhats_reshape, weights_normalized)
mus = mus.view(b, self.num_heads * self.num_c_per_head, 1, 1)
return mus, weights_normalized
def forward(self, x):
b, c, h, w = x.shape
weights = self.map_gen(x)
mus, weights_normalized = self.batch_weighted_avg(x, weights)
if self.return_weight:
weights_normalized = weights_normalized.view(b, self.num_heads, h * w, 1)
weights_normalized = weights_normalized.squeeze(-1)
weights_normalized = weights_normalized.view(b, self.num_heads, h, w)
weights_splitted = torch.split(weights_normalized, 1, 1)
return mus, weights_splitted
return mus
class ModulateModule(nn.Module):
def __init__(self, channel, num_groups=32, compressions=2):
super(ModulateModule, self).__init__()
self.feature_gen = nn.Sequential(
nn.Conv2d(channel, channel//compressions, kernel_size=1, stride=1, padding=0, bias=True, groups=num_groups),
nn.ReLU(inplace=True),
nn.Conv2d(channel//compressions, channel, kernel_size=1, stride=1, padding=0, bias=True, groups=num_groups),
nn.Sigmoid()
)
def init_parameters(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
nn.init.constant_(m.bias, 0.0)
def forward(self, x):
y = self.feature_gen(x)
return y
if __name__ == '__main__':
x1 = torch.randn(256 * 20 * 20 * 5).view(5, 256, 20, 20).float()
acm = ACM(num_heads=32, in_channels=256, orthogonal_loss=True)
acm.init_parameters()
y, dp = acm(x1)
print(y.shape)
print(dp.shape)
# ACM without orthogonal loss
acm = ACM(num_heads=32, in_channels=256, orthogonal_loss=False)
acm.init_parameters()
y = acm(x1)
print(y.shape)
|
the-stack_0_613 | import subprocess
import time
import os
localtime = time.asctime( time.localtime(time.time()))
data = subprocess.check_output(['netsh','wlan','show','profiles']).decode('utf-8').split('\n')
profiles = [i.split(":")[1][1:-1] for i in data if "All User Profile" in i]
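# each "All User Profile : <name>" line reported by netsh yields one saved Wi-Fi profile name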
file = open("result.txt", "a")
print("\n[+] Wifi Grabber: " + localtime + "\n")
file.write("\n[+] Wifi Grabber: " + localtime + "\n")
print("========================================================",file=file)
print(localtime, file=file)
print("========================================================",file=file)
for i in profiles:
results = subprocess.check_output(['netsh','wlan','show','profile',i,
'key=clear']).decode("utf-8").split('\n')
results = [b.split(":")[1][1:-1] for b in results if "Key Content" in b]
try:
print("{:<30} | {:<}".format(i, results[0]),file=file)
except IndexError:
print("{:<30} | {:<}".format(i, ""))
time.sleep(3)
file.close()
exit(code=True)
|
the-stack_0_614 | ###########################################################################
# Created by: Hang Zhang
# Email: [email protected]
# Copyright (c) 2017
###########################################################################
import os, sys
BASE_DIR = os.path.dirname(os.path.dirname(os.getcwd()))
sys.path.append(BASE_DIR)
import yaml
import argparse
import numpy as np
from addict import Dict
import torch
import torch.nn as nn
from torch.utils import data
from tensorboardX import SummaryWriter
import torchvision.transforms as transform
from torch.nn.parallel.scatter_gather import gather
import encoding.utils as utils
from encoding.nn import SegmentationLosses, SyncBatchNorm
from encoding.parallel import DataParallelModel, DataParallelCriterion
from encoding.datasets import get_dataset
from encoding.models import get_segmentation_model
CONFIG_PATH = './results/config.yaml'
SMY_PATH = os.path.dirname(CONFIG_PATH)
GPUS = [0, 1]
# model settings
parser = argparse.ArgumentParser(description='model specification')
parser.add_argument('--with_att', action='store_true', default= False, help='whether use attention to fuse rgb and dep')
parser.add_argument('--att_type', type=str, default='AG2', help='Attention type to fuse rgb and dep')
settings= parser.parse_args()
print('settings attention:{} attention type:{}'.format(settings.with_att, settings.att_type))
class Trainer():
def __init__(self, args):
self.args = args
# data transforms
input_transform = transform.Compose([
transform.ToTensor(), # convert RGB [0,255] to FloatTensor in range [0, 1]
transform.Normalize([.485, .456, .406], [.229, .224, .225])]) # mean and std based on imageNet
dep_transform = transform.Compose([
transform.ToTensor(),
transform.Normalize(mean=[0.2798], std=[0.1387]) # mean and std for depth
])
# dataset
data_kwargs = {'transform': input_transform, 'dep_transform': dep_transform,
'base_size': args.base_size, 'crop_size': args.crop_size}
trainset = get_dataset(args.dataset, split=args.train_split, mode='train', **data_kwargs)
testset = get_dataset(args.dataset, split='val', mode='val', **data_kwargs)
# dataloader
kwargs = {'num_workers': args.workers, 'pin_memory': True} if args.cuda else {}
self.trainloader = data.DataLoader(trainset, batch_size=args.batch_size, drop_last=True, shuffle=True, **kwargs)
self.valloader = data.DataLoader(testset, batch_size=args.batch_size, drop_last=False, shuffle=False, **kwargs)
self.nclass = trainset.num_class
# model and params
model = get_segmentation_model(args.model, dataset=args.dataset, backbone=args.backbone, pretrained=True,
root='../../encoding/models/pretrain', n_features=256,
with_att=settings.with_att, att_type=settings.att_type,
)
print(model)
# optimizer using different LR
base_ids = list(map(id, model.base.parameters()))
base_dep_ids = list(map(id, model.dep_base.parameters()))
base_params = filter(lambda p: id(p) in base_ids + base_dep_ids, model.parameters())
other_params = filter(lambda p: id(p) not in base_ids + base_dep_ids, model.parameters())
self.optimizer = torch.optim.SGD([{'params': base_params, 'lr': args.lr},
{'params': other_params, 'lr': args.lr * 10}],
lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
# criterions
self.criterion = SegmentationLosses(se_loss=args.se_loss,
aux=args.aux,
nclass=self.nclass,
se_weight=args.se_weight,
aux_weight=args.aux_weight)
# lr scheduler
self.scheduler = utils.LR_Scheduler_Head(args.lr_scheduler, args.lr, args.epochs,
iters_per_epoch=len(self.trainloader), warmup_epochs=10)
self.best_pred = 0.0
# using cuda
self.device = torch.device("cuda:0" if args.cuda else "cpu")
if args.cuda:
if torch.cuda.device_count() > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!") # [30,xxx]->[10,...],[10,...],[10,...] on 3 GPUs
model = nn.DataParallel(model, device_ids=GPUS)
self.model = model.to(self.device)
# for writing summary
path = "/".join(("{}-{}".format(*i) for i in settings.__dict__.items()))
self.writer = SummaryWriter(os.path.join(SMY_PATH, path))
# resuming checkpoint
if args.resume is not None and args.resume != 'None':
if not os.path.isfile(args.resume):
raise RuntimeError("=> no checkpoint found at '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
if args.cuda:
self.model.module.load_state_dict(checkpoint['state_dict'])
else:
self.model.load_state_dict(checkpoint['state_dict'])
if not args.ft:
self.optimizer.load_state_dict(checkpoint['optimizer'])
self.best_pred = checkpoint['best_pred']
print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
# clear start epoch if fine-tuning
if args.ft:
args.start_epoch = 0
def training(self, epoch):
train_loss = 0.0
self.model.train()
total_inter, total_union, total_correct, total_label, total_loss = 0, 0, 0, 0, 0
for i, (image, dep, target) in enumerate(self.trainloader):
image, dep, target = image.to(self.device), dep.to(self.device), target.to(self.device)
self.scheduler(self.optimizer, i, epoch, self.best_pred)
self.optimizer.zero_grad()
outputs = self.model(image, dep)
loss = self.criterion(outputs, target)
loss.backward()
self.optimizer.step()
correct, labeled = utils.batch_pix_accuracy(outputs.data, target)
inter, union = utils.batch_intersection_union(outputs.data, target, self.nclass)
total_correct += correct
total_label += labeled
total_inter += inter
total_union += union
train_loss += loss.item()
if (i+1) % 50 == 0:
print('epoch {}, step {}, loss {}'.format(epoch + 1, i + 1, train_loss / 50))
self.writer.add_scalar('train_loss', train_loss / 50, epoch * len(self.trainloader) + i)
train_loss = 0.0
pixAcc = 1.0 * total_correct / (np.spacing(1) + total_label)
IOU = 1.0 * total_inter / (np.spacing(1) + total_union)
mIOU = IOU.mean()
print('epoch {}, pixel Acc {}, mean IOU {}'.format(epoch + 1, pixAcc, mIOU))
self.writer.add_scalar("mean_iou/train", mIOU, epoch)
self.writer.add_scalar("pixel accuracy/train", pixAcc, epoch)
def train_n_evaluate(self):
for epoch in range(self.args.epochs):
# run on one epoch
print("\n===============train epoch {}/{} ==========================\n".format(epoch, self.args.epochs))
# one full pass over the train set
self.training(epoch)
# evaluate for one epoch on the validation set
print('\n===============start testing, training epoch {}\n'.format(epoch))
pixAcc, mIOU, loss = self.validation(epoch)
print('evaluation pixel acc {}, mean IOU {}, loss {}'.format(pixAcc, mIOU, loss))
# save the best model
is_best = False
new_pred = (pixAcc + mIOU) / 2
if new_pred > self.best_pred:
is_best = True
self.best_pred = new_pred
utils.save_checkpoint({'epoch': epoch + 1,
'state_dict': self.model.module.state_dict(),
'optimizer': self.optimizer.state_dict(),
'best_pred': self.best_pred}, self.args, is_best)
def validation(self, epoch):
# Fast test during the training
def eval_batch(model, image, dep, target):
# model, image, target already moved to gpus
pred = model(image, dep)
loss = self.criterion(pred, target)
correct, labeled = utils.batch_pix_accuracy(pred.data, target)
inter, union = utils.batch_intersection_union(pred.data, target, self.nclass)
return correct, labeled, inter, union, loss
self.model.eval()
total_inter, total_union, total_correct, total_label, total_loss = 0, 0, 0, 0, 0
for i, (image, dep, target) in enumerate(self.valloader):
image, dep, target = image.to(self.device), dep.to(self.device), target.to(self.device)
with torch.no_grad():
correct, labeled, inter, union, loss = eval_batch(self.model, image, dep, target)
total_correct += correct
total_label += labeled
total_inter += inter
total_union += union
total_loss += loss.item()
pixAcc = 1.0 * total_correct / (np.spacing(1) + total_label)
IOU = 1.0 * total_inter / (np.spacing(1) + total_union)
mIOU = IOU.mean()
if i % 40 == 0:
print('eval mean IOU {}'.format(mIOU))
loss = total_loss / len(self.valloader)
self.writer.add_scalar("mean_iou/val", mIOU, epoch)
self.writer.add_scalar("pixel accuracy/val", pixAcc, epoch)
return pixAcc, mIOU, loss
if __name__ == "__main__":
print("-------mark program start----------")
# configuration
args = Dict(yaml.safe_load(open(CONFIG_PATH)))
args.cuda = (args.use_cuda and torch.cuda.is_available())
args.resume = None if args.resume=='None' else args.resume
torch.manual_seed(args.seed)
trainer = Trainer(args)
# import pdb; pdb.set_trace()
print('Starting Epoch:', trainer.args.start_epoch)
print('Total Epoches:', trainer.args.epochs)
trainer.train_n_evaluate()
|
the-stack_0_616 | # -*- coding: utf-8 -*-
'''
Namecheap domains management
.. versionadded:: 2017.7.0
General Notes
-------------
Use this module to manage domains through the namecheap
api. The Namecheap settings will be set in grains.
Installation Prerequisites
--------------------------
- This module uses the following python libraries to communicate to
the namecheap API:
* ``requests``
.. code-block:: bash
pip install requests
- As saltstack depends on ``requests`` this shouldn't be a problem
Prerequisite Configuration
--------------------------
- The namecheap username, api key and url should be set in a minion
configuration file or pillar
.. code-block:: yaml
namecheap.name: companyname
namecheap.key: a1b2c3d4e5f67a8b9c0d1e2f3
namecheap.client_ip: 162.155.30.172
#Real url
namecheap.url: https://api.namecheap.com/xml.response
#Sandbox url
        #namecheap.url: https://api.sandbox.namecheap.com/xml.response
'''
from __future__ import absolute_import, print_function, unicode_literals
CAN_USE_NAMECHEAP = True
try:
import salt.utils.namecheap
except ImportError:
CAN_USE_NAMECHEAP = False
# Import 3rd-party libs
from salt.ext import six
def __virtual__():
'''
    Check to make sure the requests and xml libraries are installed
'''
if CAN_USE_NAMECHEAP:
return 'namecheap_domains'
return False
def reactivate(domain_name):
'''
Try to reactivate the expired domain name
returns the following information in a dictionary
issuccess bool indicates whether the domain was renewed successfully
amount charged for reactivation
orderid unique integer value for the order
transactionid unique integer value for the transaction
CLI Example:
.. code-block:: bash
salt 'my-minion' namecheap_domains.reactivate my-domain-name
'''
opts = salt.utils.namecheap.get_opts('namecheap.domains.reactivate')
opts['DomainName'] = domain_name
response_xml = salt.utils.namecheap.post_request(opts)
if response_xml is None:
return {}
domainreactivateresult = response_xml.getElementsByTagName('DomainReactivateResult')[0]
return salt.utils.namecheap.xml_to_dict(domainreactivateresult)
def renew(domain_name, years, promotion_code=None):
'''
Try to renew the specified expiring domain name for a specified number of years
returns the following information in a dictionary
renew bool indicates whether the domain was renewed successfully
domainid unique integer value for the domain
orderid unique integer value for the order
transactionid unique integer value for the transaction
amount charged for renewal
Required parameters:
domain_name
string The domain name you wish to renew
CLI Example:
.. code-block:: bash
salt 'my-minion' namecheap_domains.renew my-domain-name 5
'''
opts = salt.utils.namecheap.get_opts('namecheap.domains.renew')
opts['DomainName'] = domain_name
opts['Years'] = years
if promotion_code is not None:
opts['PromotionCode'] = promotion_code
response_xml = salt.utils.namecheap.post_request(opts)
if response_xml is None:
return {}
domainrenewresult = response_xml.getElementsByTagName("DomainRenewResult")[0]
return salt.utils.namecheap.xml_to_dict(domainrenewresult)
def create(domain_name, years, **kwargs):
'''
Try to create the specified domain name for the specified number of years
returns the following information in a dictionary
registered True/False
amount charged for registration
domainid unique integer value for the domain
orderid unique integer value for the order
transactionid unique integer value for the transaction
whoisguardenable True,False if enabled for this domain
nonrealtimedomain True,False if domain registration is instant or not
CLI Example:
.. code-block:: bash
salt 'my-minion' namecheap_domains.create my-domain-name 2
'''
idn_codes = set(['afr',
'alb',
'ara',
'arg',
'arm',
'asm',
'ast',
'ave',
'awa',
'aze',
'bak',
'bal',
'ban',
'baq',
'bas',
'bel',
'ben',
'bho',
'bos',
'bul',
'bur',
'car',
'cat',
'che',
'chi',
'chv',
'cop',
'cos',
'cze',
'dan',
'div',
'doi',
'dut',
'eng',
'est',
'fao',
'fij',
'fin',
'fre',
'fry',
'geo',
'ger',
'gla',
'gle',
'gon',
'gre',
'guj',
'heb',
'hin',
'hun',
'inc',
'ind',
'inh',
'isl',
'ita',
'jav',
'jpn',
'kas',
'kaz',
'khm',
'kir',
'kor',
'kur',
'lao',
'lav',
'lit',
'ltz',
'mal',
'mkd',
'mlt',
'mol',
'mon',
'mri',
'msa',
'nep',
'nor',
'ori',
'oss',
'pan',
'per',
'pol',
'por',
'pus',
'raj',
'rum',
'rus',
'san',
'scr',
'sin',
'slo',
'slv',
'smo',
'snd',
'som',
'spa',
'srd',
'srp',
'swa',
'swe',
'syr',
'tam',
'tel',
'tgk',
'tha',
'tib',
'tur',
'ukr',
'urd',
'uzb',
'vie',
'wel',
'yid'])
require_opts = ['AdminAddress1', 'AdminCity', 'AdminCountry', 'AdminEmailAddress', 'AdminFirstName',
'AdminLastName', 'AdminPhone', 'AdminPostalCode', 'AdminStateProvince', 'AuxBillingAddress1',
'AuxBillingCity', 'AuxBillingCountry', 'AuxBillingEmailAddress', 'AuxBillingFirstName',
'AuxBillingLastName', 'AuxBillingPhone', 'AuxBillingPostalCode', 'AuxBillingStateProvince',
'RegistrantAddress1', 'RegistrantCity', 'RegistrantCountry', 'RegistrantEmailAddress',
'RegistrantFirstName', 'RegistrantLastName', 'RegistrantPhone', 'RegistrantPostalCode',
'RegistrantStateProvince', 'TechAddress1', 'TechCity', 'TechCountry', 'TechEmailAddress',
'TechFirstName', 'TechLastName', 'TechPhone', 'TechPostalCode', 'TechStateProvince', 'Years']
opts = salt.utils.namecheap.get_opts('namecheap.domains.create')
opts['DomainName'] = domain_name
opts['Years'] = six.text_type(years)
def add_to_opts(opts_dict, kwargs, value, suffix, prefices):
for prefix in prefices:
nextkey = prefix + suffix
if nextkey not in kwargs:
opts_dict[nextkey] = value
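    # e.g. passing RegistrantFirstName='Ada' also fills TechFirstName, AdminFirstName,
    # AuxBillingFirstName and BillingFirstName with 'Ada' unless they were given explicitly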
for key, value in six.iteritems(kwargs):
if key.startswith('Registrant'):
add_to_opts(opts, kwargs, value, key[10:], ['Tech', 'Admin', 'AuxBilling', 'Billing'])
if key.startswith('Tech'):
add_to_opts(opts, kwargs, value, key[4:], ['Registrant', 'Admin', 'AuxBilling', 'Billing'])
if key.startswith('Admin'):
add_to_opts(opts, kwargs, value, key[5:], ['Registrant', 'Tech', 'AuxBilling', 'Billing'])
if key.startswith('AuxBilling'):
add_to_opts(opts, kwargs, value, key[10:], ['Registrant', 'Tech', 'Admin', 'Billing'])
if key.startswith('Billing'):
add_to_opts(opts, kwargs, value, key[7:], ['Registrant', 'Tech', 'Admin', 'AuxBilling'])
if key == 'IdnCode' and key not in idn_codes:
salt.utils.namecheap.log.error('Invalid IdnCode')
raise Exception('Invalid IdnCode')
opts[key] = value
for requiredkey in require_opts:
if requiredkey not in opts:
salt.utils.namecheap.log.error("Missing required parameter '" + requiredkey + "'")
raise Exception("Missing required parameter '" + requiredkey + "'")
response_xml = salt.utils.namecheap.post_request(opts)
if response_xml is None:
return {}
domainresult = response_xml.getElementsByTagName("DomainCreateResult")[0]
return salt.utils.namecheap.atts_to_dict(domainresult)
def check(*domains_to_check):
'''
Checks the availability of domains
returns a dictionary where the domain name is the key and
the availability is the value of True/False
domains_to_check
array of strings List of domains to check
CLI Example:
.. code-block:: bash
salt 'my-minion' namecheap_domains.check domain-to-check
'''
opts = salt.utils.namecheap.get_opts('namecheap.domains.check')
opts['DomainList'] = ','.join(domains_to_check)
response_xml = salt.utils.namecheap.get_request(opts)
if response_xml is None:
return {}
domains_checked = {}
for result in response_xml.getElementsByTagName("DomainCheckResult"):
available = result.getAttribute("Available")
domains_checked[result.getAttribute("Domain").lower()] = salt.utils.namecheap.string_to_value(available)
return domains_checked
def get_info(domain_name):
'''
Returns information about the requested domain
returns a dictionary of information about the domain_name
domain_name
string Domain name to get information about
CLI Example:
.. code-block:: bash
salt 'my-minion' namecheap_domains.get_info my-domain-name
'''
opts = salt.utils.namecheap.get_opts('namecheap.domains.getinfo')
opts['DomainName'] = domain_name
response_xml = salt.utils.namecheap.get_request(opts)
if response_xml is None:
return []
domaingetinforesult = response_xml.getElementsByTagName("DomainGetInfoResult")[0]
return salt.utils.namecheap.xml_to_dict(domaingetinforesult)
def get_tld_list():
'''
Returns a list of TLDs as objects
CLI Example:
.. code-block:: bash
salt 'my-minion' namecheap_domains.get_tld_list
'''
response_xml = salt.utils.namecheap.get_request(salt.utils.namecheap.get_opts('namecheap.domains.gettldlist'))
if response_xml is None:
return []
tldresult = response_xml.getElementsByTagName("Tlds")[0]
tlds = []
for e in tldresult.getElementsByTagName("Tld"):
tld = salt.utils.namecheap.atts_to_dict(e)
tld['data'] = e.firstChild.data
categories = []
subcategories = e.getElementsByTagName("Categories")[0]
for c in subcategories.getElementsByTagName("TldCategory"):
categories.append(salt.utils.namecheap.atts_to_dict(c))
tld['categories'] = categories
tlds.append(tld)
return tlds
def get_list(list_type=None,
search_term=None,
page=None,
page_size=None,
sort_by=None):
'''
Returns a list of domains for the particular user as a list of objects
offset by ``page`` length of ``page_size``
list_type
string Possible values are ALL/EXPIRING/EXPIRED
Default: ALL
search_term
string Keyword to look for on the domain list
page
integer Page to return
Default: 1
page_size
integer Number of domains to be listed in a page
Minimum value is 10 and maximum value is 100
Default: 20
sort_by
string Possible values are NAME/NAME_DESC/EXPIREDATE/
EXPIREDATE_DESC/CREATEDATE/CREATEDATE_DESC
CLI Example:
.. code-block:: bash
salt 'my-minion' namecheap_domains.get_list
'''
opts = salt.utils.namecheap.get_opts('namecheap.domains.getList')
if list_type is not None:
if list_type not in ['ALL', 'EXPIRING', 'EXPIRED']:
salt.utils.namecheap.log.error('Invalid option for list_type')
raise Exception('Invalid option for list_type')
opts['ListType'] = list_type
if search_term is not None:
if len(search_term) > 70:
salt.utils.namecheap.log.warning('search_term trimmed to first 70 characters')
search_term = search_term[0:70]
opts['SearchTerm'] = search_term
if page is not None:
opts['Page'] = page
if page_size is not None:
if page_size > 100 or page_size < 10:
            salt.utils.namecheap.log.error('Invalid option for page_size')
            raise Exception('Invalid option for page_size')
opts['PageSize'] = page_size
if sort_by is not None:
if sort_by not in ['NAME', 'NAME_DESC', 'EXPIREDATE', 'EXPIREDATE_DESC', 'CREATEDATE', 'CREATEDATE_DESC']:
salt.utils.namecheap.log.error('Invalid option for sort_by')
raise Exception('Invalid option for sort_by')
opts['SortBy'] = sort_by
response_xml = salt.utils.namecheap.get_request(opts)
if response_xml is None:
return []
domainresult = response_xml.getElementsByTagName("DomainGetListResult")[0]
domains = []
for d in domainresult.getElementsByTagName("Domain"):
domains.append(salt.utils.namecheap.atts_to_dict(d))
return domains
|
the-stack_0_617 | import pytest
import gevent
import logging
import time
from volttron.platform import get_services_core
from master_driver.interfaces.modbus_tk.server import Server
from master_driver.interfaces.modbus_tk.maps import Map, Catalog
logger = logging.getLogger(__name__)
# modbus_tk driver config
DRIVER_CONFIG_STRING = """{
"driver_config": {
"name": "write_single_registers",
"device_address": "127.0.0.1",
"port": 5020,
"slave_id": 1,
"baudrate": 9600,
"bytesize": 8,
"parity": "none",
"stopbits": 1,
"xonxoff": 0,
"addressing": "offset",
"endian": "big",
"write_multiple_registers": false,
"register_map": "config://write_single_registers_map.csv"
},
"driver_type": "modbus_tk",
"registry_config": "config://write_single_registers.csv",
"interval": 120,
"timezone": "UTC"
}"""
# modbus_tk csv config
REGISTRY_CONFIG_STRING = """Volttron Point Name,Register Name
unsigned short,unsigned_short
sample bool,sample_bool"""
REGISTRY_CONFIG_MAP = """Register Name,Address,Type,Units,Writable,Default Value,Transform
unsigned_short,0,uint16,None,TRUE,0,scale(10)
sample_bool,16,bool,None,TRUE,False,"""
@pytest.fixture(scope="module")
def agent(request, volttron_instance):
"""Build MasterDriverAgent, add modbus driver & csv configurations
"""
# Build master driver agent
md_agent = volttron_instance.build_agent()
# Clean out master driver configurations
md_agent.vip.rpc.call('config.store',
'manage_delete_store',
'platform.driver')
# Add driver configurations
md_agent.vip.rpc.call('config.store',
'manage_store',
'platform.driver',
'devices/write_single_registers',
DRIVER_CONFIG_STRING,
config_type='json')
# Add csv configurations
md_agent.vip.rpc.call('config.store',
'manage_store',
'platform.driver',
'write_single_registers.csv',
REGISTRY_CONFIG_STRING,
config_type='csv')
md_agent.vip.rpc.call('config.store',
'manage_store',
'platform.driver',
'write_single_registers_map.csv',
REGISTRY_CONFIG_MAP,
config_type='csv')
master_uuid = volttron_instance.install_agent(agent_dir=get_services_core("MasterDriverAgent"),
config_file={},
start=True)
gevent.sleep(10) # wait for the agent to start and start the devices
def stop():
"""Stop master driver agent
"""
volttron_instance.stop_agent(master_uuid)
md_agent.core.stop()
request.addfinalizer(stop)
return md_agent
@pytest.fixture(scope='class')
def modbus_server(request):
ModbusClient = Catalog()['write_single_registers'].get_class()
server_process = Server(address='127.0.0.1', port=5020)
server_process.define_slave(1, ModbusClient, unsigned=False)
server_process.start()
time.sleep(1)
yield server_process
time.sleep(1)
server_process.stop()
@pytest.mark.usefixtures("modbus_server")
class TestModbusTKDriver:
"""
Regression tests for the write_single_registers driver interface.
"""
def get_point(self, agent, point_name):
"""
Issue a get_point RPC call for the named point and return the result.
@param agent: The test Agent.
@param point_name: The name of the point to query.
@return: The actual reading value of the point name from the RPC call.
"""
return agent.vip.rpc.call('platform.driver', 'get_point', 'write_single_registers', point_name).get(timeout=10)
def set_point(self, agent, point_name, point_value):
"""
Issue a set_point RPC call for the named point and value, and return the result.
@param agent: The test Agent.
@param point_name: The name of the point to query.
@param point_value: The value to set on the point.
@return:The actual reading value of the point name from the RPC call.
"""
return agent.vip.rpc.call('platform.driver', 'set_point', 'write_single_registers', point_name, point_value).get(timeout=10)
def scrape_all(self, agent):
"""
Issue a get_point RPC call for the device and return the result.
@param agent: The test Agent.
@return: The dictionary mapping point names to their actual values from the RPC call.
"""
return agent.vip.rpc.call('platform.driver', 'scrape_all', 'write_single_registers').get(timeout=10)
def revert_all(self, agent):
"""
Issue a get_point RPC call for the device and return the result.
@param agent: The test Agent.
@return: Return value from the RPC call.
"""
return agent.vip.rpc.call('platform.driver', 'revert_device', 'write_single_registers').get(timeout=10)
def revert_point(self, agent, point_name):
"""
Issue a get_point RPC call for the named point and return the result.
@param agent: The test Agent.
@param point_name: The name of the point to query.
@return: Return value from the RPC call.
"""
return agent.vip.rpc.call('platform.driver', 'revert_point', 'write_single_registers', point_name).get(timeout=10)
def test_default_values(self, agent):
"""Test set default values
"""
self.revert_all(agent)
default_values = self.scrape_all(agent)
assert type(default_values) is dict
for key in default_values.keys():
assert default_values[key] == 0
def test_set_point(self, agent):
"""Test set points to a new values
"""
set_value = self.set_point(agent, 'unsigned short', 6530)
assert set_value == 6530
set_value = self.set_point(agent, 'sample bool', True)
assert set_value == True
def test_get_point(self, agent):
"""Test get point after set point
"""
self.set_point(agent, 'unsigned short', 1230)
get_value = self.get_point(agent, 'unsigned short')
assert get_value == 1230
def test_revert_point(self, agent):
"""Test revert point to default value
"""
self.revert_point(agent, 'unsigned short')
get_value = self.get_point(agent, 'unsigned short')
assert get_value == 0
self.revert_point(agent, 'sample bool')
get_value = self.get_point(agent, 'sample bool')
assert get_value == False
def test_revert_all(self, agent):
"""Test revert device to default values
"""
self.revert_all(agent)
default_values = self.scrape_all(agent)
assert type(default_values) is dict
for key in default_values.keys():
            assert default_values[key] == 0
|
the-stack_0_619 | #!/usr/bin/env python
"""
_Harvest_
"""
from future.utils import viewitems
import threading
import logging
from WMCore.JobSplitting.JobFactory import JobFactory
from WMCore.Services.UUIDLib import makeUUID
from WMCore.DAOFactory import DAOFactory
from WMCore.JobSplitting.LumiBased import isGoodRun, isGoodLumi
from WMCore.DataStructs.Run import Run
from WMCore.WMSpec.WMTask import buildLumiMask
class Harvest(JobFactory):
"""
_Harvest_
    Job splitting algorithm which creates a single job for all files
    in the fileset (not necessarily just available files).
Two distinct modes, Periodic and EndOfRun.
In Periodic mode, we periodically create a job processing all
files. A job will not be created until the previous job (if
there is one) has been completed and there are new available
files in the fileset. The specified period is the amount of
time in seconds between the end of a job and the creation of
another job.
In EndOfRun mode, create a job processing all files once the
input file has been closed. This means there will only be
a single job in total for the subscription.
    For the EndOfRun mode, support a sibling parameter that is
set if there is also a Periodic subscription. In this case
wait until the Periodic subscription is finished before
triggering the EndOfRun harvesting.
"""
def createJobsLocationWise(self, fileset, endOfRun, dqmHarvestUnit, lumiMask, goodRunList):
myThread = threading.currentThread()
fileset.loadData(parentage=0)
allFiles = fileset.getFiles()
# sort by location and run
locationDict = {}
runDict = {}
for fileInfo in allFiles:
locSet = frozenset(fileInfo['locations'])
runSet = fileInfo.getRuns()
if len(locSet) == 0:
logging.error("File %s has no locations!", fileInfo['lfn'])
if len(runSet) == 0:
logging.error("File %s has no run information!", fileInfo['lfn'])
# Populate a dictionary with [location][run] so we can split jobs according to those different combinations
if locSet not in locationDict:
locationDict[locSet] = {}
fileInfo['runs'] = set()
# Handle jobs with run whitelist/blacklist
if goodRunList:
runDict[fileInfo['lfn']] = set()
for run in runSet:
if run.run in goodRunList:
runDict[fileInfo['lfn']].add(run)
if run.run in locationDict[locSet]:
locationDict[locSet][run.run].append(fileInfo)
else:
locationDict[locSet][run.run] = [fileInfo]
elif lumiMask:
# it has lumiMask, thus we consider only good run/lumis
newRunSet = []
for run in runSet:
if not isGoodRun(lumiMask, run.run):
continue
# then loop over lumis
maskedLumis = []
for lumi in run.lumis:
if not isGoodLumi(lumiMask, run.run, lumi):
continue
maskedLumis.append(lumi)
if not maskedLumis:
continue
maskedRun = Run(run.run, *maskedLumis)
newRunSet.append(maskedRun)
if run.run in locationDict[locSet]:
locationDict[locSet][run.run].append(fileInfo)
else:
locationDict[locSet][run.run] = [fileInfo]
if newRunSet:
runDict[fileInfo['lfn']] = newRunSet
else:
# no LumiList and no run white or black list
runDict[fileInfo['lfn']] = runSet
for run in runSet:
if run.run in locationDict[locSet]:
locationDict[locSet][run.run].append(fileInfo)
else:
locationDict[locSet][run.run] = [fileInfo]
# create separate jobs for different locations
self.newGroup()
self.jobCount = 0
baseName = makeUUID()
self.newGroup()
if endOfRun:
harvestType = "EndOfRun"
else:
harvestType = "Periodic"
for location in locationDict:
if dqmHarvestUnit == "byRun":
self.createJobByRun(locationDict, location, baseName, harvestType, runDict, endOfRun)
else:
self.createMultiRunJob(locationDict, location, baseName, harvestType, runDict, endOfRun)
return
def createJobByRun(self, locationDict, location, baseName, harvestType, runDict, endOfRun):
"""
_createJobByRun_
Creates one job per run for all files available at the same location.
"""
for run in locationDict[location]:
# Should create at least one job for every location/run, putting this here will do
self.jobCount += 1
self.newJob(name="%s-%s-Harvest-%i" % (baseName, harvestType, self.jobCount))
for f in locationDict[location][run]:
for fileRun in runDict[f['lfn']]:
if fileRun.run == run:
self.currentJob['mask'].addRun(fileRun)
break
self.currentJob.addFile(f)
if endOfRun:
self.currentJob.addBaggageParameter("runIsComplete", True)
self.mergeLumiRange(self.currentJob['mask']['runAndLumis'])
return
def createMultiRunJob(self, locationDict, location, baseName, harvestType, runDict, endOfRun):
"""
_createMultiRunJob_
Creates a single harvesting job for all files and runs available
at the same location.
"""
self.jobCount += 1
self.newJob(name="%s-%s-Harvest-%i" % (baseName, harvestType, self.jobCount))
for run in locationDict[location]:
for f in locationDict[location][run]:
for fileRun in runDict[f['lfn']]:
if fileRun.run == run:
self.currentJob['mask'].addRun(fileRun)
break
if f not in self.currentJob['input_files']:
self.currentJob.addFile(f)
if endOfRun:
self.currentJob.addBaggageParameter("runIsComplete", True)
self.mergeLumiRange(self.currentJob['mask']['runAndLumis'])
# now calculate the minimum and maximum run number, it has to go to the root name
minRun = min(self.currentJob['mask']['runAndLumis'].keys())
maxRun = max(self.currentJob['mask']['runAndLumis'].keys())
self.currentJob.addBaggageParameter("multiRun", True)
self.currentJob.addBaggageParameter("runLimits", "-%s-%s" % (minRun, maxRun))
return
def mergeLumiRange(self, runLumis):
"""
_mergeLumiRange_
        Merges overlapping or adjacent lumi ranges.
"""
for run, lumis in viewitems(runLumis):
lumis.sort(key=lambda sublist: sublist[0])
fixedLumis = [lumis[0]]
for lumi in lumis:
if (fixedLumis[-1][1] + 1) >= lumi[0]:
fixedLumis[-1][1] = lumi[1]
else:
fixedLumis.append(lumi)
self.currentJob['mask']['runAndLumis'][run] = fixedLumis
def algorithm(self, *args, **kwargs):
"""
_algorithm_
"""
myThread = threading.currentThread()
periodicInterval = kwargs.get("periodic_harvest_interval", 0)
periodicSibling = kwargs.get("periodic_harvest_sibling", False)
dqmHarvestUnit = kwargs.get("dqmHarvestUnit", "byRun")
runs = kwargs.get("runs", None)
lumis = kwargs.get("lumis", None)
runWhitelist = set(kwargs.get('runWhitelist', []))
runBlacklist = set(kwargs.get('runBlacklist', []))
goodRunList = runWhitelist.difference(runBlacklist)
daoFactory = DAOFactory(package="WMCore.WMBS",
logger=myThread.logger,
dbinterface=myThread.dbi)
releasePeriodicJobDAO = daoFactory(classname="JobSplitting.ReleasePeriodicJob")
periodicSiblingCompleteDAO = daoFactory(classname="JobSplitting.PeriodicSiblingComplete")
fileset = self.subscription.getFileset()
fileset.load()
lumiMask = {}
if runs and lumis:
lumiMask = buildLumiMask(runs, lumis)
if periodicInterval and periodicInterval > 0:
# Trigger the Periodic Job if
# * it is the first job OR
# * the last job ended more than periodicInterval seconds ago
triggerJob = releasePeriodicJobDAO.execute(subscription=self.subscription["id"], period=periodicInterval)
if triggerJob:
myThread.logger.debug("Creating Periodic harvesting job")
self.createJobsLocationWise(fileset, False, dqmHarvestUnit, lumiMask, goodRunList)
elif not fileset.open:
# Trigger the EndOfRun job if
# * (same as Periodic to not have JobCreator go nuts and stop after the first iteration)
# * there is no Periodic sibling subscription OR
# * the Periodic sibling subscription is complete
triggerJob = releasePeriodicJobDAO.execute(subscription=self.subscription["id"], period=3600)
if triggerJob and periodicSibling:
triggerJob = periodicSiblingCompleteDAO.execute(subscription=self.subscription["id"])
if triggerJob:
myThread.logger.debug("Creating EndOfRun harvesting job")
self.createJobsLocationWise(fileset, True, dqmHarvestUnit, lumiMask, goodRunList)
return
|
the-stack_0_622 | from setuptools import setup, find_packages
with open('README.md') as f:
readme = f.read()
setup(
name='Workbench',
version='0.1.1',
description='Timesaver for psd2html (markup)',
long_description=readme,
author='Bohdan Khorolets',
author_email='[email protected]',
url='https://github.com/khorolets/workbench',
packages=find_packages(),
entry_points={
'console_scripts': [
'workbench = workbench.__init__:manager.run',
],
},
install_requires=list(filter(None, [
'flask',
'flask-script',
'elizabeth',
])),
)
|
the-stack_0_624 | import urllib3.request
import json
import datetime as dt
from urllib3 import exceptions as urlex
from Game.periodictasks.search_alarms import AlarmSearch
import pandas as pn
import numpy as np
DATE_FORMAT = '%Y-%m-%d'
def str_to_date(strdate):
"""
parses given string to date using global date format
:param strdate:
:return date:
"""
return dt.datetime.strptime(strdate, DATE_FORMAT)
class AssetComunication:
GET_ASSETS = "getAvailableAssets/"
GET_QUOTE = "getAssetMarketPrice/"
GET_HISTORY = "getAssetHistory/"
def __init__(self, url):
self.API_URL = url
self.alarm_search = AlarmSearch(acom=self)
@staticmethod
def has_quote(asset):
"""
check if an asset has a valid quote
:param asset:
:return boolean:
"""
return asset.buy != -1 and asset.sell != -1
@staticmethod
def url_to_json(url):
"""
fetch json data from given url
:param url:
:return json_response if success, 0 otherwise:
"""
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
http = urllib3.PoolManager()
try:
res = http.request('GET', url)
if res.status == 200:
return json.loads(res.data.decode())
else:
return 0
except urlex.MaxRetryError:
return 0
def get_asset_names(self):
"""
fetch from API all the available assets (only names)
:return asset list:
"""
from Game.models import Asset
url = self.API_URL + self.GET_ASSETS
json_assets = self.url_to_json(url)
asset_list = []
try:
if json_assets != 0:
json_assets = json_assets['availableAssets']
for a in json_assets:
asset = Asset(name=a['name'], type=a['type'])
asset_list.append(asset)
return asset_list
except KeyError:
# rollback
asset_list = []
finally:
return asset_list
def get_asset_quote(self, asset):
"""
given an asset (only name is required)
        returns the same asset with buy and sell prices if both exist.
        Also searches for alarms for the given asset.
:param asset:
:return asset:
"""
url = self.API_URL + self.GET_QUOTE + asset.name
asset_quote = self.url_to_json(url)
try:
if asset_quote != 0:
asset.buy = asset_quote['buy']
asset.sell = asset_quote['sell']
except KeyError:
# rollback
asset.buy = -1
asset.sell = -1
finally:
self.alarm_search.search_for_alarms(asset=asset)
return asset
def get_asset_type(self, name):
assets = self.get_asset_names()
for a in assets:
if name == a.name:
return a.type
return None
def quote_for_assets(self, assets):
"""
        maps an asset list (only names are required) to the same assets with their quotes
:param assets:
:return asset list:
"""
return [self.get_asset_quote(a) for a in assets if
self.has_quote(self.get_asset_quote(a))]
def get_assets(self):
"""
fetches all the available assets with their respective quotes
:return asset list:
"""
assets = self.get_asset_names()
return self.quote_for_assets(assets)
def get_asset_history(self, name, start_date, end_date):
"""
get all history for given asset
:param name:
:param start_date:
:param end_date:
:return dict [{day: DayString, sell: SELL_PRICE, buy: BUY_PRICE}]:
"""
url = (self.API_URL + self.GET_HISTORY + name + "/" +
start_date + "/" + end_date)
prices = self.url_to_json(url)
if prices == 0:
prices = {'error': True}
return prices
def average_for_asset(self, asset):
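        # summarizes two years of buy/sell history into quartiles:
        # 'first' (25%), 'avg' (median) and 'third' (75%), stored on asset.prices_quantiles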
start_date = dt.date.today() - dt.timedelta(days=365 * 2)
end_date = dt.date.today()
history = self.get_asset_history(name=asset.name,
start_date=start_date
.strftime(DATE_FORMAT),
end_date=end_date
.strftime(DATE_FORMAT))
try:
prices = history['prices']
sell = [float(p['sell']) for p in prices]
sell_df = pn.DataFrame(np.array(sell))
sell_data = sell_df.quantile([0.25, 0.5, 0.75]).to_dict()[0]
sell_data['first'] = sell_data.pop(0.25)
sell_data['avg'] = sell_data.pop(0.5)
sell_data['third'] = sell_data.pop(0.75)
buy = [float(p['buy']) for p in prices]
buy_df = pn.DataFrame(np.array(buy))
buy_data = buy_df.quantile([0.25, 0.5, 0.75]).to_dict()[0]
buy_data['first'] = buy_data.pop(0.25)
buy_data['avg'] = buy_data.pop(0.5)
buy_data['third'] = buy_data.pop(0.75)
asset.prices_quantiles = {
'buy': buy_data,
'sell': sell_data,
}
return asset
except KeyError:
return
def get_assets_with_average(self):
"""
fetches all the available assets with their respective quotes
:return asset list:
"""
assets = self.get_assets()
return [self.average_for_asset(a) for a in assets if a]
|
the-stack_0_625 | """
Sphinx is hardcoded to interpret links to downloadable files relative to the root of the docs
source tree. However, the downloadable files we want to use (tarballs of our examples directories)
are themselves generated at build time, and we would therefore like them to be separate from the
source. This module is a Sphinx plugin that replaces the normal interpretation of links, causing
Sphinx to look for downloads relative to a different directory (which is set in `conf.py`).
"""
import logging
import os
import types
from typing import Any, Dict
from docutils import nodes
from sphinx import addnodes, application
from sphinx.environment.collectors import asset
from sphinx.locale import __
logger = logging.getLogger(__name__)
class DownloadExternalFileCollector(asset.DownloadFileCollector):
def process_doc(
self: asset.DownloadFileCollector, app: application.Sphinx, doctree: nodes.document
) -> None:
"""
This function is different from the original method only in doing some surgery on the paths
it finds when a separate root directory is configured.
"""
for node in doctree.traverse(addnodes.download_reference):
targetname = node["reftarget"]
if "://" in targetname:
node["refuri"] = targetname
else:
rel_filename, filename = app.env.relfn2path(targetname, app.env.docname)
if app.config.dai_downloads_root:
filename = os.path.abspath(
os.path.join(app.config.dai_downloads_root, rel_filename)
)
rel_filename = os.path.relpath(filename, app.env.srcdir)
app.env.dependencies[app.env.docname].add(rel_filename)
if not os.access(filename, os.R_OK):
logger.warning(__("download file not readable: %s") % filename)
continue
node["filename"] = app.env.dlfiles.add_file(app.env.docname, rel_filename)
def setup(app: application.Sphinx) -> Dict[str, Any]:
app.add_config_value("dai_downloads_root", None, "html")
# Disable the old instance of DownloadFileCollector and replace it with ours.
for event in app.events.listeners.values():
for listener_id, callback in list(event.items()):
if isinstance(callback, types.MethodType) and isinstance(
callback.__self__, asset.DownloadFileCollector
):
del event[listener_id]
app.add_env_collector(DownloadExternalFileCollector)
return {
"version": "0",
"parallel_read_safe": True,
"parallel_write_safe": True,
}
|
the-stack_0_626 | # Analytics Collector
def truncate(n, decimals=0):
multiplier = 10 ** decimals
return int(n * multiplier) / multiplier
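# example: truncate(3.14159, 2) == 3.14 (truncates toward zero rather than rounding)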
def startCam():
import cv2
from gaze_tracking import GazeTracking
import time
gaze = GazeTracking()
webcam = cv2.VideoCapture(0)
startTime = time.time()
totalFrames = 0
framesDistracted = 0
framesFocused = 0
while True:
_, frame = webcam.read()
totalFrames += 1
gaze.refresh(frame)
frame = gaze.annotated_frame()
if gaze.is_blinking():
framesDistracted += 1
elif gaze.is_right():
framesDistracted += 1
elif gaze.is_left():
framesDistracted += 1
elif gaze.is_center():
framesFocused += 1
else:
framesDistracted += 1
cv2.imshow("Camera", frame)
if cv2.waitKey(1) == ord('q'):
break
webcam.release()
cv2.destroyAllWindows()
totalTime = truncate(time.time() - startTime, 2)
percentFocused = truncate((framesFocused / totalFrames) * 100, 2)
percentDistracted = truncate((framesDistracted / totalFrames) * 100, 2)
return totalTime, percentFocused, percentDistracted
|
the-stack_0_628 | #!/usr/bin/env python3
from tpp.tppflush import *
import sys
from math import fabs
try:
import pygame
except ImportError:
exit("Pygame required. Exiting.")
try:
from lib.controller import *
except ImportError:
joystick_name="??"
j_axis=[ ]
#buttons.py adds the following:
#joystick_name="Microsoft X-Box 360 pad"
#buttons=['B', 'A', 'Y', 'X', 'L', 'R', 'SELECT', 'START', 'Home', 'Home', 'Home']
#j_axis=[0, 1, 3, 4]
done=False
circx,circy = 160,120
deadZone=0.3 #send '0' if fabs joystick(0,1) is less than this value eg joystick_x=0.1, sends joystick_x=0.0
#Default button mapping
buttonMappings = [
HIDButtons.A,
HIDButtons.B,
HIDButtons.X,
HIDButtons.Y,
HIDButtons.SELECT, #Z
HIDButtons.R,
HIDButtons.L,
HIDButtons.START,
HIDButtons.DPADUP,
HIDButtons.DPADDOWN,
HIDButtons.DPADLEFT,
HIDButtons.DPADRIGHT
]
class KBDButtons(int):
HOME = pygame.K_HOME
POWER = pygame.K_END
#street fighter style layout on numberpad ([punches] y,x,L -> 4,5,6)
#might be useful for joy2key apps
KBbutt={
257: HIDButtons.B, #numberpad 1
258: HIDButtons.A,
259: HIDButtons.R,
260: HIDButtons.Y, #numberpad 4
261: HIDButtons.X,
262: HIDButtons.L,
256: HIDButtons.START, #numberpad 0
266: HIDButtons.SELECT, #numberpad .
273: HIDButtons.DPADUP, #arrow key up
274: HIDButtons.DPADDOWN,
276: HIDButtons.DPADLEFT,
275: HIDButtons.DPADRIGHT
}
if len(sys.argv) < 2:
#this is the pop up window
import tkinter as tk
class App:
def __init__(self, master):
frame=tk.Frame(master)
frame.pack()
#reads file lastIP to get first line
try:
f=open("lastIP","r")
last_ip=f.readline()
f.close()
except FileNotFoundError:
last_ip=" "
self.l_IP=tk.StringVar()
self.l_IP.set(last_ip)
#image banner (row 0, col 0)
lumaIMG = tk.PhotoImage(file="lib/luma.png")
lumal = tk.Label(frame,image=lumaIMG)
lumal.image = lumaIMG
lumal.grid(row=0,columnspan=3)
#places the 3 other elements (label, text box, button) on row 1
tk.Label(frame, text='IP:',font=("Courier", 22)).grid(row=1, column=0, sticky=tk.E)
tk.Entry(frame,bg='white', width=15, textvariable=self.l_IP, font=("Courier", 18)).grid(row=1,column=1, pady=10, sticky=tk.E+tk.W)
button = tk.Button(frame, text='Go', font=("Courier", 18), command=self.store)
button.grid(row=1, column=2, sticky=tk.W, pady=10)
#center label and butt
frame.grid_columnconfigure(0, weight=1)
frame.grid_columnconfigure(2, weight=1)
master.bind('<Return>', self.store ) #"enter" key
master.bind('<KP_Enter>', self.store ) # numeric "enter" key
def store(self, *args):
global IP
IP=self.l_IP.get()
f=open("lastIP","w")
f.write(IP.strip()) #stores data in text box (as string type)
f.close()
root.quit()
root= tk.Tk()
root.wm_title('3DS IP')
App(root)
root.bind('<Escape>', lambda x: quit())
root.mainloop()
root.destroy() #removes window
server = IP.strip()
else:
server = sys.argv[1]
server=LumaInputServer(server)
pygame.init()
screen = pygame.display.set_mode((320, 240))
pygame.display.set_caption('touchscreen')
botSr = pygame.image.load('lib/bottom.png')
screen.blit(botSr, (0,0))
if len(j_axis)>=6 :
pygame.draw.circle(screen, (0,0,0), (circx, circy), 5, 2)
pygame.display.update()
pygame.joystick.init()
joystick_count = pygame.joystick.get_count()
print("Number of joysticks: {}".format(joystick_count) )
if (joystick_count>0):
#Only loads one joystick if multiple are connected.
for i in range(joystick_count):
joystick = pygame.joystick.Joystick(i)
name = joystick.get_name()
if name == joystick_name:
break
joystick.init()
print("Using joystick \"{}\"".format(name))
if name == joystick_name:
buttonMappings=buttons
print("\t--> loading \"{}\" layout".format(joystick_name))
else :
print("\t(using default button layout)")
print("\t{} axes, {} buttons, {} hats".format(joystick.get_numaxes(),joystick.get_numbuttons(),joystick.get_numhats()))
for i in range(joystick.get_numaxes()):
j_axis.append(i)
else:
print("No controller found!\n\t(restricted to limited keyboard button layout)")
print("\nHOME = HOME key \nPOWER = END key\nEnd Program = ESC key")
while done==False:
#Event L O O P
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT: # If user clicked close
done=True
#Touchscreen input
if pygame.mouse.get_pressed()[0]:
pos = pygame.mouse.get_pos()
server.touch(pos[0], pos[1])
#print("THSC: ",pos[0],",",pos[1])
server.send()
elif event.type == pygame.MOUSEBUTTONUP:
server.clear_touch()
server.send()
#Keyboard Mappings
elif event.type == pygame.KEYDOWN:
if event.key == KBDButtons.HOME: #home
server.special_press(Special_Buttons.HOME)
#print("HOME")
if event.key == KBDButtons.POWER: #power
server.special_press(Special_Buttons.POWER)
#print("POWER")
if event.key == pygame.K_ESCAPE: #end program
server.clear_everything()
done = True
if event.key in KBbutt:
server.hid_press(KBbutt[event.key])
#print(event.key)
server.send()
elif event.type == pygame.KEYUP:
if event.key == KBDButtons.HOME: #home
server.special_unpress(Special_Buttons.HOME)
if event.key == KBDButtons.POWER: #power
server.special_unpress(Special_Buttons.POWER)
if event.key in KBbutt:
server.hid_unpress(KBbutt[event.key])
server.send()
# Possible joystick actions: JOYAXISMOTION JOYBALLMOTION JOYBUTTONDOWN JOYBUTTONUP JOYHATMOTION
if event.type == pygame.JOYBUTTONDOWN :
#print("Joystick {} button {} pressed.".format(event.joy,event.button))
server.press(buttonMappings[event.button])
server.send()
if event.type == pygame.JOYBUTTONUP:
#print("Joystick {} button {} released.".format(event.joy,event.button))
server.unpress(buttonMappings[event.button])
server.send()
if event.type == pygame.JOYHATMOTION:
#print("Joystick {} HATS moved to {}.".format(event.joy, event.value))
(xhat, yhat) = event.value #[-1,0,1]
if (xhat == 1):
server.press(HIDButtons.DPADRIGHT)
elif (xhat == -1):
server.press(HIDButtons.DPADLEFT)
elif (xhat == 0) :
server.unpress(HIDButtons.DPADRIGHT)
server.send()
server.unpress(HIDButtons.DPADLEFT)
if (yhat == 1):
server.press(HIDButtons.DPADUP)
elif (yhat == -1):
server.press(HIDButtons.DPADDOWN)
elif (yhat == 0) :
server.unpress(HIDButtons.DPADDOWN)
server.send()
server.unpress(HIDButtons.DPADUP)
server.send()
if event.type == pygame.JOYAXISMOTION:
#xbox:Left Thumbstick | axis 0 : L/R | axis 1 : U/D
#xbox: axis 2 : L trigger (-1:1)
#xbox: Right Thumbstick | axis 3 : L/R | axis 4 : U/D
#xbox: axis 5 : R trigger (-1:1)
#if event.axis == 0: print("Joystick {} axis {} moved to {}.".format(event.joy,event.axis, event.value))
if event.axis == j_axis[0] :
if fabs(event.value)>deadZone:
server.circle_pad_coords[0] = int(32767*event.value) #left_joy_x
else:
#note: circle_pad_neutral() == circle_pad_coords = [0,0] (that is both X and Y coords are set to zero)
server.circle_pad_coords[0] = int(0) #left_joy_x
server.send()
if event.axis==j_axis[1] :
if fabs(event.value)>deadZone:
server.circle_pad_coords[1] = int(-32767*event.value) #left_joy_y
else:
server.circle_pad_coords[1] = int(0) #left_joy_y
server.send()
#using the right trigger to touch the screen only works if you have a right trigger and right thumbstick
if len(j_axis)>=6:
if (event.axis in [j_axis[2], j_axis[3],j_axis[5]]): #r trig = mouse click
(circx, circy)=(160+int(159*joystick.get_axis(j_axis[2])),120+int(119*joystick.get_axis(j_axis[3])))
#draw location of touch point but only when joystick moves
screen.blit(botSr, (0,0))
pygame.draw.circle(screen, (0,0,0), (circx, circy), 5, 2)
pygame.display.update()
if (joystick.get_axis(j_axis[5])>0.0): #Want to be able to "drag"
server.touch(circx,circy)
server.send()
pygame.draw.circle(screen, (255,255,255), (circx, circy), 3, 0)
pygame.display.update()
if event.axis == j_axis[5]: #r trig
if event.value < 0: #less than half depression #notme_irl
server.clear_touch()
server.send()
print("\nClearing everything and closing program")
server.clear_everything()
pygame.quit()
|
the-stack_0_631 | from imbox import Imbox
import html2text
import requests
import json
import time
with open('config.json') as config_file:
data = json.load(config_file)
API_KEY = data['API_KEY']
OAUTH_TOKEN = data['OAUTH_TOKEN']
trello_list_id = data['trello_list_id']
# SSL Context docs https://docs.python.org/3/library/ssl.html#ssl.create_default_context
def get_text(content):
html = (str(content))
text_maker = html2text.HTML2Text()
text_maker.ignore_links = True
text_maker.bypass_tables = False
text = text_maker.handle(html)
# Slice everything that comes between html': and ]}
start = "html':"
end = "]}"
mail_content = text[text.find(start) + len(start):text.rfind(end)]
# Normalize content, removing unknown chars
mail_content = mail_content.replace("['","")
mail_content = mail_content.replace('\\xa0', ' ')
mail_content = mail_content.replace("\\r\\n'","")
return mail_content
def send_to_trello(mail_content,subject):
r = requests.post("https://api.trello.com/1/cards?key=" + \
API_KEY + "&token=" + OAUTH_TOKEN + \
"&name=" + subject + "&idList=" + \
trello_list_id + "&desc=" + \
mail_content)
return r
with Imbox('imap.gmail.com',
username = data['mail_username'],
password = data['mail_password'],
ssl = True,
ssl_context = None,
starttls = False) as imbox:
fetch_mail_type = imbox.messages(sent_from = data['mail_from_username'])
# Get all folders
#status, folders_with_additional_info = imbox.folders()
# Gets all messages from the inbox
#all_inbox_messages = imbox.messages()
for uid, message in fetch_mail_type:
# Every message is an object with the following keys
origin = message.sent_from
receiver = message.sent_to
subject = message.subject
headers = message.headers
message_id = message.message_id
message_date = message.date
content = message.body
message_attachments = message.attachments
result = get_text(content)
response = send_to_trello(result,subject)
if response.status_code == 200:
#imbox.mark_seen(uid)
imbox.delete(uid)
        time.sleep(1)
|
the-stack_0_634 | import pylab
class Animal:
def __init__(self, name, egg_laying, scales, poisonous, cold_blood, legs, reptile):
self.name = name
self.egg_laying = egg_laying
self.scales = scales
self.poisonous = poisonous
self.legs = legs
self.cold_blood = cold_blood
self.reptile = reptile
def get_name(self):
return self.name
def distance(self, another_animal):
distance = 0
if self.egg_laying != another_animal.egg_laying:
distance += 1
if self.scales != another_animal.scales:
distance += 1
if self.poisonous != another_animal.poisonous:
distance += 1
if self.legs != another_animal.legs:
distance += 1
if self.cold_blood != another_animal.cold_blood:
distance += 1
if self.reptile != another_animal.reptile:
distance += 1
return distance
def __str__(self):
return self.name
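# Example (illustrative): distance() is a Hamming-style count of differing
# attributes, so with the constructor order (egg_laying, scales, poisonous,
# cold_blood, legs, reptile):
#   Animal('cobra', 1, 1, 1, 1, 0, 1).distance(Animal('chicken', 1, 1, 0, 1, 2, 0))
# returns 3 (poisonous, legs and reptile differ).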
def std_dev(l):
    # Standard deviation of the numeric values in l; z_scale_features passes an
    # array of feature values, so the values themselves are averaged.
    if len(l) == 0:
        return float('NaN')
    summ = 0
    for i in l:
        summ += i
    mean = summ / float(len(l))
    tot = 0.0
    for i in l:
        tot += (i - mean) ** 2
    std = (tot / len(l)) ** 0.5
    return std
def z_scale_features(vals):
result = pylab.array(vals)
mean = float(sum(vals)) / len(vals)
result = result - mean
return result / std_dev(result)
def i_scale_features(vals):
min_vals, max_vals = min(vals), max(vals)
fit = pylab.polyfit([min_vals, max_vals], [0, 1], 1)
return pylab.polyval(fit, vals)
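# Example (illustrative, using the numeric std_dev above):
#   z_scale_features([0, 1, 2])  -> approximately [-1.2247, 0.0, 1.2247]
#   i_scale_features([0, 1, 2])  -> [0.0, 0.5, 1.0]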
animals = [Animal('cobra', 1, 1, 1, 1, 0, 1),
Animal('rattlesnake', 1, 1, 1, 1, 0, 1),
Animal('boa constrictor', 0, 1, 0, 1, 0, 1),
Animal('chicken', 1, 1, 0, 1, 2, 0),
Animal('guppy', 0, 1, 0, 0, 0, 0),
Animal('dart frog', 1, 0, 1, 0, 4, 0),
Animal('zebra', 0, 0, 0, 0, 4, 0),
Animal('python', 1, 1, 0, 1, 0, 1),
Animal('alligator', 1, 1, 0, 1, 4, 1)]
def distance_matrix(animals, precision):
column_label = []
for a in animals:
column_label.append(a.get_name())
row_label = column_label[:]
table_vals = []
# Get distance between pairs of animals
for a1 in animals:
row = []
for a2 in animals:
if a1 == a2:
row.append('--')
else:
distance = a1.distance(a2)
row.append(str(round(distance, precision)))
table_vals.append(row)
table = pylab.table(rowLabels=row_label,
colLabels=column_label,
cellText=table_vals,
cellLoc='center',
loc='center',
colWidths=[0.138] * len(animals))
table.scale(1, 2.5)
pylab.axis('off')
pylab.savefig('distance')
distance_matrix(animals, 3)
|
the-stack_0_635 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Note: To use the 'upload' functionality of this file, you must:
# $ pipenv install twine --dev
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
# Package meta-data.
NAME = 'mypackage'
DESCRIPTION = 'My short description for my project.'
URL = 'https://github.com/me/myproject'
EMAIL = '[email protected]'
AUTHOR = 'Awesome Soul'
REQUIRES_PYTHON = '>=3.8.0'
VERSION = '0.1.0'
# What packages are required for this module to be executed?
REQUIRED = [
# 'requests', 'maya', 'records',
]
# What packages are optional?
EXTRAS = {
# 'fancy feature': ['django'],
}
# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
project_slug = NAME.lower().replace("-", "_").replace(" ", "_")
with open(os.path.join(here, project_slug, '__version__.py')) as f:
exec(f.read(), about)
else:
about['__version__'] = VERSION
class UploadCommand(Command):
"""Support setup.py upload."""
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPI via Twine…')
os.system('twine upload dist/*')
self.status('Pushing git tags…')
os.system('git tag v{0}'.format(about['__version__']))
os.system('git push --tags')
sys.exit()
# Where the magic happens:
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(exclude=["tests", "*.tests", "*.tests.*", "tests.*"]),
# If your package is a single module, use this instead of 'packages':
# py_modules=['mypackage'],
# entry_points={
# 'console_scripts': ['mycli=mymodule:cli'],
# },
install_requires=REQUIRED,
extras_require=EXTRAS,
include_package_data=True,
license='MIT',
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
# $ setup.py publish support.
cmdclass={
'upload': UploadCommand,
},
)
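# Typical usage (sketch): `python setup.py upload` invokes the UploadCommand above,
# which rebuilds the sdist/wheel, publishes via Twine and pushes a version tag.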
|
the-stack_0_637 | #!/usr/bin/env python3
import pytest # type: ignore
import os
import time
import random
import pathlib
import numpy as np # type: ignore
import numpy
from glob import iglob
from pathlib import Path
import rtCommon.utils as utils # type: ignore
import rtCommon.projectUtils as putils # type: ignore
import rtCommon.validationUtils as vutils # type: ignore
from rtCommon.structDict import MatlabStructDict # type: ignore
from rtCommon.addLogin import addUserPassword
from rtCommon.webHttpHandlers import loadPasswdFile
@pytest.fixture(scope="module")
def matTestFilename(): # type: ignore
return os.path.join(os.path.dirname(__file__), 'test_input/teststruct.mat')
class TestFindNewestFile:
TEST_BASE_FILENAME = '/tmp/testdir/file1_20170101T01010'
NUM_TEST_FILES = 5
def setup_class(cls):
# create tmp directory if it doesn't exist
pathlib.Path('/tmp/testdir/').mkdir(parents=True, exist_ok=True)
# check if test files already exist, get the count of them
count_testfiles = sum(1 for _ in iglob(TestFindNewestFile.TEST_BASE_FILENAME + "*"))
if count_testfiles != TestFindNewestFile.NUM_TEST_FILES:
# remove any existing testfiles
for filename in iglob(TestFindNewestFile.TEST_BASE_FILENAME + "*"):
os.remove(filename)
# create the correct number of test files
for i in range(TestFindNewestFile.NUM_TEST_FILES):
filename = TestFindNewestFile.TEST_BASE_FILENAME + str(i)
with open(filename, 'w') as fp:
fp.write("test file")
time.sleep(1)
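                # Editorial note: the 1-second pause gives each test file a distinct
                # modification time, which is what findNewestFile discriminates on.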
def assert_result_matches_filename(self, filename):
assert filename == (self.TEST_BASE_FILENAME + str(self.NUM_TEST_FILES - 1))
def test_normalCase(self):
print("Test findNewestFile normal case:")
filename = utils.findNewestFile('/tmp/testdir', 'file1_20170101*')
self.assert_result_matches_filename(filename)
def test_emptyPath(self):
print("Test findNewestFile empty path:")
filename = utils.findNewestFile('', '/tmp/testdir/file1_20170101*')
self.assert_result_matches_filename(filename)
def test_pathInPattern(self):
print("Test findNewestFile path embedded in pattern:")
filename = utils.findNewestFile(
'/tmp/testdir', '/tmp/testdir/file1_20170101*')
self.assert_result_matches_filename(filename)
def test_pathPartiallyInPattern(self):
print("Test findNewestFile path partially in pattern:")
filename = utils.findNewestFile('/tmp', 'testdir/file1_20170101*')
self.assert_result_matches_filename(filename)
def test_noMatchingFiles(self):
print("Test findNewestFile no matching files:")
filename = utils.findNewestFile('/tmp/testdir/', 'no_such_file')
assert filename is None
class TestCompareArrays:
A = None
B = None
max_deviation = .01
def setup_class(cls):
arrayDims = [40, 50, 60]
A = np.random.random(arrayDims)
delta = np.random.random(arrayDims) * TestCompareArrays.max_deviation
B = A + (A * delta)
TestCompareArrays.A = A
TestCompareArrays.B = B
def test_compareArrays(self):
print("Test compareArrays")
# import pdb; pdb.set_trace()
result = vutils.compareArrays(self.B, self.A)
assert result['mean'] < 2 / 3 * self.max_deviation
assert result['max'] < self.max_deviation
return
def test_areArraysClose(self):
print("Test areArraysClose")
max_mean = 2 / 3 * self.max_deviation
assert vutils.areArraysClose(self.B, self.A, mean_limit=max_mean)
return
class TestCompareMatStructs:
A = None
B = None
max_deviation = .01
def setup_class(cls):
def delta(val):
return val + (val * random.random() * TestCompareMatStructs.max_deviation)
A = MatlabStructDict(
{'sub': MatlabStructDict({})}, 'sub')
A.str1 = "hello"
A.a1 = 6.0
A.sub.a2 = np.array([1, 2, 3, 4, 5], dtype=np.float)
A.sub.b2 = 7.0
A.sub.str2 = "world"
B = MatlabStructDict(
{'sub': MatlabStructDict({})}, 'sub')
B.str1 = "hello"
B.a1 = delta(A.a1)
B.sub.a2 = delta(A.a2)
B.sub.b2 = delta(A.b2)
B.sub.str2 = "world"
TestCompareMatStructs.A = A
TestCompareMatStructs.B = B
def test_compareMatStructs_all_fields(self):
print("Test compareMatStructs_all_fields")
result = vutils.compareMatStructs(self.A, self.B)
means = [result[key]['mean'] for key in result.keys()]
assert len(means) == 5
assert all(mean < self.max_deviation for mean in means)
def test_compareMatStructs_field_subset(self):
print("Test compareMatStructs_field_subset")
result = vutils.compareMatStructs(self.A, self.B, ['a2', 'str1'])
means = [result[key]['mean'] for key in result.keys()]
assert len(means) == 2
assert all(mean < self.max_deviation for mean in means)
def test_isMeanWithinThreshold(self):
a = {'val1': {'mean': .1, 'max': .2},
'val2': {'mean': .05, 'max': .075}}
assert vutils.isMeanWithinThreshold(a, .11)
assert not vutils.isMeanWithinThreshold(a, .09)
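        # Editorial note: taken together, the two asserts pin down the threshold
        # semantics -- every per-field 'mean' (here .1 and .05) has to fall below
        # the threshold, so .11 passes while .09 fails because of the .1 entry.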
class TestValidationUtils:
def test_compareMatFiles(self, matTestFilename):
res = vutils.compareMatFiles(matTestFilename, matTestFilename)
assert vutils.isMeanWithinThreshold(res, 0)
def test_pearsonsMeanCorr(self):
n1 = np.array([[1, 2, 3, 4, 5],
[np.nan, np.nan, np.nan, np.nan, np.nan]])
n2 = np.array([[1.1, 2.1, 3.2, 4.1, 5.05],
[np.nan, np.nan, np.nan, np.nan, np.nan]])
n1t = np.transpose(n1)
n2t = np.transpose(n2)
res = vutils.pearsons_mean_corr(n1t, n2t)
assert res > 0.999
class TestUtils:
def test_delete(self):
fileList = ['/tmp/testdir/d1/test1.txt', '/tmp/testdir/d1/d2/test2.txt',
'/tmp/testdir/d1/d2/d3/test3.txt', '/tmp/testdir/d1/d2/d3/test4.txt']
for file in fileList:
utils.writeFile(file, 'hello', binary=False)
# test delete files from list
assert os.path.exists(fileList[-1])
utils.deleteFilesFromList(fileList)
assert not os.path.exists(fileList[-1])
assert os.path.isdir('/tmp/testdir/d1/d2/d3')
# test delete folder
for file in fileList:
utils.writeFile(file, 'hello', binary=False)
utils.deleteFolder('/tmp/testdir/d1')
assert not os.path.isdir('/tmp/testdir/d1')
# test delete files recursively in folders, but leave folders in place
for file in fileList:
utils.writeFile(file, 'hello', binary=False)
utils.deleteFolderFiles('/tmp/testdir/d1')
assert os.path.isdir('/tmp/testdir/d1/d2/d3')
class TestAddUser:
def test_adduser(self):
testPasswordFile = '/tmp/testdir/test_pwd_file'
# start with empty file
if os.path.exists(testPasswordFile):
os.remove(testPasswordFile)
addUserPassword('a_user', 'a_password', testPasswordFile, retypePasswd=False)
addUserPassword('b_user', 'b_password', testPasswordFile, retypePasswd=False)
pwds = loadPasswdFile(testPasswordFile)
assert 'a_user' in pwds
assert 'b_user' in pwds
class TestProjectUtils:
def test_npToPy(self):
data1 = {'subject': '04', 'task': 'story', 'suffix': 'bold', 'datatype': 'func', 'run': 1}
data2 = {'a1': (1, 'two', 3.0),
'a2': {'np': numpy.float32(3), 'pyint': 4, 'str': 'five'},
'a3': [6.0, 'seven', numpy.int(8), {'a', numpy.float32(5), 'c'}]}
data2_py = {'a1': (1, 'two', 3.0),
'a2': {'np': 3.0, 'pyint': 4, 'str': 'five'},
'a3': [6.0, 'seven', 8.0, {'a', 5.0, 'c'}]}
kwargs = {'mdata': data2, 'test1': 9.0, 'test2': numpy.float32(9), 'test3': 'yes'}
kwargs_py = {'mdata': data2_py, 'test1': 9.0, 'test2': 9.0, 'test3': 'yes'}
args = (4, 'hello', data1, kwargs)
args_py = (4, 'hello', data1, kwargs_py)
res = putils.npToPy(args)
assert res == args_py
if __name__ == "__main__":
print("PYTEST MAIN:")
pytest.main()
|
the-stack_0_638 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for Felix."""
import json
from typing import Callable, Iterator, Mapping, MutableSequence, NamedTuple, Optional, Sequence, Tuple, Union
from absl import logging
from six import with_metaclass
import tensorflow as tf
import felix_constants as constants
import tokenization
FeedDict = Mapping[str, Sequence[Sequence[float]]]
SourceTargetPair = Tuple[MutableSequence[str], str]
def get_token_list(text):
"""Returns a list of tokens.
This function expects that the tokens in the text are separated by space
character(s). Example: "ca n't , touch". This is the case at least for the
public DiscoFuse and WikiSplit datasets.
Args:
text: String to be split into tokens.
"""
return text.split()
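# Example (illustrative): get_token_list("ca n't , touch")
# returns ['ca', "n't", ',', 'touch'].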
def build_feed_dict(tokens,
tokenizer,
target_tokens = None,
max_seq_length = 128,
max_predictions_per_seq = 20):
"""Returns a dictionary used for predicting/training the insertion model.
Converts a list of source tokens, containing masks, to a dictionary of
features used by a TF model. If a target sequence is provided, then the
targets for the MASKs are set.
Args:
tokens: Input tokens, with mask tokens.
tokenizer: Tokenizer used to convert tokens to IDs.
target_tokens: (Optional) The targets of the mask tokens.
max_seq_length: Maximum sequence length.
max_predictions_per_seq: Maximum number of mask tokens.
Returns:
Dictionary with model features or None if `len(tokens) > max_seq_length` or
if the number of MASKs is larger than `max_predictions_per_seq`.
"""
mask_position = []
mask_target_id = []
mask_target_weight = []
for idx, token in enumerate(tokens):
if token != constants.MASK:
continue
mask_position.append(idx)
if target_tokens:
mask_target_id += tokenizer.convert_tokens_to_ids([target_tokens[idx]])
else:
mask_target_id.append(0)
mask_target_weight.append(1.0)
  # Deleted tokens (bracketed by the DELETE_SPAN_START/END markers) should have a segment_id of 2.
unused = False
segment_ids = []
for token in tokens:
if token == constants.DELETE_SPAN_START or unused:
unused = True
segment_ids.append(2)
else:
segment_ids.append(0)
if token == constants.DELETE_SPAN_END:
unused = False
input_mask = [1] * len(tokens)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
assert len(segment_ids) == len(input_ids)
# Padding.
while len(input_ids) < max_seq_length:
segment_ids.append(0)
input_ids.append(0)
input_mask.append(0)
if len(input_ids) > max_seq_length:
input_ids = input_ids[:max_seq_length]
segment_ids = segment_ids[:max_seq_length]
input_mask = input_mask[:max_seq_length]
#return None
assert len(input_ids) == max_seq_length, "len(input_ids) = {}".format(
len(input_ids))
assert len(input_mask) == max_seq_length, "len(input_mask) = {}".format(
len(input_mask))
assert len(segment_ids) == max_seq_length, "len(segment_ids) = {}".format(
len(segment_ids))
if len(mask_position) > max_predictions_per_seq:
mask_position = mask_position[:max_predictions_per_seq]
#return None
while len(mask_position) < max_predictions_per_seq:
mask_target_weight.append(0)
mask_position.append(0)
mask_target_id.append(0)
feed_dict = {
"input_ids": [input_ids],
"input_mask": [input_mask],
"segment_ids": [segment_ids],
"masked_lm_positions": [mask_position],
"masked_lm_ids": [mask_target_id],
"masked_lm_weights": [mask_target_weight],
}
return feed_dict
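# Illustrative call (sketch; assumes a BERT-style tokenizer from the accompanying
# `tokenization` module and a vocabulary that contains constants.MASK):
#
#   feed = build_feed_dict(['[CLS]', 'a', constants.MASK, '[SEP]'], tokenizer,
#                          target_tokens=['[CLS]', 'a', 'dog', '[SEP]'])
#   # feed['masked_lm_positions'][0] marks index 2 and feed['masked_lm_ids'][0]
#   # holds the vocabulary id of 'dog'.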
def _int_feature(values):
return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def _float_feature(values):
return tf.train.Feature(float_list=tf.train.FloatList(value=values))
def _text_feature(values):
return tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[element.encode("utf8") for element in values]))
def feed_dict_to_tf_example(feed_dict,
source = None,
target = None):
"""Returns a TF example for MLM insertion model."""
features = {
"input_ids": _int_feature(feed_dict["input_ids"][0]),
"input_mask": _int_feature(feed_dict["input_mask"][0]),
"segment_ids": _int_feature(feed_dict["segment_ids"][0]),
"masked_lm_positions": _int_feature(feed_dict["masked_lm_positions"][0]),
"masked_lm_ids": _int_feature(feed_dict["masked_lm_ids"][0]),
"masked_lm_weights": _float_feature(feed_dict["masked_lm_weights"][0]),
}
if source:
features["text_source"] = _text_feature([source])
if target:
features["text_target"] = _text_feature([target])
return tf.train.Example(features=tf.train.Features(feature=features))
class Features(NamedTuple):
"""A data holder for various features that can be read from files."""
source: MutableSequence[str]
target: str
output_variant_id: Optional[int] = None
@staticmethod
def from_source_target_pair(pair):
return Features(source=pair[0], target=pair[1])
SourcesAndFeaturesPair = Tuple[MutableSequence[str], Features]
def text_file_iterator(fname_pattern):
"""Returns an iterator over lines of the files covered by fname_pattern."""
for fname in get_filenames(fname_pattern):
with tf.io.gfile.GFile(fname, "r") as f:
for line in f:
yield line
def skip_header_text_file_iterator(fname_pattern):
"""Similar to text_file_iterator, but skipping the first line of each file."""
for fname in get_filenames(fname_pattern):
    it = tf.io.gfile.GFile(fname, "r")
    next(it)  # skip the header line
for line in it:
yield line
def get_parse_tsv_line_fn(
return_none_on_error = False,
reverse = False):
"""A higher-order function producing TSV line-parsing functions.
Args:
return_none_on_error: Whether to return None on encountering an error (such
as too few TSV columns) rather than raising an Error.
reverse: When True, returns ([`target`], `source`) instead of ([`source`],
      `target`). Useful for working with "reverse" (a.k.a. "noise") models that
      go from `target` to `source`.
Returns:
A parsing function that goes from a text line to a ([source], target) pair
(or a ([`target`], `source`) pair when `reverse`=True).
"""
def parse_tsv_line(line):
"""Parses the first two columns, `source` and `target`, from a TSV line.
Any further columns are ignored.
Args:
line: A text line.
Returns:
a tuple ([source], target), with `source` being wrapped in a list.
Raises:
ValueError: when the line has less than two TSV columns and
`return_none_on_error`=False.
"""
split = line.rstrip("\n").split("\t")
if len(split) < 2:
message = 'TSV line has less than two tab-delimited fields:\n"{}"'.format(
line)
if return_none_on_error:
logging.warning(message)
return None
else:
raise ValueError(message)
source, target = split[:2]
if reverse:
return [target], source
else:
return [source], target
return parse_tsv_line
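# Example (illustrative): get_parse_tsv_line_fn()("source text\ttarget text\n")
# returns (['source text'], 'target text'); with reverse=True the same line
# yields (['target text'], 'source text').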
def parse_discofuse_line(line):
"""Parses a DiscoFuse example from a line from a TSV file.
The documentation for this format:
https://github.com/google-research-datasets/discofuse#data-format
Args:
line: A line from a TSV file.
Returns:
A pair (<source texts list>, <target text>).
"""
coherent_1, coherent_2, incoherent_1, incoherent_2, _, _, _, _ = (
line.rstrip("\n").split("\t"))
# Strip because the second coherent sentence might be empty.
fusion = (coherent_1 + " " + coherent_2).strip()
return [incoherent_1, incoherent_2], fusion
def parse_iterate_plain_line(line):
return _parse_iterate_line(line, with_intent=False)
def parse_iterate_intent_line(line):
return _parse_iterate_line(line, with_intent=True)
def _parse_iterate_line(line, with_intent=False):
"""Parses a IteraTE example from a line from a (line-by-line) JSON file.
Args:
line: A JSON line from a line-by-line JSON file.
Returns:
A tuple ([source], target), with `source` being wrapped in a list.
"""
json_line = json.loads(line)
if with_intent:
src = json_line["before_sent_with_intent"]
else:
src = json_line["before_sent"]
tgt = json_line["after_sent"]
return [src], tgt
def yield_sources_and_targets(
input_file_pattern,
input_format,
source_key = None,
target_key = None):
"""Produces an iterator over pairs (source list, targets) parsed from a file.
Args:
input_file_pattern: Path/pattern to the input file(s).
input_format: Format of the input file.
source_key: Source text feature name. Only considered when
`input_format=sstable`.
target_key: Target text feature name. Only considered when
`input_format=sstable`.
Yields:
Pairs of (list of source texts, target text).
"""
data_spec = {
"wikisplit": (text_file_iterator, get_parse_tsv_line_fn()),
"discofuse": (skip_header_text_file_iterator, parse_discofuse_line),
"IteraTE_Plain": (skip_header_text_file_iterator, parse_iterate_plain_line),
"IteraTE_Intent": (skip_header_text_file_iterator, parse_iterate_intent_line),
}
if input_format not in data_spec:
raise ValueError("Unsupported input_format: {}".format(input_format))
file_iterator_fn, parse_fn = data_spec[input_format]
for item in file_iterator_fn(input_file_pattern):
# Pytype correctly infers possible types for `item`, but does not handle
# well the various possible signatures of `parse_fn`.
parsed_item = parse_fn(item) # pytype: disable=wrong-arg-types
if parsed_item is not None:
yield parsed_item
def get_filenames(patterns):
"""Obtains a list of filenames corresponding to the pattern.
  Supports glob patterns and plain file names, as well as
  comma-separated lists of patterns.
Caveat: Will not work if the patterns have commas (',') in them.
Args:
patterns: File pattern or comma-separated patterns.
Raises:
RuntimeError: If `patterns` is valid but cannot be expanded/does not match
any files.
Returns:
list of individual paths to each file.
"""
all_files = []
for pattern in patterns.split(","):
    # Expand the pattern; a plain file name simply matches itself.
files = tf.io.gfile.glob(pattern)
if not files:
raise RuntimeError("Could not find files matching: %s" % pattern)
all_files.extend(files)
return all_files
def read_label_map(
path,
use_str_keys = False):
"""Returns label map read from the given path.
Args:
path: Path to the label map file.
use_str_keys: Whether to use label strings as keys instead of
(base tag, num insertions) tuple keys. The latter is only used by
FelixInsert.
"""
label_map = {}
with tf.io.gfile.GFile(path) as f:
if path.endswith(".json"):
label_map = json.load(f)
else:
for tag in f:
tag = tag.strip()
# Empty lines are skipped.
if tag:
if tag in label_map:
raise ValueError("Duplicate label in label_map: {}".format(tag))
label_map[tag] = len(label_map)
if not use_str_keys:
new_label_map = {}
for key, val in label_map.items():
if "|" in key:
pos_pipe = key.index("|")
new_key = (key[:pos_pipe], int(key[pos_pipe + 1:]))
else:
new_key = (key, 0)
new_label_map[new_key] = val
label_map = new_label_map
return label_map
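# Example (illustrative): for a plain-text label map file with the lines
# "KEEP", "DELETE" and "KEEP|1", read_label_map(path) returns
# {('KEEP', 0): 0, ('DELETE', 0): 1, ('KEEP', 1): 2}, while
# read_label_map(path, use_str_keys=True) keeps the raw strings as keys.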
|
the-stack_0_639 | import argparse
import os
from util import util
import torch
import models
import data
class BaseOptions():
"""This class defines options used during both training and test time.
It also implements several helper functions such as parsing, printing, and saving the options.
It also gathers additional options defined in <modify_commandline_options> functions in both dataset class and model class.
"""
def __init__(self):
"""Reset the class; indicates the class hasn't been initailized"""
self.initialized = False
def initialize(self, parser):
"""Define the common options that are used in both training and test."""
# basic parameters
parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
# model parameters
parser.add_argument('--model', type=str, default='cycle_gan', help='chooses which model to use. [cycle_gan | pix2pix | test | colorization]')
parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')
parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale')
parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
parser.add_argument('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
parser.add_argument('--netG', type=str, default='resnet_9blocks', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')
parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]')
parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')
parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
parser.add_argument('--conv_type', type=str, default='conv2d', help='conv type [conv2d | dcn_v1 | dcn_v2 | mixed]')
# dataset parameters
parser.add_argument('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA')
parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data')
parser.add_argument('--batch_size', type=int, default=1, help='input batch size')
parser.add_argument('--load_size', type=int, default=286, help='scale images to this size')
parser.add_argument('--crop_size', type=int, default=256, help='then crop to this size')
parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
parser.add_argument('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')
parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')
# additional parameters
parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
parser.add_argument('--load_iter', type=int, default='0', help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')
self.initialized = True
return parser
def gather_options(self):
"""Initialize our parser with basic options(only once).
Add additional model-specific and dataset-specific options.
These options are defined in the <modify_commandline_options> function
in model and dataset classes.
"""
if not self.initialized: # check if it has been initialized
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser = self.initialize(parser)
# get the basic options
opt, _ = parser.parse_known_args()
# modify model-related parser options
model_name = opt.model
model_option_setter = models.get_option_setter(model_name)
parser = model_option_setter(parser, self.isTrain)
opt, _ = parser.parse_known_args() # parse again with new defaults
# modify dataset-related parser options
dataset_name = opt.dataset_mode
dataset_option_setter = data.get_option_setter(dataset_name)
parser = dataset_option_setter(parser, self.isTrain)
# save and return the parser
self.parser = parser
return parser.parse_args()
def print_options(self, opt):
"""Print and save options
It will print both current options and default values(if different).
It will save options into a text file / [checkpoints_dir] / opt.txt
"""
message = ''
message += '----------------- Options ---------------\n'
for k, v in sorted(vars(opt).items()):
comment = ''
default = self.parser.get_default(k)
if v != default:
comment = '\t[default: %s]' % str(default)
message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
message += '----------------- End -------------------'
print(message)
# save to the disk
expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
util.mkdirs(expr_dir)
file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))
with open(file_name, 'wt') as opt_file:
opt_file.write(message)
opt_file.write('\n')
def parse(self):
"""Parse our options, create checkpoints directory suffix, and set up gpu device."""
opt = self.gather_options()
opt.isTrain = self.isTrain # train or test
# process opt.suffix
if opt.suffix:
suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''
opt.name = opt.name + suffix
self.print_options(opt)
# set gpu ids
str_ids = opt.gpu_ids.split(',')
opt.gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if id >= 0:
opt.gpu_ids.append(id)
if len(opt.gpu_ids) > 0:
torch.cuda.set_device(opt.gpu_ids[0])
self.opt = opt
return self.opt
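# Typical usage (sketch): the train/test option classes in this codebase are
# expected to subclass BaseOptions, set self.isTrain, and then call parse(),
# e.g. `opt = TrainOptions().parse()` (TrainOptions itself is defined elsewhere).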
|
the-stack_0_640 | # Copyright 2017 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
from collections import OrderedDict
import inspect
import os
from rclpy.clock import Clock
from rclpy.impl.implementation_singleton import rclpy_implementation as _rclpy
# Known filenames from which logging methods can be called (will be ignored in `_find_caller`).
_internal_callers = []
# This will cause rclpy filenames to be registered in `_internal_callers` on first logging call.
_populate_internal_callers = True
def _find_caller(frame):
"""Get the first calling frame that is outside of rclpy."""
global _populate_internal_callers
global _internal_callers
if _populate_internal_callers:
# Populate the list of internal filenames from which logging methods can be called.
# This has to be done from within a function to avoid cyclic module imports.
import rclpy.logging
# Extend the list to preserve any filenames that may have been added by third parties.
# Note: the call to `realpath` will also resolve mixed slashes that can result on Windows.
_internal_callers.extend([
os.path.realpath(__file__),
os.path.realpath(rclpy.logging.__file__),
])
_populate_internal_callers = False
file_path = os.path.realpath(inspect.getframeinfo(frame).filename)
while any(f in file_path for f in _internal_callers):
frame = frame.f_back
file_path = os.path.realpath(inspect.getframeinfo(frame).filename)
return frame
class CallerId(
namedtuple('CallerId', ['function_name', 'file_path', 'line_number', 'last_index'])):
def __new__(cls, frame=None):
if not frame:
frame = _find_caller(inspect.currentframe())
return super(CallerId, cls).__new__(
cls,
function_name=frame.f_code.co_name,
file_path=os.path.abspath(inspect.getframeinfo(frame).filename),
line_number=frame.f_lineno,
last_index=frame.f_lasti, # To distinguish between two callers on the same line
)
class LoggingFilter:
"""Base class for logging filters."""
"""
Parameters of a filter and their default value, if appropriate.
A default value of None makes a parameter required.
"""
params = {}
"""
Initialize the context of a logging call, e.g. declare variables needed for
determining the log condition and add them to the context.
"""
@classmethod
def initialize_context(cls, context, **kwargs):
# Store all parameters in the context so we can check that users never try to change them.
for param in cls.params:
context[param] = kwargs.get(param, cls.params[param])
if context[param] is None:
raise TypeError(
'Required parameter "{0}" was not specified for logging filter "{1}"'
.format(param, cls.__name__))
"""
Decide if it's appropriate to log given a context, and update the context accordingly.
"""
@staticmethod
def should_log(context):
return True
class Once(LoggingFilter):
"""Ignore all log calls except the first one."""
params = {
'once': None,
}
@classmethod
def initialize_context(cls, context, **kwargs):
super(Once, cls).initialize_context(context, **kwargs)
context['has_been_logged_once'] = False
@staticmethod
def should_log(context):
logging_condition = False
if not context['has_been_logged_once']:
logging_condition = True
context['has_been_logged_once'] = True
return logging_condition
class Throttle(LoggingFilter):
"""Ignore log calls if the last call is not longer ago than the specified duration."""
params = {
'throttle_duration_sec': None,
'throttle_time_source_type': Clock(),
}
@classmethod
def initialize_context(cls, context, **kwargs):
super(Throttle, cls).initialize_context(context, **kwargs)
context['throttle_last_logged'] = 0
if not isinstance(context['throttle_time_source_type'], Clock):
raise ValueError(
'Received throttle_time_source_type of "{0}" '
'is not a clock instance'
.format(context['throttle_time_source_type']))
@staticmethod
def should_log(context):
logging_condition = True
now = context['throttle_time_source_type'].now().nanoseconds
next_log_time = context['throttle_last_logged'] + (context['throttle_duration_sec'] * 1e+9)
logging_condition = now >= next_log_time
if logging_condition:
context['throttle_last_logged'] = now
return logging_condition
class SkipFirst(LoggingFilter):
"""Ignore the first log call but process all subsequent calls."""
params = {
'skip_first': None,
}
@classmethod
def initialize_context(cls, context, **kwargs):
super(SkipFirst, cls).initialize_context(context, **kwargs)
context['first_has_been_skipped'] = False
@staticmethod
def should_log(context):
logging_condition = True
if not context['first_has_been_skipped']:
logging_condition = False
context['first_has_been_skipped'] = True
return logging_condition
# The ordering of this dictionary defines the order in which filters will be processed.
supported_filters = OrderedDict()
supported_filters['throttle'] = Throttle
supported_filters['skip_first'] = SkipFirst
supported_filters['once'] = Once
def get_filters_from_kwargs(**kwargs):
"""
Determine which filters have had parameters specified in the given keyword arguments.
Returns the list of filters using the order specified by `supported_filters`.
"""
detected_filters = []
all_supported_params = []
for supported_filter, filter_class in supported_filters.items():
filter_params = filter_class.params.keys()
all_supported_params.extend(filter_params)
if any(kwargs.get(param_name) for param_name in filter_params):
detected_filters.append(supported_filter)
# Check that all required parameters (with no default value) have been specified
for detected_filter in detected_filters:
for param_name, default_value in supported_filters[detected_filter].params.items():
if param_name in kwargs:
continue
# Param not specified; use the default.
if default_value is None:
raise TypeError(
'required parameter "{0}" not specified '
                    'but is required for the logging filter "{1}"'.format(
param_name, detected_filter))
kwargs[param_name] = default_value
for kwarg in kwargs:
if kwarg not in all_supported_params:
raise TypeError(
'parameter "{0}" is not one of the recognized logging options "{1}"'
.format(kwarg, all_supported_params)
)
return detected_filters
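# Example (illustrative): get_filters_from_kwargs(throttle_duration_sec=1.0)
# returns ['throttle'] (the default throttle clock is filled in), while
# get_filters_from_kwargs(skip_first=True, once=True) returns
# ['skip_first', 'once'] in the order defined by `supported_filters`.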
class RcutilsLogger:
def __init__(self, name=''):
self.name = name
self.contexts = {}
def get_child(self, name):
if not name:
raise ValueError('Child logger name must not be empty.')
if self.name:
# Prepend the name of this logger
name = self.name + '.' + name
return RcutilsLogger(name=name)
def set_level(self, level):
from rclpy.logging import LoggingSeverity
level = LoggingSeverity(level)
return _rclpy.rclpy_logging_set_logger_level(self.name, level)
def get_effective_level(self):
from rclpy.logging import LoggingSeverity
level = LoggingSeverity(
_rclpy.rclpy_logging_get_logger_effective_level(self.name))
return level
def is_enabled_for(self, severity):
from rclpy.logging import LoggingSeverity
severity = LoggingSeverity(severity)
return _rclpy.rclpy_logging_logger_is_enabled_for(self.name, severity)
def log(self, message, severity, **kwargs):
r"""
Log a message with the specified severity.
The message will not be logged if:
* the logger is not enabled for the message's severity (the message severity is less than
the level of the logger), or
* a logging filter causes the message to be skipped.
.. note::
Logging filters will only be evaluated if the logger is enabled for the message's
severity.
:param message str: message to log.
:param severity: severity of the message.
:type severity: :py:class:LoggingSeverity
:keyword name str: name of the logger to use.
:param \**kwargs: optional parameters for logging filters (see below).
:Keyword Arguments:
* *throttle_duration_sec* (``float``) --
Duration of the throttle interval for the :py:class:Throttle: filter.
* *throttle_time_source_type* (``str``) --
Optional time source type for the :py:class:Throttle: filter (default of
``RCUTILS_STEADY_TIME``)
* *skip_first* (``bool``) --
If True, enable the :py:class:SkipFirst: filter.
* *once* (``bool``) --
If True, enable the :py:class:Once: filter.
:returns: False if a filter caused the message to not be logged; True otherwise.
:raises: TypeError on invalid filter parameter combinations.
:raises: ValueError on invalid parameters values.
:rtype: bool
"""
# Gather context info and check filters only if the severity is appropriate.
if not self.is_enabled_for(severity):
return False
from rclpy.logging import LoggingSeverity
severity = LoggingSeverity(severity)
name = kwargs.pop('name', self.name)
# Infer the requested log filters from the keyword arguments
detected_filters = get_filters_from_kwargs(**kwargs)
# Get/prepare the context corresponding to the caller.
caller_id = CallerId()
if caller_id not in self.contexts:
context = {'name': name, 'severity': severity}
for detected_filter in detected_filters:
if detected_filter in supported_filters:
supported_filters[detected_filter].initialize_context(context, **kwargs)
context['filters'] = detected_filters
self.contexts[caller_id] = context
else:
context = self.contexts[caller_id]
# Don't support any changes to the logger.
if severity != context['severity']:
raise ValueError('Logger severity cannot be changed between calls.')
if name != context['name']:
raise ValueError('Logger name cannot be changed between calls.')
if detected_filters != context['filters']:
raise ValueError('Requested logging filters cannot be changed between calls.')
for detected_filter in detected_filters:
filter_params = supported_filters[detected_filter].params
if any(context[p] != kwargs.get(p, filter_params[p]) for p in filter_params):
raise ValueError(
'Logging filter parameters cannot be changed between calls.')
# Check if any filter determines the message shouldn't be processed.
# Note(dhood): even if a message doesn't get logged, a filter might still update its state
# as if it had been. This matches the behavior of the C logging macros provided by rcutils.
for logging_filter in context['filters']:
if not supported_filters[logging_filter].should_log(context):
return False
# Call the relevant function from the C extension.
_rclpy.rclpy_logging_rcutils_log(
severity, name, message,
caller_id.function_name, caller_id.file_path, caller_id.line_number)
return True
def debug(self, message, **kwargs):
"""Log a message with `DEBUG` severity via :py:classmethod:RcutilsLogger.log:."""
from rclpy.logging import LoggingSeverity
return self.log(message, LoggingSeverity.DEBUG, **kwargs)
def info(self, message, **kwargs):
"""Log a message with `INFO` severity via :py:classmethod:RcutilsLogger.log:."""
from rclpy.logging import LoggingSeverity
return self.log(message, LoggingSeverity.INFO, **kwargs)
def warning(self, message, **kwargs):
"""Log a message with `WARN` severity via :py:classmethod:RcutilsLogger.log:."""
from rclpy.logging import LoggingSeverity
return self.log(message, LoggingSeverity.WARN, **kwargs)
def warn(self, message, **kwargs):
"""
Log a message with `WARN` severity via :py:classmethod:RcutilsLogger.log:.
Deprecated in favor of :py:classmethod:RcutilsLogger.warning:.
"""
return self.warning(message, **kwargs)
def error(self, message, **kwargs):
"""Log a message with `ERROR` severity via :py:classmethod:RcutilsLogger.log:."""
from rclpy.logging import LoggingSeverity
return self.log(message, LoggingSeverity.ERROR, **kwargs)
def fatal(self, message, **kwargs):
"""Log a message with `FATAL` severity via :py:classmethod:RcutilsLogger.log:."""
from rclpy.logging import LoggingSeverity
return self.log(message, LoggingSeverity.FATAL, **kwargs)
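    # Illustrative usage (sketch): a logger obtained from a node (an RcutilsLogger
    # instance) combines the severity methods above with the filter keyword
    # arguments documented in log(), e.g.
    #   logger.info('heartbeat', throttle_duration_sec=1.0)  # at most once per second
    #   logger.warning('shown a single time', once=True)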
|
the-stack_0_645 | """ Contact serializers. """
# Django REST Framework
from ast import Num
from statistics import mode
from rest_framework import serializers
# Models
from coeadmin.record.models.person import Person
from coeadmin.record.models.contact import Contact
# Serializers
from coeadmin.record.serializers.person import PersonModelSerializer
# Utilities
from datetime import datetime, timedelta
class ContactModelSerializer(serializers.ModelSerializer):
""" Contact serializer. """
person = PersonModelSerializer(allow_null=True)
class Meta:
""" Meta class. """
model = Contact
fields = (
'id',
'person',
'contact_date',
'contact_type',
'insolation_days',
'high_insulation_date',
'is_active',
)
read_only_fields = (
'id',
'person'
)
class AddContactSerializer(serializers.ModelSerializer):
""" Add contact serializer. """
class Meta:
""" Meta class. """
model = Contact
fields = (
'id',
'person',
'contact_date',
'contact_type',
'insolation_days',
'high_insulation_date',
'is_active',
)
def create(self, validate_data):
""" Create the contact. """
positive = self.context['positive']
person = validate_data['person']
days = validate_data['insolation_days']
        contact_date = validate_data['contact_date']
        contact = Contact.objects.create(
            positive=positive,
            person=person,
            contact_date=contact_date,
            contact_type=validate_data['contact_type'],
            insolation_days=days,
            high_insulation_date=contact_date + timedelta(days=days),
)
return contact |
the-stack_0_647 | ''' Tests for netcdf '''
from __future__ import division, print_function, absolute_import
import os
from os.path import join as pjoin, dirname
import shutil
import tempfile
import warnings
from io import BytesIO
from glob import glob
from contextlib import contextmanager
import numpy as np
from numpy.testing import (assert_, assert_allclose, assert_raises,
assert_equal, run_module_suite)
from scipy.io.netcdf import netcdf_file
from scipy._lib._tmpdirs import in_tempdir
TEST_DATA_PATH = pjoin(dirname(__file__), 'data')
N_EG_ELS = 11 # number of elements for example variable
VARTYPE_EG = 'b' # var type for example variable
@contextmanager
def make_simple(*args, **kwargs):
f = netcdf_file(*args, **kwargs)
f.history = 'Created for a test'
f.createDimension('time', N_EG_ELS)
time = f.createVariable('time', VARTYPE_EG, ('time',))
time[:] = np.arange(N_EG_ELS)
time.units = 'days since 2008-01-01'
f.flush()
yield f
f.close()
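# Illustrative usage (sketch), mirroring the round-trip tests below:
#   buf = BytesIO()
#   with make_simple(buf, 'w'):
#       contents = buf.getvalue()
#   with netcdf_file(BytesIO(contents)) as f:
#       check_simple(f)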
def check_simple(ncfileobj):
'''Example fileobj tests '''
assert_equal(ncfileobj.history, b'Created for a test')
time = ncfileobj.variables['time']
assert_equal(time.units, b'days since 2008-01-01')
assert_equal(time.shape, (N_EG_ELS,))
assert_equal(time[-1], N_EG_ELS-1)
def assert_mask_matches(arr, expected_mask):
'''
Asserts that the mask of arr is effectively the same as expected_mask.
In contrast to numpy.ma.testutils.assert_mask_equal, this function allows
testing the 'mask' of a standard numpy array (the mask in this case is treated
as all False).
Parameters
----------
arr: ndarray or MaskedArray
Array to test.
expected_mask: array_like of booleans
A list giving the expected mask.
'''
mask = np.ma.getmaskarray(arr)
assert_equal(mask, expected_mask)
def test_read_write_files():
# test round trip for example file
cwd = os.getcwd()
try:
tmpdir = tempfile.mkdtemp()
os.chdir(tmpdir)
with make_simple('simple.nc', 'w') as f:
pass
# read the file we just created in 'a' mode
with netcdf_file('simple.nc', 'a') as f:
check_simple(f)
# add something
f._attributes['appendRan'] = 1
# To read the NetCDF file we just created::
with netcdf_file('simple.nc') as f:
# Using mmap is the default
assert_(f.use_mmap)
check_simple(f)
assert_equal(f._attributes['appendRan'], 1)
# Read it in append (and check mmap is off)
with netcdf_file('simple.nc', 'a') as f:
assert_(not f.use_mmap)
check_simple(f)
assert_equal(f._attributes['appendRan'], 1)
# Now without mmap
with netcdf_file('simple.nc', mmap=False) as f:
# Using mmap is the default
assert_(not f.use_mmap)
check_simple(f)
# To read the NetCDF file we just created, as file object, no
# mmap. When n * n_bytes(var_type) is not divisible by 4, this
# raised an error in pupynere 1.0.12 and scipy rev 5893, because
# calculated vsize was rounding up in units of 4 - see
# http://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html
with open('simple.nc', 'rb') as fobj:
with netcdf_file(fobj) as f:
# by default, don't use mmap for file-like
assert_(not f.use_mmap)
check_simple(f)
# Read file from fileobj, with mmap
with open('simple.nc', 'rb') as fobj:
with netcdf_file(fobj, mmap=True) as f:
assert_(f.use_mmap)
check_simple(f)
# Again read it in append mode (adding another att)
with open('simple.nc', 'r+b') as fobj:
with netcdf_file(fobj, 'a') as f:
assert_(not f.use_mmap)
check_simple(f)
f.createDimension('app_dim', 1)
var = f.createVariable('app_var', 'i', ('app_dim',))
var[:] = 42
# And... check that app_var made it in...
with netcdf_file('simple.nc') as f:
check_simple(f)
assert_equal(f.variables['app_var'][:], 42)
except:
os.chdir(cwd)
shutil.rmtree(tmpdir)
raise
os.chdir(cwd)
shutil.rmtree(tmpdir)
def test_read_write_sio():
eg_sio1 = BytesIO()
with make_simple(eg_sio1, 'w') as f1:
str_val = eg_sio1.getvalue()
eg_sio2 = BytesIO(str_val)
with netcdf_file(eg_sio2) as f2:
check_simple(f2)
# Test that error is raised if attempting mmap for sio
eg_sio3 = BytesIO(str_val)
assert_raises(ValueError, netcdf_file, eg_sio3, 'r', True)
# Test 64-bit offset write / read
eg_sio_64 = BytesIO()
with make_simple(eg_sio_64, 'w', version=2) as f_64:
str_val = eg_sio_64.getvalue()
eg_sio_64 = BytesIO(str_val)
with netcdf_file(eg_sio_64) as f_64:
check_simple(f_64)
assert_equal(f_64.version_byte, 2)
# also when version 2 explicitly specified
eg_sio_64 = BytesIO(str_val)
with netcdf_file(eg_sio_64, version=2) as f_64:
check_simple(f_64)
assert_equal(f_64.version_byte, 2)
def test_read_example_data():
# read any example data files
for fname in glob(pjoin(TEST_DATA_PATH, '*.nc')):
with netcdf_file(fname, 'r') as f:
pass
with netcdf_file(fname, 'r', mmap=False) as f:
pass
def test_itemset_no_segfault_on_readonly():
# Regression test for ticket #1202.
# Open the test file in read-only mode.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
filename = pjoin(TEST_DATA_PATH, 'example_1.nc')
with netcdf_file(filename, 'r') as f:
time_var = f.variables['time']
# time_var.assignValue(42) should raise a RuntimeError--not seg. fault!
assert_raises(RuntimeError, time_var.assignValue, 42)
def test_write_invalid_dtype():
dtypes = ['int64', 'uint64']
if np.dtype('int').itemsize == 8: # 64-bit machines
dtypes.append('int')
if np.dtype('uint').itemsize == 8: # 64-bit machines
dtypes.append('uint')
with netcdf_file(BytesIO(), 'w') as f:
f.createDimension('time', N_EG_ELS)
for dt in dtypes:
assert_raises(ValueError, f.createVariable, 'time', dt, ('time',))
def test_flush_rewind():
stream = BytesIO()
with make_simple(stream, mode='w') as f:
x = f.createDimension('x',4)
v = f.createVariable('v', 'i2', ['x'])
v[:] = 1
f.flush()
len_single = len(stream.getvalue())
f.flush()
len_double = len(stream.getvalue())
assert_(len_single == len_double)
def test_dtype_specifiers():
# Numpy 1.7.0-dev had a bug where 'i2' wouldn't work.
# Specifying np.int16 or similar only works from the same commit as this
# comment was made.
with make_simple(BytesIO(), mode='w') as f:
f.createDimension('x',4)
f.createVariable('v1', 'i2', ['x'])
f.createVariable('v2', np.int16, ['x'])
f.createVariable('v3', np.dtype(np.int16), ['x'])
def test_ticket_1720():
io = BytesIO()
items = [0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]
with netcdf_file(io, 'w') as f:
f.history = 'Created for a test'
f.createDimension('float_var', 10)
float_var = f.createVariable('float_var', 'f', ('float_var',))
float_var[:] = items
float_var.units = 'metres'
f.flush()
contents = io.getvalue()
io = BytesIO(contents)
with netcdf_file(io, 'r') as f:
assert_equal(f.history, b'Created for a test')
float_var = f.variables['float_var']
assert_equal(float_var.units, b'metres')
assert_equal(float_var.shape, (10,))
assert_allclose(float_var[:], items)
def test_mmaps_segfault():
filename = pjoin(TEST_DATA_PATH, 'example_1.nc')
with warnings.catch_warnings():
warnings.simplefilter("error")
with netcdf_file(filename, mmap=True) as f:
x = f.variables['lat'][:]
# should not raise warnings
del x
def doit():
with netcdf_file(filename, mmap=True) as f:
return f.variables['lat'][:]
# should not crash
with warnings.catch_warnings():
warnings.simplefilter("ignore")
x = doit()
x.sum()
def test_zero_dimensional_var():
io = BytesIO()
with make_simple(io, 'w') as f:
v = f.createVariable('zerodim', 'i2', [])
# This is checking that .isrec returns a boolean - don't simplify it
# to 'assert not ...'
assert v.isrec is False, v.isrec
f.flush()
def test_byte_gatts():
# Check that global "string" atts work like they did before py3k
# unicode and general bytes confusion
with in_tempdir():
filename = 'g_byte_atts.nc'
f = netcdf_file(filename, 'w')
f._attributes['holy'] = b'grail'
f._attributes['witch'] = 'floats'
f.close()
f = netcdf_file(filename, 'r')
assert_equal(f._attributes['holy'], b'grail')
assert_equal(f._attributes['witch'], b'floats')
f.close()
def test_open_append():
# open 'w' put one attr
with in_tempdir():
filename = 'append_dat.nc'
f = netcdf_file(filename, 'w')
f._attributes['Kilroy'] = 'was here'
f.close()
# open again in 'a', read the att and and a new one
f = netcdf_file(filename, 'a')
assert_equal(f._attributes['Kilroy'], b'was here')
f._attributes['naughty'] = b'Zoot'
f.close()
# open yet again in 'r' and check both atts
f = netcdf_file(filename, 'r')
assert_equal(f._attributes['Kilroy'], b'was here')
assert_equal(f._attributes['naughty'], b'Zoot')
f.close()
def test_append_recordDimension():
dataSize = 100
with in_tempdir():
# Create file with record time dimension
with netcdf_file('withRecordDimension.nc', 'w') as f:
f.createDimension('time', None)
f.createVariable('time', 'd', ('time',))
f.createDimension('x', dataSize)
x = f.createVariable('x', 'd', ('x',))
x[:] = np.array(range(dataSize))
f.createDimension('y', dataSize)
y = f.createVariable('y', 'd', ('y',))
y[:] = np.array(range(dataSize))
f.createVariable('testData', 'i', ('time', 'x', 'y'))
f.flush()
f.close()
for i in range(2):
# Open the file in append mode and add data
with netcdf_file('withRecordDimension.nc', 'a') as f:
f.variables['time'].data = np.append(f.variables["time"].data, i)
f.variables['testData'][i, :, :] = np.ones((dataSize, dataSize))*i
f.flush()
# Read the file and check that append worked
with netcdf_file('withRecordDimension.nc') as f:
assert_equal(f.variables['time'][-1], i)
assert_equal(f.variables['testData'][-1, :, :].copy(), np.ones((dataSize, dataSize))*i)
assert_equal(f.variables['time'].data.shape[0], i+1)
assert_equal(f.variables['testData'].data.shape[0], i+1)
# Read the file and check that 'data' was not saved as user defined
# attribute of testData variable during append operation
with netcdf_file('withRecordDimension.nc') as f:
with assert_raises(KeyError) as ar:
f.variables['testData']._attributes['data']
ex = ar.exception
assert_equal(ex.args[0], 'data')
def test_maskandscale():
t = np.linspace(20, 30, 15)
t[3] = 100
tm = np.ma.masked_greater(t, 99)
fname = pjoin(TEST_DATA_PATH, 'example_2.nc')
with netcdf_file(fname, maskandscale=True) as f:
Temp = f.variables['Temperature']
assert_equal(Temp.missing_value, 9999)
assert_equal(Temp.add_offset, 20)
assert_equal(Temp.scale_factor, np.float32(0.01))
found = Temp[:].compressed()
del Temp # Remove ref to mmap, so file can be closed.
expected = np.round(tm.compressed(), 2)
assert_allclose(found, expected)
with in_tempdir():
newfname = 'ms.nc'
f = netcdf_file(newfname, 'w', maskandscale=True)
f.createDimension('Temperature', len(tm))
temp = f.createVariable('Temperature', 'i', ('Temperature',))
temp.missing_value = 9999
temp.scale_factor = 0.01
temp.add_offset = 20
temp[:] = tm
f.close()
with netcdf_file(newfname, maskandscale=True) as f:
Temp = f.variables['Temperature']
assert_equal(Temp.missing_value, 9999)
assert_equal(Temp.add_offset, 20)
assert_equal(Temp.scale_factor, np.float32(0.01))
expected = np.round(tm.compressed(), 2)
found = Temp[:].compressed()
del Temp
assert_allclose(found, expected)
# ------------------------------------------------------------------------
# Test reading with masked values (_FillValue / missing_value)
# ------------------------------------------------------------------------
def test_read_withValuesNearFillValue():
# Regression test for ticket #5626
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var1_fillval0'][:]
assert_mask_matches(vardata, [False, True, False])
def test_read_withNoFillValue():
# For a variable with no fill value, reading data with maskandscale=True
# should return unmasked data
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var2_noFillval'][:]
assert_mask_matches(vardata, [False, False, False])
assert_equal(vardata, [1,2,3])
def test_read_withFillValueAndMissingValue():
# For a variable with both _FillValue and missing_value, the _FillValue
# should be used
IRRELEVANT_VALUE = 9999
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var3_fillvalAndMissingValue'][:]
assert_mask_matches(vardata, [True, False, False])
assert_equal(vardata, [IRRELEVANT_VALUE, 2, 3])
def test_read_withMissingValue():
# For a variable with missing_value but not _FillValue, the missing_value
# should be used
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var4_missingValue'][:]
assert_mask_matches(vardata, [False, True, False])
def test_read_withFillValNaN():
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var5_fillvalNaN'][:]
assert_mask_matches(vardata, [False, True, False])
def test_read_withChar():
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var6_char'][:]
assert_mask_matches(vardata, [False, True, False])
def test_read_with2dVar():
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var7_2d'][:]
assert_mask_matches(vardata, [[True, False], [False, False], [False, True]])
def test_read_withMaskAndScaleFalse():
# If a variable has a _FillValue (or missing_value) attribute, but is read
# with maskandscale set to False, the result should be unmasked
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
# Open file with mmap=False to avoid problems with closing a mmap'ed file
# when arrays referring to its data still exist:
with netcdf_file(fname, maskandscale=False, mmap=False) as f:
vardata = f.variables['var3_fillvalAndMissingValue'][:]
assert_mask_matches(vardata, [False, False, False])
assert_equal(vardata, [1, 2, 3])
if __name__ == "__main__":
run_module_suite()
|
the-stack_0_650 | '''
Created on Oct 6, 2013 (from DialogPluginManager.py)
@author: Mark V Systems Limited
(c) Copyright 2013 Mark V Systems Limited, All rights reserved.
'''
from tkinter import simpledialog, Toplevel, font, messagebox, VERTICAL, HORIZONTAL, N, S, E, W
from tkinter.constants import DISABLED, ACTIVE
try:
from tkinter.ttk import Treeview, Scrollbar, Frame, Label, Button
except ImportError:
from ttk import Treeview, Scrollbar, Frame, Label, Button
from arelle import PackageManager, DialogURL
from arelle.CntlrWinTooltip import ToolTip
import os, time
try:
import regex as re
except ImportError:
import re
def dialogPackageManager(mainWin):
# check for updates in background
import threading
thread = threading.Thread(target=lambda cntlr=mainWin: backgroundCheckForUpdates(cntlr))
thread.daemon = True
thread.start()
def backgroundCheckForUpdates(cntlr):
cntlr.showStatus(_("Checking for updates to packages")) # clear web loading status
packageNamesWithNewerFileDates = PackageManager.packageNamesWithNewerFileDates()
if packageNamesWithNewerFileDates:
cntlr.showStatus(_("Updates are available for these packages: {0}")
.format(', '.join(packageNamesWithNewerFileDates)), clearAfter=5000)
else:
cntlr.showStatus(_("No updates found for packages."), clearAfter=5000)
time.sleep(0.1) # Mac locks up without this, may be needed for empty ui queue?
cntlr.uiThreadQueue.put((DialogPackageManager, [cntlr, packageNamesWithNewerFileDates]))
class DialogPackageManager(Toplevel):
def __init__(self, mainWin, packageNamesWithNewerFileDates):
super(DialogPackageManager, self).__init__(mainWin.parent)
self.ENABLE = _("Enable")
self.DISABLE = _("Disable")
self.parent = mainWin.parent
self.cntlr = mainWin
# copy plugins for temporary display
self.packagesConfig = PackageManager.packagesConfig
self.packagesConfigChanged = False
self.packageNamesWithNewerFileDates = packageNamesWithNewerFileDates
        parentGeometry = re.match(r"(\d+)x(\d+)[+]?([-]?\d+)[+]?([-]?\d+)", self.parent.geometry())
dialogX = int(parentGeometry.group(3))
dialogY = int(parentGeometry.group(4))
self.title(_("Taxonomy Packages Manager"))
frame = Frame(self)
# left button frame
buttonFrame = Frame(frame, width=40)
buttonFrame.columnconfigure(0, weight=1)
addLabel = Label(buttonFrame, text=_("Find taxonomy packages:"), wraplength=64, justify="center")
addLocalButton = Button(buttonFrame, text=_("Locally"), command=self.findLocally)
ToolTip(addLocalButton, text=_("File chooser allows selecting taxonomy packages to add (or reload), from the local file system. "
"Select either a PWD or prior taxonomy package zip file, or a taxonomy manifest (.taxonomyPackage.xml) within an unzipped taxonomy package. "), wraplength=360)
addWebButton = Button(buttonFrame, text=_("On Web"), command=self.findOnWeb)
ToolTip(addWebButton, text=_("Dialog to enter URL full path to load (or reload) package, from the web or local file system. "
"URL may be either a PWD or prior taxonomy package zip file, or a taxonomy manifest (.taxonomyPackage.xml) within an unzipped taxonomy package. "), wraplength=360)
manifestNameButton = Button(buttonFrame, text=_("Manifest"), command=self.manifestName)
ToolTip(manifestNameButton, text=_("Provide pre-PWD non-standard archive manifest file name pattern (e.g., *taxonomyPackage.xml). "
"Uses unix file name pattern matching. "
"Multiple manifest files are supported in pre-PWD archives (such as oasis catalogs). "
"(Replaces pre-PWD search for either .taxonomyPackage.xml or catalog.xml). "), wraplength=480)
self.manifestNamePattern = ""
addLabel.grid(row=0, column=0, pady=4)
addLocalButton.grid(row=1, column=0, pady=4)
addWebButton.grid(row=2, column=0, pady=4)
manifestNameButton.grid(row=3, column=0, pady=4)
buttonFrame.grid(row=0, column=0, rowspan=3, sticky=(N, S, W), padx=3, pady=3)
# right tree frame (packages already known to arelle)
packagesFrame = Frame(frame, width=700)
vScrollbar = Scrollbar(packagesFrame, orient=VERTICAL)
hScrollbar = Scrollbar(packagesFrame, orient=HORIZONTAL)
self.packagesView = Treeview(packagesFrame, xscrollcommand=hScrollbar.set, yscrollcommand=vScrollbar.set, height=7)
self.packagesView.grid(row=0, column=0, sticky=(N, S, E, W))
self.packagesView.bind('<<TreeviewSelect>>', self.packageSelect)
hScrollbar["command"] = self.packagesView.xview
hScrollbar.grid(row=1, column=0, sticky=(E,W))
vScrollbar["command"] = self.packagesView.yview
vScrollbar.grid(row=0, column=1, sticky=(N,S))
packagesFrame.columnconfigure(0, weight=1)
packagesFrame.rowconfigure(0, weight=1)
packagesFrame.grid(row=0, column=1, columnspan=4, sticky=(N, S, E, W), padx=3, pady=3)
self.packagesView.focus_set()
self.packagesView.column("#0", width=120, anchor="w")
self.packagesView.heading("#0", text=_("Name"))
self.packagesView["columns"] = ("ver", "status", "date", "update", "descr")
self.packagesView.column("ver", width=150, anchor="w", stretch=False)
self.packagesView.heading("ver", text=_("Version"))
self.packagesView.column("status", width=50, anchor="w", stretch=False)
self.packagesView.heading("status", text=_("Status"))
self.packagesView.column("date", width=170, anchor="w", stretch=False)
self.packagesView.heading("date", text=_("File Date"))
self.packagesView.column("update", width=50, anchor="w", stretch=False)
self.packagesView.heading("update", text=_("Update"))
self.packagesView.column("descr", width=200, anchor="w", stretch=False)
self.packagesView.heading("descr", text=_("Description"))
remappingsFrame = Frame(frame)
vScrollbar = Scrollbar(remappingsFrame, orient=VERTICAL)
hScrollbar = Scrollbar(remappingsFrame, orient=HORIZONTAL)
self.remappingsView = Treeview(remappingsFrame, xscrollcommand=hScrollbar.set, yscrollcommand=vScrollbar.set, height=5)
self.remappingsView.grid(row=0, column=0, sticky=(N, S, E, W))
hScrollbar["command"] = self.remappingsView.xview
hScrollbar.grid(row=1, column=0, sticky=(E,W))
vScrollbar["command"] = self.remappingsView.yview
vScrollbar.grid(row=0, column=1, sticky=(N,S))
remappingsFrame.columnconfigure(0, weight=1)
remappingsFrame.rowconfigure(0, weight=1)
remappingsFrame.grid(row=1, column=1, columnspan=4, sticky=(N, S, E, W), padx=3, pady=3)
self.remappingsView.focus_set()
self.remappingsView.column("#0", width=200, anchor="w")
self.remappingsView.heading("#0", text=_("Prefix"))
        self.remappingsView["columns"] = ("remapping",)
self.remappingsView.column("remapping", width=500, anchor="w", stretch=False)
self.remappingsView.heading("remapping", text=_("Remapping"))
# bottom frame package info details
packageInfoFrame = Frame(frame, width=700)
packageInfoFrame.columnconfigure(1, weight=1)
self.packageNameLabel = Label(packageInfoFrame, wraplength=600, justify="left",
font=font.Font(family='Helvetica', size=12, weight='bold'))
self.packageNameLabel.grid(row=0, column=0, columnspan=6, sticky=W)
self.packageVersionHdr = Label(packageInfoFrame, text=_("version:"), state=DISABLED)
self.packageVersionHdr.grid(row=1, column=0, sticky=W)
self.packageVersionLabel = Label(packageInfoFrame, wraplength=600, justify="left")
self.packageVersionLabel.grid(row=1, column=1, columnspan=5, sticky=W)
self.packageDescrHdr = Label(packageInfoFrame, text=_("description:"), state=DISABLED)
self.packageDescrHdr.grid(row=2, column=0, sticky=W)
self.packageDescrLabel = Label(packageInfoFrame, wraplength=600, justify="left")
self.packageDescrLabel.grid(row=2, column=1, columnspan=5, sticky=W)
self.packagePrefixesHdr = Label(packageInfoFrame, text=_("prefixes:"), state=DISABLED)
self.packagePrefixesHdr.grid(row=3, column=0, sticky=W)
self.packagePrefixesLabel = Label(packageInfoFrame, wraplength=600, justify="left")
self.packagePrefixesLabel.grid(row=3, column=1, columnspan=5, sticky=W)
ToolTip(self.packagePrefixesLabel, text=_("List of prefixes that this package remaps."), wraplength=240)
self.packageUrlHdr = Label(packageInfoFrame, text=_("URL:"), state=DISABLED)
self.packageUrlHdr.grid(row=4, column=0, sticky=W)
self.packageUrlLabel = Label(packageInfoFrame, wraplength=600, justify="left")
self.packageUrlLabel.grid(row=4, column=1, columnspan=5, sticky=W)
ToolTip(self.packageUrlLabel, text=_("URL of taxonomy package (local file path or web loaded file)."), wraplength=240)
self.packageDateHdr = Label(packageInfoFrame, text=_("date:"), state=DISABLED)
self.packageDateHdr.grid(row=5, column=0, sticky=W)
self.packageDateLabel = Label(packageInfoFrame, wraplength=600, justify="left")
self.packageDateLabel.grid(row=5, column=1, columnspan=5, sticky=W)
        ToolTip(self.packageDateLabel, text=_("Date of currently loaded package file (with parenthetical note when an update is available)."), wraplength=240)
self.packageEnableButton = Button(packageInfoFrame, text=self.ENABLE, state=DISABLED, command=self.packageEnable)
ToolTip(self.packageEnableButton, text=_("Enable/disable package."), wraplength=240)
self.packageEnableButton.grid(row=6, column=1, sticky=E)
self.packageMoveUpButton = Button(packageInfoFrame, text=_("Move Up"), state=DISABLED, command=self.packageMoveUp)
ToolTip(self.packageMoveUpButton, text=_("Move package up (above other remappings)."), wraplength=240)
self.packageMoveUpButton.grid(row=6, column=2, sticky=E)
self.packageMoveDownButton = Button(packageInfoFrame, text=_("Move Down"), state=DISABLED, command=self.packageMoveDown)
ToolTip(self.packageMoveDownButton, text=_("Move package down (below other remappings)."), wraplength=240)
self.packageMoveDownButton.grid(row=6, column=3, sticky=E)
self.packageReloadButton = Button(packageInfoFrame, text=_("Reload"), state=DISABLED, command=self.packageReload)
ToolTip(self.packageReloadButton, text=_("Reload/update package."), wraplength=240)
self.packageReloadButton.grid(row=6, column=4, sticky=E)
self.packageRemoveButton = Button(packageInfoFrame, text=_("Remove"), state=DISABLED, command=self.packageRemove)
ToolTip(self.packageRemoveButton, text=_("Remove package from packages table (does not erase the package file)."), wraplength=240)
self.packageRemoveButton.grid(row=6, column=5, sticky=E)
packageInfoFrame.grid(row=2, column=0, columnspan=5, sticky=(N, S, E, W), padx=3, pady=3)
packageInfoFrame.config(borderwidth=4, relief="groove")
okButton = Button(frame, text=_("Close"), command=self.ok)
        ToolTip(okButton, text=_("Accept changes (if any) and close dialog."), wraplength=240)
cancelButton = Button(frame, text=_("Cancel"), command=self.close)
ToolTip(cancelButton, text=_("Cancel changes (if any) and close dialog."), wraplength=240)
okButton.grid(row=3, column=3, sticky=(S,E), pady=3)
cancelButton.grid(row=3, column=4, sticky=(S,E), pady=3, padx=3)
enableDisableFrame = Frame(frame)
enableDisableFrame.grid(row=3, column=1, sticky=(S,W), pady=3)
enableAllButton = Button(enableDisableFrame, text=_("Enable All"), command=self.enableAll)
ToolTip(enableAllButton, text=_("Enable all packages."), wraplength=240)
disableAllButton = Button(enableDisableFrame, text=_("Disable All"), command=self.disableAll)
ToolTip(disableAllButton, text=_("Disable all packages."), wraplength=240)
enableAllButton.grid(row=1, column=1)
disableAllButton.grid(row=1, column=2)
self.loadTreeViews()
self.geometry("+{0}+{1}".format(dialogX+50,dialogY+100))
frame.grid(row=0, column=0, sticky=(N,S,E,W))
frame.columnconfigure(0, weight=0)
frame.columnconfigure(1, weight=1)
frame.rowconfigure(0, weight=1)
window = self.winfo_toplevel()
window.columnconfigure(0, weight=1)
window.rowconfigure(0, weight=1)
self.bind("<Return>", self.ok)
self.bind("<Escape>", self.close)
self.protocol("WM_DELETE_WINDOW", self.close)
self.grab_set()
self.wait_window(self)
def loadTreeViews(self):
self.selectedModule = None
# clear previous treeview entries
for previousNode in self.packagesView.get_children(""):
self.packagesView.delete(previousNode)
for i, packageInfo in enumerate(self.packagesConfig.get("packages", [])):
name = packageInfo.get("name", "package{}".format(i))
node = self.packagesView.insert("", "end", "_{}".format(i), text=name)
self.packagesView.set(node, "ver", packageInfo.get("version"))
self.packagesView.set(node, "status", packageInfo.get("status"))
self.packagesView.set(node, "date", packageInfo.get("fileDate"))
if name in self.packageNamesWithNewerFileDates:
self.packagesView.set(node, "update", _("available"))
self.packagesView.set(node, "descr", packageInfo.get("description"))
# clear previous treeview entries
for previousNode in self.remappingsView.get_children(""):
self.remappingsView.delete(previousNode)
for i, remappingItem in enumerate(sorted(self.packagesConfig.get("remappings", {}).items())):
prefix, remapping = remappingItem
node = self.remappingsView.insert("", "end", prefix, text=prefix)
self.remappingsView.set(node, "remapping", remapping)
self.packageSelect() # clear out prior selection
def ok(self, event=None):
if self.packagesConfigChanged:
PackageManager.packagesConfig = self.packagesConfig
PackageManager.packagesConfigChanged = True
self.cntlr.onPackageEnablementChanged()
self.close()
def close(self, event=None):
self.parent.focus_set()
self.destroy()
def packageSelect(self, *args):
node = (self.packagesView.selection() or (None,))[0]
try:
nodeIndex = int(node[1:])
except (ValueError, TypeError):
nodeIndex = -1
if 0 <= nodeIndex < len(self.packagesConfig["packages"]):
packageInfo = self.packagesConfig["packages"][nodeIndex]
self.selectedPackageIndex = nodeIndex
name = packageInfo["name"]
self.packageNameLabel.config(text=name)
self.packageVersionHdr.config(state=ACTIVE)
self.packageVersionLabel.config(text=packageInfo["version"])
self.packageDescrHdr.config(state=ACTIVE)
self.packageDescrLabel.config(text=packageInfo["description"])
self.packagePrefixesHdr.config(state=ACTIVE)
self.packagePrefixesLabel.config(text=', '.join(packageInfo["remappings"].keys()))
self.packageUrlHdr.config(state=ACTIVE)
self.packageUrlLabel.config(text=packageInfo["URL"])
self.packageDateHdr.config(state=ACTIVE)
self.packageDateLabel.config(text=packageInfo["fileDate"] + " " +
(_("(an update is available)") if name in self.packageNamesWithNewerFileDates else ""))
self.packageEnableButton.config(state=ACTIVE,
text={"enabled":self.DISABLE,
"disabled":self.ENABLE}[packageInfo["status"]])
self.packageMoveUpButton.config(state=ACTIVE if 0 < nodeIndex else DISABLED)
self.packageMoveDownButton.config(state=ACTIVE if nodeIndex < (len(self.packagesConfig["packages"]) - 1) else DISABLED)
self.packageReloadButton.config(state=ACTIVE)
self.packageRemoveButton.config(state=ACTIVE)
else:
self.selectedPackageIndex = -1
self.packageNameLabel.config(text="")
self.packageVersionHdr.config(state=DISABLED)
self.packageVersionLabel.config(text="")
self.packageDescrHdr.config(state=DISABLED)
self.packageDescrLabel.config(text="")
self.packagePrefixesHdr.config(state=DISABLED)
self.packagePrefixesLabel.config(text="")
self.packageUrlHdr.config(state=DISABLED)
self.packageUrlLabel.config(text="")
self.packageDateHdr.config(state=DISABLED)
self.packageDateLabel.config(text="")
self.packageEnableButton.config(state=DISABLED, text=self.ENABLE)
self.packageMoveUpButton.config(state=DISABLED)
self.packageMoveDownButton.config(state=DISABLED)
self.packageReloadButton.config(state=DISABLED)
self.packageRemoveButton.config(state=DISABLED)
def findLocally(self):
initialdir = self.cntlr.pluginDir # default plugin directory
if not self.cntlr.isMac: # can't navigate within app easily, always start in default directory
initialdir = self.cntlr.config.setdefault("packageOpenDir", initialdir)
filename = self.cntlr.uiFileDialog("open",
parent=self,
title=_("Choose taxonomy package file"),
initialdir=initialdir,
filetypes=[(_("Taxonomy package files (*.zip)"), "*.zip"),
(_("PWD Manifest (taxonomyPackage.xml)"), "taxonomyPackage.xml"),
(_("pre-PWD Manifest (*.taxonomyPackage.xml)"), "*.taxonomyPackage.xml"),
(_("pre-PWD Oasis Catalog (*catalog.xml)"), "*catalog.xml")],
defaultextension=".zip")
if filename:
            # remember the directory of the chosen package file for next time
self.cntlr.config["packageOpenDir"] = os.path.dirname(filename)
packageInfo = PackageManager.packageInfo(self.cntlr, filename, packageManifestName=self.manifestNamePattern)
self.loadFoundPackageInfo(packageInfo, filename)
def findOnWeb(self):
url = DialogURL.askURL(self)
if url: # url is the in-cache or local file
packageInfo = PackageManager.packageInfo(self.cntlr, url, packageManifestName=self.manifestNamePattern)
self.cntlr.showStatus("") # clear web loading status
self.loadFoundPackageInfo(packageInfo, url)
def manifestName(self):
self.manifestNamePattern = simpledialog.askstring(_("Archive manifest file name pattern"),
_("Provide non-standard archive manifest file name pattern (e.g., *taxonomyPackage.xml). \n"
"Uses unix file name pattern matching. \n"
"Multiple manifest files are supported in archive (such as oasis catalogs). \n"
"(If blank, search for either .taxonomyPackage.xml or catalog.xml). "),
initialvalue=self.manifestNamePattern,
parent=self)
def loadFoundPackageInfo(self, packageInfo, url):
if packageInfo and packageInfo.get("name"):
self.addPackageInfo(packageInfo)
self.loadTreeViews()
else:
messagebox.showwarning(_("Package is not itself a taxonomy package. "),
                                   _("File does not itself contain a manifest file: \n\n{0}\n\n "
                                     "If opening an archive file, the manifest file search pattern currently is \"\", please press \"Manifest\" to change manifest file name pattern, e.g., \"*.taxonomyPackage.xml\", if needed. ")
.format(url),
parent=self)
def removePackageInfo(self, name, version):
# find package entry
packagesList = self.packagesConfig["packages"]
j = -1
for i, packageInfo in enumerate(packagesList):
if packageInfo['name'] == name and packageInfo['version'] == version:
j = i
break
if 0 <= j < len(packagesList):
            del self.packagesConfig["packages"][j]
self.packagesConfigChanged = True
def addPackageInfo(self, packageInfo):
name = packageInfo["name"]
version = packageInfo["version"]
self.removePackageInfo(name, version) # remove any prior entry for this package
self.packageNamesWithNewerFileDates.discard(name) # no longer has an update available
self.packagesConfig["packages"].append(packageInfo)
PackageManager.rebuildRemappings(self.cntlr)
self.packagesConfigChanged = True
def packageEnable(self):
if 0 <= self.selectedPackageIndex < len(self.packagesConfig["packages"]):
packageInfo = self.packagesConfig["packages"][self.selectedPackageIndex]
if self.packageEnableButton['text'] == self.ENABLE:
packageInfo["status"] = "enabled"
self.packageEnableButton['text'] = self.DISABLE
elif self.packageEnableButton['text'] == self.DISABLE:
packageInfo["status"] = "disabled"
self.packageEnableButton['text'] = self.ENABLE
self.packagesConfigChanged = True
PackageManager.rebuildRemappings(self.cntlr)
self.loadTreeViews()
def packageMoveUp(self):
if 1 <= self.selectedPackageIndex < len(self.packagesConfig["packages"]):
packages = self.packagesConfig["packages"]
packageInfo = packages[self.selectedPackageIndex]
del packages[self.selectedPackageIndex]
packages.insert(self.selectedPackageIndex -1, packageInfo)
self.packagesConfigChanged = True
PackageManager.rebuildRemappings(self.cntlr)
self.loadTreeViews()
def packageMoveDown(self):
if 0 <= self.selectedPackageIndex < len(self.packagesConfig["packages"]) - 1:
packages = self.packagesConfig["packages"]
packageInfo = packages[self.selectedPackageIndex]
del packages[self.selectedPackageIndex]
packages.insert(self.selectedPackageIndex + 1, packageInfo)
self.packagesConfigChanged = True
PackageManager.rebuildRemappings(self.cntlr)
self.loadTreeViews()
def packageReload(self):
if 0 <= self.selectedPackageIndex < len(self.packagesConfig["packages"]):
packageInfo = self.packagesConfig["packages"][self.selectedPackageIndex]
url = packageInfo.get("URL")
if url:
packageInfo = PackageManager.packageInfo(self.cntlr, url, reload=True, packageManifestName=packageInfo.get("manifestName"))
if packageInfo:
self.addPackageInfo(packageInfo)
PackageManager.rebuildRemappings(self.cntlr)
self.loadTreeViews()
self.cntlr.showStatus(_("{0} reloaded").format(packageInfo.get("name")), clearAfter=5000)
else:
messagebox.showwarning(_("Package error"),
_("File or package cannot be reloaded: \n\n{0}")
.format(url),
parent=self)
def packageRemove(self):
if 0 <= self.selectedPackageIndex < len(self.packagesConfig["packages"]):
packageInfo = self.packagesConfig["packages"][self.selectedPackageIndex]
self.removePackageInfo(packageInfo["name"], packageInfo["version"])
self.packagesConfigChanged = True
PackageManager.rebuildRemappings(self.cntlr)
self.loadTreeViews()
def enableAll(self):
self.enableDisableAll(True)
def disableAll(self):
self.enableDisableAll(False)
def enableDisableAll(self, doEnable):
for iPkg in range(len(self.packagesConfig["packages"])):
packageInfo = self.packagesConfig["packages"][iPkg]
if doEnable:
packageInfo["status"] = "enabled"
self.packageEnableButton['text'] = self.DISABLE
else:
packageInfo["status"] = "disabled"
self.packageEnableButton['text'] = self.ENABLE
self.packagesConfigChanged = True
PackageManager.rebuildRemappings(self.cntlr)
self.loadTreeViews()
|
the-stack_0_651 | #!/usr/bin/env python
# from galaxy import eggs
import sys
import rpy2.rinterface as ri
import rpy2.rlike.container as rlc
# from rpy import *
import rpy2.robjects as robjects
r = robjects.r
def stop_err(msg):
sys.stderr.write(msg)
sys.exit()
infile = sys.argv[1]
y_col = int(sys.argv[2]) - 1
x_cols = sys.argv[3].split(",")
outfile = sys.argv[4]
print("Predictor columns: %s; Response column: %d" % (x_cols, y_col + 1))
fout = open(outfile, "w")
elems = []
for i, line in enumerate(open(infile)):
line = line.rstrip("\r\n")
if len(line) > 0 and not line.startswith("#"):
elems = line.split("\t")
break
if i == 30:
break # Hopefully we'll never get here...
if len(elems) < 1:
stop_err(
"The data in your input dataset is either missing or not formatted properly."
)
y_vals = []
x_vals = []
x_vector = []
for k, col in enumerate(x_cols):
x_cols[k] = int(col) - 1
x_vals.append([])
NA = "NA"
for ind, line in enumerate(open(infile)):
if line and not line.startswith("#"):
try:
fields = line.split("\t")
try:
yval = float(fields[y_col])
except Exception:
yval = r("NA")
y_vals.append(yval)
for k, col in enumerate(x_cols):
try:
xval = float(fields[col])
except Exception:
xval = r("NA")
x_vals[k].append(xval)
x_vector.append(xval)
except Exception as e:
print(e)
# x_vals1 = numpy.asarray(x_vals).transpose()
check1 = 0
check0 = 0
for i in y_vals:
if i == 1:
check1 = 1
if i == 0:
check0 = 1
if check1 == 0 or check0 == 0:
sys.exit("Warning: logistic regression must have at least two classes")
for i in y_vals:
if i not in [1, 0, r("NA")]:
print(str(i), file=fout)
        sys.exit(
            "Warning: the current version of this tool can run only with two classes, and they need to be labeled as 0 and 1."
        )
# dat= r.list(x=array(x_vals1), y=y_vals)
novif = 0
# set_default_mode(NO_CONVERSION)
# try:
# linear_model = r.glm(r("y ~ x"), data = r.na_exclude(dat),family="binomial")
# #r('library(car)')
# #r.assign('dat',dat)
# #r.assign('ncols',len(x_cols))
# #r.vif(r('glm(dat$y ~ ., data = na.exclude(data.frame(as.matrix(dat$x,ncol=ncols))->datx),family="binomial")')).as_py()
#
# except Exception as rex:
# stop_err("Error performing logistic regression on the input data.\nEither the response column or one of the predictor columns contain only non-numeric or invalid values.")
fv = robjects.FloatVector(x_vector)
m = r["matrix"](fv, ncol=len(x_cols), byrow=True)
# ensure order for generating formula
od = rlc.OrdDict([("y", robjects.FloatVector(y_vals)), ("x", m)])
dat = robjects.DataFrame(od)
# convert dat.names: ["y","x.1","x.2"] to formula string: 'y ~ x.1 + x.2'
formula = " + ".join(dat.names).replace("+", "~", 1)
print(formula)
try:
linear_model = r.glm(formula, data=r["na.exclude"](dat), family="binomial")
except Exception:
    stop_err(
        "Error performing logistic regression on the input data.\nEither the response column or one of the predictor columns contain only non-numeric or invalid values."
    )
if len(x_cols) > 1:
try:
r("library(car)")
r.assign("dat", dat)
r.assign("ncols", len(x_cols))
# vif=r.vif(r('glm(dat$y ~ ., data = na.exclude(data.frame(as.matrix(dat$x,ncol=ncols))->datx),family="binomial")'))
od2 = rlc.OrdDict([("datx", m)])
glm_data_frame = robjects.DataFrame(od2)
glm_result = r.glm(
"dat$y ~ .", data=r["na.exclude"](glm_data_frame), family="binomial"
)
print("Have glm")
vif = r.vif(glm_result)
except Exception as rex:
print(rex)
else:
novif = 1
# set_default_mode(BASIC_CONVERSION)
# coeffs=linear_model.as_py()['coefficients']
coeffs = linear_model.rx2("coefficients")
# null_deviance=linear_model.as_py()['null.deviance']
null_deviance = linear_model.rx2("null.deviance")[0]
# residual_deviance=linear_model.as_py()['deviance']
residual_deviance = linear_model.rx2("deviance")[0]
# yintercept= coeffs['(Intercept)']
yintercept = coeffs.rx2("(Intercept)")[0]
summary = r.summary(linear_model)
# co = summary.get('coefficients', 'NA')
co = summary.rx2("coefficients")
print(co)
"""
if len(co) != len(x_vals)+1:
stop_err("Stopped performing logistic regression on the input data, since one of the predictor columns contains only non-numeric or invalid values.")
"""
try:
yintercept = r.round(float(yintercept), digits=10)[0]
# pvaly = r.round(float(co[0][3]), digits=10)
pvaly = r.round(float(co.rx(1, 4)[0]), digits=10)[0]
except Exception as e:
print(str(e))
print("response column\tc%d" % (y_col + 1), file=fout)
tempP = []
for i in x_cols:
tempP.append("c" + str(i + 1))
tempP = ",".join(tempP)
print("predictor column(s)\t%s" % (tempP), file=fout)
print("Y-intercept\t%s" % (yintercept), file=fout)
print("p-value (Y-intercept)\t%s" % (pvaly), file=fout)
print(coeffs)
if len(x_vals) == 1: # Simple linear regression case with 1 predictor variable
try:
# slope = r.round(float(coeffs['x']), digits=10)
raw_slope = coeffs.rx2("x")[0]
slope = r.round(float(raw_slope), digits=10)[0]
except Exception:
slope = "NA"
try:
# pval = r.round(float(co[1][3]), digits=10)
pval = r.round(float(co.rx2(2, 4)[0]), digits=10)[0]
except Exception:
pval = "NA"
print("Slope (c%d)\t%s" % (x_cols[0] + 1, slope), file=fout)
print("p-value (c%d)\t%s" % (x_cols[0] + 1, pval), file=fout)
else: # Multiple regression case with >1 predictors
ind = 1
# while ind < len(coeffs.keys()):
print(len(coeffs.names))
while ind < len(coeffs.names):
try:
# slope = r.round(float(coeffs['x'+str(ind)]), digits=10)
raw_slope = coeffs.rx2("x." + str(ind))[0]
slope = r.round(float(raw_slope), digits=10)[0]
except Exception:
slope = "NA"
print("Slope (c%d)\t%s" % (x_cols[ind - 1] + 1, slope), file=fout)
try:
# pval = r.round(float(co[ind][3]), digits=10)
pval = r.round(float(co.rx2(ind + 1, 4)[0]), digits=10)[0]
except Exception:
pval = "NA"
print("p-value (c%d)\t%s" % (x_cols[ind - 1] + 1, pval), file=fout)
ind += 1
# rsq = summary.get('r.squared','NA')
rsq = summary.rx2("r.squared")
if rsq == ri.RNULLType():
rsq = "NA"
else:
rsq = rsq[0]
try:
# rsq= r.round(float((null_deviance-residual_deviance)/null_deviance), digits=5)
rsq = r.round(float((null_deviance - residual_deviance) / null_deviance), digits=5)[
0
]
# null_deviance= r.round(float(null_deviance), digits=5)
null_deviance = r.round(float(null_deviance), digits=5)[0]
# residual_deviance= r.round(float(residual_deviance), digits=5)
residual_deviance = r.round(float(residual_deviance), digits=5)[0]
except Exception:
pass
print("Null deviance\t%s" % (null_deviance), file=fout)
print("Residual deviance\t%s" % (residual_deviance), file=fout)
print("pseudo R-squared\t%s" % (rsq), file=fout)
print("\n", file=fout)
print("vif", file=fout)
if novif == 0:
# py_vif=vif.as_py()
count = 0
for i in sorted(vif.names):
print("c" + str(x_cols[count] + 1), str(vif.rx2(i)[0]), file=fout)
count += 1
elif novif == 1:
print("vif can calculate only when model have more than 1 predictor", file=fout)
|
the-stack_0_653 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class StorageAccountItem(Model):
"""The storage account item containing storage account metadata.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Storage identifier.
:vartype id: str
:ivar resource_id: Storage account resource Id.
:vartype resource_id: str
:ivar attributes: The storage account management attributes.
:vartype attributes: ~azure.keyvault.models.StorageAccountAttributes
:ivar tags: Application specific metadata in the form of key-value pairs.
:vartype tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'resource_id': {'readonly': True},
'attributes': {'readonly': True},
'tags': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'attributes': {'key': 'attributes', 'type': 'StorageAccountAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, **kwargs):
super(StorageAccountItem, self).__init__(**kwargs)
self.id = None
self.resource_id = None
self.attributes = None
self.tags = None
|
the-stack_0_654 | #!/usr/bin/python3
"""
Copyright 2018-2019 Firmin.Sun ([email protected])
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# -----------------------------------------------------
# @Time : 11/8/2018 4:54 PM
# @Author : Firmin.Sun ([email protected])
# @Software: ZJ_AI
# -----------------------------------------------------
# -*- coding: utf-8 -*-
import keras
import numpy as np
import cv2
from PIL import Image
def read_image_bgr(path):
'''
:param path:
:return: (h, w, 3)
'''
    try:
        image = np.asarray(Image.open(path).convert('RGB'))
    except Exception:
        # log the offending path, then re-raise so we never fall through to an
        # undefined `image` below
        print(path)
        raise
    return image[:, :, ::-1].copy()
def preprocess_image(x):
# mostly identical to "https://github.com/fchollet/keras/blob/master/keras/applications/imagenet_utils.py"
# except for converting RGB -> BGR since we assume BGR already
x = x.astype(keras.backend.floatx())
if keras.backend.image_data_format() == 'channels_first':
if x.ndim == 3:
x[0, :, :] -= 103.939
x[1, :, :] -= 116.779
x[2, :, :] -= 123.68
else:
x[:, 0, :, :] -= 103.939
x[:, 1, :, :] -= 116.779
x[:, 2, :, :] -= 123.68
else:
x[..., 0] -= 103.939
x[..., 1] -= 116.779
x[..., 2] -= 123.68
return x
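# --- Editor-added usage sketch (not part of the original module) ---
# A hedged example of the per-channel mean subtraction above; the helper name
# is hypothetical and the module never calls it. Assumes the Keras default
# 'channels_last' data format.
def _preprocess_image_example():
    dummy = np.zeros((4, 4, 3), dtype=np.float32)   # stand-in BGR image
    out = preprocess_image(dummy)
    # first pixel becomes roughly [-103.939, -116.779, -123.68]
    return out[0, 0]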
def resize_image(image, min_side=448, max_side=448):
    '''
    resize image to a fixed (min_side, max_side) size
    :param image: input (h, w, 3) = (rows, cols, 3)
    :param min_side: target size for the first output axis (448 by default)
    :param max_side: target size for the second output axis (448 by default)
    :return: resized image (min_side, max_side, 3) and the scale factors relative to (h, w)
    '''
(h, w, _) = image.shape
scale = np.asarray((min_side, max_side),dtype=float) / np.asarray((h, w),dtype=float)
# resize the image with the computed scale
# cv2.resize(image, (w, h))
img = cv2.resize(image, (min_side, max_side))
return img, scale
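# --- Editor-added usage sketch (not part of the original module) ---
# Hedged illustration of resize_image; the helper name is hypothetical and is
# never called here.
def _resize_image_example():
    frame = np.zeros((224, 320, 3), dtype=np.uint8)        # dummy (h, w, 3) image
    resized, scale = resize_image(frame, min_side=448, max_side=448)
    # resized.shape == (448, 448, 3); scale == [448/224, 448/320] == [2.0, 1.4]
    return resized.shape, scale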
|
the-stack_0_655 | import os
import unittest
import pytest
from nose.plugins.attrib import attr
from conans.test.assets.multi_config import multi_config_files
from conans.test.utils.tools import TestClient
@attr("slow")
@pytest.mark.slow
@pytest.mark.tool_cmake
class CMakeConfigsTest(unittest.TestCase):
def test_test_package_configs(self):
client = TestClient()
name = "Hello0"
files = multi_config_files(name, test=True)
client.save(files, clean_first=True)
client.run("create . user/testing")
self.assertIn("Hello Release Hello0", client.out)
self.assertIn("Hello Debug Hello0", client.out)
def test_cmake_multi(self):
client = TestClient()
deps = None
for name in ["Hello0", "Hello1", "Hello2"]:
files = multi_config_files(name, test=False, deps=deps)
client.save(files, clean_first=True)
deps = [name]
if name != "Hello2":
client.run("export . lasote/stable")
client.run('install . --build missing')
client.run("build .")
cmd = os.sep.join([".", "bin", "say_hello"])
client.run_command(cmd)
self.assertIn("Hello Release Hello2 Hello Release Hello1 Hello Release Hello0",
" ".join(str(client.out).splitlines()))
client.run_command(cmd + "_d")
self.assertIn("Hello Debug Hello2 Hello Debug Hello1 Hello Debug Hello0",
" ".join(str(client.out).splitlines()))
|
the-stack_0_656 | """
Defines CPU Options for use in the CPU target
"""
class FastMathOptions(object):
"""
Options for controlling fast math optimization.
"""
def __init__(self, value):
# https://releases.llvm.org/7.0.0/docs/LangRef.html#fast-math-flags
valid_flags = {
'fast',
'nnan', 'ninf', 'nsz', 'arcp',
'contract', 'afn', 'reassoc',
}
if isinstance(value, FastMathOptions):
self.flags = value.flags.copy()
elif value is True:
self.flags = {'fast'}
elif value is False:
self.flags = set()
elif isinstance(value, set):
invalid = value - valid_flags
if invalid:
raise ValueError("Unrecognized fastmath flags: %s" % invalid)
self.flags = value
elif isinstance(value, dict):
invalid = set(value.keys()) - valid_flags
if invalid:
raise ValueError("Unrecognized fastmath flags: %s" % invalid)
self.flags = {v for v, enable in value.items() if enable}
else:
msg = "Expected fastmath option(s) to be either a bool, dict or set"
raise ValueError(msg)
def __bool__(self):
return bool(self.flags)
__nonzero__ = __bool__
def __repr__(self):
return f"FastMathOptions({self.flags})"
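# --- Editor-added usage sketch (not part of the original module) ---
# Hedged example of constructing FastMathOptions from a subset of the valid
# LLVM flags; the helper name is hypothetical and is never called here.
def _fastmath_options_example():
    opts = FastMathOptions({'nnan', 'ninf'})
    # truthy because at least one flag is enabled
    return bool(opts), sorted(opts.flags)   # (True, ['ninf', 'nnan'])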
class ParallelOptions(object):
"""
Options for controlling auto parallelization.
"""
def __init__(self, value):
if isinstance(value, bool):
self.enabled = value
self.comprehension = value
self.reduction = value
self.inplace_binop = value
self.setitem = value
self.numpy = value
self.stencil = value
self.fusion = value
self.prange = value
elif isinstance(value, dict):
self.enabled = True
self.comprehension = value.pop('comprehension', True)
self.reduction = value.pop('reduction', True)
self.inplace_binop = value.pop('inplace_binop', True)
self.setitem = value.pop('setitem', True)
self.numpy = value.pop('numpy', True)
self.stencil = value.pop('stencil', True)
self.fusion = value.pop('fusion', True)
self.prange = value.pop('prange', True)
if value:
msg = "Unrecognized parallel options: %s" % value.keys()
raise NameError(msg)
elif isinstance(value, ParallelOptions):
self.enabled = value.enabled
self.comprehension = value.comprehension
self.reduction = value.reduction
self.inplace_binop = value.inplace_binop
self.setitem = value.setitem
self.numpy = value.numpy
self.stencil = value.stencil
self.fusion = value.fusion
self.prange = value.prange
else:
msg = "Expect parallel option to be either a bool or a dict"
raise ValueError(msg)
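# --- Editor-added usage sketch (not part of the original module) ---
# Hedged example of the dict form accepted by ParallelOptions; the helper name
# is hypothetical and is never called here.
def _parallel_options_example():
    opts = ParallelOptions({'fusion': False, 'prange': True})
    # unspecified sub-options default to True
    return opts.enabled, opts.fusion, opts.numpy   # (True, False, True)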
class InlineOptions(object):
"""
Options for controlling inlining
"""
def __init__(self, value):
ok = False
if isinstance(value, str):
if value in ('always', 'never'):
ok = True
else:
ok = hasattr(value, '__call__')
if ok:
self._inline = value
else:
msg = ("kwarg 'inline' must be one of the strings 'always' or "
"'never', or it can be a callable that returns True/False. "
"Found value %s" % value)
raise ValueError(msg)
@property
def is_never_inline(self):
"""
True if never inline
"""
return self._inline == 'never'
@property
def is_always_inline(self):
"""
True if always inline
"""
return self._inline == 'always'
@property
def has_cost_model(self):
"""
True if a cost model is provided
"""
return not (self.is_always_inline or self.is_never_inline)
@property
def value(self):
"""
The raw value
"""
return self._inline
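# --- Editor-added usage sketch (not part of the original module) ---
# Hedged example of InlineOptions; the helper name is hypothetical and is
# never called here.
def _inline_options_example():
    always = InlineOptions('always')
    custom = InlineOptions(lambda *args: True)   # any callable acts as a cost model
    return always.is_always_inline, always.has_cost_model, custom.has_cost_model   # (True, False, True)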
|
the-stack_0_657 | import math
import numpy as np
import torch
from scipy.spatial import cKDTree
def setup_seed(seed):
torch.backends.cudnn.deterministic = True
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
def square_dists(points1, points2):
'''
Calculate square dists between two group points
:param points1: shape=(B, N, C)
:param points2: shape=(B, M, C)
:return:
'''
B, N, C = points1.shape
_, M, _ = points2.shape
dists = torch.sum(torch.pow(points1, 2), dim=-1).view(B, N, 1) + \
torch.sum(torch.pow(points2, 2), dim=-1).view(B, 1, M)
dists -= 2 * torch.matmul(points1, points2.permute(0, 2, 1))
#dists = torch.where(dists < 0, torch.ones_like(dists) * 1e-7, dists) # Very Important for dist = 0.
return dists.float()
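# --- Editor-added usage sketch (not part of the original module) ---
# Hedged example of square_dists on two random point batches; the helper name
# is hypothetical and is never called here.
def _square_dists_example():
    p1 = torch.rand(2, 8, 3)                 # (B, N, C)
    p2 = torch.rand(2, 5, 3)                 # (B, M, C)
    d = square_dists(p1, p2)                 # (B, N, M) pairwise squared distances
    return d.shape                           # torch.Size([2, 8, 5])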
def random_select_points(pc, m):
if m < 0:
idx = np.arange(pc.shape[0])
np.random.shuffle(idx)
return pc[idx, :]
n = pc.shape[0]
replace = False if n >= m else True
idx = np.random.choice(n, size=(m, ), replace=replace)
return pc[idx, :]
def generate_rotation_x_matrix(theta):
mat = np.eye(3, dtype=np.float32)
mat[1, 1] = math.cos(theta)
mat[1, 2] = -math.sin(theta)
mat[2, 1] = math.sin(theta)
mat[2, 2] = math.cos(theta)
return mat
def generate_rotation_y_matrix(theta):
mat = np.eye(3, dtype=np.float32)
mat[0, 0] = math.cos(theta)
mat[0, 2] = math.sin(theta)
mat[2, 0] = -math.sin(theta)
mat[2, 2] = math.cos(theta)
return mat
def generate_rotation_z_matrix(theta):
mat = np.eye(3, dtype=np.float32)
mat[0, 0] = math.cos(theta)
mat[0, 1] = -math.sin(theta)
mat[1, 0] = math.sin(theta)
mat[1, 1] = math.cos(theta)
return mat
def generate_random_rotation_matrix(angle1=-45, angle2=45):
thetax = np.random.uniform() * np.pi * angle2 / 180.0
thetay = np.random.uniform() * np.pi * angle2 / 180.0
thetaz = np.random.uniform() * np.pi * angle2 / 180.0
matx = generate_rotation_x_matrix(thetax)
maty = generate_rotation_y_matrix(thetay)
matz = generate_rotation_z_matrix(thetaz)
return np.dot(matx, np.dot(maty, matz))
def generate_random_tranlation_vector(range1=-0.5, range2=0.5):
tranlation_vector = np.random.uniform(range1, range2, size=(3, )).astype(np.float32)
return tranlation_vector
def transform(pc, R, t=None):
pc = np.dot(pc, R.T)
if t is not None:
pc = pc + t
return pc
def batch_transform(batch_pc, batch_R, batch_t=None):
'''
:param batch_pc: shape=(B, N, 3)
:param batch_R: shape=(B, 3, 3)
:param batch_t: shape=(B, 3)
:return: shape(B, N, 3)
'''
transformed_pc = torch.matmul(batch_pc, batch_R.permute(0, 2, 1).contiguous())
if batch_t is not None:
transformed_pc = transformed_pc + torch.unsqueeze(batch_t, 1)
return transformed_pc
# The transformation between unit quaternion and rotation matrix is referenced to
# https://zhuanlan.zhihu.com/p/45404840
def quat2mat(quat):
w, x, y, z = quat
R = np.zeros((3, 3), dtype=np.float32)
R[0][0] = 1 - 2*y*y - 2*z*z
R[0][1] = 2*x*y - 2*z*w
R[0][2] = 2*x*z + 2*y*w
R[1][0] = 2*x*y + 2*z*w
R[1][1] = 1 - 2*x*x - 2*z*z
R[1][2] = 2*y*z - 2*x*w
R[2][0] = 2*x*z - 2*y*w
R[2][1] = 2*y*z + 2*x*w
R[2][2] = 1 - 2*x*x - 2*y*y
return R
def batch_quat2mat(batch_quat):
'''
:param batch_quat: shape=(B, 4)
:return:
'''
w, x, y, z = batch_quat[:, 0], batch_quat[:, 1], batch_quat[:, 2], \
batch_quat[:, 3]
device = batch_quat.device
B = batch_quat.size()[0]
R = torch.zeros(dtype=torch.float, size=(B, 3, 3)).to(device)
R[:, 0, 0] = 1 - 2 * y * y - 2 * z * z
R[:, 0, 1] = 2 * x * y - 2 * z * w
R[:, 0, 2] = 2 * x * z + 2 * y * w
R[:, 1, 0] = 2 * x * y + 2 * z * w
R[:, 1, 1] = 1 - 2 * x * x - 2 * z * z
R[:, 1, 2] = 2 * y * z - 2 * x * w
R[:, 2, 0] = 2 * x * z - 2 * y * w
R[:, 2, 1] = 2 * y * z + 2 * x * w
R[:, 2, 2] = 1 - 2 * x * x - 2 * y * y
return R
def mat2quat(mat):
w = math.sqrt(mat[0, 0] + mat[1, 1] + mat[2, 2] + 1 + 1e-8) / 2
x = (mat[2, 1] - mat[1, 2]) / (4 * w + 1e-8)
y = (mat[0, 2] - mat[2, 0]) / (4 * w + 1e-8)
z = (mat[1, 0] - mat[0, 1]) / (4 * w + 1e-8)
return w, x, y, z
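# --- Editor-added usage sketch (not part of the original module) ---
# Hedged round-trip check for the quaternion helpers above; the helper name is
# hypothetical and is never called here. Holds for rotations away from the
# 180-degree singularity, which the random generator above stays clear of.
def _quaternion_roundtrip_example():
    R = generate_random_rotation_matrix()
    R2 = quat2mat(mat2quat(R))
    return np.allclose(R, R2, atol=1e-5)     # True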
def jitter_point_cloud(pc, sigma=0.01, clip=0.05):
N, C = pc.shape
assert(clip > 0)
#jittered_data = np.clip(sigma * np.random.randn(N, C), -1*clip, clip).astype(np.float32)
jittered_data = np.clip(
np.random.normal(0.0, scale=sigma, size=(N, 3)),
-1 * clip, clip).astype(np.float32)
jittered_data += pc
return jittered_data
def shift_point_cloud(pc, shift_range=0.1):
N, C = pc.shape
shifts = np.random.uniform(-shift_range, shift_range, (1, C)).astype(np.float32)
pc += shifts
return pc
def random_scale_point_cloud(pc, scale_low=0.8, scale_high=1.25):
scale = np.random.uniform(scale_low, scale_high, 1)
pc *= scale
return pc
def inv_R_t(R, t):
inv_R = R.permute(0, 2, 1).contiguous()
inv_t = - inv_R @ t[..., None]
return inv_R, torch.squeeze(inv_t, -1)
def uniform_2_sphere(num: int = None):
"""Uniform sampling on a 2-sphere
Source: https://gist.github.com/andrewbolster/10274979
Args:
num: Number of vectors to sample (or None if single)
Returns:
Random Vector (np.ndarray) of size (num, 3) with norm 1.
If num is None returned value will have size (3,)
"""
if num is not None:
phi = np.random.uniform(0.0, 2 * np.pi, num)
cos_theta = np.random.uniform(-1.0, 1.0, num)
else:
phi = np.random.uniform(0.0, 2 * np.pi)
cos_theta = np.random.uniform(-1.0, 1.0)
theta = np.arccos(cos_theta)
x = np.sin(theta) * np.cos(phi)
y = np.sin(theta) * np.sin(phi)
z = np.cos(theta)
return np.stack((x, y, z), axis=-1)
def random_crop(pc, p_keep):
rand_xyz = uniform_2_sphere()
centroid = np.mean(pc[:, :3], axis=0)
pc_centered = pc[:, :3] - centroid
dist_from_plane = np.dot(pc_centered, rand_xyz)
mask = dist_from_plane > np.percentile(dist_from_plane, (1.0 - p_keep) * 100)
return pc[mask, :]
def shuffle_pc(pc):
return np.random.permutation(pc)
def flip_pc(pc, r=0.5):
if np.random.random() > r:
pc[:, 1] = -1 * pc[:, 1]
return pc
def angle(v1: torch.Tensor, v2: torch.Tensor):
"""Compute angle between 2 vectors
For robustness, we use the same formulation as in PPFNet, i.e.
angle(v1, v2) = atan2(cross(v1, v2), dot(v1, v2)).
This handles the case where one of the vectors is 0.0, since torch.atan2(0.0, 0.0)=0.0
Args:
v1: (B, *, 3)
v2: (B, *, 3)
Returns:
"""
cross_prod = torch.stack([v1[..., 1] * v2[..., 2] - v1[..., 2] * v2[..., 1],
v1[..., 2] * v2[..., 0] - v1[..., 0] * v2[..., 2],
v1[..., 0] * v2[..., 1] - v1[..., 1] * v2[..., 0]], dim=-1)
cross_prod_norm = torch.norm(cross_prod, dim=-1)
dot_prod = torch.sum(v1 * v2, dim=-1)
return torch.atan2(cross_prod_norm, dot_prod)
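# --- Editor-added usage sketch (not part of the original module) ---
# Hedged example of the atan2-based angle helper; the helper name is
# hypothetical and is never called here.
def _angle_example():
    v1 = torch.tensor([[1.0, 0.0, 0.0]])
    v2 = torch.tensor([[0.0, 1.0, 0.0]])
    return angle(v1, v2)                     # tensor([1.5708]) ~= pi / 2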
|
the-stack_0_659 | import os
import glob
# Our numerical workhorses
import numpy as np
import pandas as pd
import scipy.special
# Import the project utils
import sys
sys.path.insert(0, '../')
import image_analysis_utils as im_utils
# Useful plotting libraries
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib as mpl
import seaborn as sns
# Image analysis libraries
import skimage.io
import skimage.filters
import skimage.segmentation
import scipy.ndimage
# Set plotting style
im_utils.set_plotting_style()
#==============================================================================
# METADATA
#==============================================================================
DATE = 20161118
USERNAME = 'mrazomej'
OPERATOR = 'O2'
BINDING_ENERGY = -13.9
REPRESSORS = (0, 0, 130)
IPDIST = 0.160 # in units of µm per pixel
STRAINS = ['auto', 'delta', 'RBS1027']
IPTG_RANGE = (0, 0.1, 5, 10, 25, 50, 75, 100, 250, 500, 1000, 5000)
#==============================================================================
# Define the data directory.
data_dir = '../../../data/microscopy/' + str(DATE) + '/'
# Glob the profile and noise images.
yfp_glob = glob.glob(data_dir + '*yfp_profile*/*.tif')
rfp_glob = glob.glob(data_dir + '*mCherry_profile*/*.tif')
noise_glob = glob.glob(data_dir + '*noise*/*.tif')
# Load the images as collections
yfp_profile = skimage.io.ImageCollection(yfp_glob)
rfp_profile = skimage.io.ImageCollection(rfp_glob)
noise_profile = skimage.io.ImageCollection(noise_glob)
# Need to split the noise profile image into the two channels
noise_rfp = [noise_profile[i][0] for i, _ in enumerate(noise_profile)]
noise_yfp = [noise_profile[i][1] for i, _ in enumerate(noise_profile)]
# Generate averages and plot them.
rfp_avg = im_utils.average_stack(rfp_profile)
yfp_avg = im_utils.average_stack(yfp_profile)
rfp_noise = im_utils.average_stack(noise_rfp)
yfp_noise = im_utils.average_stack(noise_yfp)
with sns.axes_style('white'):
fig, ax = plt.subplots(2, 2, figsize=(6,6))
ax = ax.ravel()
ax[0].imshow(yfp_avg, cmap=plt.cm.viridis)
ax[0].set_title('yfp profile')
ax[1].imshow(rfp_avg, cmap=plt.cm.plasma)
ax[1].set_title('rfp profile')
ax[2].imshow(yfp_noise, cmap=plt.cm.Greens_r)
ax[2].set_title('yfp noise')
ax[3].imshow(rfp_noise, cmap=plt.cm.Reds_r)
ax[3].set_title('rfp noise')
plt.tight_layout()
plt.savefig('./outdir/background_correction.png')
#==============================================================================
# Iterate through each strain and concentration to make the dataframes.
dfs = []
# Select random IPTG and random strain to print the example segmentation
ex_iptg = np.random.choice(IPTG_RANGE)
ex_strain = STRAINS[-1]
for i, st in enumerate(STRAINS):
print(st)
for j, iptg in enumerate(IPTG_RANGE):
# Load the images
if (iptg==0) & (st != STRAINS[-1]):
images = glob.glob(data_dir + '*' + st + '_*/*.tif')
else:
images = glob.glob(data_dir + '*' + st + '*_' + str(iptg) +
'uMIPTG*/*.ome.tif')
        if len(images) != 0:
ims = skimage.io.ImageCollection(images)
# Select random image to print example segmentation
ex_no = np.random.choice(np.arange(0, len(images) - 1))
for z, x in enumerate(ims):
_, m, y = im_utils.ome_split(x)
y_flat = im_utils.generate_flatfield(y, yfp_noise, yfp_avg)
# Segment the mCherry channel.
m_seg = im_utils.log_segmentation(m, label=True)
# Print example segmentation for the random image
if (st==ex_strain) & (iptg == ex_iptg) & (z == ex_no):
merge = im_utils.example_segmentation(m_seg, _, 10/IPDIST)
skimage.io.imsave('./outdir/example_segmentation.png', merge)
# Extract the measurements.
im_df = im_utils.props_to_df(m_seg, physical_distance=IPDIST,
intensity_image=y_flat)
# Add strain and IPTG concentration information.
im_df.insert(0, 'IPTG_uM', iptg)
im_df.insert(0, 'repressors', REPRESSORS[i])
im_df.insert(0, 'rbs', st)
im_df.insert(0, 'binding_energy', BINDING_ENERGY)
im_df.insert(0, 'operator', OPERATOR)
im_df.insert(0, 'username', USERNAME)
im_df.insert(0, 'date', DATE)
# Append the dataframe to the global list.
dfs.append(im_df)
# Concatenate the dataframe
df_im = pd.concat(dfs, axis=0)
df_im.to_csv('./outdir/' + str(DATE) + '_' + OPERATOR + '_' +\
STRAINS[-1] + '_raw_segmentation.csv', index=False)
|
the-stack_0_660 | #!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 2008-2016 California Institute of Technology.
# Copyright (c) 2016-2019 The Uncertainty Quantification Foundation.
# License: 3-clause BSD. The full license text is available at:
# - https://github.com/uqfoundation/dill/blob/master/LICENSE
"""
Methods for detecting objects leading to pickling failures.
"""
import dis
from inspect import ismethod, isfunction, istraceback, isframe, iscode
from .pointers import parent, reference, at, parents, children
from ._dill import _trace as trace
from ._dill import PY3
__all__ = ['baditems','badobjects','badtypes','code','errors','freevars',
'getmodule','globalvars','nestedcode','nestedglobals','outermost',
'referredglobals','referrednested','trace','varnames']
def getmodule(object, _filename=None, force=False):
"""get the module of the object"""
from inspect import getmodule as getmod
module = getmod(object, _filename)
if module or not force: return module
if PY3: builtins = 'builtins'
else: builtins = '__builtin__'
builtins = __import__(builtins)
from .source import getname
name = getname(object, force=True)
return builtins if name in vars(builtins).keys() else None
def outermost(func): # is analogous to getsource(func,enclosing=True)
"""get outermost enclosing object (i.e. the outer function in a closure)
NOTE: this is the object-equivalent of getsource(func, enclosing=True)
"""
if PY3:
if ismethod(func):
_globals = func.__func__.__globals__ or {}
elif isfunction(func):
_globals = func.__globals__ or {}
else:
return #XXX: or raise? no matches
_globals = _globals.items()
else:
if ismethod(func):
_globals = func.im_func.func_globals or {}
elif isfunction(func):
_globals = func.func_globals or {}
else:
return #XXX: or raise? no matches
_globals = _globals.iteritems()
# get the enclosing source
from .source import getsourcelines
try: lines,lnum = getsourcelines(func, enclosing=True)
except: #TypeError, IOError
lines,lnum = [],None
code = ''.join(lines)
# get all possible names,objects that are named in the enclosing source
_locals = ((name,obj) for (name,obj) in _globals if name in code)
# now only save the objects that generate the enclosing block
for name,obj in _locals: #XXX: don't really need 'name'
try:
if getsourcelines(obj) == (lines,lnum): return obj
except: #TypeError, IOError
pass
return #XXX: or raise? no matches
def nestedcode(func, recurse=True): #XXX: or return dict of {co_name: co} ?
"""get the code objects for any nested functions (e.g. in a closure)"""
func = code(func)
if not iscode(func): return [] #XXX: or raise? no matches
nested = set()
for co in func.co_consts:
if co is None: continue
co = code(co)
if co:
nested.add(co)
if recurse: nested |= set(nestedcode(co, recurse=True))
return list(nested)
def code(func):
'''get the code object for the given function or method
NOTE: use dill.source.getsource(CODEOBJ) to get the source code
'''
if PY3:
im_func = '__func__'
func_code = '__code__'
else:
im_func = 'im_func'
func_code = 'func_code'
if ismethod(func): func = getattr(func, im_func)
if isfunction(func): func = getattr(func, func_code)
if istraceback(func): func = func.tb_frame
if isframe(func): func = func.f_code
if iscode(func): return func
return
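# --- Editor-added usage sketch (not part of the original module) ---
# Hedged example of code(); the helper name is hypothetical and is never
# called here.
def _code_example():
    f = lambda value: value * 2
    return code(f).co_varnames               # ('value',)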
#XXX: ugly: parse dis.dis for name after "<code object" in line and in globals?
def referrednested(func, recurse=True): #XXX: return dict of {__name__: obj} ?
"""get functions defined inside of func (e.g. inner functions in a closure)
NOTE: results may differ if the function has been executed or not.
If len(nestedcode(func)) > len(referrednested(func)), try calling func().
If possible, python builds code objects, but delays building functions
until func() is called.
"""
if PY3:
att1 = '__code__'
att0 = '__func__'
else:
att1 = 'func_code' # functions
att0 = 'im_func' # methods
import gc
funcs = set()
    # get the code objects, and try to track them down by reference
for co in nestedcode(func, recurse):
# look for function objects that refer to the code object
for obj in gc.get_referrers(co):
# get methods
_ = getattr(obj, att0, None) # ismethod
if getattr(_, att1, None) is co: funcs.add(obj)
# get functions
elif getattr(obj, att1, None) is co: funcs.add(obj)
# get frame objects
elif getattr(obj, 'f_code', None) is co: funcs.add(obj)
# get code objects
elif hasattr(obj, 'co_code') and obj is co: funcs.add(obj)
# frameobjs => func.func_code.co_varnames not in func.func_code.co_cellvars
# funcobjs => func.func_code.co_cellvars not in func.func_code.co_varnames
# frameobjs are not found, however funcobjs are...
# (see: test_mixins.quad ... and test_mixins.wtf)
# after execution, code objects get compiled, and then may be found by gc
return list(funcs)
def freevars(func):
"""get objects defined in enclosing code that are referred to by func
returns a dict of {name:object}"""
if PY3:
im_func = '__func__'
func_code = '__code__'
func_closure = '__closure__'
else:
im_func = 'im_func'
func_code = 'func_code'
func_closure = 'func_closure'
if ismethod(func): func = getattr(func, im_func)
if isfunction(func):
closures = getattr(func, func_closure) or ()
func = getattr(func, func_code).co_freevars # get freevars
else:
return {}
return dict((name,c.cell_contents) for (name,c) in zip(func,closures))
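# --- Editor-added usage sketch (not part of the original module) ---
# Hedged example of freevars() on a closure; the helper name is hypothetical
# and is never called here.
def _freevars_example():
    base = 10
    def adder(y):
        return base + y
    return freevars(adder)                   # {'base': 10}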
# thanks to Davies Liu for recursion of globals
def nestedglobals(func, recurse=True):
"""get the names of any globals found within func"""
func = code(func)
if func is None: return list()
from .temp import capture
names = set()
with capture('stdout') as out:
dis.dis(func) #XXX: dis.dis(None) disassembles last traceback
for line in out.getvalue().splitlines():
if '_GLOBAL' in line:
name = line.split('(')[-1].split(')')[0]
names.add(name)
for co in getattr(func, 'co_consts', tuple()):
if co and recurse and iscode(co):
names.update(nestedglobals(co, recurse=True))
return list(names)
def referredglobals(func, recurse=True, builtin=False):
"""get the names of objects in the global scope referred to by func"""
return globalvars(func, recurse, builtin).keys()
def globalvars(func, recurse=True, builtin=False):
"""get objects defined in global scope that are referred to by func
return a dict of {name:object}"""
if PY3:
im_func = '__func__'
func_code = '__code__'
func_globals = '__globals__'
func_closure = '__closure__'
else:
im_func = 'im_func'
func_code = 'func_code'
func_globals = 'func_globals'
func_closure = 'func_closure'
if ismethod(func): func = getattr(func, im_func)
if isfunction(func):
globs = vars(getmodule(sum)).copy() if builtin else {}
# get references from within closure
orig_func, func = func, set()
for obj in getattr(orig_func, func_closure) or {}:
_vars = globalvars(obj.cell_contents, recurse, builtin) or {}
            func.update(_vars) #XXX: (above) be wary of infinite recursion?
globs.update(_vars)
# get globals
globs.update(getattr(orig_func, func_globals) or {})
# get names of references
if not recurse:
func.update(getattr(orig_func, func_code).co_names)
else:
func.update(nestedglobals(getattr(orig_func, func_code)))
# find globals for all entries of func
for key in func.copy(): #XXX: unnecessary...?
nested_func = globs.get(key)
if nested_func is orig_func:
#func.remove(key) if key in func else None
continue #XXX: globalvars(func, False)?
func.update(globalvars(nested_func, True, builtin))
elif iscode(func):
globs = vars(getmodule(sum)).copy() if builtin else {}
#globs.update(globals())
if not recurse:
func = func.co_names # get names
else:
orig_func = func.co_name # to stop infinite recursion
func = set(nestedglobals(func))
# find globals for all entries of func
for key in func.copy(): #XXX: unnecessary...?
if key is orig_func:
#func.remove(key) if key in func else None
continue #XXX: globalvars(func, False)?
nested_func = globs.get(key)
func.update(globalvars(nested_func, True, builtin))
else:
return {}
#NOTE: if name not in func_globals, then we skip it...
return dict((name,globs[name]) for name in func if name in globs)
def varnames(func):
"""get names of variables defined by func
    returns a tuple (local vars, local vars referenced by nested functions)"""
func = code(func)
if not iscode(func):
return () #XXX: better ((),())? or None?
return func.co_varnames, func.co_cellvars
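# --- Editor-added usage sketch (not part of the original module) ---
# Hedged example of varnames(); the helper name is hypothetical and is never
# called here.
def _varnames_example():
    def f(a, b):
        c = a + b
        return c
    return varnames(f)                       # (('a', 'b', 'c'), ())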
def baditems(obj, exact=False, safe=False): #XXX: obj=globals() ?
"""get items in object that fail to pickle"""
if not hasattr(obj,'__iter__'): # is not iterable
return [j for j in (badobjects(obj,0,exact,safe),) if j is not None]
obj = obj.values() if getattr(obj,'values',None) else obj
_obj = [] # can't use a set, as items may be unhashable
[_obj.append(badobjects(i,0,exact,safe)) for i in obj if i not in _obj]
return [j for j in _obj if j is not None]
def badobjects(obj, depth=0, exact=False, safe=False):
"""get objects that fail to pickle"""
from dill import pickles
if not depth:
if pickles(obj,exact,safe): return None
return obj
return dict(((attr, badobjects(getattr(obj,attr),depth-1,exact,safe)) \
for attr in dir(obj) if not pickles(getattr(obj,attr),exact,safe)))
def badtypes(obj, depth=0, exact=False, safe=False):
"""get types for objects that fail to pickle"""
from dill import pickles
if not depth:
if pickles(obj,exact,safe): return None
return type(obj)
return dict(((attr, badtypes(getattr(obj,attr),depth-1,exact,safe)) \
for attr in dir(obj) if not pickles(getattr(obj,attr),exact,safe)))
def errors(obj, depth=0, exact=False, safe=False):
"""get errors for objects that fail to pickle"""
from dill import pickles, copy
if not depth:
try:
pik = copy(obj)
if exact:
assert pik == obj, \
"Unpickling produces %s instead of %s" % (pik,obj)
assert type(pik) == type(obj), \
"Unpickling produces %s instead of %s" % (type(pik),type(obj))
return None
except Exception:
import sys
return sys.exc_info()[1]
_dict = {}
for attr in dir(obj):
try:
_attr = getattr(obj,attr)
except Exception:
import sys
_dict[attr] = sys.exc_info()[1]
continue
if not pickles(_attr,exact,safe):
_dict[attr] = errors(_attr,depth-1,exact,safe)
return _dict
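# Illustrative, guarded demo (not part of the original module; assumes dill is
# installed). Exact output depends on the Python and dill versions in use.
if __name__ == '__main__':
    import threading
    suspect = {'lock': threading.Lock(), 'data': [1, 2, 3]}
    print(baditems(suspect))    # values of the dict that fail to pickle
    print(badobjects(suspect))  # the dict itself, since it does not pickle
    print(errors(suspect))      # the exception raised while round-tripping it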
# EOF
the-stack_0_661 |
from __future__ import division
from .atmospheric_model import AtmosphericLayer, phase_covariance_von_karman, fried_parameter_from_Cn_squared
from ..statistics import SpectralNoiseFactoryMultiscale
from ..field import Field, RegularCoords, UnstructuredCoords, CartesianGrid
from .finite_atmospheric_layer import FiniteAtmosphericLayer
import numpy as np
from scipy import linalg
from scipy.ndimage import affine_transform
import time
import warnings
class InfiniteAtmosphericLayer(AtmosphericLayer):
def __init__(self, input_grid, Cn_squared=None, L0=np.inf, velocity=0, height=0, stencil_length=2, use_interpolation=False):
self._initialized = False
AtmosphericLayer.__init__(self, input_grid, Cn_squared, L0, velocity, height)
# Check properties of input_grid
if not input_grid.is_('cartesian'):
raise ValueError('Input grid must be cartesian.')
if not input_grid.is_regular:
raise ValueError('Input grid must be regularly spaced')
if not input_grid.ndim == 2:
raise ValueError('Input grid must be two-dimensional.')
self.stencil_length = stencil_length
self.use_interpolation = use_interpolation
self._make_stencils()
self._make_covariance_matrices()
self._make_AB_matrices()
self._make_initial_phase_screen()
self.center = np.zeros(2)
self._initialized = True
def _recalculate_matrices(self):
if self._initialized:
self._make_covariance_matrices()
self._make_AB_matrices()
def _make_stencils(self):
# Vertical
self.new_grid_bottom = CartesianGrid(RegularCoords(self.input_grid.delta, [self.input_grid.dims[0], 1], self.input_grid.zero - np.array([0, self.input_grid.delta[1]])))
self.stencil_bottom = Field(np.zeros(self.input_grid.size, dtype='bool'), self.input_grid).shaped
self.stencil_bottom[:self.stencil_length,:] = True
for i, n in enumerate(np.random.geometric(0.5, self.input_grid.dims[0])):
self.stencil_bottom[(n + self.stencil_length - 1) % self.input_grid.dims[1],i] = True
self.stencil_bottom = self.stencil_bottom.ravel()
self.num_stencils_vertical = np.sum(self.stencil_bottom)
# Horizontal
self.new_grid_left = CartesianGrid(RegularCoords(self.input_grid.delta, [1, self.input_grid.dims[1]], self.input_grid.zero - np.array([self.input_grid.delta[0], 0])))
self.stencil_left = Field(np.zeros(self.input_grid.size, dtype='bool'), self.input_grid).shaped
self.stencil_left[:,:self.stencil_length] = True
for i, n in enumerate(np.random.geometric(0.5, self.input_grid.dims[1])):
self.stencil_left[i,(n + self.stencil_length - 1) % self.input_grid.dims[0]] = True
self.stencil_left = self.stencil_left.ravel()
self.num_stencils_horizontal = np.sum(self.stencil_left)
def _make_covariance_matrices(self):
phase_covariance = phase_covariance_von_karman(fried_parameter_from_Cn_squared(1, 1), self.L0)
# Vertical
x = np.concatenate((self.input_grid.x[self.stencil_bottom], self.new_grid_bottom.x))
x = np.concatenate([x - xx for xx in x])
y = np.concatenate((self.input_grid.y[self.stencil_bottom], self.new_grid_bottom.y))
y = np.concatenate([y - yy for yy in y])
separations = CartesianGrid(UnstructuredCoords((x, y)))
n = self.new_grid_bottom.size + self.num_stencils_vertical
self.cov_matrix_vertical = phase_covariance(separations).reshape((n, n))
# Horizontal
x = np.concatenate((self.input_grid.x[self.stencil_left], self.new_grid_left.x))
x = np.concatenate([x - xx for xx in x])
y = np.concatenate((self.input_grid.y[self.stencil_left], self.new_grid_left.y))
y = np.concatenate([y - yy for yy in y])
separations = CartesianGrid(UnstructuredCoords((x, y)))
n = self.new_grid_left.size + self.num_stencils_horizontal
self.cov_matrix_horizontal = phase_covariance(separations).reshape((n, n))
def _make_AB_matrices(self):
# Vertical
n = self.num_stencils_vertical
cov_zz = self.cov_matrix_vertical[:n,:n]
cov_xz = self.cov_matrix_vertical[n:, :n]
cov_zx = self.cov_matrix_vertical[:n, n:]
cov_xx = self.cov_matrix_vertical[n:, n:]
cf = linalg.cho_factor(cov_zz)
inv_cov_zz = linalg.cho_solve(cf, np.eye(cov_zz.shape[0]))
self.A_vertical = cov_xz.dot(inv_cov_zz)
BBt = cov_xx - self.A_vertical.dot(cov_zx)
U, S, Vt = np.linalg.svd(BBt)
L = np.sqrt(S[:self.input_grid.dims[0]])
self.B_vertical = U * L
# Horizontal
n = self.num_stencils_horizontal
cov_zz = self.cov_matrix_horizontal[:n,:n]
cov_xz = self.cov_matrix_horizontal[n:, :n]
cov_zx = self.cov_matrix_horizontal[:n, n:]
cov_xx = self.cov_matrix_horizontal[n:, n:]
cf = linalg.cho_factor(cov_zz)
inv_cov_zz = linalg.cho_solve(cf, np.eye(cov_zz.shape[0]))
self.A_horizontal = cov_xz.dot(inv_cov_zz)
BBt = cov_xx - self.A_horizontal.dot(cov_zx)
U, S, Vt = np.linalg.svd(BBt)
L = np.sqrt(S[:self.input_grid.dims[1]])
self.B_horizontal = U * L
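    # Added comment: the matrices computed above implement the standard
    # conditional-Gaussian ("infinite phase screen") update. With z the phase
    # values on the stencil and x the new row/column to be generated,
    #     A     = cov_xz . cov_zz^-1       (conditional mean operator)
    #     B B^T = cov_xx  - A . cov_zx     (conditional covariance)
    # _extrude() below then draws x = A.z + B.w with w ~ N(0, I), scaled by
    # sqrt(Cn^2) to set the layer strength.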
def _make_initial_phase_screen(self):
oversampling = 16
layer = FiniteAtmosphericLayer(self.input_grid, self.Cn_squared, self.outer_scale, self.velocity, self.height, oversampling)
self._achromatic_screen = layer.phase_for(1)
self._shifted_achromatic_screen = self._achromatic_screen
def _extrude(self, where=None):
flipped = (where == 'top') or (where == 'right')
horizontal = (where == 'left') or (where == 'right')
if where == 'top' or where == 'right':
screen = self._achromatic_screen[::-1]
else:
screen = self._achromatic_screen
if horizontal:
stencil = self.stencil_left
A = self.A_horizontal
B = self.B_horizontal
else:
stencil = self.stencil_bottom
A = self.A_vertical
B = self.B_vertical
stencil_data = screen[stencil]
random_data = np.random.normal(0, 1, size=B.shape[1])
new_slice = A.dot(stencil_data) + B.dot(random_data) * np.sqrt(self._Cn_squared)
screen = screen.shaped
if horizontal:
screen = np.hstack((new_slice[:,np.newaxis], screen[:,:-1]))
else:
screen = np.vstack((new_slice[np.newaxis,:], screen[:-1,:]))
screen = Field(screen, self.input_grid)
if flipped:
self._achromatic_screen = screen[::-1,::-1].ravel()
else:
self._achromatic_screen = screen.ravel()
def phase_for(self, wavelength):
return self._shifted_achromatic_screen / wavelength
def reset(self):
self._make_initial_phase_screen()
self.center = np.zeros(2)
self._t = 0
def evolve_until(self, t):
if t is None:
self.reset()
return
old_center = np.round(self.center / self.input_grid.delta).astype('int')
self.center = self.velocity * t
new_center = np.round(self.center / self.input_grid.delta).astype('int')
delta = new_center - old_center
for i in range(abs(delta[0])):
if delta[0] < 0:
self._extrude('left')
else:
self._extrude('right')
for i in range(abs(delta[1])):
if delta[1] < 0:
self._extrude('bottom')
else:
self._extrude('top')
if self.use_interpolation:
# Use bilinear interpolation to interpolate the achromatic phase screen to the correct position.
# This is to avoid sudden shifts by discrete pixels.
ps = self._achromatic_screen.shaped
sub_delta = self.center - new_center * self.input_grid.delta
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='The behaviour of affine_transform with a one-dimensional array supplied for the matrix parameter has changed in scipy 0.18.0.')
self._shifted_achromatic_screen = affine_transform(ps, np.array([1,1]), (sub_delta / self.input_grid.delta)[::-1], mode='nearest', order=1).ravel()
else:
self._shifted_achromatic_screen = self._achromatic_screen
@property
def Cn_squared(self):
return self._Cn_squared
@Cn_squared.setter
def Cn_squared(self, Cn_squared):
self._Cn_squared = Cn_squared
@property
def outer_scale(self):
return self._L0
@outer_scale.setter
def L0(self, L0):
self._L0 = L0
        self._recalculate_matrices()
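# Illustrative usage sketch (not from the original source; the grid spacing,
# Cn^2 value, outer scale and velocity below are assumptions). The layer is
# normally constructed through the hosting package:
#
#   grid = CartesianGrid(RegularCoords([1.0 / 64, 1.0 / 64], [64, 64], np.zeros(2)))
#   layer = InfiniteAtmosphericLayer(grid, Cn_squared=1e-13, L0=10, velocity=10, use_interpolation=True)
#   layer.evolve_until(0.01)          # advance the frozen-flow screen by 10 ms
#   phase = layer.phase_for(500e-9)   # phase screen evaluated at a 500 nm wavelength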
the-stack_0_662 |
#!/bin/env python
import os
import sys
import random
import subprocess as sub
import getopt
import time
def identity(x):
return x
def cygpath(x):
command = ["cygpath", "-wp", x]
p = sub.Popen(command,stdout=sub.PIPE)
output, errors = p.communicate()
lines = output.split("\n")
return lines[0]
if sys.platform == "cygwin":
normclasspath = cygpath
else:
normclasspath = identity
CUSTOM_CONF_FILE = ""
CONFIG_OPTS = []
STATUS = 0
JKUBERNETES_DIR = "/".join(os.path.realpath( __file__ ).split("/")[:-2])
JKUBERNETES_CONF_DIR = os.getenv("JKUBERNETES_CONF_DIR", JKUBERNETES_DIR + "/conf" )
CONFIG_OPTS = []
EXCLUDE_JARS = []
INCLUDE_JARS = []
API_SERVER_ADDRESS = ""
JKUBERNETES_CREATE_YAML_PATH = ""
def check_java():
check_java_cmd = 'which java'
ret = os.system(check_java_cmd)
if ret != 0:
print("Failed to find java, please add java to PATH")
sys.exit(-1)
def print_commands():
"""Print all client commands and link to documentation"""
print ("kubectl command [-s http://apiserverip:port]")
print ("Commands:\n\t", "\n\t".join(sorted(COMMANDS.keys())))
print ("\nHelp:", "\n\thelp", "\n\thelp <command>")
print ("\nDocumentation for the jkubernetes client can be found at https://github.com/gwisoft/jkubernetes/wiki/jkubernetes-Chinese-Documentation\n")
def get_jars_full(adir):
ret = []
temp = adir.strip()
print (temp == "")
if temp == "":
return ret
files = os.listdir(adir)
for f in files:
if f.endswith(".jar") == False:
continue
filter = False
for exclude_jar in EXCLUDE_JARS:
if f.find(exclude_jar) >= 0:
filter = True
break
if filter == True:
print ("Don't add " + f + " to classpath")
else:
ret.append(adir + "/" + f)
return ret
def unknown_command(*args):
print ("Unknown command: [kubectl %s]" % ' '.join(sys.argv[1:]))
print_usage()
def print_usage(command=None):
"""Print one help message or list of available commands"""
if command != None:
if command in COMMANDS:
print (COMMANDS[command].__doc__ or
"No documentation provided for <%s>" % command)
else:
print ("<%s> is not a valid command" % command)
else:
print_commands()
def parse_config_opts_and_args(args):
curr = args[:]
curr.reverse()
config_list = []
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "-s":
global API_SERVER_ADDRESS
API_SERVER_ADDRESS = curr.pop()
elif token == "-c":
config_list.append(curr.pop())
elif token == "--config":
global CUSTOM_CONF_FILE
CUSTOM_CONF_FILE = curr.pop()
else:
args_list.append(token)
print ("config_list=")
print (config_list)
print ("args_list=")
print (args_list)
return config_list, args_list
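# Illustrative example (host and file names are hypothetical): a command line like
#   kubectl create -f app.yaml -s http://10.10.10.1:8080 -c kube.zookeeper.servers=10.10.10.2
# is split by parse_config_opts_and_args into
#   config_list = ['kube.zookeeper.servers=10.10.10.2']
#   args_list   = ['create', '-f', 'app.yaml']
# while API_SERVER_ADDRESS is set to 'http://10.10.10.1:8080' as a side effect.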
def parse_config_opts(config_list):
global CONFIG_OPTS
if len(config_list) > 0:
for config in config_list:
CONFIG_OPTS.append(config)
def filter_array(array):
ret = []
for item in array:
temp = item.strip()
if temp != "":
ret.append(temp)
return ret
def get_config_opts():
global CONFIG_OPTS
print ("-Dkubernetes.options=" + (','.join(CONFIG_OPTS)).replace(' ', "%%%%"))
return "-Dkubernetes.options=" + (','.join(CONFIG_OPTS)).replace(' ', "%%%%")
# extra JAR arguments (JARs excluded from the classpath)
def get_exclude_jars():
global EXCLUDE_JARS
return " -Dexclude.jars=" + (','.join(EXCLUDE_JARS))
def create(args):
"""
kubectl create -f ***.yaml
"""
pass
args = parse_client_createopts(args)
childopts = get_client_customopts() + get_exclude_jars() + get_client_createopts()
print ("childopts=")
print (childopts)
exec_jkubernetes_class(
"org.gwisoft.jkubernetes.kubectl.KubectlCreate",
jvmtype="-client -Xms256m -Xmx256m",
sysdirs=[JKUBERNETES_CONF_DIR, JKUBERNETES_DIR + "/bin",CUSTOM_CONF_FILE],
args=args,
childopts=childopts,
isBackgroundRun="false")
def kube(args):
"""
kubectl kube
"""
pass
childopts = get_client_customopts() + get_exclude_jars()
print ("childopts=")
print (childopts)
exec_jkubernetes_class(
"org.gwisoft.jkubernetes.daemon.kube.KubeServer",
jvmtype="-server -Xms256m -Xmx256m",
sysdirs=[JKUBERNETES_CONF_DIR, JKUBERNETES_DIR + "/bin",CUSTOM_CONF_FILE],
args=args,
childopts=childopts,
isBackgroundRun="true")
def kubelet(args):
"""
kubectl kubelet
"""
pass
childopts = get_client_customopts() + get_exclude_jars()
print ("childopts=")
print (childopts)
exec_jkubernetes_class(
"org.gwisoft.jkubernetes.daemon.kubelet.Kubelet",
jvmtype="-server -Xms256m -Xmx256m",
sysdirs=[JKUBERNETES_CONF_DIR, JKUBERNETES_DIR + "/bin",CUSTOM_CONF_FILE],
args=args,
childopts=childopts,
isBackgroundRun="true")
def delete(args):
"""
kubectl delete -f ***.yaml
"""
pass
args = parse_client_createopts(args)
childopts = get_client_customopts() + get_exclude_jars() + get_client_createopts()
print ("childopts=")
print (childopts)
exec_jkubernetes_class(
"org.gwisoft.jkubernetes.kubectl.KubectlDelete",
jvmtype="-server -Xms256m -Xmx256m",
sysdirs=[JKUBERNETES_CONF_DIR, JKUBERNETES_DIR + "/bin",CUSTOM_CONF_FILE],
args=args,
childopts=childopts,
isBackgroundRun="false")
def rollingUpdate(args):
"""
kubectl rolling-update [old topology name] -f ***.yaml
"""
pass
args = parse_client_createopts(args)
childopts = get_client_customopts() + get_exclude_jars() + get_client_createopts()
print ("childopts=")
print (childopts)
exec_jkubernetes_class(
"org.gwisoft.jkubernetes.kubectl.KubectlRollingUpdate",
jvmtype="-server -Xms256m -Xmx256m",
sysdirs=[JKUBERNETES_CONF_DIR, JKUBERNETES_DIR + "/bin",CUSTOM_CONF_FILE],
args=args,
childopts=childopts,
isBackgroundRun="false")
def replace(args):
"""
kubectl replace -f ***.yaml
"""
pass
args = parse_client_createopts(args)
childopts = get_client_customopts() + get_exclude_jars() + get_client_createopts()
print ("childopts=")
print (childopts)
exec_jkubernetes_class(
"org.gwisoft.jkubernetes.kubectl.KubectlReplace",
jvmtype="-server -Xms256m -Xmx256m",
sysdirs=[JKUBERNETES_CONF_DIR, JKUBERNETES_DIR + "/bin",CUSTOM_CONF_FILE],
args=args,
childopts=childopts,
isBackgroundRun="false")
def get(args):
"""
kubectl get po [topology name]
"""
pass
childopts = get_client_customopts() + get_exclude_jars()
print ("childopts=")
print (childopts)
exec_jkubernetes_class(
"org.gwisoft.jkubernetes.kubectl.KubectlGet",
jvmtype="-server -Xms256m -Xmx256m",
sysdirs=[JKUBERNETES_CONF_DIR, JKUBERNETES_DIR + "/bin",CUSTOM_CONF_FILE],
args=args,
childopts=childopts,
isBackgroundRun="false")
def get_client_createopts():
ret = (" -Dkubernetes.create.yaml=" + JKUBERNETES_CREATE_YAML_PATH + " -Dkubernetes.apiserver.address=" + API_SERVER_ADDRESS)
return ret
def parse_client_createopts(args):
print ("parse_client_createopts=")
print (args)
curr = args
curr.reverse()
args_list = []
while len(curr) > 0:
token = curr.pop()
print (token == "-f")
if token == "-f":
global JKUBERNETES_CREATE_YAML_PATH
JKUBERNETES_CREATE_YAML_PATH = curr.pop()
else:
args_list.append(token)
print (args_list)
return args_list
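# Illustrative example (the file name is hypothetical):
#   parse_client_createopts(['-f', 'app.yaml'])
# returns [] and sets JKUBERNETES_CREATE_YAML_PATH to 'app.yaml', which
# get_client_createopts() then forwards to the JVM as -Dkubernetes.create.yaml.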
def exec_jkubernetes_class(klass, jvmtype="-server", sysdirs=[], args=[], childopts="",isBackgroundRun=""):
args_str = " ".join(args)
command = "java " + " -Dkubernetes.home=" + JKUBERNETES_DIR + " " + get_config_opts() + " " + childopts + " -cp " + get_classpath(sysdirs) + " " + klass + " " + args_str
print ("Running: " + command)
global STATUS
STATUS = os.execvp("java", filter_array(command.split(" ")))
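    # Added comment: os.execvp replaces the current Python process with the JVM;
    # it never returns on success and raises OSError on failure, so the
    # assignment to STATUS above is effectively never executed.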
# custom system-level configuration arguments
def get_client_customopts():
ret = ("")
"""
ret = (" -Dkubernetes.root.logger=INFO,stdout -Dlogback.configurationFile=" + JKUBERNETES_DIR +
"/conf/client_logback.xml -Dlog4j.configuration=File:" + JKUBERNETES_DIR +
"/conf/client_log4j.properties")
"""
return ret
def get_classpath(extrajars):
ret = []
ret.extend(extrajars)
ret.extend(get_jars_full(JKUBERNETES_DIR))
ret.extend(get_jars_full(JKUBERNETES_DIR + "/lib"))
ret.extend(INCLUDE_JARS)
return normclasspath(":".join(ret))
def main():
if len(sys.argv) <= 1:
print_usage()
sys.exit(-1)
global CONFIG_OPTS
config_list, args = parse_config_opts_and_args(sys.argv[1:])
parse_config_opts(config_list)
COMMAND = args[0]
ARGS = args[1:]
if COMMANDS.get(COMMAND) == None:
unknown_command(COMMAND)
sys.exit(-1)
if len(ARGS) != 0 and ARGS[0] == "help":
print_usage(COMMAND)
sys.exit(0)
try:
(COMMANDS.get(COMMAND,"help"))(ARGS)
except Exception as msg:
print(msg)
print_usage(COMMAND)
sys.exit(-1)
sys.exit(STATUS)
COMMANDS = {"create": create,"kube":kube,"kubelet":kubelet,"delete":delete,"rolling-update":rollingUpdate,"replace":replace,"get":get}
if __name__ == "__main__":
#check_java()
main()
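# Illustrative invocations (host, port and file names are hypothetical), based on
# the COMMANDS table above:
#   kubectl kube                                          # start the kube (API server) daemon
#   kubectl kubelet                                       # start a kubelet daemon
#   kubectl create -f app.yaml -s http://10.10.10.1:8080  # submit the topology described by app.yaml
#   kubectl get po my-topology                            # query topology/pod state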