the-stack_0_9857 | # Copyright (c) 2013 Pratik Kumar Sahu, Nagendra Chowdary, Anish Mathuria
# Ported to Python by Gallopsled
from __future__ import division
import os
import random
import struct
# +------------------------------------------------------------------------+
# | RANDOM NUMBERS FUNCTIONS |
# +------------------------------------------------------------------------+
# get a random integer i (0<=i<maxv)
# ==================================
def random_get_int(maxv):
return random.randrange(0, maxv)
def randel(arr):
return arr[random_get_int(len(arr))]
def enc_data_msn(c, i):
# c is the lsn to be encoded with a msn
# lsn = least significant nibble; msn = most significant nibble
if c <= i:
if c == 0:
#Randomly select and return from {5,7}
return randel([5, 7])
else:
#Randomly select and return from {4,5,6,7}
return randel([4,5,6,7])
elif c == 0:
#Randomly select and return from {3,5,7}
return randel([3,5,7])
elif c <= 0x0A:
#Randomly select and return from {3,4,5,6,7}
#CSE Why doesn't the author use 3 below then?
return randel([4,5,6,7])
else:
return randel([4,6])
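# Illustrative usage sketch (not part of the original module): pack a low
# nibble with a generated high nibble into a single byte. The packing step is
# an assumption about how callers combine the two nibbles.
#     lsn, index = 0x3, 0x5
#     msn = enc_data_msn(lsn, index)   # one of {4, 5, 6, 7} for this input
#     byte = (msn << 4) | lsn          # e.g. 0x43, 0x53, 0x63 or 0x73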
|
the-stack_0_9858 | """
Interfaces to various optimizers.
"""
from __future__ import print_function, division
import sys
from copy import copy
import warnings
# CRUFT: time.clock() removed from python 3.8
try:
from time import perf_counter
except ImportError:
from time import clock as perf_counter
import numpy as np
from . import monitor
from . import initpop
from . import lsqerror
from .history import History
from .formatnum import format_uncertainty
from .fitproblem import nllf_scale
from .dream import MCMCModel
class ConsoleMonitor(monitor.TimedUpdate):
"""
Display fit progress on the console
"""
def __init__(self, problem, progress=1, improvement=30):
monitor.TimedUpdate.__init__(self, progress=progress,
improvement=improvement)
self.problem = problem
def show_progress(self, history):
scale, err = nllf_scale(self.problem)
chisq = format_uncertainty(scale*history.value[0], err)
print("step", history.step[0], "cost", chisq)
sys.stdout.flush()
def show_improvement(self, history):
# print("step",history.step[0],"chisq",history.value[0])
p = self.problem.getp()
try:
self.problem.setp(history.point[0])
print(self.problem.summarize())
finally:
self.problem.setp(p)
sys.stdout.flush()
class CheckpointMonitor(monitor.TimedUpdate):
"""
Periodically save fit state so that it can be resumed later.
"""
#: Function to call at each checkpoint.
checkpoint = None # type: Callable[None, None]
def __init__(self, checkpoint, progress=60*30):
monitor.TimedUpdate.__init__(self, progress=progress,
improvement=np.inf)
self.checkpoint = checkpoint
self._first = True
def show_progress(self, history):
# Skip the first checkpoint since it only contains the
# start/resume state
if self._first:
self._first = False
else:
self.checkpoint(history)
def show_improvement(self, history):
pass
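# Illustrative usage sketch (not part of the original module): checkpoint the
# fit every 10 minutes with a hypothetical user-supplied callback.
#     def save_state(history):
#         with open("fit-checkpoint.txt", "w") as fid:
#             fid.write("%d %g\n" % (history.step[0], history.value[0]))
#     checkpoint_monitor = CheckpointMonitor(save_state, progress=600)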
class StepMonitor(monitor.Monitor):
"""
Collect information at every step of the fit and save it to a file.
*fid* is the file to save the information to
*fields* is the list of "step|time|value|point" fields to save
The point field should be last in the list.
"""
FIELDS = ['step', 'time', 'value', 'point']
def __init__(self, problem, fid, fields=FIELDS):
if any(f not in self.FIELDS for f in fields):
raise ValueError("invalid monitor field")
self.fid = fid
self.fields = fields
self.problem = problem
self._pattern = "%%(%s)s\n" % (")s %(".join(fields))
fid.write("# " + ' '.join(fields) + '\n')
def config_history(self, history):
history.requires(time=1, value=1, point=1, step=1)
def __call__(self, history):
point = " ".join("%.15g" % v for v in history.point[0])
time = "%g" % history.time[0]
step = "%d" % history.step[0]
scale, _ = nllf_scale(self.problem)
value = "%.15g" % (scale * history.value[0])
out = self._pattern % dict(point=point, time=time,
value=value, step=step)
self.fid.write(out)
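# Illustrative usage sketch (not part of the original module): record every
# step of a fit to a file using the FitDriver defined later in this module.
#     with open("fit.mon", "w") as fid:
#         step_monitor = StepMonitor(problem, fid, fields=['step', 'value', 'point'])
#         driver = FitDriver(fitclass=SimplexFit, problem=problem,
#                            monitors=[step_monitor])
#         driver.fit()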
class MonitorRunner(object):
"""
Adaptor which allows solvers to accept progress monitors.
"""
def __init__(self, monitors, problem):
if monitors is None:
monitors = [ConsoleMonitor(problem)]
self.monitors = monitors
self.history = History(time=1, step=1, point=1, value=1,
population_points=1, population_values=1)
for M in self.monitors:
M.config_history(self.history)
self._start = perf_counter()
def __call__(self, step, point, value,
population_points=None, population_values=None):
self.history.update(time=perf_counter() - self._start,
step=step, point=point, value=value,
population_points=population_points,
population_values=population_values)
for M in self.monitors:
M(self.history)
class FitBase(object):
"""
FitBase defines the interface from bumps models to the various fitting
engines available within bumps.
Each engine is defined in its own class with a specific set of attributes
and methods.
The *name* attribute is the name of the optimizer. This is just a simple
string.
The *settings* attribute is a list of pairs (name, default), where the
names are defined as fields in FitOptions. A best attempt should be
made to map the fit options for the optimizer to the standard fit options,
since each of these becomes a new command line option when running
bumps. If that is not possible, then a new option should be added
to FitOptions. A plugin architecture might be appropriate here, if
there are reasons why specific problem domains might need custom fitters,
but this is not yet supported.
Each engine takes a fit problem in its constructor.
The :meth:`solve` method runs the fit. It accepts a
monitor to track updates, a mapper to distribute work and
key-value pairs defining the settings.
There are a number of optional methods for the fitting engines. Basically,
all the methods in :class:`FitDriver` first check if they are specialized
in the fit engine before performing a default action.
The *load*/*save* methods load and save the fitter state in a given
directory with a specific base file name. The fitter can choose a file
extension to add to the base name. Some care is needed to be sure that
the extension doesn't collide with other extensions such as .mon for
the fit monitor.
The *plot* method shows any plots to help understand the performance of
the fitter, such as a convergence plot showing the range of values
in the population over time, as well as plots of the parameter uncertainty
if available. The plot should work within the figure canvas it is given.
The *stderr*/*cov* methods should provide summary statistics for the
parameter uncertainties. Some fitters, such as MCMC, will compute these
directly from the population. Others, such as BFGS, will produce an
estimate of the uncertainty as they go along. If the fitter does not
provide these estimates, then they will be computed from numerical
derivatives at the minimum in the FitDriver method.
"""
def __init__(self, problem):
"""Fit the models and show the results"""
self.problem = problem
def solve(self, monitors=None, mapper=None, **options):
raise NotImplementedError()
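# Illustrative sketch (not part of the original module): the minimal shape of
# a new fitting engine plugged into this interface. `my_minimize` is a
# hypothetical external optimizer returning the best point and its nllf.
#     class MyFit(FitBase):
#         name = "My Optimizer"
#         id = "myopt"
#         settings = [('steps', 1000)]
#         def solve(self, monitors=None, mapper=None, **options):
#             options = _fill_defaults(options, self.settings)
#             x, fx = my_minimize(self.problem.nllf, self.problem.getp(),
#                                 maxiter=options['steps'])
#             return x, fx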
class MultiStart(FitBase):
"""
Multi-start Monte Carlo fitter.
This fitter wraps a local optimizer, restarting it a number of times
to give it a chance to find a different local minimum. If the keep_best
option is True, then restart near the best fit, otherwise restart at
random.
"""
name = "Multistart Monte Carlo"
settings = [('starts', 100)]
def __init__(self, fitter):
FitBase.__init__(self, fitter.problem)
self.fitter = fitter
def solve(self, monitors=None, mapper=None, **options):
# TODO: need better way of tracking progress
import logging
starts = options.pop('starts', 1)
reset = not options.pop('keep_best', True)
f_best = np.inf
x_best = self.problem.getp()
for _ in range(max(starts, 1)):
logging.info("multistart round %d", _)
x, fx = self.fitter.solve(monitors=monitors, mapper=mapper,
**options)
if fx < f_best:
x_best, f_best = x, fx
logging.info("multistart f(x),x: %s %s", str(fx), str(x_best))
if reset:
self.problem.randomize()
else:
# Jitter
self.problem.setp(x_best)
pop = initpop.eps_init(1, self.problem.getp(),
self.problem.bounds(),
use_point=False, eps=1e-3)
self.problem.setp(pop[0])
return x_best, f_best
class DEFit(FitBase):
"""
Classic Storn and Price differential evolution optimizer.
"""
name = "Differential Evolution"
id = "de"
settings = [('steps', 1000), ('pop', 10), ('CR', 0.9), ('F', 2.0),
('ftol', 1e-8), ('xtol', 1e-6), #('stop', ''),
]
def solve(self, monitors=None, abort_test=None, mapper=None, **options):
if abort_test is None:
abort_test = lambda: False
options = _fill_defaults(options, self.settings)
from .mystic.optimizer import de
from .mystic.solver import Minimizer
from .mystic import stop
if monitors is None:
monitors = [ConsoleMonitor(self.problem)]
if mapper is not None:
_mapper = lambda p, v: mapper(v)
else:
_mapper = lambda p, v: list(map(self.problem.nllf, v))
resume = hasattr(self, 'state')
steps = options['steps'] + (self.state['step'][-1] if resume else 0)
strategy = de.DifferentialEvolution(npop=options['pop'],
CR=options['CR'],
F=options['F'],
crossover=de.c_bin,
mutate=de.rand1u)
success = parse_tolerance(options)
failure = stop.Steps(steps)
self.history = History()
# Step adds to current step number if resume
minimize = Minimizer(strategy=strategy, problem=self.problem,
history=self.history, monitors=monitors,
success=success, failure=failure)
if resume:
self.history.restore(self.state)
x = minimize(mapper=_mapper, abort_test=abort_test, resume=resume)
#print(minimize.termination_condition())
#with open("/tmp/evals","a") as fid:
# print >>fid,minimize.history.value[0],minimize.history.step[0],\
# minimize.history.step[0]*options['pop']*len(self.problem.getp())
return x, self.history.value[0]
def load(self, input_path):
self.state = load_history(input_path)
def save(self, output_path):
save_history(output_path, self.history.snapshot())
def parse_tolerance(options):
from .mystic import stop
if options.get('stop', ''):
return stop.parse_condition(options['stop'])
xtol, ftol = options['xtol'], options['ftol']
if xtol == 0:
if ftol == 0:
return None
if ftol < 0:
return stop.Rf(-ftol, scaled=True)
return stop.Rf(ftol, scaled=False)
else:
if xtol == 0:
return None
if xtol < 0:
return stop.Rx(-xtol, scaled=True)
return stop.Rx(xtol, scaled=False)
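# Illustrative examples (not part of the original module) of how the options
# map onto stopping conditions in parse_tolerance above:
#     parse_tolerance({'stop': '', 'xtol': 0, 'ftol': 1e-8})   # stop.Rf(1e-8, scaled=False)
#     parse_tolerance({'stop': '', 'xtol': -1e-6, 'ftol': 0})  # stop.Rx(1e-6, scaled=True)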
def _history_file(path):
return path + "-history.json"
def load_history(path):
"""
Load fitter details from a history file.
"""
import json
with open(_history_file(path), "r") as fid:
return json.load(fid)
def save_history(path, state):
"""
Save fitter details to a history file as JSON.
The content of the details are fitter specific.
"""
import json
with open(_history_file(path), "w") as fid:
json.dump(state, fid)
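# Illustrative round trip (not part of the original module): state written by
# save_history lands in "<path>-history.json" and is recovered by load_history.
#     save_history("run1", {'step': [100], 'value': [2.5]})
#     state = load_history("run1")   # reads run1-history.json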
class BFGSFit(FitBase):
"""
BFGS quasi-newton optimizer.
BFGS estimates the Hessian and its Cholesky decomposition, but initial
tests give uncertainties quite different from the directly computed
Jacobian in Levenberg-Marquardt or the Hessian estimated at the
minimum by numdifftools.
To use the internal 'H' and 'L' and save some computation time, use::
C = lsqerror.chol_cov(fit.result['L'])
stderr = lsqerror.stderr(C)
"""
name = "Quasi-Newton BFGS"
id = "newton"
settings = [('steps', 3000), ('starts', 1),
('ftol', 1e-6), ('xtol', 1e-12)]
def solve(self, monitors=None, abort_test=None, mapper=None, **options):
if abort_test is None:
abort_test = lambda: False
options = _fill_defaults(options, self.settings)
from .quasinewton import quasinewton
self._update = MonitorRunner(problem=self.problem,
monitors=monitors)
result = quasinewton(fn=self.problem.nllf,
x0=self.problem.getp(),
monitor=self._monitor,
abort_test=abort_test,
itnlimit=options['steps'],
gradtol=options['ftol'],
steptol=1e-12,
macheps=1e-8,
eta=1e-8,
)
self.result = result
#code = result['status']
#from .quasinewton import STATUS
#print("%d: %s, x=%s, fx=%s"
# % (code, STATUS[code], result['x'], result['fx']))
return result['x'], result['fx']
def _monitor(self, step, x, fx):
self._update(step=step, point=x, value=fx,
population_points=[x],
population_values=[fx])
return True
class PSFit(FitBase):
"""
Particle swarm optimizer.
"""
name = "Particle Swarm"
id = "ps"
settings = [('steps', 3000), ('pop', 1)]
def solve(self, monitors=None, mapper=None, **options):
options = _fill_defaults(options, self.settings)
if mapper is None:
mapper = lambda x: list(map(self.problem.nllf, x))
from .random_lines import particle_swarm
self._update = MonitorRunner(problem=self.problem,
monitors=monitors)
low, high = self.problem.bounds()
cfo = dict(parallel_cost=mapper,
n=len(low),
x0=self.problem.getp(),
x1=low,
x2=high,
f_opt=0,
monitor=self._monitor)
npop = int(cfo['n'] * options['pop'])
result = particle_swarm(cfo, npop, maxiter=options['steps'])
satisfied_sc, n_feval, f_best, x_best = result
return x_best, f_best
def _monitor(self, step, x, fx, k):
self._update(step=step, point=x[:, k], value=fx[k],
population_points=x.T, population_values=fx)
return True
class RLFit(FitBase):
"""
Random lines optimizer.
"""
name = "Random Lines"
id = "rl"
settings = [('steps', 3000), ('starts', 20), ('pop', 0.5), ('CR', 0.9)]
def solve(self, monitors=None, abort_test=None, mapper=None, **options):
if abort_test is None:
abort_test = lambda: False
options = _fill_defaults(options, self.settings)
if mapper is None:
mapper = lambda x: list(map(self.problem.nllf, x))
from .random_lines import random_lines
self._update = MonitorRunner(problem=self.problem,
monitors=monitors)
low, high = self.problem.bounds()
cfo = dict(parallel_cost=mapper,
n=len(low),
x0=self.problem.getp(),
x1=low,
x2=high,
f_opt=0,
monitor=self._monitor)
npop = max(int(cfo['n'] * options['pop']), 3)
result = random_lines(cfo, npop, abort_test=abort_test,
maxiter=options['steps'], CR=options['CR'])
satisfied_sc, n_feval, f_best, x_best = result
return x_best, f_best
def _monitor(self, step, x, fx, k):
# print "rl best",k, x.shape,fx.shape
self._update(step=step, point=x[:, k], value=fx[k],
population_points=x.T, population_values=fx)
return True
class PTFit(FitBase):
"""
Parallel tempering optimizer.
"""
name = "Parallel Tempering"
id = "pt"
settings = [('steps', 400), ('nT', 24), ('CR', 0.9),
('burn', 100), ('Tmin', 0.1), ('Tmax', 10)]
def solve(self, monitors=None, mapper=None, **options):
options = _fill_defaults(options, self.settings)
# TODO: no mapper??
from .partemp import parallel_tempering
self._update = MonitorRunner(problem=self.problem,
monitors=monitors)
t = np.logspace(np.log10(options['Tmin']),
np.log10(options['Tmax']),
options['nT'])
history = parallel_tempering(nllf=self.problem.nllf,
p=self.problem.getp(),
bounds=self.problem.bounds(),
# logfile="partemp.dat",
T=t,
CR=options['CR'],
steps=options['steps'],
burn=options['burn'],
monitor=self._monitor)
return history.best_point, history.best
def _monitor(self, step, x, fx, P, E):
self._update(step=step, point=x, value=fx,
population_points=P, population_values=E)
return True
class SimplexFit(FitBase):
"""
Nelder-Mead simplex optimizer.
"""
name = "Nelder-Mead Simplex"
id = "amoeba"
settings = [('steps', 1000), ('starts', 1), ('radius', 0.15),
('xtol', 1e-6), ('ftol', 1e-8)]
def solve(self, monitors=None, abort_test=None, mapper=None, **options):
from .simplex import simplex
if abort_test is None:
abort_test = lambda: False
options = _fill_defaults(options, self.settings)
# TODO: no mapper??
self._update = MonitorRunner(problem=self.problem,
monitors=monitors)
#print("bounds", self.problem.bounds())
result = simplex(f=self.problem.nllf, x0=self.problem.getp(),
bounds=self.problem.bounds(),
abort_test=abort_test,
update_handler=self._monitor,
maxiter=options['steps'],
radius=options['radius'],
xtol=options['xtol'],
ftol=options['ftol'])
# Let simplex propose the starting point for the next amoeba
# fit in a multistart amoeba context. If the best is always
# used, the fit can get stuck in a local minimum.
self.problem.setp(result.next_start)
#print("amoeba %s %s"%(result.x,result.fx))
return result.x, result.fx
def _monitor(self, k, n, x, fx):
self._update(step=k, point=x[0], value=fx[0],
population_points=x, population_values=fx)
return True
class MPFit(FitBase):
"""
MPFit optimizer.
"""
name = "MPFit"
id = "mp"
settings = [('steps', 200), ('ftol', 1e-10), ('xtol', 1e-10)]
def solve(self, monitors=None, abort_test=None, mapper=None, **options):
from .mpfit import mpfit
if abort_test is None:
abort_test = lambda: False
options = _fill_defaults(options, self.settings)
self._low, self._high = self.problem.bounds()
self._update = MonitorRunner(problem=self.problem,
monitors=monitors)
self._abort = abort_test
x0 = self.problem.getp()
parinfo = []
for low, high in zip(*self.problem.bounds()):
parinfo.append({
#'value': None, # passed in by xall instead
#'fixed': False, # everything is varying
'limited': (np.isfinite(low), np.isfinite(high)),
'limits': (low, high),
#'parname': '', # could probably ask problem for this...
# From the code, default step size is sqrt(eps)*abs(value)
# or eps if value is 0. This seems okay. The other
# alternative is to limit it by bounds.
#'step': 0, # compute step automatically
#'mpside': 0, # 1, -1 or 2 for right-, left- or 2-sided deriv
#'mpmaxstep': 0., # max step for this parameter
#'tied': '', # parameter expressions tying fit parameters
#'mpprint': 1, # print the parameter value when iterating
})
result = mpfit(
fcn=self._residuals,
xall=x0,
parinfo=parinfo,
autoderivative=True,
fastnorm=True,
#damp=0, # no damping when damp=0
# Stopping conditions
ftol=options['ftol'],
xtol=options['xtol'],
#gtol=1e-100, # exclude gtol test
maxiter=options['steps'],
# Progress monitor
iterfunct=self._monitor,
nprint=1, # call monitor each iteration
quiet=True, # leave it to monitor to print any info
# Returns values
nocovar=True, # use our own covar calculation for consistency
)
if result.status > 0:
x, fx = result.params, result.fnorm
else:
x, fx = None, None
return x, fx
def _monitor(self, fcn, p, k, fnorm,
functkw=None, parinfo=None,
quiet=0, dof=None, **extra):
self._update(k, p, fnorm)
def _residuals(self, p, fjac=None):
if self._abort():
return -1, None
self.problem.setp(p)
# treat prior probabilities on the parameters as additional
# measurements
residuals = np.hstack(
(self.problem.residuals().flat, self.problem.parameter_residuals()))
# Tally costs for broken constraints
extra_cost = self.problem.constraints_nllf()
# Spread the cost over the residuals. Since we are smoothly increasing
# residuals as we leave the boundary, this should push us back into the
# boundary (within tolerance) during the lm fit.
residuals += np.sign(residuals) * (extra_cost / len(residuals))
return 0, residuals
class LevenbergMarquardtFit(FitBase):
"""
Levenberg-Marquardt optimizer.
"""
name = "Levenberg-Marquardt"
id = "lm"
settings = [('steps', 200), ('ftol', 1.5e-8), ('xtol', 1.5e-8)]
# LM also has
# gtol: orthogonality between Jacobian columns
# epsfcn: numerical derivative step size
# factor: initial radius
# diag: variable scale factors to bring them near 1
def solve(self, monitors=None, abort_test=None, mapper=None, **options):
from scipy import optimize
if abort_test is None:
abort_test = lambda: False
options = _fill_defaults(options, self.settings)
self._low, self._high = self.problem.bounds()
self._update = MonitorRunner(problem=self.problem,
monitors=monitors)
x0 = self.problem.getp()
maxfev = options['steps']*(len(x0)+1)
result = optimize.leastsq(self._bounded_residuals,
x0,
ftol=options['ftol'],
xtol=options['xtol'],
maxfev=maxfev,
epsfcn=1e-8,
full_output=True)
x, cov_x, info, mesg, success = result
if not 1 <= success <= 4:
# don't treat "reached maxfev" as a true failure
if "reached maxfev" in mesg:
# unless the x values are bad
if not np.all(np.isfinite(x)):
x = None
mesg = "Levenberg-Marquardt fit failed with bad values"
else:
x = None
self._cov = cov_x if x is not None else None
# compute one last time with x forced inside the boundary, and using
# problem.nllf as returned by other optimizers. We will ignore the
# covariance output and calculate it again ourselves. Not ideal if
# f is expensive, but it will be consistent with other optimizers.
if x is not None:
x += self._stray_delta(x)
self.problem.setp(x)
fx = self.problem.nllf()
else:
fx = None
return x, fx
def _bounded_residuals(self, p):
# Force the fit point into the valid region
stray = self._stray_delta(p)
stray_cost = np.sum(stray**2)
if stray_cost > 0:
stray_cost += 1e6
self.problem.setp(p + stray)
# treat prior probabilities on the parameters as additional
# measurements
residuals = np.hstack(
(self.problem.residuals().flat, self.problem.parameter_residuals()))
# Tally costs for straying outside the boundaries plus other costs
extra_cost = stray_cost + self.problem.constraints_nllf()
# Spread the cost over the residuals. Since we are smoothly increasing
# residuals as we leave the boundary, this should push us back into the
# boundary (within tolerance) during the lm fit.
residuals += np.sign(residuals) * (extra_cost / len(residuals))
return residuals
def _stray_delta(self, p):
"""calculate how far point is outside the boundary"""
return (np.where(p < self._low, self._low - p, 0)
+ np.where(p > self._high, self._high - p, 0))
def cov(self):
return self._cov
class SnobFit(FitBase):
name = "SNOBFIT"
id = "snobfit"
settings = [('steps', 200)]
def solve(self, monitors=None, mapper=None, **options):
options = _fill_defaults(options, self.settings)
# TODO: no mapper??
from snobfit.snobfit import snobfit
self._update = MonitorRunner(problem=self.problem,
monitors=monitors)
x, fx, _ = snobfit(self.problem, self.problem.getp(),
self.problem.bounds(),
fglob=0, callback=self._monitor)
return x, fx
def _monitor(self, k, x, fx, improved):
# TODO: snobfit does have a population...
self._update(step=k, point=x, value=fx,
population_points=[x], population_values=[fx])
class DreamModel(MCMCModel):
"""
DREAM wrapper for fit problems.
"""
def __init__(self, problem=None, mapper=None):
"""
Create a sampling from the multidimensional likelihood function
represented by the problem set using dream.
"""
# print "dream"
self.problem = problem
self.bounds = self.problem.bounds()
self.labels = self.problem.labels()
self.mapper = mapper if mapper else lambda p: list(map(self.nllf, p))
def log_density(self, x):
return -self.nllf(x)
def nllf(self, x):
"""Negative log likelihood of seeing models given *x*"""
# Note: usually we will be going through the provided mapper, and
# this function will never be called.
# print "eval",x; sys.stdout.flush()
return self.problem.nllf(x)
def map(self, pop):
# print "calling mapper",self.mapper
return -np.array(self.mapper(pop))
class DreamFit(FitBase):
name = "DREAM"
id = "dream"
settings = [('samples', int(1e4)), ('burn', 100), ('pop', 10),
('init', 'eps'), ('thin', 1), ('alpha', 0.01),
('outliers', 'none'), ('trim', False),
('steps', 0), # deprecated: use --samples instead
]
def __init__(self, problem):
FitBase.__init__(self, problem)
self.dream_model = DreamModel(problem)
self.state = None
def solve(self, monitors=None, abort_test=None, mapper=None, **options):
from .dream import Dream
if abort_test is None:
abort_test = lambda: False
options = _fill_defaults(options, self.settings)
if mapper:
self.dream_model.mapper = mapper
self._update = MonitorRunner(problem=self.dream_model.problem,
monitors=monitors)
population = initpop.generate(self.dream_model.problem, **options)
pop_size = population.shape[0]
draws, steps = int(options['samples']), options['steps']
if steps == 0:
steps = (draws + pop_size-1) // pop_size
# TODO: need a better way to announce number of steps
# maybe somehow print iteration # of # iters in the monitor?
print("# steps: %d, # draws: %d"%(steps, pop_size*steps))
population = population[None, :, :]
sampler = Dream(model=self.dream_model, population=population,
draws=pop_size * steps,
burn=pop_size * options['burn'],
thinning=options['thin'],
monitor=self._monitor, alpha=options['alpha'],
outlier_test=options['outliers'],
DE_noise=1e-6)
self.state = sampler.sample(state=self.state, abort_test=abort_test)
self._trimmed = self.state.trim_portion() if options['trim'] else 1.0
#print("trimming", options['trim'], self._trimmed)
self.state.mark_outliers(portion=self._trimmed)
self.state.keep_best()
self.state.title = self.dream_model.problem.name
# TODO: Temporary hack to apply a post-mcmc action to the state vector
# The problem is that if we manipulate the state vector before saving
# it then we will not be able to use the --resume feature. We can
# get around this by just not writing state for the derived variables,
# at which point we can remove this notice.
# TODO: Add derived/visible variable support to other optimizers
fn, labels = getattr(self.problem, 'derive_vars', (None, None))
if fn is not None:
self.state.derive_vars(fn, labels=labels)
visible_vars = getattr(self.problem, 'visible_vars', None)
if visible_vars is not None:
self.state.set_visible_vars(visible_vars)
integer_vars = getattr(self.problem, 'integer_vars', None)
if integer_vars is not None:
self.state.set_integer_vars(integer_vars)
x, fx = self.state.best()
# Check that the last point is the best point
#points, logp = self.state.sample()
#assert logp[-1] == fx
#print(points[-1], x)
#assert all(points[-1, i] == xi for i, xi in enumerate(x))
return x, -fx
def entropy(self, **kw):
return self.state.entropy(portion=self._trimmed, **kw)
def _monitor(self, state, pop, logp):
# Get an early copy of the state
self.state = self._update.history.uncertainty_state = state
step = state.generation
x, fx = state.best()
self._update(step=step, point=x, value=-fx,
population_points=pop, population_values=-logp)
return True
def stderr(self):
"""
Approximate standard error as 1/2 the 68% interval of the sample,
which is a more robust measure than the mean of the sample for
non-normal distributions.
"""
from .dream.stats import var_stats
vstats = var_stats(self.state.draw(portion=self._trimmed))
return np.array([(v.p68[1] - v.p68[0]) / 2 for v in vstats], 'd')
#def cov(self):
# # Covariance estimate from final 1000 points
# return np.cov(self.state.draw().points[-1000:])
def load(self, input_path):
from .dream.state import load_state, path_contains_saved_state
if path_contains_saved_state(input_path):
print("loading saved state from %s (this might take awhile) ..."
% (input_path,))
fn, labels = getattr(self.problem, 'derive_vars', (None, []))
self.state = load_state(input_path, report=100, derived_vars=len(labels))
else:
# Warn if mc files are not found on --resume path
warnings.warn("No mcmc found; ignoring --resume=%r"%input_path)
def save(self, output_path):
self.state.save(output_path)
def plot(self, output_path):
self.state.show(figfile=output_path, portion=self._trimmed)
self.error_plot(figfile=output_path)
def show(self):
pass
def error_plot(self, figfile):
# Produce error plot
import pylab
from . import errplot
# TODO: shouldn't mix calc and display!
res = errplot.calc_errors_from_state(problem=self.dream_model.problem,
state=self.state,
portion=self._trimmed)
if res is not None:
pylab.figure()
errplot.show_errors(res)
pylab.savefig(figfile + "-errors.png", format='png')
class Resampler(FitBase):
# TODO: why isn't cli.resynth using this?
def __init__(self, fitter):
self.fitter = fitter
raise NotImplementedError()
def solve(self, **options):
starts = options.pop('starts', 1)
restart = options.pop('restart', False)
x, fx = self.fitter.solve(**options)
points = _resampler(self.fitter, x, samples=starts,
restart=restart, **options)
self.points = points # save points for later plotting
return x, fx
def _resampler(fitter, xinit, samples=100, restart=False, **options):
"""
Refit the result multiple times with resynthesized data, building
up an array in Result.samples which contains the best fit to the
resynthesized data. *samples* is the number of samples to generate.
*fitter* is the (local) optimizer to use. **kw are the parameters
for the optimizer.
"""
x = xinit
points = []
try: # TODO: some solvers already catch KeyboardInterrupt
for _ in range(samples):
# print "== resynth %d of %d" % (i, samples)
fitter.problem.resynth_data()
if restart:
fitter.problem.randomize()
else:
fitter.problem.setp(x)
x, fx = fitter.solve(**options)
points.append(np.hstack((fx, x)))
# print self.problem.summarize()
# print "[chisq=%g]" % (nllf*2/self.problem.dof)
except KeyboardInterrupt:
# On keyboard interrupt we can declare that we are finished sampling
# without it being an error condition, so let this exception pass.
pass
finally:
# Restore the state of the problem
fitter.problem.restore_data()
fitter.problem.setp(xinit)
#fitter.problem.model_update() # setp does model update
return points
class FitDriver(object):
def __init__(self, fitclass=None, problem=None, monitors=None,
abort_test=None, mapper=None, **options):
self.fitclass = fitclass
self.problem = problem
self.options = options
self.monitors = monitors
self.abort_test = abort_test
self.mapper = mapper if mapper else lambda p: list(map(problem.nllf, p))
self.fitter = None
self.result = None
def fit(self, resume=None):
if hasattr(self, '_cov'):
del self._cov
if hasattr(self, '_stderr'):
del self._stderr
fitter = self.fitclass(self.problem)
if resume:
fitter.load(resume)
starts = self.options.get('starts', 1)
if starts > 1:
fitter = MultiStart(fitter)
t0 = perf_counter()
self.fitter = fitter
x, fx = fitter.solve(monitors=self.monitors,
abort_test=self.abort_test,
mapper=self.mapper,
**self.options)
self.time = perf_counter() - t0
self.result = x, fx
if x is not None:
self.problem.setp(x)
return x, fx
def clip(self):
"""
Force parameters within bounds so constraints are finite.
The problem is updated with the new parameter values.
Returns a list of parameter names that were clipped.
"""
labels = self.problem.labels()
values = self.problem.getp()
bounds = self.problem.bounds()
new_values = np.clip(values, bounds[0], bounds[1])
clipped = [name for name, old, new in zip(labels, values, new_values)
if old != new]
self.problem.setp(new_values)
return clipped
def entropy(self, method=None):
if hasattr(self.fitter, 'entropy'):
return self.fitter.entropy(method=method)
else:
from .dream import entropy
return entropy.cov_entropy(self.cov()), 0
def chisq(self):
if not hasattr(self, '_chisq'):
self._chisq = self.problem.chisq()
return self._chisq
def cov(self):
r"""
Return an estimate of the covariance of the fit.
Depending on the fitter and the problem, this may be computed from
existing evaluations within the fitter, or from numerical
differentiation around the minimum.
If the problem uses $\chi^2/2$ as its nllf, then the covariance
is derived from the Jacobian::
x = fit.problem.getp()
J = bumps.lsqerror.jacobian(fit.problem, x)
cov = bumps.lsqerror.jacobian_cov(J)
Otherwise, the numerical differentiation will use the Hessian
estimated from nllf::
x = fit.problem.getp()
H = bumps.lsqerror.hessian(fit.problem, x)
cov = bumps.lsqerror.hessian_cov(H)
"""
# Note: if fit() has not been run then self.fitter is None and in
# particular, self.fitter will not have a covariance matrix. In
# this case, the code will fall through to computing the covariance
# matrix directly from the problem. It will use the initial value
# stored in the problem parameters because results will also be None.
if not hasattr(self, '_cov'):
self._cov = None
if hasattr(self.fitter, 'cov'):
self._cov = self.fitter.cov()
#print("fitter cov", self._cov)
if self._cov is None:
# Use Jacobian if residuals are available because it is faster
# to compute. Otherwise punt and use Hessian. The has_residuals
# attribute should be True if present. It may be false if
# the problem defines a residuals method but doesn't really
# have residuals (e.g. to allow levenberg-marquardt to run even
# though it is not fitting a sum-square problem).
if hasattr(self.problem, 'has_residuals'):
has_residuals = self.problem.has_residuals
else:
has_residuals = hasattr(self.problem, 'residuals')
x = self.problem.getp() if self.result is None else self.result[0]
if has_residuals:
J = lsqerror.jacobian(self.problem, x)
#print("Jacobian", J)
self._cov = lsqerror.jacobian_cov(J)
else:
H = lsqerror.hessian(self.problem, x)
#print("Hessian", H)
self._cov = lsqerror.hessian_cov(H)
return self._cov
def stderr(self):
"""
Return an estimate of the standard error of the fit.
Depending on the fitter and the problem, this may be computed from
existing evaluations within the fitter, or from numerical
differentiation around the minimum.
"""
# Note: if fit() has not been run then self.fitter is None and in
# particular, self.fitter will not have a stderr method defined so
# it will compute stderr from covariance.
if not hasattr(self, '_stderr'):
self._stderr = None
if hasattr(self.fitter, 'stderr'):
self._stderr = self.fitter.stderr()
if self._stderr is None:
# If no stderr from the fitter then compute it from the covariance
self._stderr = self.stderr_from_cov()
return self._stderr
def stderr_from_cov(self):
"""
Return an estimate of standard error of the fit from covariance matrix.
Unlike stderr, which uses the estimate from the underlying
fitter (DREAM uses the MCMC sample for this), *stderr_from_cov*
estimates the error from the diagonal of the covariance matrix.
Here, the covariance matrix may have been estimated by the fitter
instead of the Hessian.
"""
if not hasattr(self, '_stderr_from_cov'):
self._stderr_from_cov = lsqerror.stderr(self.cov())
return self._stderr_from_cov
def show(self):
if hasattr(self.fitter, 'show'):
self.fitter.show()
if hasattr(self.problem, 'show'):
self.problem.show()
def show_err(self):
"""
Display the error approximation from the numerical derivative.
Warning: cost grows as the cube of the number of parameters.
"""
# TODO: need cheaper uncertainty estimate
# Note: error estimated from hessian diagonal is insufficient.
err = self.stderr_from_cov()
norm = np.sqrt(self.chisq())
print("=== Uncertainty from curvature: name"
" value(unc.) "
" value(unc./chi)) ===")
for k, v, dv in zip(self.problem.labels(), self.problem.getp(), err):
print("%40s %-15s %-15s" % (k,
format_uncertainty(v, dv),
format_uncertainty(v, dv/norm)))
print("="*75)
def show_cov(self):
cov = self.cov()
maxn = 1000 # max array dims to print
cov_str = np.array2string(
cov,
max_line_width=20*maxn, threshold=maxn*maxn,
precision=6, #suppress_small=True,
separator=', ',
)
print("=== Covariance matrix ===")
print(cov_str)
print("=========================")
def show_entropy(self, method=None):
print("Calculating entropy...")
S, dS = self.entropy(method=method)
print("Entropy: %s bits" % format_uncertainty(S, dS))
def save(self, output_path):
# print "calling driver save"
if hasattr(self.fitter, 'save'):
self.fitter.save(output_path)
if hasattr(self.problem, 'save'):
self.problem.save(output_path)
def load(self, input_path):
# print "calling driver save"
if hasattr(self.fitter, 'load'):
self.fitter.load(input_path)
if hasattr(self.problem, 'load'):
self.problem.load(input_path)
def plot(self, output_path, view=None):
# print "calling fitter.plot"
if hasattr(self.problem, 'plot'):
self.problem.plot(figfile=output_path, view=view)
if hasattr(self.fitter, 'plot'):
self.fitter.plot(output_path=output_path)
def _save_fit_cov(self, output_path):
model = getattr(self.problem, 'name', self.problem.__class__.__name__)
fitter = self.fitclass.id
cov = self.cov()
err = self.stderr_from_cov()
chisq = self.chisq()
state = {
'model': model,
'fitter': fitter,
}
def _fill_defaults(options, settings):
"""
Returns options dict with missing values filled from settings.
"""
result = dict(settings) # settings is a list of (key,value) pairs
result.update(options)
return result
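# Illustrative example (not part of the original module):
#     _fill_defaults({'steps': 50}, [('steps', 1000), ('pop', 10)])
#     # => {'steps': 50, 'pop': 10}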
FITTERS = []
FIT_AVAILABLE_IDS = []
FIT_ACTIVE_IDS = []
def register(fitter, active=True):
"""
Register a new fitter with bumps, if it is not already there.
*active* is False if you don't want it showing up in the GUI selector.
"""
# Check if already registered.
if fitter in FITTERS:
return
# Check that there is no other fitter of that name
if fitter.id in FIT_AVAILABLE_IDS:
raise ValueError("There is already a fitter registered as %r"
% fitter.id)
# Register the fitter.
FITTERS.append(fitter)
FIT_AVAILABLE_IDS.append(fitter.id)
# Make it "active" by listing it in the help menu.
if active:
FIT_ACTIVE_IDS.append(fitter.id)
# Register the fitters
register(SimplexFit, active=True)
register(DEFit, active=True)
register(DreamFit, active=True)
register(BFGSFit, active=True)
register(LevenbergMarquardtFit, active=True)
register(MPFit, active=True)
#register(PSFit, active=False)
register(PTFit, active=False)
#register(RLFit, active=False)
#register(SnobFit, active=False)
FIT_DEFAULT_ID = SimplexFit.id
assert FIT_DEFAULT_ID in FIT_ACTIVE_IDS
assert all(f in FIT_AVAILABLE_IDS for f in FIT_ACTIVE_IDS)
def fit(problem, method=FIT_DEFAULT_ID, verbose=False, **options):
"""
Simplified fit interface.
Given a fit problem, the name of a fitter and the fitter options,
it will run the fit and return the best value and standard error of
the parameters. If *verbose* is true, then the console monitor will
be enabled, showing progress through the fit and showing the parameter
standard error at the end of the fit, otherwise it is completely
silent.
Returns an *OptimizeResult* object containing "x" and "dx". The
dream fitter also includes the "state" object, allowing for more
detailed uncertainty analysis. Optimizer information such as the
stopping condition and the number of function evaluations are not
yet included.
To run in parallel (with multiprocessing and dream)::
from bumps.mapper import MPMapper
mapper = MPMapper.start_mapper(problem, None, cpu=0) #cpu=0 for all CPUs
result = fit(problem, method="dream", mapper=mapper)
"""
from scipy.optimize import OptimizeResult
#verbose = True
if method not in FIT_AVAILABLE_IDS:
raise ValueError("unknown method %r not one of %s"
% (method, ", ".join(sorted(FIT_ACTIVE_IDS))))
for fitclass in FITTERS:
if fitclass.id == method:
break
monitors = None if verbose else [] # default is step monitor
driver = FitDriver(
fitclass=fitclass, problem=problem, monitors=monitors,
**options)
driver.clip() # make sure fit starts within domain
x0 = problem.getp()
x, fx = driver.fit()
problem.setp(x)
dx = driver.stderr()
if verbose:
print("final chisq", problem.chisq_str())
driver.show_err()
result = OptimizeResult(
x=x, dx=driver.stderr(),
fun=fx,
success=True, status=0, message="successful termination",
#nit=0, # number of iterations
#nfev=0, # number of function evaluations
#njev, nhev # jacobian and hessian evaluations
#maxcv=0, # max constraint violation
)
if hasattr(driver.fitter, 'state'):
result.state = driver.fitter.state
return result |
the-stack_0_9859 | #
# Copyright 2020 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import namedtuple
import re
import pytest
import pandas as pd
from pandas.api.types import is_datetime64_any_dtype, is_float_dtype, is_integer_dtype, is_string_dtype
from PIL import Image, ImageChops
import wave
from pardata.dataset import Dataset
from pardata.loaders import Loader
from pardata.loaders import FormatLoaderMap
from pardata.loaders._format_loader_map import load_data_files
from pardata.loaders.audio import WaveLoader
from pardata.loaders.image import PillowLoader
from pardata.loaders.text import PlainTextLoader
from pardata.loaders.table import CSVPandasLoader
class TestBaseLoader:
"Test loaders._base.*"
def test_abstract(self):
"Loader is an abstract class."
with pytest.raises(TypeError) as e:
Loader()
assert 'abstract class' in str(e.value)
def test_load(self, tmp_path):
"Loader.load() must be overridden upon Loader being inherited."
class MyLoader(Loader):
pass
# Error out when instantiating MyLoader because load method is not overridden
with pytest.raises(TypeError) as e:
MyLoader()
assert "Can't instantiate abstract class MyLoader with abstract method" in str(e.value)
class MyLoader(Loader):
def load(self, path, options):
# Calling the parent's load() method shouldn't lead to error
super().load(path, options)
# This line shouldn't error out even though it calls an abstract method in its parent
MyLoader().load(tmp_path, None)
def test_check_path(self):
"Test Loader.check_path method."
class MyLoader(Loader):
def load(self):
pass
loader = MyLoader()
integer = 1
with pytest.raises(TypeError) as e:
loader.check_path(integer)
assert str(e.value) == f'Unsupported path type "{type(integer)}".'
class TestFormatLoaderMap:
"""Test loaders._format_loader.*. Various path types (regex and plain) are tested in test_datasets.py, since it is
easier and more natural to test there, and those tests already cover path types sufficiently.
"""
def test_register_non_loader(self):
"Test when it registers a non-Loader instance."
flm = FormatLoaderMap()
with pytest.raises(TypeError) as e:
flm.register_loader('some-format', 'some-string')
assert str(e.value) == 'loader "some-string" must be a Loader instance.'
def test_load_non_existing_format(self, tmp_path):
"Test loading a non-existing format."
with pytest.raises(RuntimeError) as e:
load_data_files('nonsense', tmp_path, tmp_path)
assert str(e.value) == 'The format loader map does not specify a loader for format "nonsense".'
def test_load_wrong_format_type(self, tmp_path):
"Test loading a non-existing format."
with pytest.raises(TypeError) as e:
load_data_files(0x348f, tmp_path, tmp_path)
assert str(e.value) == 'Parameter "fmt" must be a string or a dict, but it is of type "<class \'int\'>".'
def test_load_unknown_type_of_path(self, tmp_path):
"Test loading an unknown type of the parameter ``path``."
with pytest.raises(TypeError) as e:
load_data_files('audio/wav', tmp_path, 12)
assert str(e.value) == f'Unsupported type of the "path" parameter: {type(12)}.'
def test_load_unknown_path_type(self, tmp_path):
"Test loading an unknown ``path[type]``."
with pytest.raises(ValueError) as e:
load_data_files('image/png', tmp_path, {'type': 'nonsense'})
assert str(e.value) == 'Unknown type of path "nonsense".'
class TestAudioLoaders:
def test_wave_loader(self, bell_sound):
"Test the normal functionality of WaveLoader."
with wave.open(str(bell_sound), 'rb') as local:
local_content = local.readframes(local.getnframes())
with WaveLoader().load(bell_sound, {}) as loaded:
loaded_content = loaded.readframes(loaded.getnframes())
assert local_content == loaded_content
def test_wave_loader_no_path(self):
"Test WaveLoader when fed in with non-path."
integer = 1
with pytest.raises(TypeError) as e:
WaveLoader().load(integer, {})
assert str(e.value) == f'Unsupported path type "{type(integer)}".'
class TestImageLoaders:
def test_image_pillow_loader(self, saturn_image):
"Test the normal functionality of PillowLoader."
local = Image.open(saturn_image)
loaded = PillowLoader().load(saturn_image, {})
assert ImageChops.difference(local, loaded).getbbox() is None
def test_image_pillow_loader_no_path(self):
"Test PillowLoader when fed in with non-path."
integer = 1
with pytest.raises(TypeError) as e:
PillowLoader().load(integer, {})
assert str(e.value) == f'Unsupported path type "{type(integer)}".'
class TestTextLoaders:
def test_plain_text_loader_no_path(self):
"Test PlainTextLoader when fed in with non-path."
integer = 1
with pytest.raises(TypeError) as e:
PlainTextLoader().load(integer, {})
assert str(e.value) == f'Unsupported path type "{type(integer)}".'
def test_plain_text_loader_bad_encoding(self, tmp_path):
"Test PlainTextLoader when the encoding is nonsense."
text_file = tmp_path / 'some-text.txt'
text_file.write_text("I'm a text file :)", encoding='utf-8')
with pytest.raises(LookupError) as e:
PlainTextLoader().load(text_file, {'encoding': "non-encoding"})
assert str(e.value) == 'unknown encoding: non-encoding'
def test_plain_text_loader_incorrect_encoding(self, tmp_path):
"Test PlainTextLoader when the encoding does not match."
text_file = tmp_path / 'some-text.txt'
text_file.write_text("I'm a text file :)", encoding='utf-8')
with pytest.raises(UnicodeError) as e:
PlainTextLoader().load(text_file, {'encoding': "utf-16"})
assert str(e.value) == 'UTF-16 stream does not start with BOM'
class TestTableLoaders:
def test_csv_pandas_loader(self, tmp_path, noaa_jfk_schema):
"Test the basic functioning of CSVPandasLoader."
dataset = Dataset(noaa_jfk_schema, tmp_path, mode=Dataset.InitializationMode.DOWNLOAD_AND_LOAD)
data = dataset.data['jfk_weather_cleaned']
assert isinstance(data, pd.DataFrame)
assert data.shape == (75119, 16)
dataset.delete()
Column = namedtuple('Column', ('name', 'dtype', 'check'))
@pytest.mark.parametrize('columns', # a list of Column (column name, specified data type, check function)
[
# Only one column specified
[Column('DATE', 'datetime', is_datetime64_any_dtype)],
[Column('DATE', 'str', is_string_dtype)],
[Column('DATE', 'string', is_string_dtype)],
[Column('HOURLYPressureTendencyCons', 'float', is_float_dtype)],
# Two columns specified
[Column('DATE', 'datetime', is_datetime64_any_dtype),
Column('HOURLYPressureTendencyCons', 'float', is_float_dtype)],
# No column specified (let Pandas autodetect dtype)
[Column('DATE', None, is_string_dtype),
Column('HOURLYPressureTendencyCons', None, is_integer_dtype),
Column('HOURLYVISIBILITY', None, is_float_dtype)],
])
def test_csv_pandas_column_data_types(self, tmp_path, noaa_jfk_schema, columns):
"Test the column data types."
assert len(columns) > 0 # Sanity check, make sure columns are there
# Clear columns
column_dict = noaa_jfk_schema['subdatasets']['jfk_weather_cleaned']['format']['options']['columns'] = {}
# Update column dictionary as specified
for col in columns:
if col.dtype is not None:
column_dict[col.name] = col.dtype
dataset = Dataset(noaa_jfk_schema, tmp_path, mode=Dataset.InitializationMode.DOWNLOAD_AND_LOAD)
data = dataset.data['jfk_weather_cleaned']
for col in columns:
assert col.check(data.dtypes[col.name])
@pytest.mark.parametrize(('err_column', # (column name, specified data type, default dtype checked for conversion)
'other_columns'), # (column name, specified data type, None)
[
# Only one unsupported specified
(Column('DATE', 'float', 'str'), []),
(Column('HOURLYVISIBILITY', 'int', 'float'), []),
# Some supported specified
(Column('DATE', 'float', 'str'), [Column('HOURLYPressureTendencyCons', 'int', None)]),
(Column('HOURLYVISIBILITY', 'int', 'float'), [Column('DATE', 'datetime', None)]),
# More than one unsupported specified. The error that raises the exception should be
# put as err_column.
(Column('DATE', 'float', 'str'), [Column('HOURLYVISIBILITY', 'int', 'float')]),
])
def test_csv_pandas_column_unsupported_data_types(self, tmp_path, noaa_jfk_schema,
err_column, other_columns):
"Test column data types when they are unsupported."
# Clear columns
column_dict = noaa_jfk_schema['subdatasets']['jfk_weather_cleaned']['format']['options']['columns'] = {}
# Update column dictionary as specified
for col in other_columns:
if col.dtype is not None:
column_dict[col.name] = col.dtype
column_dict[err_column.name] = err_column.dtype
with pytest.raises(ValueError) as e:
Dataset(noaa_jfk_schema, tmp_path, mode=Dataset.InitializationMode.DOWNLOAD_AND_LOAD)
# Pandas is a 3rd-party library. We don't check for the exact wording but only some keywords
# Examples:
# ValueError: cannot safely convert passed user dtype of int64 for float64 dtyped data in column 1
# ValueError: could not convert string to float: '2010-01-01 01:00:00'
assert 'convert' in str(e.value)
for t in (err_column.dtype, err_column.check):
assert re.search(rf"{t}(\d*|ing)\b", str(e.value)) # "ing" is for "str'ing'"
def test_csv_pandas_no_delimiter(self, tmp_path, noaa_jfk_schema):
"Test when no delimiter is given."
# Remove the delimiter option
del noaa_jfk_schema['subdatasets']['jfk_weather_cleaned']['format']['options']['delimiter']
data = Dataset(noaa_jfk_schema, tmp_path,
mode=Dataset.InitializationMode.DOWNLOAD_AND_LOAD).data['jfk_weather_cleaned']
assert len(data.columns) == 16 # number of columns remain the same
@pytest.mark.parametrize('delimiter', ('\t', ' ', '|', ';'))
def test_csv_pandas_delimiter(self, tmp_path, noaa_jfk_schema, delimiter):
"Test common delimiter settings. Note that the case of comma has been tested in ``test_csv_pandas_loader``."
del noaa_jfk_schema['subdatasets']['jfk_weather_cleaned']['format']['options']['columns']
# Change delimiter to tab, |, ;, space
noaa_jfk_schema['subdatasets']['jfk_weather_cleaned']['format']['options']['delimiter'] = delimiter
data = Dataset(noaa_jfk_schema, tmp_path,
mode=Dataset.InitializationMode.DOWNLOAD_AND_LOAD).data['jfk_weather_cleaned']
# None of these delimiters exist in the file, number of columns should be 1.
assert len(data.columns) == 1
def test_csv_pandas_loader_no_path(self):
"Test CSVPandasLoader when fed in with non-path."
integer = 1
with pytest.raises(TypeError) as e:
CSVPandasLoader().load(integer, {})
assert str(e.value) == f'Unsupported path type "{type(integer)}".'
def test_csv_pandas_loader_non_option(self, tmp_path, noaa_jfk_schema):
"Test CSVPandasLoader when None option is passed."
del noaa_jfk_schema['subdatasets']['jfk_weather_cleaned']['format']['options']
dataset = Dataset(noaa_jfk_schema, tmp_path, mode=Dataset.InitializationMode.DOWNLOAD_AND_LOAD)
data = dataset.data['jfk_weather_cleaned']
assert isinstance(data, pd.DataFrame)
assert len(data) == 75119
def test_csv_pandas_loader_no_encoding(self, tmp_path, noaa_jfk_schema):
"Test CSVPandasLoader when no encoding is specified."
del noaa_jfk_schema['subdatasets']['jfk_weather_cleaned']['format']['options']['encoding']
self.test_csv_pandas_loader(tmp_path, noaa_jfk_schema)
def test_csv_pandas_header(self, tmp_path, noaa_jfk_schema):
"Test CSVPandasLoader header options"
noaa_jfk_schema['subdatasets']['jfk_weather_cleaned']['format']['options']['no_header'] = True
noaa_dataset = Dataset(noaa_jfk_schema, tmp_path, mode=Dataset.InitializationMode.DOWNLOAD_ONLY)
with pytest.raises(ValueError) as exinfo: # Pandas should error from trying to read string as another dtype
noaa_dataset.load()
assert('could not convert string to float' in str(exinfo.value))
noaa_dataset.delete()
false_test_cases = [False, '', None] # These should all be treated as False
for case in false_test_cases:
noaa_jfk_schema['subdatasets']['jfk_weather_cleaned']['format']['options']['no_header'] = case
self.test_csv_pandas_loader(tmp_path, noaa_jfk_schema)
del noaa_jfk_schema['subdatasets']['jfk_weather_cleaned']['format']['options']['no_header']
self.test_csv_pandas_loader(tmp_path, noaa_jfk_schema)
|
the-stack_0_9862 | # -*- coding: utf-8 -*-
"""
flask_jsondash.db
~~~~~~~~~~~~~~~~~~~~~~~~~~
A translation adapter for transparent operations between storage types.
:copyright: (c) 2016 by Chris Tabor.
:license: MIT, see LICENSE for more details.
"""
import json
from datetime import datetime as dt
from pymongo import MongoClient
from flask_jsondash import mongo_adapter, settings
DB_NAME = settings.ACTIVE_DB
def reformat_data(data, c_id):
"""Format/clean existing config data to be re-inserted into database.
Args:
data (dict): The chart data to override with standard params.
Returns:
data (dict): The in-place updated dict.
"""
data.update(dict(id=c_id, date=dt.now()))
return data
def format_charts(data):
"""Form chart POST data for JSON usage within db.
Args:
data (dict): The request.form data to format.
Returns:
modules (list): A list of json-decoded dictionaries.
"""
modules = []
for item in data:
if item.startswith('module_'):
val_json = json.loads(data[item])
modules.append(val_json)
return modules
def get_db_handler():
"""Get the appropriate db adapter.
Returns:
object: The instantiated database handler
"""
if DB_NAME == 'mongo':
client = MongoClient(host=settings.DB_URI, port=settings.DB_PORT)
conn = client[settings.DB_NAME]
coll = conn[settings.DB_TABLE]
return mongo_adapter.Db(client, conn, coll, format_charts)
else:
raise NotImplementedError(
'Mongodb is the only supported database right now.')
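# Illustrative usage sketch (not part of the original module; assumes
# settings.ACTIVE_DB == 'mongo' and a reachable MongoDB instance):
#     adapter = get_db_handler()
#     modules = format_charts({'module_1': '{"name": "chart1"}', 'other': 'x'})
#     # modules == [{'name': 'chart1'}]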
|
the-stack_0_9863 | from django.urls import path
from .views import BookmarkDetail, BookmarkDelete, BookmarkCreate, BookmarkUpdate, BookmarkList
# namespace = a named scope for URL pattern names
# Used to keep this app's URL pattern names from clashing with those of other apps
# Before version 2.x, a separate namespace argument existed
app_name = 'bookmark'
urlpatterns = [
# Function-based view: use the name as-is
# Class-based view: Name.as_view()
path('', BookmarkList.as_view(), name='index'),
path('create/', BookmarkCreate.as_view(), name='create'),
path('delete/<int:pk>/', BookmarkDelete.as_view(), name='delete'),
path('update/<int:pk>/', BookmarkUpdate.as_view(), name='update'),
path('detail/<int:pk>/', BookmarkDetail.as_view(), name='detail'),
]
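# Illustrative examples (not part of the original module): with app_name set
# to 'bookmark', these patterns are reversed through the namespace, e.g.
#     reverse('bookmark:detail', args=[1])
# or in a template:
#     {% url 'bookmark:update' bookmark.pk %}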
|
the-stack_0_9866 | # Colorama module: pip install colorama
from colorama import init, Fore, Style
# Selenium module imports: pip install selenium
from selenium import webdriver
from selenium.common.exceptions import TimeoutException as TE
from selenium.common.exceptions import ElementClickInterceptedException as ECIE
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait as WDW
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
# Python default import.
from time import sleep
from glob import glob
import os
"""Colorama module constants."""
init(convert=True) # Init colorama module.
red = Fore.RED # Red color.
green = Fore.GREEN # Green color.
yellow = Fore.YELLOW # Yellow color.
reset = Style.RESET_ALL # Reset color attribute.
class Settings(object):
"""Contains all settings of upload."""
def __init__(self, file: str, filetype: str) -> None:
"""Open Settings JSON file and read it."""
self.filetype = filetype[1:] # Type of data file.
if self.filetype == 'json':
from json import loads
self.file = loads(open(file, encoding='utf-8').read())['nft']
self.len_file = len(self.file) # Length of file.
elif self.filetype == 'csv':
self.file = open(file, encoding='utf-8').read().splitlines()[1:]
self.len_file = len(self.file) # Length of file.
elif self.filetype == 'xlsx':
from pandas import read_excel
self.file = read_excel(file) # Read Excel (XLSX) file.
self.len_file = self.file.shape[0] # Get number of rows.
self.file = self.file.to_dict() # Transform XLSX to dict.
else:
import sys
sys.exit(f'{red}File extension is not support.{reset}')
def create_parameters(self, parameters: list) -> None:
"""Create parameters."""
# Upload:
self.file_path = str(parameters[0])
self.nft_name = str(parameters[1])
self.external_link = parameters[2]
self.description = str(parameters[3])
self.collection = str(parameters[4])
self.properties: list = self.type_parameters(
parameters[5], 2) # [[type, name], ...]
self.levels: list = self.type_parameters(
parameters[6], 3) # [[name, from, to], ...]
self.stats: list = self.type_parameters(
parameters[7], 3) # [[name, from, to], ...]
self.unlockable_content: list = parameters[8] # [bool, text]
self.explicit_and_sensitive_content: bool = parameters[9]
self.supply: int = parameters[10]
self.blockchain: str = parameters[11]
# Sell:
self.price: int = parameters[12]
self.quantity: int = parameters[13]
def type_parameters(self, parameters: list, _range: int) -> list:
"""Change element's type of some parameters."""
if len(parameters) > 0:
if type(parameters[0]) == list:
for parameter in range(len(parameters)):
for element in range(_range):
parameters[parameter][element] = \
str(parameters[parameter][element])
else:
for element in range(_range):
parameters[element] = str(parameters[element])
return parameters
def get_nft(self, nft: int) -> None:
"""Get all settings of NFT."""
self.nft = nft
if self.filetype == 'json':
self.json_file()
def type_checker(self, nft_settings: list) -> list:
"""Type with correctly string element in list."""
from ast import literal_eval
_list = []
nft_settings = nft_settings.split(';') \
if self.filetype == 'csv' else nft_settings
for element in nft_settings:
element = str(element).strip() # Remove whitespaces.
# Check if element is a list like.
if element != '':
if element[0] == '[' and element[len(element) - 1] == ']':
element = literal_eval(element)
# Check if element is a boolean like.
elif element == 'True' or element == 'False':
element = bool(element)
# Check if element is a integer like.
elif element.isdigit():
element = int(element)
elif element.replace('.', '').isdigit():
element = float(element)
_list.append(element)
return _list
def json_file(self) -> None:
"""Transform JSON list/dict to a whole list."""
nft_settings = self.file[self.nft]
# Get key's value from the NFT data.
nft_settings = [nft_settings[settings] for settings in nft_settings]
_list = [] # Init a new list.
for element in nft_settings: # Take each element in list.
_list.append(self.dict_checker(element)) # Check element.
# Create parameters from list.
self.create_parameters(_list)
def dict_checker(self, element):
"""Check if element is a dict or not."""
if type(element) == list: # If element is a list.
final_list = [] # Final list that will be return.
for item in element: # For each item in this list.
temp_list = [] # Store all key's value.
if type(item) == dict: # If element is a dict.
for key in item: # For each key in dict (item).
temp_list.append(item.get(key)) # Get key's value.
else:
temp_list = item # Do nothing.
final_list.append(temp_list) # Append each temp list.
return final_list
else:
return element # Else return the same element.
class Opensea(object):
"""Main class of Opensea automatic uploader."""
def __init__(self, password: str, recovery_phrase: str) -> None:
"""Get the password and the recovery_phrase from the text file."""
# Get recovery phrase of MetaMask wallet.
self.recovery_phrase = recovery_phrase
self.password = password # Get new password.
# Used files path.
self.webdriver_path = 'assets/chromedriver.exe'
self.metamask_extension_path = 'assets/MetaMask.crx'
self.driver = self.webdriver() # Start new webdriver.
# Opensea URLs.
self.login_url = 'https://opensea.io/login?referrer=%2Fasset%2Fcreate'
self.create_url = 'https://opensea.io/asset/create'
def webdriver(self):
"""Start webdriver and return state of it."""
options = webdriver.ChromeOptions() # Configure options for Chrome.
options.add_extension(self.metamask_extension_path) # Add extension.
# options.add_argument("headless") # Headless ChromeDriver.
options.add_argument("log-level=3") # No logs is printed.
options.add_argument("--mute-audio") # Audio is muted.
driver = webdriver.Chrome(self.webdriver_path, options=options)
driver.maximize_window() # Maximize window to reach all elements.
return driver
def element_clickable(self, element: str) -> None:
"""Click on element if it's clickable using Selenium."""
try:
WDW(self.driver, 5).until(EC.element_to_be_clickable(
(By.XPATH, element))).click()
except ECIE:
# Sometimes the element is not clickable.
self.driver.execute_script(
"arguments[0].click();",
self.driver.find_element_by_xpath(element))
def element_visible(self, element: str, timer: int = 5):
"""Check if element is visible using Selenium."""
return WDW(self.driver, timer).until(EC.visibility_of_element_located(
(By.XPATH, element)))
def element_send_keys(self, element: str, keys: str) -> None:
"""Send keys to element if it's visible using Selenium."""
try:
WDW(self.driver, 5).until(EC.visibility_of_element_located(
(By.XPATH, element))).send_keys(keys)
except TE:
# Some elements are not visible but are still present.
WDW(self.driver, 5).until(EC.presence_of_element_located(
(By.XPATH, element))).send_keys(keys)
def clear_text(self, element) -> None:
"""Clear text from input."""
self.element_clickable(element)
webdriver.ActionChains(self.driver).key_down(Keys.CONTROL).perform()
webdriver.ActionChains(self.driver).send_keys('a').perform()
webdriver.ActionChains(self.driver).key_up(Keys.CONTROL).perform()
def window_handles(self, window_number: int) -> None:
"""Check for window handles and wait until a specific tab is opened."""
wait = 0
while True:
# If asked tab is opened.
sleep(2)
if len(self.driver.window_handles) == window_number + 1:
return True
elif wait == 10:
return False
wait += 1
def metamask(self) -> None:
"""Login to MetaMask extension."""
print('Login to MetaMask extension.', end=' ')
# Switch to MetaMask extension's tab.
self.driver.switch_to.window(self.driver.window_handles[0])
# Refresh MetaMask extension's tab while extension is not loaded.
while True:
# If page is fully loaded.
if 'initialize' in self.driver.current_url:
break
self.driver.refresh() # Reload page.
sleep(1) # Wait 1 second.
# Click on "Start" button.
self.element_clickable(
'//*[@id="app-content"]/div/div[3]/div/div/div/button')
# Click on "Import wallet" button.
self.element_clickable('//*[@id="app-content"]/div/div[3]/div/div/'
'div[2]/div/div[2]/div[1]/button')
# Click on "I agree" button.
self.element_clickable('//*[@id="app-content"]/div/div[3]/div/div/'
'div/div[5]/div[1]/footer/button[2]')
# Input recovery phrase.
self.element_send_keys('//*[@id="app-content"]/div/div[3]/div/div/'
'form/div[4]''/div[1]/div/input',
self.recovery_phrase)
# Input new password.
self.element_send_keys('//*[@id="password"]', self.password)
self.element_send_keys('//*[@id="confirm-password"]', self.password)
# Check "I have read and agree to the..." checkbox.
self.element_clickable(
'//*[@id="app-content"]/div/div[3]/div/div/form/div[7]/div')
# Click on "Import" button.
self.element_clickable(
'//*[@id="app-content"]/div/div[3]/div/div/form/button')
# Click on "All done" button.
self.element_clickable(
'//*[@id="app-content"]/div/div[3]/div/div/button')
print(f'{green}Logged to Metamask extension.{reset}')
def opensea_login(self) -> None:
"""Login to Opensea using Metamask."""
print('Login to Opensea.', end=' ')
self.driver.switch_to.window(self.driver.window_handles[1]) \
if self.window_handles(1) else self.retry_login(0)
self.driver.get(self.login_url) # Go to Opensea login URL.
# Click on "Metamask" button in list of wallet.
ul = len(self.element_visible(
'//*[@id="__next"]/div[1]/main/div/div/div/div[2]/ul'
).find_elements_by_tag_name('li'))
for li in range(ul):
li += 1 # Add 1 to start li element at li[1].
# Check if button text contains "MetaMask".
if self.element_visible(
'//*[@id="__next"]/div[1]/main/div/div/div/div[2]/ul/li'
f'[{li}]/button/div[2]/span').text == 'MetaMask':
# Click on Metamask button.
self.element_clickable('//*[@id="__next"]/div[1]/main/div/div'
f'/div/div[2]/ul/li[{li}]/button')
break
# Switch on MetaMask popup tab.
self.driver.switch_to.window(self.driver.window_handles[2]) \
if self.window_handles(2) else self.retry_login(0)
# Click on "Next" button.
self.element_clickable('//*[@id="app-content"]/div/div[3]/div/'
'div[2]/div[4]/div[2]/button[2]')
# Click on "Connect" button.
self.element_clickable('//*[@id="app-content"]/div/div[3]/div/'
'div[2]/div[2]/div[2]/footer/button[2]')
self.metamask_sign()
# Reload page and retry to log in to Opensea if failed.
try:
WDW(self.driver, 10).until(EC.url_to_be(self.create_url))
print(f'{green}Logged to Opensea.{reset}\n')
except TE:
self.retry_login()
def metamask_sign(self) -> None:
"""Metamask confirm connection."""
# Switch on MetaMask popup tab.
self.driver.switch_to.window(self.driver.window_handles[2]) \
if self.window_handles(2) else self.retry_login(0)
# Click on "Sign" button.
self.element_clickable(
'//*[@id="app-content"]/div/div[3]/div/div[3]/button[2]')
# Switch back to Opensea tab.
self.driver.switch_to.window(self.driver.window_handles[1]) \
if self.window_handles(1) else self.retry_login(0)
def retry_login(self, value: int = 1) -> None:
"""Retry to log in to Opensea after an error occured."""
print(f'{red}Failed to login to Opensea, Retrying.{reset}')
if value == 0:
self.opensea_login()
else:
self.driver.get(self.create_url)
self.metamask_sign()
def opensea_upload(self, number: int) -> None:
"""Upload multiple NFTs automatically on Opensea."""
try:
print(f'Uploading {settings.nft_name}/{len(settings.file)}.',
end=' ')
# Go to Opensea login URL.
self.driver.get(self.create_url + '?enable_supply=true')
# Upload NFT file.
if not os.path.exists(settings.file_path) \
or settings.file_path == '':
raise TE('File doesn\'t exist.')
self.element_send_keys('//*[@id="media"]', settings.file_path)
# Input NFT name.
if settings.nft_name == '':
raise TE('Missing NFT Name.')
self.element_send_keys('//*[@id="name"]', settings.nft_name)
# Input external link.
if settings.external_link != '':
self.element_send_keys(
'//*[@id="external_link"]', settings.external_link)
# Input description.
if settings.description != '':
self.element_send_keys(
'//*[@id="description"]', settings.description)
# Input collection and select it.
if settings.collection != '':
self.element_send_keys(
'//*[@id="__next"]/div[1]/main/div/div/section/div/form/'
'div[5]/div/div[2]/input', settings.collection)
try:
sleep(2)
self.element_clickable(
'//*[contains(@id, "tippy")]/div/div/div/ul/li/button')
except Exception:
raise TE('Collection doesn\'t exist')
# Add properties, levels and stats.
parameters = [settings.properties, settings.levels, settings.stats]
for index in range(3):
if len(parameters[index]) > 0:
# Change element from list of string to list of list.
# https://github.com/maximedrn/opensea_automatic_uploader/issues/1
if type(parameters[index][0]) != list:
parameters[index] = [parameters[index]]
# Click on "+" button for properties, levels and stats.
self.element_clickable(
'//*[@id="__next"]/div[1]/main/div/div/section/div/'
f'form/section/div[{index + 1}]/div/div[2]/button')
parameter = 0
for element in parameters[index]:
# If there are more than 1 element.
if parameter > 0:
# Click on "Add more" button.
self.element_clickable(
f'/html/body/div[{index + 2}]/div/div/div/'
'section/button')
parameter += 1
self.element_send_keys(
f'/html/body/div[{index + 2}]/div/div/div/section/'
f'table/tbody/tr[{parameter}]/td[1]/div/div/input',
element[0])
if len(element) == 3:
actual_element = (
f'/html/body/div[{index + 2}]/div/div/div/'
f'section/table/tbody/tr[{parameter}]/td[3]'
'/div/div/input')
self.clear_text(actual_element)
self.element_send_keys(actual_element, element[2])
actual_element = (
f'/html/body/div[{index + 2}]/div/div/div/section/'
f'table/tbody/tr[{parameter}]/td[2]/div/div/input')
self.clear_text(actual_element)
self.element_send_keys(actual_element, element[1])
# Click on "Save" button.
self.element_clickable(f'/html/body/div[{index + 2}]/div'
'/div/div/footer/button')
# Click on "Unlockable Content" switch if true.
if settings.unlockable_content != '':
if len(settings.unlockable_content) > 0:
if settings.unlockable_content[0]:
self.element_send_keys(
'//*[@id="unlockable-content-toggle"]', Keys.ENTER)
# Send text content.
self.element_send_keys(
'//*[@id="__next"]/div[1]/main/div/div/section/div'
'/form/section/div[4]/div[2]/textarea',
settings.unlockable_content[1])
# Click on "Explicit & Sensitive Content" switch if true.
if settings.explicit_and_sensitive_content != '':
if settings.explicit_and_sensitive_content:
self.element_send_keys(
'//*[@id="explicit-content-toggle"]', Keys.ENTER)
# Set Blockchain.
if settings.blockchain != '':
blockchain = self.element_visible('//*[@id="chain"]')
if blockchain.get_attribute('value') != settings.blockchain:
# Click on bottom sheet.
self.element_clickable(
'//*[@id="__next"]/div[1]/main/div/div'
'/section/div/form/div[7]/div/div[2]')
                    # Get length of elements list.
ul = len(self.element_visible(
'//*[@id="tippy-9"]/div/div/div/ul'
).find_elements_by_tag_name('li'))
# Find Blockchain in list.
for li in range(ul):
li += 1 # Add 1 to start li element at li[1].
# Check if span text contains Blockchain.
if self.element_visible(
f'//*[@id="tippy-9"]/div/div/div/ul/li[{li}]'
'/button/div[2]/span[1]').text \
== settings.blockchain:
# Click on specific Blockchain button.
self.element_clickable('//*[@id="tippy-9"]/div/div'
f'/div/ul/li[{li}]/button')
break
sleep(2)
# Set number of supply.
if settings.supply != "" and type(settings.supply) == int:
if (
"?enable_supply=true" in self.driver.current_url
and settings.supply > 1
):
# Set supply modifying value.
self.driver.execute_script(
f'arguments[0].value = "";',
self.element_visible('//*[@id="supply"]'))
self.element_send_keys(
'//*[@id="supply"]', settings.supply)
sleep(2)
# Click on "Create" button.
self.element_clickable('//*[@id="__next"]/div[1]/main/div/div/'
'section/div/form/div/div[1]/span/button')
sleep(10)
# Check if done.
self.element_visible('/html/body/div[5]/div/div/div/div[1]', 10)
print(f'{green}Done.{reset}')
# If price has been defined.
if settings.price > 0:
self.sell_nft() # Sell NFT.
else:
print(f'{red}NFT sale cancelled.{reset}')
except TE as error:
print(f'{red}Failed: {error}{reset}')
def sell_nft(self) -> None:
"""Set a price for the NFT, etc."""
try:
# Get sell page for the NFT.
self.driver.get(self.driver.current_url + '/sell')
if settings.supply > 1 and \
settings.blockchain.lower() == 'polygon':
# Input number of supplies to sell.
if settings.quantity <= settings.supply:
self.driver.execute_script(
f'arguments[0].value = "";',
self.element_visible('//*[@id="quantity"]'))
self.element_send_keys(
'//*[@id="quantity"]', str(settings.quantity))
else:
raise TE('Quantity must be less or equal to supply.')
if settings.supply == 1 and \
settings.blockchain.lower() != 'polygon':
# Input Ethereum price.
self.element_send_keys(
'//*[@id="__next"]/div[1]/main/div/div/div[3]/div/div'
'[2]/div/div[1]/form/div[2]/div/div[2]/div/div/div[2]'
'/input', str(settings.price))
else:
# Input price.
self.element_send_keys(
'//input[@name="price"]', str(settings.price))
# Click on "Complete listing" button.
try:
self.element_clickable('//button[@type="submit"]')
except Exception:
                raise TE('An error occurred. Submit button can\'t be clicked')
# Click on "Create" button.
try:
self.element_clickable('//*[@class="ActionPanel--content"]/button')
except Exception:
                raise TE('An error occurred. Sell button can\'t be clicked')
            sleep(1)
# Sign Metamask
self.metamask_sign()
            sleep(5)  # Wait for the MetaMask signature to be processed.
# click 'x' icon to close the popup and bring up the page for the NFT that was just listed
self.element_clickable('//button[@class="UnstyledButtonreact__UnstyledButton-sc-ty1bh0-0 btgkrL"]')
            sleep(7)  # Wait for the listed NFT page to load.
print(f'{green}NFT put up for sale.{reset}')
except TE as error:
print(f'{red}NFT sale cancelled: {error}{reset}')
def cls() -> None:
"""Clear console function."""
# Clear console for Windows using 'cls' and Linux & Mac using 'clear'.
os.system('cls' if os.name == 'nt' else 'clear')
def read_file(file_: str, question: str) -> str:
"""Read file or ask for data to write in text file."""
if not os.path.isfile(f'assets/{file_}.txt'):
open(f'assets/{file_}.txt', 'a')
with open(f'assets/{file_}.txt', 'r+', encoding='utf-8') as file:
text = file.read()
if text == '':
text = input(question)
if input(f'Do you want to save your {file_} in'
' text file? (y/n) ').lower() == 'y':
file.write(text)
print(f'{green}Saved.{reset}')
else:
print(f'{yellow}Not saved.{reset}')
return text
def data_file() -> str:
"""Read data folder and extract JSON, CSV and XLSX files."""
while True:
folder = [glob(f'data/{extension}')
for extension in ['*.json', '*.csv', '*.xlsx']]
print(f'{yellow}\nChoose your file:{reset}')
file_number = 0
files = []
print('0 - Browse file on PC.')
for extension in folder:
for file in extension:
file_number += 1
files.append(file)
print(f'{file_number} - {file}')
answer = input('File number: ')
cls() # Clear console.
if answer.isdigit():
if int(answer) == 0:
# Browse file on PC.
from tkinter import Tk
from tkinter.filedialog import askopenfilename
Tk().withdraw() # Hide Tkinter tab.
return askopenfilename(filetypes=[('', '.json .csv .xlsx')])
elif int(answer) <= len(files):
return files[int(answer) - 1]
else:
print(f'{red}File doesn\'t exist.{reset}')
else:
print(f'{red}Answer must be an integer.{reset}')
if __name__ == '__main__':
cls() # Clear console.
password = read_file('password', '\nWhat is your MetaMask password? ')
recovery_phrase = read_file('recovery_phrase',
'\nWhat is your MetaMask recovery phrase? ')
file = data_file() # Ask for file.
# Init Settings class.
settings = Settings(file, os.path.splitext(file)[1])
# Init Opensea class and send password and recovery phrase.
opensea = Opensea(password, recovery_phrase)
opensea.metamask() # Connect to MetaMask.
opensea.opensea_login() # Connect to Opensea.
# Upload each NFT one by one.
for element in range(settings.len_file):
settings.get_nft(element) # Get data of the NFT.
opensea.opensea_upload(element + 1) # Upload it.
|
the-stack_0_9867 | # -*- coding: utf-8 -*-
# Copyright (c) 2016-2022 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import os
import numpy as np
from pandapower.auxiliary import ppException
try:
import pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
class MapboxTokenMissing(ppException):
"""
Exception being raised in case loadflow did not converge.
"""
pass
def _on_map_test(x, y):
"""
checks if bus_geodata can be located on a map using geopy
"""
try:
from geopy.geocoders import Nominatim
from geopy.exc import GeocoderTimedOut
geolocator = Nominatim(user_agent="pandapower_user_mapboxplot")
except ImportError:
# if geopy is not available there will be no geo-coordinates check
# therefore if geo-coordinates are not real and user sets on_map=True, an empty map will be plot!
        logger.warning('Geo-coordinates check cannot be performed because geopy package not available \n\t--> '
'if geo-coordinates are not in lat/lon format an empty plot may appear...')
return True
try:
location = geolocator.reverse("{0}, {1}".format(x, y), language='en-US')
except GeocoderTimedOut:
logger.Error("Existing net geodata cannot be geo-located: possible reason: geo-data not in lat/long ->"
"try geo_data_to_latlong(net, projection) to transform geodata to lat/long!")
if location.address is None:
return False
else:
return True
def geo_data_to_latlong(net, projection):
"""
Transforms network's geodata (in `net.bus_geodata` and `net.line_geodata`) from specified projection to lat/long (WGS84).
INPUT:
**net** (pandapowerNet) - The pandapower network
**projection** (String) - projection from which geodata are transformed to lat/long. some examples
- "epsg:31467" - 3-degree Gauss-Kruger zone 3
- "epsg:2032" - NAD27(CGQ77) / UTM zone 18N
- "epsg:2190" - Azores Oriental 1940 / UTM zone 26N
"""
try:
from pyproj import Proj, transform
except ImportError:
        logger.warning('Geo-coordinates check cannot be performed because pyproj package not available \n\t--> '
'if geo-coordinates are not in lat/lon format an empty plot may appear...')
return
if projection == 'epsg:4326':
return
wgs84 = Proj(init='epsg:4326') # lat/long
try:
projection = Proj(init=projection)
except:
logger.warning("Transformation of geodata to lat/long failed! because of:]\n"
"Unknown projection provided "
"(format 'epsg:<number>' required as available at http://spatialreference.org/ref/epsg/ )")
return
# transform all geodata to long/lat using set or found projection
try:
lon, lat = transform(projection, wgs84, net.bus_geodata.loc[:, 'x'].values, net.bus_geodata.loc[:, 'y'].values)
net.bus_geodata.loc[:, 'x'], net.bus_geodata.loc[:, 'y'] = lon, lat
if net.line_geodata.shape[0] > 0:
for idx in net.line_geodata.index:
line_coo = np.array(net.line_geodata.loc[idx, 'coords'])
lon, lat = transform(projection, wgs84, line_coo[:, 0], line_coo[:, 1])
net.line_geodata.loc[idx, 'coords'] = np.array([lon, lat]).T.tolist()
return
except:
logger.warning('Transformation of geodata to lat/long failed!')
return
def set_mapbox_token(token):
from pandapower import pp_dir
path = os.path.join(pp_dir, "plotting", "plotly")
filename = os.path.join(path, 'mapbox_token.txt')
with open(filename, "w") as mapbox_file:
mapbox_file.write(token)
def _get_mapbox_token():
from pandapower import pp_dir
path = os.path.join(pp_dir, "plotting", "plotly")
filename = os.path.join(path, 'mapbox_token.txt')
with open(filename, "r") as mapbox_file:
return mapbox_file.read()
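# Minimal usage sketch, not part of the original module: it assumes pandapower's bundled
# mv_oberrhein example network and assumes its geodata is stored in the projected CRS
# epsg:31467 (both assumptions are illustrative), and that pyproj is installed.
if __name__ == "__main__":
    import pandapower.networks as pn
    example_net = pn.mv_oberrhein()  # example network shipped with pandapower
    geo_data_to_latlong(example_net, projection='epsg:31467')  # convert geodata to WGS84 lat/long
    print(example_net.bus_geodata.head())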
|
the-stack_0_9868 | #!/usr/bin/env python
# _*_ coding: utf-8 _*_
from setuptools import setup, find_packages
import os
import imp
def non_python_files(path):
""" Return all non-python-file filenames in path """
result = []
all_results = []
module_suffixes = [info[0] for info in imp.get_suffixes()]
ignore_dirs = ['cvs']
for item in os.listdir(path):
name = os.path.join(path, item)
if (
os.path.isfile(name) and
os.path.splitext(item)[1] not in module_suffixes
):
result.append(name)
elif os.path.isdir(name) and item.lower() not in ignore_dirs:
all_results.extend(non_python_files(name))
if result:
all_results.append((path, result))
return all_results
data_files = (
# non_python_files('emissary') +
# non_python_files(os.path.join('Emissary', 'doc'))
)
setup(name='Emissary',
version="2.1.1",
description='A microservice for indexing the plain text of articles and essays',
author='Luke Brooks',
author_email='[email protected]',
url='http://psybernetics.org.uk/emissary',
download_url = 'https://github.com/LukeB42/Emissary/tarball/2.0.0',
data_files = data_files,
packages=['emissary', 'emissary.resources', 'emissary.controllers'],
include_package_data=True,
install_requires=[
"setproctitle",
"goose-extractor",
"lxml",
"gevent",
"Flask-RESTful",
"Flask-SQLAlchemy",
"cssselect",
"BeautifulSoup",
"feedparser",
"python-snappy",
"requests",
"pygments",
"window",
],
keywords=["text extraction","document archival","document retrieval"]
)
|
the-stack_0_9872 | import json
import boto3
import sys
from datetime import datetime
from decimal import Decimal
from boto3.dynamodb.conditions import Key, Attr
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
today=datetime.today()
curyear=today.year
curmonth=today.month
curday=today.day
start_of_day = int(datetime(curyear,curmonth,curday,0,0).timestamp())
curtime = int(datetime.today().timestamp())
oneweekbefore = start_of_day - 604800
def lambda_handler(event, context):
dynamodb = boto3.resource('dynamodb', region_name='eu-west-1')
activitytable = dynamodb.Table('BBAthleteActivities')
athlete_id = get_qp(event,'id')
athlete_activities=activitytable.get_item(Key={'id': athlete_id })
#print(athlete_activities)
aa = athlete_activities.get('Item').get('activities')
filtered_activities = list(filter(lambda x: (int(datetime.strptime(x['Date'],'%Y-%m-%d').strftime("%s")) < oneweekbefore), aa))
logger.debug(filtered_activities);
activitytable.update_item(Key={'id':athlete_id} , UpdateExpression='set activities = :obj', ExpressionAttributeValues={":obj" : filtered_activities })
activitytable.update_item(Key={'id':athlete_id} , UpdateExpression='set lastupdated = :obj', ExpressionAttributeValues={":obj" : oneweekbefore })
return {
'statusCode': 200,
'body': json.dumps("Removed last one week activities for athlete : "+ athlete_id)
}
def get_qp(event,qp):
qpid = None
if event.get('params') is not None :
qpid=event.get('params').get('querystring').get(qp)
return qpid
if __name__ == '__main__':
evnt={ "params": { "querystring": { "id": "9671032" } } }
print(lambda_handler(evnt,None))
|
the-stack_0_9873 | #
# @lc app=leetcode.cn id=946 lang=python3
#
# [946] 验证栈序列
#
from typing import List
class Solution:
    # Simply simulate the push/pop process.
def validateStackSequences(self, pushed: List[int], popped: List[int]) -> bool:
try:
if len(pushed) == 0 or len(pushed) == 1:
return True
            # Stack used for the simulation.
stack = []
for num in popped:
if len(stack) > 0 and num == stack[-1]:
                    # The element to pop is exactly on top of the stack.
stack.pop()
elif num in pushed:
                    # The element to pop has not been pushed yet; push every element before it.
index = pushed.index(num)
stack.extend(pushed[:index])
pushed = pushed[index+1:]
else:
return False
return True
except Exception as e:
raise e
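# Minimal usage sketch (illustrative inputs, not part of the original LeetCode submission):
# the second pop order cannot be produced by a single stack, so it should return False.
if __name__ == '__main__':
    s = Solution()
    print(s.validateStackSequences([1, 2, 3, 4, 5], [4, 5, 3, 2, 1]))  # expected: True
    print(s.validateStackSequences([1, 2, 3, 4, 5], [4, 3, 5, 1, 2]))  # expected: False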
|
the-stack_0_9875 | import json
from flask import Response
from flask import Blueprint
from flask import request
from financespy import Transaction
from financespy import parse_month
from datetime import date
def month_weeks(backend, year, month):
return [
[trans.to_dict() for trans in week.records()]
for week in backend.month(year=year, month=month).weeks()
]
def month_days(backend, year, month):
return [
[trans.to_dict() for trans in day.records()]
for day in backend
.month(year=year, month=month)
.days()
]
def month_day(backend, year, month, day):
return [
trans.to_dict()
for trans in backend
.month(year=year, month=month)
.day(day).records()
]
def transactions_blueprint(backend, name):
transactions = Blueprint(
"_transactions_",
name,
url_prefix="/api/accounts/<account>/transactions")
@transactions.route("/")
    def root(account):
        return "It is working for " + account
@transactions.route("/<int:year>/<month>", methods=("GET",))
def month_all(account, year, month):
result = [
trans.to_dict()
for trans in backend.month(year=year, month=month).records()
]
return Response(
json.dumps(result),
mimetype="application/json"
)
@transactions.route("/<int:year>/<month>/<details>", methods=("GET",))
def month_details(account, year, month, details):
if details == "weeks":
result = month_weeks(backend, year, month)
elif details == "days":
result = month_days(backend, year, month)
else:
result = month_day(backend, year, month, int(details))
return Response(
json.dumps(result),
mimetype="application/json"
)
@transactions.route("/<int:year>/<month>/<int:day>", methods=("PUT",))
def insert_record(account, year, month, day):
payload = request.get_json()
transaction = Transaction(
value=payload["value"],
description=payload["description"],
categories=[]
)
backend.insert_record(
date=date(year=year, month=parse_month(month), day=day),
record=transaction
)
return ('', 204)
return transactions
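# Minimal wiring sketch (illustrative only): `backend` is assumed to be any object exposing
# the month()/insert_record() API used above, e.g. a financespy backend instance; it is left
# as a placeholder here rather than a real implementation.
if __name__ == "__main__":
    from flask import Flask

    app = Flask(__name__)
    backend = None  # replace with a real financespy backend instance
    app.register_blueprint(transactions_blueprint(backend, __name__))
    app.run(debug=True)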
|
the-stack_0_9877 | """Mock documents, used for integration testing."""
from dataclasses import dataclass, replace
from itertools import chain
from pathlib import Path
from typing import List, Optional, Sequence, Tuple
from bp.document import Document
from bp.entity import Page
from bp.build_document import InputPage, build_document
from bp.geometry import BBox, Interval
from bp.ocr import InputWord
MockWord = Tuple[str, Tuple[float, float], Tuple[float, float]]
def _input_word(mock_word: MockWord) -> InputWord:
bbox = BBox(
Interval(mock_word[1][0], mock_word[1][1]),
Interval(mock_word[2][0], mock_word[2][1]))
return InputWord(bbox, mock_word[0], None, None, None)
@dataclass(frozen=True)
class MockPage:
"""A mock page.
Args:
mock_words: The mock words on the page.
bbox: The bounding box of the mock page.
"""
words: Tuple[InputWord, ...]
bbox: BBox
def mock_doc(pages: Sequence[str],
name: Optional[str] = None) -> Document:
"""A mock doc described as an ASCII drawing.
Args:
pages: Every string represents a page of input. See the test code for
examples.
name: A name for the Document. This is mostly for logging/debugging. It
should usually be fine to use the default.
"""
if not pages:
pages = [""]
mock_pages: List[MockPage] = []
offset = 0.0
for page in pages:
mock_words: List[MockWord] = []
lines = page.split('\n')
for line_no, line in enumerate(lines):
start: Optional[int] = None
for i in range(len(line) + 1):
if i < len(line) and line[i] != ' ':
if start is None:
start = i
if i == len(line) or line[i] == ' ':
if start is not None:
word = line[start:i]
mock_word = (
word,
(start, i),
(line_no, line_no + 1))
mock_words += [mock_word]
start = None
page_width = max(len(line) for line in lines) if lines else 0
mock_pages += [
MockPage(tuple(map(lambda W: _input_word(W), mock_words)),
BBox(Interval(0, page_width),
Interval(0 + offset, len(lines) + offset)))]
offset += len(lines)
assert len(pages) == len(mock_pages)
if name is None:
name = ('---page break---').join(pages)
return build_mock_doc(tuple(mock_pages), name=name)
def build_mock_doc(mock_pages: Tuple[MockPage, ...], name: str) -> Document:
input_pages = tuple(InputPage(Page(mock_page.bbox, index + 1), mock_page.words)
for index, mock_page in enumerate(mock_pages))
return build_document(input_pages, name)
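# Minimal usage sketch (illustrative only): each string is one page, words are laid out as
# ASCII, and word bounding boxes are derived from character columns and line numbers.
if __name__ == "__main__":
    example = mock_doc(["hello world", "page two\n  indented words"])
    print(example)  # relies on Document's default repr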
|
the-stack_0_9880 | import numpy as np
import torch
from torch.utils.data import Dataset, TensorDataset, DataLoader
from sklearn.utils import shuffle
class SequenceBucketCollator():
def __init__(self, choose_length, maxlen, sequence_index, length_index, label_index=None):
self.choose_length = choose_length
self.sequence_index = sequence_index
self.length_index = length_index
self.label_index = label_index
self.maxlen = maxlen
def __call__(self, batch):
batch = [torch.stack(x) for x in list(zip(*batch))]
sequences = batch[self.sequence_index]
lengths = batch[self.length_index]
length = self.choose_length(lengths)
mask = torch.arange(start=self.maxlen, end=0, step=-1) < length
padded_sequences = sequences[:, mask]
batch[self.sequence_index] = padded_sequences
if self.label_index is not None:
return [x for i, x in enumerate(batch) if i != self.label_index], batch[self.label_index]
return batch
def make_loader(x_padded, lengths, y, maxlen=236, batch_size=512, is_train=True):
dataset = TensorDataset(x_padded, lengths, torch.tensor(y))
collator = SequenceBucketCollator(lambda length: length.max(),
maxlen=maxlen,
sequence_index=0,
length_index=1,
label_index=2)
    loader = DataLoader(dataset, batch_size=batch_size, shuffle=is_train, collate_fn=collator)
return loader
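# Minimal usage sketch with toy, right-aligned (pre-padded) sequences; the shapes and values
# below are illustrative and not taken from any original notebook.
if __name__ == "__main__":
    maxlen = 10
    lengths = torch.tensor([3, 5, 2, 7])
    x_padded = torch.zeros(4, maxlen, dtype=torch.long)
    for i, n in enumerate(lengths.tolist()):
        x_padded[i, maxlen - n:] = torch.arange(1, n + 1)  # tokens packed at the end
    y = np.zeros(4, dtype=np.float32)
    loader = make_loader(x_padded, lengths, y, maxlen=maxlen, batch_size=2)
    (sequences, batch_lengths), labels = next(iter(loader))
    print(sequences.shape, labels.shape)  # sequences are trimmed per batch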
|
the-stack_0_9881 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import os
from typing import Callable
import jax
import jax.numpy as jn
import numpy as np
import tensorflow as tf # For data augmentation.
import tensorflow_datasets as tfds
from absl import app, flags
from tqdm import tqdm, trange
import objax
from examples.image_classification.tfdata.data import DataSet
from objax.jaxboard import SummaryWriter, Summary
from objax.util import EasyDict
from objax.zoo import convnet, wide_resnet
FLAGS = flags.FLAGS
def augment(x, shift: int):
y = tf.image.random_flip_left_right(x['image'])
y = tf.pad(y, [[shift] * 2, [shift] * 2, [0] * 2], mode='REFLECT')
return dict(image=tf.image.random_crop(y, tf.shape(x['image'])), label=x['label'])
# We make our own TrainLoop to be reusable
class TrainLoop(objax.Module):
predict: Callable
train_op: Callable
def __init__(self, nclass: int, **kwargs):
self.nclass = nclass
self.params = EasyDict(kwargs)
def train_step(self, summary: Summary, data: dict, progress: np.ndarray):
kv = self.train_op(progress, data['image'].numpy(), data['label'].numpy())
for k, v in kv.items():
if jn.isnan(v):
raise ValueError('NaN, try reducing learning rate', k)
summary.scalar(k, float(v))
def train(self, num_train_epochs: int, train_size: int, train: DataSet, test: DataSet, logdir: str):
checkpoint = objax.io.Checkpoint(logdir, keep_ckpts=5, makedir=True)
start_epoch, last_ckpt = checkpoint.restore(self.vars())
train_iter = iter(train)
progress = np.zeros(jax.local_device_count(), 'f') # for multi-GPU
with SummaryWriter(os.path.join(logdir, 'tb')) as tensorboard:
for epoch in range(start_epoch, num_train_epochs):
with self.vars().replicate():
# Train
summary = Summary()
loop = trange(0, train_size, self.params.batch,
leave=False, unit='img', unit_scale=self.params.batch,
desc='Epoch %d/%d' % (1 + epoch, num_train_epochs))
for step in loop:
progress[:] = (step + (epoch * train_size)) / (num_train_epochs * train_size)
self.train_step(summary, next(train_iter), progress)
# Eval
accuracy, total = 0, 0
for data in tqdm(test, leave=False, desc='Evaluating'):
total += data['image'].shape[0]
preds = np.argmax(self.predict(data['image'].numpy()), axis=1)
accuracy += (preds == data['label'].numpy()).sum()
accuracy /= total
summary.scalar('eval/accuracy', 100 * accuracy)
print('Epoch %04d Loss %.2f Accuracy %.2f' % (epoch + 1, summary['losses/xe'](),
summary['eval/accuracy']()))
tensorboard.write(summary, step=(epoch + 1) * train_size)
checkpoint.save(self.vars(), epoch + 1)
# We inherit from the training loop and define predict and train_op.
class TrainModule(TrainLoop):
def __init__(self, model: Callable, nclass: int, **kwargs):
super().__init__(nclass, **kwargs)
self.model = model(3, nclass)
model_vars = self.model.vars()
self.opt = objax.optimizer.Momentum(model_vars)
self.ema = objax.optimizer.ExponentialMovingAverage(model_vars, momentum=0.999, debias=True)
print(model_vars)
def loss(x, label):
logit = self.model(x, training=True)
loss_wd = 0.5 * sum((v.value ** 2).sum() for k, v in model_vars.items() if k.endswith('.w'))
loss_xe = objax.functional.loss.cross_entropy_logits(logit, label).mean()
return loss_xe + loss_wd * self.params.weight_decay, {'losses/xe': loss_xe, 'losses/wd': loss_wd}
gv = objax.GradValues(loss, model_vars)
def train_op(progress, x, y):
g, v = gv(x, y)
lr = self.params.lr * jn.cos(progress * (7 * jn.pi) / (2 * 8))
self.opt(lr, objax.functional.parallel.pmean(g))
self.ema()
return objax.functional.parallel.pmean({'monitors/lr': lr, **v[1]})
def predict_op(x):
return objax.functional.softmax(self.model(x, training=False))
self.predict = objax.Parallel(self.ema.replace_vars(predict_op), model_vars + self.ema.vars())
self.train_op = objax.Parallel(train_op, self.vars(), reduce=lambda x: x[0])
def network(arch: str):
if arch == 'cnn32-3-max':
return functools.partial(convnet.ConvNet, scales=3, filters=32, filters_max=1024,
pooling=objax.functional.max_pool_2d)
elif arch == 'cnn32-3-mean':
return functools.partial(convnet.ConvNet, scales=3, filters=32, filters_max=1024,
pooling=objax.functional.average_pool_2d)
elif arch == 'cnn64-3-max':
return functools.partial(convnet.ConvNet, scales=3, filters=64, filters_max=1024,
pooling=objax.functional.max_pool_2d)
elif arch == 'cnn64-3-mean':
return functools.partial(convnet.ConvNet, scales=3, filters=64, filters_max=1024,
pooling=objax.functional.average_pool_2d)
elif arch == 'wrn28-1':
return functools.partial(wide_resnet.WideResNet, depth=28, width=1)
elif arch == 'wrn28-2':
return functools.partial(wide_resnet.WideResNet, depth=28, width=2)
raise ValueError('Architecture not recognized', arch)
def main(argv):
del argv
# In this example we use tensorflow_datasets for loading cifar10, but you can use any dataset library you like.
tf.config.experimental.set_visible_devices([], "GPU")
DATA_DIR = os.path.join(os.environ['HOME'], 'TFDS')
data, info = tfds.load(name='cifar10', split='train', data_dir=DATA_DIR, with_info=True)
train_size = info.splits['train'].num_examples
image_shape = info.features['image'].shape
nclass = info.features['label'].num_classes
train = DataSet.from_tfds(data, image_shape, augment_fn=lambda x: augment(x, 4))
test = DataSet.from_tfds(tfds.load(name='cifar10', split='test', data_dir=DATA_DIR), image_shape)
train = train.cache().shuffle(8192).repeat().parse().augment().batch(FLAGS.batch)
train = train.nchw().one_hot(nclass).prefetch(16)
test = test.cache().parse().batch(FLAGS.batch).nchw().prefetch(16)
del data, info
# Define the network and train_it
loop = TrainModule(network(FLAGS.arch), nclass=nclass,
arch=FLAGS.arch,
lr=FLAGS.lr,
batch=FLAGS.batch,
epochs=FLAGS.epochs,
weight_decay=FLAGS.weight_decay)
logdir = '%s/%s' % (loop.__class__.__name__, '_'.join(sorted('%s_%s' % k for k in loop.params.items())))
logdir = os.path.join(FLAGS.logdir, logdir)
print(f'Saving to {logdir}')
print(f'Visualize results with:\n tensorboard --logdir {FLAGS.logdir}')
loop.train(FLAGS.epochs, train_size, train, test, logdir)
if __name__ == '__main__':
flags.DEFINE_enum('arch', 'wrn28-2', ['cnn32-3-max', 'cnn32-3-mean',
'cnn64-3-max', 'cnn64-3-mean',
'wrn28-1', 'wrn28-2'],
'Model architecture.')
flags.DEFINE_float('lr', 0.1, 'Learning rate.')
flags.DEFINE_float('weight_decay', 0.0005, 'Weight decay ratio.')
flags.DEFINE_integer('batch', 256, 'Batch size')
flags.DEFINE_integer('epochs', 1000, 'Training duration in number of epochs.')
flags.DEFINE_string('logdir', 'experiments', 'Directory where to save checkpoints and tensorboard data.')
app.run(main)
|
the-stack_0_9883 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# create a rendering window and renderer
ren1 = vtk.vtkRenderer()
ren1.SetBackground(0,0,0)
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
renWin.SetSize(300,300)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# camera parameters
camera = ren1.GetActiveCamera()
camera.SetPosition(-54.8012,109.471,231.412)
camera.SetFocalPoint(33,33,33)
camera.SetViewUp(0.157687,0.942832,-0.293604)
camera.SetViewAngle(30)
camera.SetClippingRange(124.221,363.827)
reader = vtk.vtkGenericEnSightReader()
# Make sure all algorithms use the composite data pipeline
cdp = vtk.vtkCompositeDataPipeline()
reader.SetDefaultExecutivePrototype(cdp)
reader.SetCaseFileName("" + str(VTK_DATA_ROOT) + "/Data/EnSight/ironProt_ascii.case")
Contour0 = vtk.vtkContourFilter()
Contour0.SetInputConnection(reader.GetOutputPort())
Contour0.SetValue(0,200)
Contour0.SetComputeScalars(1)
mapper = vtk.vtkHierarchicalPolyDataMapper()
mapper.SetInputConnection(Contour0.GetOutputPort())
mapper.SetImmediateModeRendering(1)
mapper.SetScalarRange(0,1)
mapper.SetScalarVisibility(1)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetRepresentationToSurface()
actor.GetProperty().SetInterpolationToGouraud()
ren1.AddActor(actor)
# enable user interface interactor
iren.Initialize()
# prevent the tk window from showing up then start the event loop
reader.SetDefaultExecutivePrototype(None)
# --- end of script --
|
the-stack_0_9884 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Collection of tests for :mod:`orion.core.worker.consumer`."""
import logging
import os
import shutil
import signal
import subprocess
import tempfile
import time
import pytest
import orion.core.io.experiment_builder as experiment_builder
import orion.core.io.resolve_config as resolve_config
import orion.core.utils.backward as backward
import orion.core.worker.consumer as consumer
from orion.core.utils import sigterm_as_interrupt
from orion.core.utils.exceptions import BranchingEvent, MissingResultFile
from orion.core.utils.format_trials import tuple_to_trial
Consumer = consumer.Consumer
@pytest.fixture
def config(exp_config):
"""Return a configuration."""
config = exp_config[0][0]
config["metadata"]["user_args"] = ["--x~uniform(-50, 50)"]
config["metadata"]["VCS"] = resolve_config.infer_versioning_metadata(
config["metadata"]["user_script"]
)
config["name"] = "exp"
config["working_dir"] = "/tmp/orion"
backward.populate_space(config)
config["space"] = config["metadata"]["priors"]
return config
@pytest.mark.usefixtures("storage")
def test_trials_interrupted_sigterm(config, monkeypatch):
"""Check if a trial is set as interrupted when a signal is raised."""
def mock_popen(self, *args, **kwargs):
os.kill(os.getpid(), signal.SIGTERM)
exp = experiment_builder.build(**config)
monkeypatch.setattr(subprocess.Popen, "wait", mock_popen)
trial = tuple_to_trial((1.0,), exp.space)
exp.register_trial(trial)
con = Consumer(exp)
with pytest.raises(KeyboardInterrupt):
with sigterm_as_interrupt():
con(trial)
shutil.rmtree(trial.working_dir)
@pytest.mark.usefixtures("storage")
def test_trial_working_dir_is_created(config):
"""Check that trial working dir is created."""
exp = experiment_builder.build(**config)
trial = tuple_to_trial((1.0,), exp.space)
exp.register_trial(trial, status="reserved")
assert not os.path.exists(trial.working_dir)
con = Consumer(exp)
con(trial)
assert os.path.exists(trial.working_dir)
shutil.rmtree(trial.working_dir)
def setup_code_change_mock(config, monkeypatch, ignore_code_changes):
"""Mock create experiment and trials, and infer_versioning_metadata"""
exp = experiment_builder.build(**config)
trial = tuple_to_trial((1.0,), exp.space)
exp.register_trial(trial, status="reserved")
con = Consumer(exp, ignore_code_changes=ignore_code_changes)
def code_changed(user_script):
return dict(
type="git",
is_dirty=True,
HEAD_sha="changed",
active_branch="new_branch",
diff_sha="new_diff",
)
monkeypatch.setattr(consumer, "infer_versioning_metadata", code_changed)
return con, trial
@pytest.mark.usefixtures("storage")
def test_code_changed_evc_disabled(config, monkeypatch, caplog):
"""Check that trial has its working_dir attribute changed."""
con, trial = setup_code_change_mock(config, monkeypatch, ignore_code_changes=True)
with caplog.at_level(logging.WARNING):
con(trial)
assert "Code changed between execution of 2 trials" in caplog.text
shutil.rmtree(trial.working_dir)
@pytest.mark.usefixtures("storage")
def test_code_changed_evc_enabled(config, monkeypatch):
"""Check that trial has its working_dir attribute changed."""
con, trial = setup_code_change_mock(config, monkeypatch, ignore_code_changes=False)
with pytest.raises(BranchingEvent) as exc:
con(trial)
assert exc.match("Code changed between execution of 2 trials")
shutil.rmtree(trial.working_dir)
@pytest.mark.usefixtures("storage")
def test_retrieve_result_nofile(config):
"""Test retrieve result"""
results_file = tempfile.NamedTemporaryFile(
mode="w", prefix="results_", suffix=".log", dir=".", delete=True
)
exp = experiment_builder.build(**config)
con = Consumer(exp)
with pytest.raises(MissingResultFile) as exec:
con.retrieve_results(results_file)
results_file.close()
assert exec.match(r"Cannot parse result file")
|
the-stack_0_9885 | #!/usr/bin/env python
import json
from run_tests_stats import execute
import optparse
import os
import sys
import subprocess
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
def parseresults(log_file, plot_data, t, duration):
fp = open(log_file).readlines()
i = 0
plot_data[t] = {}
plot_data[t]['tot_ops'] = []
plot_data[t]['abrt_ratio'] = []
plot_data[t]['ckp_builder_start'] = []
plot_data[t]['writeback_spent'] = []
plot_data[t]['blocking_spent'] = []
plot_data[t]['ckp_quiescence_spent'] = []
plot_data[t]['ckp_scan_spent'] = []
plot_data[t]['ckp_builder_spent'] = []
plot_data[t]['ckp_barrier_spent'] = []
plot_data[t]['max_last_objs'] = []
plot_data[t]['avg_last_objs'] = []
plot_data[t]['ckp_builder_by_wakeup'] = []
for line in fp:
if i <= 1:
i += 1
continue
w = line.split()
if not w:
break
thd = (w[2])
tot_ops = w[3]
plot_data[t]['tot_ops'].append(float(tot_ops)/duration/1000)
abrts = (w[5])
plot_data[t]['abrt_ratio'].append(float(abrts)/(float(abrts)+float(tot_ops)))
ckp_builder_start = (w[6])
writeback_spent = (w[7])
blocking_spent = (w[8])
ckp_quiescence_spent = (w[9])
ckp_scan_spent = (w[10])
ckp_builder_spent = (w[11])
ckp_barrier_spent = (w[12])
max_last_objs = (w[13])
avg_last_objs = (w[14])
ckp_builder_by_wakeup = (w[15])
plot_data[t]['ckp_builder_start'].append(ckp_builder_start)
plot_data[t]['writeback_spent'].append(writeback_spent)
plot_data[t]['blocking_spent'].append(blocking_spent)
plot_data[t]['ckp_quiescence_spent'].append(ckp_quiescence_spent)
plot_data[t]['ckp_scan_spent'].append(ckp_scan_spent)
plot_data[t]['ckp_builder_spent'].append(ckp_builder_spent)
plot_data[t]['ckp_barrier_spent'].append(ckp_barrier_spent)
plot_data[t]['max_last_objs'].append(max_last_objs)
plot_data[t]['avg_last_objs'].append(avg_last_objs)
plot_data[t]['ckp_builder_by_wakeup'].append(ckp_builder_by_wakeup)
#print thd
#print tot_ops
def plotgraph(plot_data, threads, update_rate, data_structure, initial_size, graph_type, final_dir):
fig = plt.figure()
title = data_structure + '_' + graph_type + '_u' + str(update_rate) + '_i' + str(initial_size)
fig.suptitle(title)
ax = fig.add_subplot(111)
for keys in plot_data:
ax.plot(threads, plot_data[keys][graph_type], marker='o', linestyle='-', label = keys )
ax.set_xlabel('threads')
if graph_type == 'tot_ops':
ax.set_ylabel('Ops/us')
else:
ax.set_ylabel('Abort Ratio')
ax.legend(loc = 'upper left')
#plt.show()
fig.savefig(final_dir+title+'.png')
parser = optparse.OptionParser()
parser.add_option("-d", "--dest", default = "temp",
help = "destination folder")
(opts, args) = parser.parse_args()
#Create result directory
result_dir = "./results/" + opts.dest + "/"
try:
os.stat(result_dir)
except:
os.makedirs(result_dir)
#Make benches
status = subprocess.check_output('make clean -C ../src/; make -C ../src/', shell=True)
#Read config files
with open('config.json') as json_data_file:
data = json.load(json_data_file)
for test in data:
if data[test][0]["data_structure"] == "llist":
if data[test][0]["buckets"] != 1:
sys.exit("Buckets should be 1\n");
for ur in data[test][0]["update_rate"]:
final_dir = result_dir + test + "/u" + str(ur) + "/";
try:
os.stat(final_dir)
except:
os.makedirs(final_dir)
plot_data = {}
for t in data[test][0]["alg_type"]:
out_file = final_dir + "__" + t + "_" +data[test][0]["data_structure"] + "_" + str(data[test][0]["initial_size"]) + "_u" + str(ur) + ".txt"
execute(data[test][0]["runs_per_test"], data[test][0]["rlu_max_ws"], data[test][0]["buckets"], data[test][0]["duration"], \
t, ur, data[test][0]["initial_size"], data[test][0]["range_size"], out_file, data[test][0]["threads"])
parseresults(out_file, plot_data, t, data[test][0]["duration"])
plotgraph(plot_data, data[test][0]["threads"], ur, data[test][0]["data_structure"], data[test][0]["initial_size"], 'tot_ops', final_dir)
plotgraph(plot_data, data[test][0]["threads"], ur, data[test][0]["data_structure"], data[test][0]["initial_size"], 'abrt_ratio', final_dir)
plotgraph(plot_data, data[test][0]["threads"], ur, data[test][0]["data_structure"], data[test][0]["initial_size"], 'ckp_builder_start', final_dir)
plotgraph(plot_data, data[test][0]["threads"], ur, data[test][0]["data_structure"], data[test][0]["initial_size"], 'writeback_spent', final_dir)
plotgraph(plot_data, data[test][0]["threads"], ur, data[test][0]["data_structure"], data[test][0]["initial_size"], 'blocking_spent', final_dir)
plotgraph(plot_data, data[test][0]["threads"], ur, data[test][0]["data_structure"], data[test][0]["initial_size"], 'ckp_quiescence_spent', final_dir)
plotgraph(plot_data, data[test][0]["threads"], ur, data[test][0]["data_structure"], data[test][0]["initial_size"], 'ckp_scan_spent', final_dir)
plotgraph(plot_data, data[test][0]["threads"], ur, data[test][0]["data_structure"], data[test][0]["initial_size"], 'ckp_builder_spent', final_dir)
plotgraph(plot_data, data[test][0]["threads"], ur, data[test][0]["data_structure"], data[test][0]["initial_size"], 'ckp_barrier_spent', final_dir)
plotgraph(plot_data, data[test][0]["threads"], ur, data[test][0]["data_structure"], data[test][0]["initial_size"], 'max_last_objs', final_dir)
plotgraph(plot_data, data[test][0]["threads"], ur, data[test][0]["data_structure"], data[test][0]["initial_size"], 'avg_last_objs', final_dir)
plotgraph(plot_data, data[test][0]["threads"], ur, data[test][0]["data_structure"], data[test][0]["initial_size"], 'ckp_builder_by_wakeup', final_dir)
|
the-stack_0_9892 | # coding:utf-8
# 2019/9/21
import sys
sys.path.append(r"C:\Study\github\Lookoops\MachineLearning\TensorFlow\image-clssifier")
import logging
import pickle
import os
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from keras import backend as K
import numpy as np
import tensorflow as tf
from tool import util, ui_MainWindow, ui_ModelAddDialog, ui_ModelAddDialogChild, helpDialog
# import classifier_collection as cc
# import test_image_classifier as tic
from preprocessing import preprocessing_factory
r"""ui标签转换
pyuic5 -o C:\Study\github\Lookoops\MachineLearning\TensorFlow\image-clssifier\GUI\tool\ui_MainWindow.py C:\Study\github\Lookoops\MachineLearning\TensorFlow\image-clssifier\GUI\tool\ui_MainWindow.ui
pyuic5 -o C:\Study\github\Lookoops\MachineLearning\TensorFlow\image-clssifier\GUI\tool\ui_ModelAddDialog.py C:\Study\github\Lookoops\MachineLearning\TensorFlow\image-clssifier\GUI\tool\ui_ModelAddDialog.ui
pyuic5 -o C:\Study\github\Lookoops\MachineLearning\TensorFlow\image-clssifier\GUI\tool\ui_ModelAddDialogChild.py C:\Study\github\Lookoops\MachineLearning\TensorFlow\image-clssifier\GUI\tool\ui_ModelAddDialogChild.ui
pyuic5 -o C:\Study\github\Lookoops\MachineLearning\TensorFlow\image-clssifier\GUI\tool\helpDialog.py C:\Study\github\Lookoops\MachineLearning\TensorFlow\image-clssifier\GUI\tool\helpDialog.ui
my tensorflow install path: C:/Users/Yauno/AppData/Local/conda/conda/envs/tensorflow
qtdesigner install path: C:\Users\Yauno\AppData\Local\conda\conda\envs\tensorflow\Lib\site-packages\pyqt5_tools\Qt\bin
"""
# Logging setup
LOGGER_PATH = r"C:\Study\github\Lookoops\MachineLearning\TensorFlow\image-clssifier\GUI\log"
logger = util.getLogger(LOGGER_PATH)
logger.setLevel(logging.DEBUG)  # Set the log level; DEBUG messages are hidden when the level is INFO.
# Configuration setup
CONFIG_PATH = r"C:\Study\github\Lookoops\MachineLearning\TensorFlow\image-clssifier\GUI\data\conf.txt"
CONF_MODEL_LIST_NAME = "modelList"
DEFAULT_LOAD_DIR = "defaultLoadDir"
PREDICT_MODEL_PATH = "" # 融合模型的路径
# MODEL_LIST = cc.getModelList() # 模型列表
MODEL_LIST = [['vgg_16', 'vgg_16/fc8/squeezed:0', 224], ['inception_v3', 'InceptionV3/Predictions/Reshape_1:0', 299], ['pnasnet_large', 'final_layer/predictions:0', 331], ['resnet_v2_200', 'resnet_v2_200/predictions/Reshape_1:0', 224], ['inception_resnet_v2', 'InceptionResnetV2/Logits/Predictions:0', 299]]
LABEL_MAPPING_PATH = None  # Path of the label mapping file.
GRAPH_DIR = None  # Directory of the frozen graph models.
class MyWindow(QMainWindow, ui_MainWindow.Ui_MainWindow):
"""主窗口"""
def __init__(self, parent=None):
super(MyWindow, self).__init__(parent)
self.predictPic = None
self.graphDir = GRAPH_DIR
self.stackingModelPath = PREDICT_MODEL_PATH
        self.gap = 6  # Prediction tolerance in months.
self.labelMapPath = LABEL_MAPPING_PATH
self.picDefaultLoadDir = 'c:\\'
self.mainWindowIcon = "./data/icon.png"
self.initMainWindow()
def initMainWindow(self):
"""主窗口初始化"""
self.setupUi(self)
self.topWidget = QWidget()
self.setWindowIcon(QIcon(self.mainWindowIcon))
self.initComboBox()
self.initData()
        self.menubar.triggered[QAction].connect(self.processtrigger)  # Menu bar actions.
        self.loadPic.clicked.connect(self.getFile)  # Load a picture.
        self.reset.clicked.connect(self.resetFunc)  # Reset button.
        self.predict.clicked.connect(self.predictFunc)  # Predict button.
def initComboBox(self):
"""训练模型下拉框初始化, 从设置中读取配置"""
conf = util.getConfig(CONFIG_PATH)
modelList = conf.options(CONF_MODEL_LIST_NAME)
for m in modelList:
curModelPath = conf.get(CONF_MODEL_LIST_NAME, m)
self.comboBox.addItem(m)
def initData(self):
"""初始化数据"""
self.conf = util.getConfig(CONFIG_PATH)
self.picDefaultLoadDir = self.conf.get(DEFAULT_LOAD_DIR, "pic-default-load-dir")
def resetFunc(self):
"""重置操作"""
self.printConsel("[INFO] reset inputs")
self.predictPic = None
self.showPic.setPixmap(QPixmap("")) # 图片重置
def modelAddFunc(self):
"""setting菜单中的添加模型选项,添加模型名称以及模型对应路径"""
self.modelDialog = ModelAddDialog() # 模型添加框
self.modelDialog.open()
qe = QEventLoop()
qe.exec_()
def printConsel(self, message):
"""打印消息到控制台"""
util.recordAndPrint(logger, self.console, message)
def initPdtModel(self):
"""初始化训练模型"""
self.printConsel("[INFO] initialize prediction model.")
self.pdtModel = Prediction(self.graphDir, self.stackingModelPath, self.labelMapPath)
def pdtCheck(self):
"""预测前检查资源加载"""
self.printConsel("[INFO] check resources load")
if self.predictPic == None:
self.printConsel("[ERROR] picture path is not exist, please check the path you input")
return False
return True
def mockPredictFunc(self):
"""测试"""
import random
return os.path.basename(self.predictPic), random.randint(0,30)
def predictFunc(self):
"""预测,使用Stacking继承学习方法直接预测.
后面考虑通过选择其他方法进行预测"""
if not self.pdtCheck():
return
self.printConsel("[INFO] loading predict models.")
# self.initPdtModel()
# picName, picPdt = self.pdtModel.predictSinglePic(self.predictPic)
        picName, picPdt = self.mockPredictFunc()  # UI test stub.
self.printConsel("[INFO] picture name: {}, estimate age: {} ± {} month".format(picName, picPdt, self.gap))
def picDefaultLoadFunc(self):
"""图片默认加载目录"""
self.printConsel("[INFO] set picture default load directory.")
self.picDefaultLoadDir = QFileDialog.getExistingDirectory(self, "getExistingDirectory", "./")
self.printConsel("[INFO] set picture default load directory successful, new directory is : {}".format(self.picDefaultLoadDir))
        # Persist the setting.
self.conf.set(DEFAULT_LOAD_DIR, "pic-default-load-dir", self.picDefaultLoadDir)
with open(CONFIG_PATH, 'w') as f:
self.conf.write(f)
def helpFunc(self):
"""帮助界面"""
self.printConsel("[INFO] help")
helpWin = helpWindow()
helpWin.open()
qe = QEventLoop()
qe.exec_()
def getFile(self):
"""加载图片"""
fname, _ = QFileDialog.getOpenFileName(self, 'Open file', self.picDefaultLoadDir, "Image files (*.jpg *.png)")
self.printConsel("[INFO] load picture, source : {}".format(fname))
self.predictPic = fname
        self.showPic.setScaledContents(True)  # Scale the picture to fit the label.
self.showPic.setPixmap(QPixmap(fname))
def processtrigger(self, q):
"""信号槽触发"""
curName = q.text()
if curName == "添加模型":
self.modelAddFunc()
elif curName == "图片默认加载目录":
self.picDefaultLoadFunc()
elif curName == "退出":
self.close()
elif curName == "使用方法":
self.helpFunc()
class ModelAddDialog(QMainWindow, ui_ModelAddDialog.Ui_ModelListView):
"""模型添加弹框"""
def __init__(self):
super(ModelAddDialog, self).__init__()
self.setupUi(self)
self.initData()
self.initOps()
def open(self):
self.show()
def initOps(self):
self.modelAddButton.clicked.connect(self.add)
self.modelDeleteButton.clicked.connect(self.delete)
def initData(self):
"""初始化列表中的数据"""
self.modelListView.clear()
self.conf = util.getConfig(CONFIG_PATH)
modelList = self.conf.options(CONF_MODEL_LIST_NAME)
for m in modelList:
curModelPath = self.conf.get(CONF_MODEL_LIST_NAME, m)
self.modelListView.addItem("{}: '{}'".format(m, curModelPath))
def delete(self):
"""删除列表中的数据"""
for item in self.modelListView.selectedItems():
removeItem = self.modelListView.takeItem(self.modelListView.row(item))
try:
boolean = self.conf.remove_option(CONF_MODEL_LIST_NAME, removeItem.text().split(":")[0])
if boolean:
logger.info("[INFO] remove item: {} successful".format(removeItem.text().split(":")[0]))
self.modelListView.removeItemWidget(removeItem)
with open(CONFIG_PATH, 'w') as f:
self.conf.write(f)
else:
logger.info("[WARNING] remove item:{} fail".format(removeItem.text().split(":")[0]))
except Exception as e:
logger.error("[ERROR] remove item:{} fail, trace: {}".format(removeItem.text().split(":")[0], str(e)))
self.initData()
def add(self):
"""添加模型"""
self.child = ModelChildDialog()
self.child.open()
self.initData()
qe = QEventLoop()
qe.exec_()
class ModelChildDialog(QMainWindow, ui_ModelAddDialogChild.Ui_modelChildDIalog):
"""模型添加的模态框"""
def __init__(self):
super(ModelChildDialog, self).__init__()
self.setupUi(self)
self.modelPath = None
self.initOps()
def initOps(self):
"""初始化信号槽"""
self.modelAddOk.clicked.connect(self.accept)
self.modelAddCancle.clicked.connect(self.cancel)
self.filePathButton.clicked.connect(self.getFile)
def accept(self):
"""确认"""
modelName = self.modelNameInput.text()
conf = util.getConfig(CONFIG_PATH)
# print(modelName, self.modelPath)
if modelName != None and self.modelPath != None:
conf.set(CONF_MODEL_LIST_NAME, modelName, self.modelPath)
with open(CONFIG_PATH, 'w') as f:
conf.write(f)
self.close()
def cancel(self):
"""取消"""
self.close()
def getFile(self):
"""选择节点路径"""
self.modelPath, _ = QFileDialog.getOpenFileName(self, 'Open file', 'c:\\',"model file (*.pb)")
def open(self):
self.show()
class helpWindow(QMainWindow, helpDialog.Ui_Dialog):
"""帮助界面"""
def __init__(self):
super(helpWindow, self).__init__()
self.setupUi(self)
self.helpButtonOk.clicked.connect(self.acceptFunc)
def open(self):
self.show()
def acceptFunc(self):
self.close()
class PredictionHandler(object):
"""预测基类"""
def __init__(self, graphDir=None, stackingModelPath=None):
self.picList = None # 预测图片列表
self.frozenGraphName = "frozen_graph.pb"
self.graphDir = graphDir
self.stackingModel = None
self.initData(stackingModelPath)
def initData(self, stackingModelPath):
"""初始化数据和模型"""
# self.picList = [] # 初始化图片列表
self.stackingModel = self.loadModel(stackingModelPath)
def loadModel(self, stackingModelPath):
"""加载模型"""
return pickle.load(open(stackingModelPath,'rb'))
def checkEnv(self):
"""预测前检查资源加载"""
if self.picList == None:
assert False, "picture path is empty"
if self.graphDir == None:
assert False, "model graph path is not exist"
if self.stackingModel == None:
assert False, "train model is not initialize"
def preProcess(self, data, alpha = 0.99, isTotal = False):
"""数据均一化"""
m, n = np.shape(data)
ret = np.zeros((m, n))
for i in range(m):
total = np.sum(data[i, :])
maxValue = np.max(data[i, :])
for j in range(n):
if isTotal:
                    ret[i, j] = data[i, j] / total * alpha
else:
ret[i, j] = [data[i, j], 1][data[i, j] == 0] / maxValue * alpha
return ret
def createGraph(self, sess, modelPath):
"""创建图"""
K.clear_session()
tf.reset_default_graph()
with tf.gfile.FastGFile(modelPath, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
def pdtBySingleModel(self, modelPath, modelName, tensorName, picList, picSize):
"""单个模型预测"""
self.createGraph(None, modelPath)
pFn = preprocessing_factory.get_preprocessing(modelName, is_training=False)
        pdtOutput = {}  # Predictions keyed by file name, e.g. {'2_m-1-1.9.png': prediction}.
with tf.Session() as sess:
for picPath in picList:
tensor = sess.graph.get_tensor_by_name(tensorName)
baseName = os.path.basename(picPath)
                # Load and preprocess the image.
imgData = tf.gfile.FastGFile(picPath, 'rb').read()
imgData = tf.image.decode_jpeg(imgData, channels=3)
imgData = pFn(imgData, picSize, picSize)
imgData = tf.expand_dims(imgData, 0)
imgData = sess.run(imgData)
try:
prediction = sess.run(tensor, {'input:0': imgData})
prediction = np.squeeze(prediction)
pdtOutput[baseName] = prediction
except Exception as e:
print("[Error] %s" % str(e))
return pdtOutput
def getMeanOfModels(self):
"""获得多个模型预测结果,并返回预测均值"""
pdt = {}
for modelName, tensorName, picSize in MODEL_LIST:
curModelPdt = {}
modeDir = os.path.join(self.graphDir, modelName)  # directory for this model name
classList = os.listdir(modeDir)  # trained model variants under this model name
for c in classList:
modelPath = os.path.join(modeDir, c, self.frozenGraphName)  # path of the current trained model
tmpPdt = self.pdtBySingleModel(modelPath, modelName, tensorName, self.picList, picSize)  # predict the picture list with this model
for k,v in tmpPdt.items():
v = v.argmax()  # index of the class with the highest predicted probability
curModelPdt.setdefault(k, []).append(v)
# average the current model's predictions for each picture
count = len(classList)
for k, v in curModelPdt.items():
curModelPdt[k] = np.mean(v)
# merge this model's averaged predictions into pdt
for k,v in curModelPdt.items():
if k not in pdt:
pdt[k] = [v]
else:
pdt[k].append(v)
picNameList, testFeature = [], []
for k, v in pdt.items():
picNameList.append(k)
testFeature.append(v)
testFeature = np.mat(testFeature)
testFeature = self.preProcess(testFeature)
return picNameList, testFeature
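# Note (shapes inferred from the code above): picNameList holds the picture file
# names, and testFeature is an (n_pictures, n_models) matrix whose entries are the
# per-model averaged class indices, normalized row-wise by preProcess before being
# passed to the stacking model in predicts().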
def predicts(self, picPathList):
"""预测多张图片
@param picPathList 路径,列表
"""
self.picList = picPathList
self.checkEnv()  # validate loaded resources
picNameList, testFeature = self.getMeanOfModels()
pdtValue = self.stackingModel.predict(testFeature)
return picNameList, pdtValue
def predictSinglePic(self, picPath):
"""预测单张图片
@param picPath 路径,字符串
"""
return self.predicts([picPath])
class Prediction(PredictionHandler):
"""预测实现类"""
def __init__(self, graphDir=None, stackingModelPath=None, labelMapPath=None):
super(Prediction, self).__init__(graphDir, stackingModelPath)
self.labelMap = None
self.labelMapPath = labelMapPath
self.initLableMap()
def initLableMap(self):
"""初始化标签映射字典"""
self.labelMap = {}
with open(self.labelMapPath, "r") as f:
lines = f.readlines()
for line in lines:
k,v = line.split(" ")
self.labelMap[k] = v
def predictSinglePic(self, picPath):
"""重写父类预测方法"""
picNameList, pdtValue = self.predicts([picPath])
try:
return picNameList[0], self.labelMap[int(pdtValue[0])]
except (KeyError, IndexError, ValueError):
assert False, "check label map"
if __name__ == '__main__':
app = QApplication(sys.argv)
myWin = MyWindow()
myWin.show()
sys.exit(app.exec_()) |
the-stack_0_9893 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cleanup task for cleaning up unneeded testcases."""
import collections
import datetime
import json
import random
from googleapiclient.errors import HttpError
from base import dates
from base import errors
from base import memoize
from base import utils
from chrome import build_info
from crash_analysis import crash_comparer
from crash_analysis import severity_analyzer
from datastore import data_handler
from datastore import data_types
from datastore import ndb_utils
from fuzzing import leak_blacklist
from handlers import base_handler
from libs import handler
from libs import mail
from libs.issue_management import issue_filer
from libs.issue_management import issue_tracker_policy
from libs.issue_management import issue_tracker_utils
from metrics import crash_stats
from metrics import logs
GENERIC_INCORRECT_COMMENT = (
'\n\nIf this is incorrect, please add the {label_text}')
OSS_FUZZ_INCORRECT_COMMENT = ('\n\nIf this is incorrect, please file a bug on '
'https://github.com/google/oss-fuzz/issues/new')
AUTO_CC_LIMIT = 5
TOP_CRASHES_LIMIT = 5
TOP_CRASHES_DAYS_LOOKBEHIND = 7
TOP_CRASHES_MIN_THRESHOLD = 50 * TOP_CRASHES_DAYS_LOOKBEHIND
TOP_CRASHES_IGNORE_CRASH_TYPES = [
'Out-of-memory',
'Stack-overflow',
'Timeout',
]
TOP_CRASHES_IGNORE_CRASH_STATES = ['NULL']
FUZZ_TARGET_UNUSED_THRESHOLD = 15
UNUSED_HEARTBEAT_THRESHOLD = 15
ProjectMap = collections.namedtuple('ProjectMap', 'jobs platforms')
def _get_predator_result_item(testcase, key, default=None):
"""Return the suspected components for a test case."""
predator_result = testcase.get_metadata('predator_result')
if not predator_result:
return default
return predator_result['result'].get(key, default)
def _append_generic_incorrect_comment(comment, policy, issue, suffix):
"""Get the generic incorrect comment."""
wrong_label = policy.label('wrong')
if not wrong_label:
return comment
return comment + GENERIC_INCORRECT_COMMENT.format(
label_text=issue.issue_tracker.label_text(wrong_label)) + suffix
def job_platform_to_real_platform(job_platform):
"""Get real platform from job platform."""
for platform in data_types.PLATFORMS:
if platform in job_platform:
return platform
raise ValueError('Unknown platform: ' + job_platform)
def cleanup_reports_metadata():
"""Delete ReportMetadata for uploaded reports."""
uploaded_reports = ndb_utils.get_all_from_query(
data_types.ReportMetadata.query(
ndb_utils.is_true(data_types.ReportMetadata.is_uploaded)),
keys_only=True)
ndb_utils.delete_multi(uploaded_reports)
def cleanup_testcases_and_issues():
"""Clean up unneeded open testcases and their associated issues."""
jobs = data_handler.get_all_job_type_names()
testcase_keys = ndb_utils.get_all_from_query(
data_types.Testcase.query(
ndb_utils.is_false(data_types.Testcase.triaged)),
keys_only=True)
top_crashes_by_project_and_platform_map = (
get_top_crashes_for_all_projects_and_platforms())
utils.python_gc()
testcases_processed = 0
empty_issue_tracker_policy = issue_tracker_policy.get_empty()
for testcase_key in testcase_keys:
testcase_id = testcase_key.id()
try:
testcase = data_handler.get_testcase_by_id(testcase_id)
except errors.InvalidTestcaseError:
# Already deleted.
continue
logs.log('Processing testcase %d.' % testcase_id)
try:
issue = issue_tracker_utils.get_issue_for_testcase(testcase)
policy = issue_tracker_utils.get_issue_tracker_policy_for_testcase(
testcase)
if not policy:
policy = empty_issue_tracker_policy
# Issue updates.
update_os_labels(policy, testcase, issue)
update_fuzz_blocker_label(policy, testcase, issue,
top_crashes_by_project_and_platform_map)
update_component_labels(testcase, issue)
update_issue_ccs_from_owners_file(policy, testcase, issue)
update_issue_owner_and_ccs_from_predator_results(policy, testcase, issue)
update_issue_labels_for_flaky_testcase(policy, testcase, issue)
# Testcase marking rules.
mark_duplicate_testcase_as_closed_with_no_issue(testcase)
mark_issue_as_closed_if_testcase_is_fixed(policy, testcase, issue)
mark_testcase_as_closed_if_issue_is_closed(policy, testcase, issue)
mark_testcase_as_closed_if_job_is_invalid(testcase, jobs)
mark_unreproducible_testcase_as_fixed_if_issue_is_closed(testcase, issue)
mark_unreproducible_testcase_and_issue_as_closed_after_deadline(
policy, testcase, issue)
# Notification, to be done at end after testcase state is updated from
# previous rules.
notify_closed_issue_if_testcase_is_open(policy, testcase, issue)
notify_issue_if_testcase_is_invalid(policy, testcase, issue)
notify_uploader_when_testcase_is_processed(policy, testcase, issue)
# Mark testcase as triage complete if both testcase and associated issue
# are closed. This also need to be done before the deletion rules.
mark_testcase_as_triaged_if_needed(testcase, issue)
# Testcase deletion rules.
delete_unreproducible_testcase_with_no_issue(testcase)
except Exception:
logs.log_error('Failed to process testcase %d.' % testcase_id)
testcases_processed += 1
if testcases_processed % 100 == 0:
utils.python_gc()
def cleanup_unused_fuzz_targets_and_jobs():
"""Clean up unused FuzzTarget and FuzzTargetJob entities."""
last_run_cutoff = utils.utcnow() - datetime.timedelta(
days=FUZZ_TARGET_UNUSED_THRESHOLD)
unused_target_jobs = data_types.FuzzTargetJob.query(
data_types.FuzzTargetJob.last_run < last_run_cutoff)
valid_target_jobs = data_types.FuzzTargetJob.query(
data_types.FuzzTargetJob.last_run >= last_run_cutoff)
to_delete = [t.key for t in unused_target_jobs]
valid_fuzz_targets = set(t.fuzz_target_name for t in valid_target_jobs)
for fuzz_target in ndb_utils.get_all_from_model(data_types.FuzzTarget):
if fuzz_target.fully_qualified_name() not in valid_fuzz_targets:
to_delete.append(fuzz_target.key)
ndb_utils.delete_multi(to_delete)
def get_jobs_and_platforms_for_project():
"""Return a map of projects to jobs and platforms map to use for picking top
crashes."""
all_jobs = ndb_utils.get_all_from_model(data_types.Job)
projects_to_jobs_and_platforms = {}
for job in all_jobs:
job_environment = job.get_environment()
# Skip experimental jobs.
if utils.string_is_true(job_environment.get('EXPERIMENTAL')):
continue
# Skip custom binary jobs.
if (utils.string_is_true(job_environment.get('CUSTOM_BINARY')) or
job_environment.get('SYSTEM_BINARY_DIR')):
continue
# Skip if explicitly excluded using flag.
if utils.string_is_true(job_environment.get('EXCLUDE_FROM_TOP_CRASHES')):
continue
if job.project not in projects_to_jobs_and_platforms:
projects_to_jobs_and_platforms[job.project] = ProjectMap(set(), set())
projects_to_jobs_and_platforms[job.project].jobs.add(job.name)
projects_to_jobs_and_platforms[job.project].platforms.add(
job_platform_to_real_platform(job.platform))
return projects_to_jobs_and_platforms
@memoize.wrap(memoize.Memcache(12 * 60 * 60))
def _get_crash_occurrence_platforms_from_crash_parameters(
crash_type, crash_state, security_flag, project_name, lookbehind_days):
"""Get platforms from crash stats based on crash parameters."""
last_hour = crash_stats.get_last_successful_hour()
if not last_hour:
# No crash stats available, skip.
return []
where_clause = ('crash_type = {crash_type} AND '
'crash_state = {crash_state} AND '
'security_flag = {security_flag} AND '
'project = {project}').format(
crash_type=json.dumps(crash_type),
crash_state=json.dumps(crash_state),
security_flag=json.dumps(security_flag),
project=json.dumps(project_name),
)
_, rows = crash_stats.get(
end=last_hour,
block='day',
days=lookbehind_days,
group_by='platform',
where_clause=where_clause,
group_having_clause='',
sort_by='total_count',
offset=0,
limit=1)
platforms = set()
for row in rows:
for group in row['groups']:
platform = group['name'].split(':')[0]
platforms.add(platform.lower())
return platforms
def get_platforms_from_testcase_variants(testcase):
"""Get platforms from crash stats based on crash parameters."""
variant_query = data_types.TestcaseVariant.query(
data_types.TestcaseVariant.testcase_id == testcase.key.id())
platforms = {
variant.platform
for variant in variant_query
if variant.is_similar and variant.platform
}
return platforms
def get_crash_occurrence_platforms(testcase, lookbehind_days=1):
"""Get platforms from crash stats for a testcase."""
return _get_crash_occurrence_platforms_from_crash_parameters(
testcase.crash_type, testcase.crash_state, testcase.security_flag,
testcase.project_name, lookbehind_days)
def get_top_crashes_for_all_projects_and_platforms():
"""Return top crashes for all projects and platforms."""
last_hour = crash_stats.get_last_successful_hour()
if not last_hour:
# No crash stats available, skip.
return {}
projects_to_jobs_and_platforms = get_jobs_and_platforms_for_project()
top_crashes_by_project_and_platform_map = {}
for project_name in projects_to_jobs_and_platforms:
top_crashes_by_project_and_platform_map[project_name] = {}
project_map = projects_to_jobs_and_platforms[project_name]
for platform in project_map.platforms:
where_clause = (
'crash_type NOT IN UNNEST(%s) AND '
'crash_state NOT IN UNNEST(%s) AND '
'job_type IN UNNEST(%s) AND '
'platform LIKE %s AND '
'project = %s' % (json.dumps(TOP_CRASHES_IGNORE_CRASH_TYPES),
json.dumps(TOP_CRASHES_IGNORE_CRASH_STATES),
json.dumps(list(project_map.jobs)),
json.dumps(platform.lower() + '%'),
json.dumps(project_name)))
_, rows = crash_stats.get(
end=last_hour,
block='day',
days=TOP_CRASHES_DAYS_LOOKBEHIND,
group_by='platform',
where_clause=where_clause,
group_having_clause='',
sort_by='total_count',
offset=0,
limit=TOP_CRASHES_LIMIT)
if not rows:
continue
top_crashes_by_project_and_platform_map[project_name][platform] = [{
'crashState': row['crashState'],
'crashType': row['crashType'],
'isSecurity': row['isSecurity'],
'totalCount': row['totalCount'],
} for row in rows if row['totalCount'] >= TOP_CRASHES_MIN_THRESHOLD]
return top_crashes_by_project_and_platform_map
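# For reference, the returned structure looks roughly like this (values and the
# platform key are illustrative, not real data):
#
#   {
#       'project-a': {
#           'LINUX': [
#               {'crashState': 'foo\nbar', 'crashType': 'Heap-buffer-overflow',
#                'isSecurity': True, 'totalCount': 1400},
#           ],
#       },
#   }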
def get_top_crash_platforms(testcase, top_crashes_by_project_and_platform_map):
"""Return list of platforms where this testcase is a top crasher."""
if testcase.project_name not in top_crashes_by_project_and_platform_map:
return []
top_crashes_by_platform_map = top_crashes_by_project_and_platform_map[
testcase.project_name]
top_crash_platforms = set()
for platform in list(top_crashes_by_platform_map.keys()):
top_crashes = top_crashes_by_platform_map[platform]
if not top_crashes:
continue
for top_crash in top_crashes:
crash_state_comparer = crash_comparer.CrashComparer(
top_crash['crashState'], testcase.crash_state)
crash_type_comparer = crash_comparer.CrashComparer(
top_crash['crashType'], testcase.crash_type)
if (crash_state_comparer.is_similar() and
top_crash['isSecurity'] == testcase.security_flag and
(top_crash['isSecurity'] or crash_type_comparer.is_similar())):
top_crash_platforms.add(platform.lower())
return sorted(list(top_crash_platforms))
def delete_unreproducible_testcase_with_no_issue(testcase):
"""Delete an unreproducible testcase if it has no associated issue and has
been open for a certain time interval."""
# Make sure that this testcase is an unreproducible bug. If not, bail out.
if not testcase.one_time_crasher_flag:
return
# Make sure that this testcase has no associated bug. If not, bail out.
if testcase.bug_information:
return
# Make sure that testcase is at least older than
# |UNREPRODUCIBLE_TESTCASE_NO_BUG_DEADLINE|, otherwise it will be seen in
# crash stats anyway.
if (testcase.timestamp and not dates.time_has_expired(
testcase.timestamp,
days=data_types.UNREPRODUCIBLE_TESTCASE_NO_BUG_DEADLINE)):
return
# Make sure that testcase is not seen in crash stats for a certain time
# interval.
if get_crash_occurrence_platforms(
testcase, data_types.UNREPRODUCIBLE_TESTCASE_NO_BUG_DEADLINE):
return
testcase.key.delete()
logs.log(
'Deleted unreproducible testcase %d with no issue.' % testcase.key.id())
def mark_duplicate_testcase_as_closed_with_no_issue(testcase):
"""Closes a duplicate testcase if it has no associated issue and has been open
for a certain time interval."""
# Make sure that this testcase is a duplicate bug. If not, bail out.
if testcase.status != 'Duplicate':
return
# Make sure that this testcase has no associated bug. If not, bail out.
if testcase.bug_information:
return
# Make sure that testcase has been open for a certain time interval. We do
# a null timestamp check since some older testcases could be missing it.
if (testcase.timestamp and not dates.time_has_expired(
testcase.timestamp, days=data_types.DUPLICATE_TESTCASE_NO_BUG_DEADLINE)):
return
testcase.fixed = 'NA'
testcase.open = False
testcase.put()
logs.log('Closed duplicate testcase %d with no issue.' % testcase.key.id())
def mark_issue_as_closed_if_testcase_is_fixed(policy, testcase, issue):
"""Mark an issue as fixed if all of its associated reproducible testcase are
fixed."""
verified_label = policy.label('verified')
if not verified_label:
return
# If there is no associated issue, then bail out.
if not issue or not testcase.bug_information:
return
# If the issue is closed in a status other than Fixed, like Duplicate, WontFix
# or Archived, we shouldn't change it. Bail out.
if not issue.is_open and issue.status != policy.status('fixed'):
return
# Check testcase status, so as to skip unreproducible uploads.
if testcase.status not in ['Processed', 'Duplicate']:
return
# If the testcase is still open, no work needs to be done. Bail out.
if testcase.open:
return
# FIXME: Find a better solution to skip over reproducible tests that are now
# showing up as flaky (esp when we are unable to reproduce crash in original
# crash revision).
if testcase.fixed == 'NA':
return
# We can only verify fixed issues for reproducible testcases. If the testcase
# is unreproducible, bail out. Exception is if we explicitly marked this as
# fixed.
if testcase.one_time_crasher_flag and testcase.fixed != 'Yes':
return
# Make sure that no other testcases associated with this issue are open.
similar_testcase = data_types.Testcase.query(
data_types.Testcase.bug_information == testcase.bug_information,
ndb_utils.is_true(data_types.Testcase.open),
ndb_utils.is_false(data_types.Testcase.one_time_crasher_flag)).get()
if similar_testcase:
return
# As a last check, do the expensive call of actually checking all issue
# comments to make sure we didn't do the verification already and we didn't
# get called out on issue mistriage.
if (issue_tracker_utils.was_label_added(issue, verified_label) or
issue_tracker_utils.was_label_added(issue, policy.label('wrong'))):
return
issue.labels.add(verified_label)
comment = 'ClusterFuzz testcase %d is verified as fixed' % testcase.key.id()
fixed_range_url = data_handler.get_fixed_range_url(testcase)
if fixed_range_url:
comment += ' in ' + fixed_range_url
else:
comment += '.'
if utils.is_oss_fuzz():
comment += OSS_FUZZ_INCORRECT_COMMENT
else:
comment = _append_generic_incorrect_comment(comment, policy, issue,
' and re-open the issue.')
skip_auto_close = data_handler.get_value_from_job_definition(
testcase.job_type, 'SKIP_AUTO_CLOSE_ISSUE')
if not skip_auto_close:
issue.status = policy.status('verified')
issue.save(new_comment=comment, notify=True)
logs.log('Mark issue %d as verified for fixed testcase %d.' %
(issue.id, testcase.key.id()))
def mark_unreproducible_testcase_as_fixed_if_issue_is_closed(testcase, issue):
"""Mark an unreproducible testcase as fixed if the associated issue is
closed."""
# If the testcase is already closed, no more work to do.
if not testcase.open:
return
# Make sure that this testcase is an unreproducible bug. If not, bail out.
if not testcase.one_time_crasher_flag:
return
# Make sure that this testcase has an associated bug. If not, bail out.
if not testcase.bug_information:
return
# Make sure that there is an associated bug and it is in closed state.
if not issue or issue.is_open:
return
testcase.fixed = 'NA'
testcase.open = False
testcase.put()
logs.log('Closed unreproducible testcase %d with issue closed.' %
testcase.key.id())
def mark_unreproducible_testcase_and_issue_as_closed_after_deadline(
policy, testcase, issue):
"""Closes an unreproducible testcase and its associated issue after a certain
time period."""
# If the testcase is already closed, no more work to do.
if not testcase.open:
return
# Check testcase status, so as to skip unreproducible uploads.
if testcase.status not in ['Processed', 'Duplicate']:
return
# Make sure that this testcase is an unreproducible bug. If not, bail out.
if not testcase.one_time_crasher_flag:
return
# Make sure that this testcase has an associated bug. If not, bail out.
if not testcase.bug_information:
return
# If this testcase was manually uploaded, don't change issue state as our
# reproduction result might be incorrect.
if testcase.uploader_email:
return
# Make sure that there is an associated bug and it is in open state.
if not issue or not issue.is_open:
return
# Check if there are any reproducible open testcases are associated with
# this bug. If yes, return.
similar_testcase = data_types.Testcase.query(
data_types.Testcase.bug_information == testcase.bug_information,
ndb_utils.is_true(data_types.Testcase.open),
ndb_utils.is_false(data_types.Testcase.one_time_crasher_flag)).get()
if similar_testcase:
return
# Make sure that testcase is at least older than
# |UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE|, otherwise it will be seen in
# crash stats anyway.
if (testcase.timestamp and not dates.time_has_expired(
testcase.timestamp,
days=data_types.UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE)):
return
# Handle testcase that turned from reproducible to unreproducible. Account
# for the recent progression task run time.
last_tested_crash_time = testcase.get_metadata('last_tested_crash_time')
if (last_tested_crash_time and not dates.time_has_expired(
last_tested_crash_time,
days=data_types.UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE)):
return
# Make sure that there is no crash seen in the deadline period.
if get_crash_occurrence_platforms(
testcase, data_types.UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE):
return
# As a last check, do the expensive call of actually checking all issue
# comments to make sure we didn't get called out on issue mistriage.
if issue_tracker_utils.was_label_added(issue, policy.label('wrong')):
return
# Close associated issue and testcase.
comment = ('ClusterFuzz testcase %d is flaky and no longer crashes, '
'so closing issue.' % testcase.key.id())
if utils.is_oss_fuzz():
comment += OSS_FUZZ_INCORRECT_COMMENT
else:
comment = _append_generic_incorrect_comment(comment, policy, issue,
' and re-open the issue.')
issue.status = policy.status('wontfix')
issue.save(new_comment=comment, notify=True)
testcase.fixed = 'NA'
testcase.open = False
testcase.put()
logs.log('Closed unreproducible testcase %d and associated issue.' %
testcase.key.id())
def mark_testcase_as_triaged_if_needed(testcase, issue):
"""Mark testcase as triage complete if both testcase and associated issue
are closed."""
# Check if testcase is open. If yes, bail out.
if testcase.open:
return
# Check if there is an associated bug in open state. If yes, bail out.
if issue:
# Get latest issue object to ensure our update went through.
issue = issue_tracker_utils.get_issue_for_testcase(testcase)
if issue.is_open:
return
testcase.triaged = True
testcase.put()
def mark_testcase_as_closed_if_issue_is_closed(policy, testcase, issue):
"""Mark testcase as closed if the associated issue is closed."""
# If the testcase is already closed, no more work to do.
if not testcase.open:
return
# If there is no associated issue, then bail out.
if not issue or not testcase.bug_information:
return
# If the issue is still open, no work needs to be done. Bail out.
if issue.is_open:
return
# Make sure we passed our deadline based on issue closed timestamp.
if (issue.closed_time and not dates.time_has_expired(
issue.closed_time,
days=data_types.CLOSE_TESTCASE_WITH_CLOSED_BUG_DEADLINE)):
return
# If the issue has an ignore label, don't close the testcase and bail out.
# This helps to prevent new bugs from getting filed for legit WontFix cases.
if issue_tracker_utils.was_label_added(issue, policy.label('ignore')):
return
testcase.open = False
testcase.fixed = 'NA'
testcase.put()
logs.log('Closed testcase %d with issue closed.' % testcase.key.id())
def mark_testcase_as_closed_if_job_is_invalid(testcase, jobs):
"""Mark testcase as closed if the associated job type does not exist."""
# If the testcase is already closed, no more work to do.
if not testcase.open:
return
# Check if the testcase job name is in the list of jobs.
if testcase.job_type in jobs:
return
testcase.open = False
testcase.fixed = 'NA'
testcase.put()
logs.log('Closed testcase %d with invalid job.' % testcase.key.id())
def notify_closed_issue_if_testcase_is_open(policy, testcase, issue):
"""Notify closed issue if associated testcase is still open after a certain
time period."""
needs_feedback_label = policy.label('needs_feedback')
if not needs_feedback_label:
return
# If the testcase is already closed, no more work to do.
if not testcase.open:
return
# Check testcase status, so as to skip unreproducible uploads.
if testcase.status not in ['Processed', 'Duplicate']:
return
# If there is no associated issue, then bail out.
if not issue or not testcase.bug_information:
return
# If the issue is still open, no work needs to be done. Bail out.
if issue.is_open:
return
# If we have already passed our deadline based on issue closed timestamp,
# no need to notify. We will close the testcase instead.
if (issue.closed_time and not dates.time_has_expired(
issue.closed_time,
days=data_types.NOTIFY_CLOSED_BUG_WITH_OPEN_TESTCASE_DEADLINE)):
return
# Check if there is ignore label on issue already. If yes, bail out.
if issue_tracker_utils.was_label_added(issue, policy.label('ignore')):
return
# Check if we did add the notification comment already. If yes, bail out.
if issue_tracker_utils.was_label_added(issue, needs_feedback_label):
return
issue.labels.add(needs_feedback_label)
if issue.status in [policy.status('fixed'), policy.status('verified')]:
issue_comment = (
'ClusterFuzz testcase {id} is still reproducing on tip-of-tree build '
'(trunk).\n\nPlease re-test your fix against this testcase and if the '
'fix was incorrect or incomplete, please re-open the bug.'
).format(id=testcase.key.id())
wrong_label = policy.label('wrong')
if wrong_label:
issue_comment += (
(' Otherwise, ignore this notification and add the '
'{label_text}.'
).format(label_text=issue.issue_tracker.label_text(wrong_label)))
else:
# Covers WontFix, Archived cases.
issue_comment = (
'ClusterFuzz testcase {id} is still reproducing on tip-of-tree build '
'(trunk).\n\nIf this testcase was not reproducible locally or '
'unworkable, ignore this notification and we will file another '
'bug soon with hopefully a better and workable testcase.\n\n'.format(
id=testcase.key.id()))
ignore_label = policy.label('ignore')
if ignore_label:
issue_comment += (
'Otherwise, if this is not intended to be fixed (e.g. this is an '
'intentional crash), please add the {label_text} to '
'prevent future bug filing with similar crash stacktrace.'.format(
label_text=issue.issue_tracker.label_text(ignore_label)))
issue.save(new_comment=issue_comment, notify=True)
logs.log('Notified closed issue for open testcase %d.' % testcase.key.id())
def notify_issue_if_testcase_is_invalid(policy, testcase, issue):
"""Leave comments on associated issues when test cases are no longer valid."""
invalid_fuzzer_label = policy.label('invalid_fuzzer')
if not invalid_fuzzer_label:
return
if not issue or not testcase.bug_information:
return
# If the issue is closed, there's no work to do.
if not issue.is_open:
return
# Currently, this only happens if a test case relies on a fuzzer that has
# been deleted. This can be modified if more cases are needed in the future.
if not testcase.get_metadata('fuzzer_was_deleted'):
return
# Check if we added this message once. If yes, bail out.
if issue_tracker_utils.was_label_added(issue, invalid_fuzzer_label):
return
issue_comment = (
'ClusterFuzz testcase %d is associated with an obsolete fuzzer and can '
'no longer be processed. Please close the issue if it is no longer '
'actionable.') % testcase.key.id()
issue.labels.add(invalid_fuzzer_label)
issue.save(new_comment=issue_comment, notify=True)
logs.log('Closed issue %d for invalid testcase %d.' % (issue.id,
testcase.key.id()))
def _send_email_to_uploader(testcase_id, to_email, content):
"""Send email to uploader when all the testcase tasks are finished."""
subject = 'Your testcase upload %d analysis is complete.' % testcase_id
content_with_footer = (
'%s\n\n'
'If you suspect that the result above is incorrect, '
'try re-doing that job on the testcase report page.') % content.strip()
html_content = content_with_footer.replace('\n', '<br>')
mail.send(to_email, subject, html_content)
def _get_severity_from_labels(security_severity_label, labels):
"""Get the severity from the label list."""
pattern = issue_filer.get_label_pattern(security_severity_label)
for label in labels:
match = pattern.match(label)
if match:
return severity_analyzer.string_to_severity(match.group(1))
return data_types.SecuritySeverity.MISSING
def _update_issue_security_severity_and_get_comment(policy, testcase, issue):
"""Apply a new security severity label if none exists on issue already
and return a comment on this addition. If a label already exists and does
not match security severity label on issue, then just return a comment on
what the recommended severity is."""
security_severity_label = policy.label('security_severity')
if not security_severity_label:
return ''
if not data_types.SecuritySeverity.is_valid(testcase.security_severity):
return ''
issue_severity = _get_severity_from_labels(security_severity_label,
issue.labels)
recommended_severity = issue_filer.apply_substitutions(
policy, security_severity_label, testcase)
if not recommended_severity:
return ''
recommended_severity = recommended_severity[0]
if issue_severity == data_types.SecuritySeverity.MISSING:
issue.labels.add(recommended_severity)
return ('\n\nA recommended severity was added to this bug. '
'Please change the severity if it is inaccurate.')
if issue_severity != testcase.security_severity:
return (
'\n\nThe recommended severity (%s) is different from what was assigned '
'to the bug. Please double check the accuracy of the assigned '
'severity.' % recommended_severity)
return ''
def _update_issue_when_uploaded_testcase_is_processed(
policy, testcase, issue, description, update_bug_summary, notify):
"""Add issue comment when uploaded testcase is processed."""
if update_bug_summary and testcase.is_crash():
issue.title = data_handler.get_issue_summary(testcase)
# Impact labels like impacting head/beta/stable only apply for Chromium.
if testcase.project_name == 'chromium':
issue_filer.update_issue_impact_labels(testcase, issue)
# Add severity labels for all project types.
comment = description + _update_issue_security_severity_and_get_comment(
policy, testcase, issue)
issue.save(new_comment=comment, notify=notify)
def notify_uploader_when_testcase_is_processed(policy, testcase, issue):
"""Notify uploader by email when all the testcase tasks are finished."""
testcase_id = testcase.key.id()
# Check if this is a user upload. If not, bail out.
upload_metadata = data_types.TestcaseUploadMetadata.query(
data_types.TestcaseUploadMetadata.testcase_id == testcase_id).get()
if not upload_metadata:
return
# Check that we have a valid email to send the notification. If not, bail out.
to_email = upload_metadata.uploader_email
if not to_email:
return
# If this is a bundled archive with multiple testcases, then don't send email
# for individual testcases.
if upload_metadata.bundled:
return
# Check if the notification is already sent once. If yes, bail out.
if data_handler.is_notification_sent(testcase_id, to_email):
return
# Make sure all testcase tasks are done (e.g. minimization, regression, etc).
if not data_handler.critical_tasks_completed(testcase):
return
notify = not upload_metadata.quiet_flag
if issue and not testcase.duplicate_of:
issue_description = data_handler.get_issue_description(testcase)
_update_issue_when_uploaded_testcase_is_processed(
policy, testcase, issue, issue_description,
upload_metadata.bug_summary_update_flag, notify)
if notify:
issue_description_without_crash_state = data_handler.get_issue_description(
testcase, hide_crash_state=True)
_send_email_to_uploader(testcase_id, to_email,
issue_description_without_crash_state)
# Make sure to create notification entry, as we use this to update bug.
data_handler.create_notification_entry(testcase_id, to_email)
def update_os_labels(policy, testcase, issue):
"""Add OS labels to issue."""
os_label = policy.label('os')
if not os_label:
return
if not issue:
return
platforms = get_crash_occurrence_platforms(testcase)
platforms = platforms.union(get_platforms_from_testcase_variants(testcase))
logs.log(
'Found %d platforms for the testcase %d.' % (len(platforms),
testcase.key.id()),
platforms=platforms)
for platform in platforms:
label = os_label.replace('%PLATFORM%', platform.capitalize())
if not issue_tracker_utils.was_label_added(issue, label):
issue.labels.add(label)
issue.save(notify=False)
logs.log('Updated labels of issue %d.' % issue.id, labels=issue.labels)
def update_fuzz_blocker_label(policy, testcase, issue,
top_crashes_by_project_and_platform_map):
"""Add top crash label to issue."""
fuzz_blocker_label = policy.label('fuzz_blocker')
if not fuzz_blocker_label:
return
if not issue:
return
if not testcase.open:
return
top_crash_platforms = get_top_crash_platforms(
testcase, top_crashes_by_project_and_platform_map)
if not top_crash_platforms:
# Not a top crasher, bail out.
return
if issue_tracker_utils.was_label_added(issue, fuzz_blocker_label):
# Issue was already marked a top crasher, bail out.
return
if len(top_crash_platforms) == 1:
platform_message = '%s platform' % top_crash_platforms[0]
else:
platform_message = '%s and %s platforms' % (', '.join(
top_crash_platforms[:-1]), top_crash_platforms[-1])
fuzzer_name = (
testcase.get_metadata('fuzzer_binary_name') or testcase.fuzzer_name)
update_message = (
'This crash occurs very frequently on %s and is likely preventing the '
'fuzzer %s from making much progress. Fixing this will allow more bugs '
'to be found.' % (platform_message, fuzzer_name))
if utils.is_oss_fuzz():
update_message += OSS_FUZZ_INCORRECT_COMMENT
elif utils.is_chromium():
update_message += '\n\nMarking this bug as a blocker for next Beta release.'
update_message = _append_generic_incorrect_comment(
update_message,
policy,
issue,
' and remove the {label_text}.'.format(
label_text=issue.issue_tracker.label_text(
data_types.CHROMIUM_ISSUE_RELEASEBLOCK_BETA_LABEL)))
issue.labels.add(data_types.CHROMIUM_ISSUE_RELEASEBLOCK_BETA_LABEL)
# Update with the next beta for trunk, and remove existing milestone label.
beta_milestone_label = (
'M-%d' % build_info.get_release_milestone('head', testcase.platform))
if beta_milestone_label not in issue.labels:
issue.labels.remove_by_prefix('M-')
issue.labels.add(beta_milestone_label)
logs.log(update_message)
issue.labels.add(fuzz_blocker_label)
issue.save(new_comment=update_message, notify=True)
def update_component_labels(testcase, issue):
"""Add components to the issue if needed."""
if not issue:
return
components = _get_predator_result_item(
testcase, 'suspected_components', default=[])
# Remove components already in issue or whose more specific variants exist.
filtered_components = []
for component in components:
found_component_in_issue = any(
component == issue_component or issue_component.startswith(component +
'>')
for issue_component in issue.components)
if not found_component_in_issue:
filtered_components.append(component)
if not filtered_components:
# If there are no new components to add, then we shouldn't make any changes
# to issue.
return
# Don't run on issues we've already applied automatic components to in case
# labels are removed manually. This may cause issues in the event that we
# rerun a test case, but it seems like a reasonable tradeoff to avoid spam.
if issue_tracker_utils.was_label_added(
issue, data_types.CHROMIUM_ISSUE_PREDATOR_AUTO_COMPONENTS_LABEL):
return
for filtered_component in filtered_components:
issue.components.add(filtered_component)
issue.labels.add(data_types.CHROMIUM_ISSUE_PREDATOR_AUTO_COMPONENTS_LABEL)
issue_comment = (
'Automatically applying components based on crash stacktrace and '
'information from OWNERS files.\n\n'
'If this is incorrect, please apply the {label_text}.'.format(
label_text=issue.issue_tracker.label_text(
data_types.CHROMIUM_ISSUE_PREDATOR_WRONG_COMPONENTS_LABEL)))
issue.save(new_comment=issue_comment, notify=True)
def update_issue_ccs_from_owners_file(policy, testcase, issue):
"""Add cc to an issue based on owners list from owners file. This is
currently applicable to fuzz targets only."""
auto_cc_label = policy.label('auto_cc_from_owners')
if not auto_cc_label:
return
if not issue or not issue.is_open:
return
if testcase.get_metadata('has_issue_ccs_from_owners_file'):
return
ccs_list = utils.parse_delimited(
testcase.get_metadata('issue_owners', ''),
delimiter=',',
strip=True,
remove_empty=True)
if not ccs_list:
return
# If we've assigned the ccs before, it likely means we were incorrect.
# Don't try again for this particular issue.
if issue_tracker_utils.was_label_added(issue, auto_cc_label):
return
ccs_added = False
actions = list(issue.actions)
for cc in random.sample(ccs_list, min(AUTO_CC_LIMIT, len(ccs_list))):
if cc in issue.ccs:
continue
# If cc was previously manually removed from the cc list, we assume that
# they were incorrectly added. Don't try to add them again.
cc_was_removed = any(cc in action.ccs.removed for action in actions)
if cc_was_removed:
continue
issue.ccs.add(cc)
ccs_added = True
if not ccs_added:
# Everyone we'd expect to see has already been cced on the issue. No need
# to spam it with another comment. Also, set the metadata to avoid doing
# this again.
testcase.set_metadata('has_issue_ccs_from_owners_file', True)
return
issue_comment = (
'Automatically adding ccs based on OWNERS file / target commit history.')
if utils.is_oss_fuzz():
issue_comment += OSS_FUZZ_INCORRECT_COMMENT + '.'
else:
issue_comment = _append_generic_incorrect_comment(issue_comment, policy,
issue, '.')
issue.labels.add(auto_cc_label)
issue.save(new_comment=issue_comment, notify=True)
def update_issue_labels_for_flaky_testcase(policy, testcase, issue):
"""Update issue reproducibility label when testcase becomes flaky or
unreproducible."""
if not issue or not issue.is_open:
return
# If the testcase is reproducible, then no change is needed. Bail out.
if not testcase.one_time_crasher_flag:
return
# Make sure that no other reproducible testcases associated with this issue
# are open. If yes, no need to update label.
similar_reproducible_testcase = data_types.Testcase.query(
data_types.Testcase.bug_information == testcase.bug_information,
ndb_utils.is_true(data_types.Testcase.open),
ndb_utils.is_false(data_types.Testcase.one_time_crasher_flag)).get()
if similar_reproducible_testcase:
return
reproducible_label = policy.label('reproducible')
unreproducible_label = policy.label('unreproducible')
if not reproducible_label or not unreproducible_label:
return
# Make sure that this issue is not already marked Unreproducible.
if unreproducible_label in issue.labels:
return
issue.labels.remove(reproducible_label)
issue.labels.add(unreproducible_label)
comment = ('ClusterFuzz testcase {testcase_id} appears to be flaky, '
'updating reproducibility {label_type}.'.format(
testcase_id=testcase.key.id(),
label_type=issue.issue_tracker.label_type))
issue.save(new_comment=comment)
def update_issue_owner_and_ccs_from_predator_results(policy,
testcase,
issue,
only_allow_ccs=False):
"""Assign the issue to an appropriate owner if possible."""
if not issue or not issue.is_open:
return
# If the issue already has an owner, we don't need to update the bug.
if issue.assignee:
return
# If there are more than 3 suspected CLs, we can't be confident in the
# results. Just skip any sort of notification to CL authors in this case.
suspected_cls = _get_predator_result_item(testcase, 'suspected_cls')
if not suspected_cls or len(suspected_cls) > 3:
return
# If we've assigned an owner or cc once before, it likely means we were
# incorrect. Don't try again for this particular issue.
if (issue_tracker_utils.was_label_added(
issue, data_types.CHROMIUM_ISSUE_PREDATOR_AUTO_OWNER_LABEL) or
issue_tracker_utils.was_label_added(
issue, data_types.CHROMIUM_ISSUE_PREDATOR_AUTO_CC_LABEL)):
return
# Validate that the suspected CLs have all of the information we need before
# continuing. This allows us to assume that they are well-formed later,
# avoiding any potential exceptions that would interrupt this task.
for suspected_cl in suspected_cls:
url = suspected_cl.get('url')
description = suspected_cl.get('description')
author = suspected_cl.get('author')
if not url or not description or not author:
logs.log_error(
'Suspected CL for testcase %d is missing required information.' %
testcase.key.id())
return
if len(suspected_cls) == 1 and not only_allow_ccs:
suspected_cl = suspected_cls[0]
# If this owner has already been assigned before but has since been removed,
# don't assign it to them again.
for action in issue.actions:
if action.assignee == suspected_cls[0]['author']:
return
# We have high confidence for the single-CL case, so we assign the owner.
issue.labels.add(data_types.CHROMIUM_ISSUE_PREDATOR_AUTO_OWNER_LABEL)
issue.assignee = suspected_cl['author']
issue.status = policy.status('assigned')
issue_comment = (
'Automatically assigning owner based on suspected regression '
'changelist %s (%s).\n\n'
'If this is incorrect, please let us know why and apply the %s '
'label. If you aren\'t the correct owner for this issue, please '
'unassign yourself as soon as possible so it can be re-triaged.' %
(suspected_cl['url'], suspected_cl['description'],
data_types.CHROMIUM_ISSUE_PREDATOR_WRONG_CL_LABEL))
else:
if testcase.get_metadata('has_issue_ccs_from_predator_results'):
return
issue_comment = (
'Automatically adding ccs based on suspected regression changelists:'
'\n\n')
ccs_added = False
for suspected_cl in suspected_cls:
# Update the comment with the suspected CL, regardless of whether or not
# we're ccing the author. This might, for example, catch the attention of
# someone who has already been cced.
author = suspected_cl['author']
issue_comment += '%s by %s - %s\n\n' % (suspected_cl['description'],
author, suspected_cl['url'])
if author in issue.ccs:
continue
# If an author has previously been manually removed from the cc list,
# we assume they were incorrectly added. Don't try to add them again.
author_was_removed = False
for action in issue.actions:
if author in action.ccs.removed:
author_was_removed = True
break
if author_was_removed:
continue
issue.ccs.add(author)
ccs_added = True
if not ccs_added:
# Everyone we'd expect to see has already been cced on the issue. No need
# to spam it with another comment. Also, set the metadata to avoid doing
# this again.
testcase.set_metadata('has_issue_ccs_from_predator_results', True)
return
issue.labels.add(data_types.CHROMIUM_ISSUE_PREDATOR_AUTO_CC_LABEL)
issue_comment += ((
'If this is incorrect, please let us know why and apply the '
'{label_text}.').format(
label_text=issue.issue_tracker.label_text(
data_types.CHROMIUM_ISSUE_PREDATOR_WRONG_CL_LABEL)))
try:
issue.save(new_comment=issue_comment, notify=True)
except HttpError:
# If we see such an error when we aren't setting an owner, it's unexpected.
if only_allow_ccs or not issue.assignee:
logs.log_error(
'Unable to update issue for test case %d.' % testcase.key.id())
return
# Retry without setting the owner. They may not be a chromium project
# member, in which case we can try falling back to cc.
issue = issue_tracker_utils.get_issue_for_testcase(testcase)
update_issue_owner_and_ccs_from_predator_results(
policy, testcase, issue, only_allow_ccs=True)
def cleanup_unused_heartbeats():
"""Clean up unused heartbeat entities."""
cutoff_time = utils.utcnow() - datetime.timedelta(
days=UNUSED_HEARTBEAT_THRESHOLD)
unused_heartbeats = ndb_utils.get_all_from_query(
data_types.Heartbeat.query(
data_types.Heartbeat.last_beat_time < cutoff_time),
keys_only=True)
ndb_utils.delete_multi(unused_heartbeats)
class Handler(base_handler.Handler):
"""Cleanup."""
@handler.check_cron()
def get(self):
cleanup_testcases_and_issues()
cleanup_reports_metadata()
leak_blacklist.cleanup_global_blacklist()
cleanup_unused_fuzz_targets_and_jobs()
cleanup_unused_heartbeats()
|
the-stack_0_9894 | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from oneflow.compatible.single_client.core.summary import projector_pb2 as projector_pb2
from oneflow.compatible.single_client.python.oneflow_export import oneflow_export
import time
from oneflow.compatible import single_client as flow
@oneflow_export("summary.Projector")
class Projector(object):
r"""The class of Projector
This class can create an 'embedding_projector' or 'exception_projector'
"""
def __init__(self, logdir=None):
r"""Create a Projector objector
Args:
logdir: The log dir
Raises:
Exception: If 'logdir' is None or illegal
"""
if logdir is None:
raise Exception("logdir should not be None!")
logdir += "/projector"
if not os.path.exists(logdir):
os.makedirs(logdir)
self.logdir_ = logdir
self.embedding_filename_ = None
self.exception_filename_ = None
def create_embedding_projector(self):
if (self.embedding_filename_ is not None) and (
os.path.exists(self.embedding_filename_)
):
raise OSError("You must create only one embedding projector!")
self.embedding_filename_ = (
self.logdir_ + "/projector." + str(int(time.time())) + ".log"
)
def create_exception_projector(self):
if (self.exception_filename_ is not None) and (
os.path.exists(self.exception_filename_)
):
raise OSError("You must create only one embedding projector!")
self.exception_filename_ = (
self.logdir_ + "/projector.gradit." + str(int(time.time())) + ".log"
)
@property
def logdir(self):
return self.logdir_
@property
def exception_filename(self):
return self.exception_filename_
@property
def embedding_filename(self):
return self.embedding_filename_
def write_projector(self, filename=None, projector=None):
with open(filename, "wb") as f:
f.write(projector.SerializeToString())
f.flush()
def set_tensor(self, tensor: projector_pb2.Tensor, value):
for d in value.shape:
td = tensor.shape.dim.add()
td.size = d
tensor.dtype = str(value.dtype)
tensor.content = value.tobytes()
def set_projector(self, pro, tag, step, value, label=None):
pro.tag = str(tag)
pro.step = step
pro.WALL_TIME = time.time()
self.set_tensor(pro.value, value)
if label is not None:
self.set_tensor(pro.label, label)
def set_sample(self, sample, name, x, sample_type):
if name is not None:
sample.name = name
if sample_type == "image" or sample_type == "IMAGE":
sample.type = projector_pb2.Sample.SampleType.IMAGE
elif sample_type == "audio" or sample_type == "AUDIO":
sample.type = projector_pb2.Sample.SampleType.AUDIO
elif sample_type == "text" or sample_type == "TEXT":
sample.type = projector_pb2.Sample.SampleType.TEXT
else:
raise NotImplementedError
if x is not None:
self.set_tensor(sample.X, x)
def embedding_projector(
self,
value=None,
label=None,
tag=None,
step=None,
sample_name=None,
sample_type=None,
x=None,
):
if tag is None:
tag = "embedding_projector"
summary_projector = projector_pb2.SummaryProjector()
summary_projector.metadata.type = projector_pb2.MetaData.ProjectorType.EMBEDDING
projector = summary_projector.projector.add()
self.set_projector(pro=projector, tag=tag, step=step, value=value, label=label)
if (sample_name is not None) and (sample_type is not None):
self.set_sample(
sample=summary_projector.sample,
name=sample_name,
x=x,
sample_type=sample_type,
)
self.write_projector(self.embedding_filename_, summary_projector)
def exception_projector(
self,
value=None,
tag=None,
step=None,
sample_name=None,
sample_type=None,
x=None,
):
if tag is None:
tag = "exception_projector"
summary_projector = projector_pb2.SummaryProjector()
summary_projector.metadata.type = projector_pb2.MetaData.ProjectorType.EXCEPTION
projector = summary_projector.projector.add()
self.set_projector(pro=projector, tag=tag, step=step, value=value)
if (sample_name is not None) and (sample_type is not None):
self.set_sample(
sample=summary_projector.sample,
name=sample_name,
x=x,
sample_type=sample_type,
)
self.write_projector(self.exception_filename_, summary_projector)
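# A hedged usage sketch (paths, tensor shapes and the tag below are assumptions,
# not part of the original module):
#
#   import numpy as np
#   projector = Projector(logdir="./log")
#   projector.create_embedding_projector()
#   value = np.random.rand(16, 8).astype(np.float32)   # 16 embeddings of dim 8
#   label = np.arange(16).astype(np.int32)
#   projector.embedding_projector(value=value, label=label, tag="demo", step=0)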
|
the-stack_0_9895 | """Binary classes"""
import binascii
import gzip
import io
import json
import logging
import os
import shutil
import struct
import subprocess
import sys
import tarfile
import tempfile
import zipfile
from json import dumps
from typing import Optional
import h5py
import numpy as np
import pysam
import pysam.bcftools
from bx.seq.twobit import TWOBIT_MAGIC_NUMBER, TWOBIT_MAGIC_NUMBER_SWAP
from galaxy import util
from galaxy.datatypes import metadata
from galaxy.datatypes.data import (
DatatypeValidation,
get_file_peek,
)
from galaxy.datatypes.metadata import (
DictParameter,
FileParameter,
ListParameter,
MetadataElement,
MetadataParameter,
)
from galaxy.datatypes.sniff import build_sniff_from_prefix
from galaxy.util import nice_size, sqlite
from galaxy.util.checkers import is_bz2, is_gzip
from . import data, dataproviders
log = logging.getLogger(__name__)
# pysam 0.16.0.1 emits logs containing the word 'Error', this can confuse the stdout/stderr checkers.
# Can be be removed once https://github.com/pysam-developers/pysam/issues/939 is resolved.
pysam.set_verbosity(0)
# Currently these supported binary data types must be manually set on upload
class Binary(data.Data):
"""Binary data"""
edam_format = "format_2333"
file_ext = "binary"
@staticmethod
def register_sniffable_binary_format(data_type, ext, type_class):
"""Deprecated method."""
@staticmethod
def register_unsniffable_binary_ext(ext):
"""Deprecated method."""
def set_peek(self, dataset, **kwd):
"""Set the peek and blurb text"""
if not dataset.dataset.purged:
dataset.peek = 'binary data'
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def get_mime(self):
"""Returns the mime type of the datatype"""
return 'application/octet-stream'
class Ab1(Binary):
"""Class describing an ab1 binary sequence file"""
file_ext = "ab1"
edam_format = "format_3000"
edam_data = "data_0924"
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Binary ab1 sequence file"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"Binary ab1 sequence file ({nice_size(dataset.get_size())})"
class Idat(Binary):
"""Binary data in idat format"""
file_ext = "idat"
edam_format = "format_2058"
edam_data = "data_2603"
def sniff(self, filename):
try:
header = open(filename, 'rb').read(4)
if header == b'IDAT':
return True
return False
except Exception:
return False
class Cel(Binary):
""" Cel File format described at:
http://media.affymetrix.com/support/developer/powertools/changelog/gcos-agcc/cel.html
"""
file_ext = "cel"
edam_format = "format_1638"
edam_data = "data_3110"
MetadataElement(name="version", default="3", desc="Version", readonly=True, visible=True,
optional=True, no_value="3")
def sniff(self, filename):
"""
Try to guess if the file is a Cel file.
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('affy_v_agcc.cel')
>>> Cel().sniff(fname)
True
>>> fname = get_test_fname('affy_v_3.cel')
>>> Cel().sniff(fname)
True
>>> fname = get_test_fname('affy_v_4.cel')
>>> Cel().sniff(fname)
True
>>> fname = get_test_fname('test.gal')
>>> Cel().sniff(fname)
False
"""
with open(filename, 'rb') as handle:
header_bytes = handle.read(8)
found_cel_4 = False
found_cel_3 = False
found_cel_agcc = False
if struct.unpack("<ii", header_bytes[:9]) == (64, 4):
found_cel_4 = True
elif struct.unpack(">bb", header_bytes[:2]) == (59, 1):
found_cel_agcc = True
elif header_bytes.decode("utf8", errors="ignore").startswith('[CEL]'):
found_cel_3 = True
return found_cel_3 or found_cel_4 or found_cel_agcc
def set_meta(self, dataset, **kwd):
"""
Set metadata for Cel file.
"""
with open(dataset.file_name, 'rb') as handle:
header_bytes = handle.read(8)
if struct.unpack("<ii", header_bytes[:9]) == (64, 4):
dataset.metadata.version = "4"
elif struct.unpack(">bb", header_bytes[:2]) == (59, 1):
dataset.metadata.version = "agcc"
elif header_bytes.decode("utf8", errors="ignore").startswith('[CEL]'):
dataset.metadata.version = "3"
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.blurb = f"Cel version: {dataset.metadata.version}"
dataset.peek = get_file_peek(dataset.file_name)
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
class MashSketch(Binary):
"""
Mash Sketch file.
Sketches are used by the MinHash algorithm to allow fast distance estimations
with low storage and memory requirements. To make a sketch, each k-mer in a sequence
is hashed, which creates a pseudo-random identifier. By sorting these identifiers (hashes),
a small subset from the top of the sorted list can represent the entire sequence (these are min-hashes).
The more similar another sequence is, the more min-hashes it is likely to share.
"""
file_ext = "msh"
class CompressedArchive(Binary):
"""
Class describing an compressed binary file
This class can be subclassed to implement archive filetypes that will not be unpacked by upload.py.
"""
file_ext = "compressed_archive"
compressed = True
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Compressed binary file"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"Compressed binary file ({nice_size(dataset.get_size())})"
class Meryldb(CompressedArchive):
"""MerylDB is a tar.gz archive, with 128 files. 64 data files and 64 index files."""
file_ext = "meryldb"
def sniff(self, filename):
"""
Try to guess if the file is a MerylDB archive.
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('affy_v_agcc.cel')
>>> Meryldb().sniff(fname)
False
>>> fname = get_test_fname('read-db.meryldb')
>>> Meryldb().sniff(fname)
True
"""
try:
if filename and tarfile.is_tarfile(filename):
with tarfile.open(filename, 'r') as temptar:
_tar_content = temptar.getnames()
# 64 data files and 64 indices + 2 folders
if len(_tar_content) == 130:
if len([_ for _ in _tar_content if _.endswith('.merylIndex')]) == 64:
return True
except Exception as e:
log.warning('%s, sniff Exception: %s', self, e)
return False
class Bref3(Binary):
"""Bref3 format is a binary format for storing phased, non-missing genotypes for a list of samples."""
file_ext = "bref3"
def __init__(self, **kwd):
super().__init__(**kwd)
self._magic = binascii.unhexlify("7a8874f400156272")
def sniff_prefix(self, sniff_prefix):
return sniff_prefix.startswith_bytes(self._magic)
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Binary bref3 file"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"Binary bref3 file ({nice_size(dataset.get_size())})"
class DynamicCompressedArchive(CompressedArchive):
def matches_any(self, target_datatypes):
"""Treat two aspects of compressed datatypes separately.
"""
compressed_target_datatypes = []
uncompressed_target_datatypes = []
for target_datatype in target_datatypes:
if hasattr(target_datatype, "uncompressed_datatype_instance") and target_datatype.compressed_format == self.compressed_format:
uncompressed_target_datatypes.append(target_datatype.uncompressed_datatype_instance)
else:
compressed_target_datatypes.append(target_datatype)
# TODO: Add gz and bz2 as proper datatypes and use those instances instead of
# CompressedArchive() in the following check.
return self.uncompressed_datatype_instance.matches_any(uncompressed_target_datatypes) or \
CompressedArchive().matches_any(compressed_target_datatypes)
class GzDynamicCompressedArchive(DynamicCompressedArchive):
compressed_format = "gzip"
class Bz2DynamicCompressedArchive(DynamicCompressedArchive):
compressed_format = "bz2"
class CompressedZipArchive(CompressedArchive):
"""
Class describing an compressed binary file
This class can be subclassed to implement archive filetypes that will not be unpacked by upload.py.
"""
file_ext = "zip"
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Compressed zip file"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"Compressed zip file ({nice_size(dataset.get_size())})"
def sniff(self, filename):
with zipfile.ZipFile(filename) as zf:
zf_files = zf.infolist()
count = 0
for f in zf_files:
if f.file_size > 0 and not f.filename.startswith('__MACOSX/') and not f.filename.endswith('.DS_Store'):
count += 1
if count > 1:
return True
class GenericAsn1Binary(Binary):
"""Class for generic ASN.1 binary format"""
file_ext = "asn1-binary"
edam_format = "format_1966"
edam_data = "data_0849"
class _BamOrSam:
"""
Helper class to set the metadata common to sam and bam files
"""
def set_meta(self, dataset, overwrite=True, **kwd):
try:
bam_file = pysam.AlignmentFile(dataset.file_name, mode='rb')
# TODO: Reference names, lengths, read_groups and headers can become very large, truncate when necessary
dataset.metadata.reference_names = list(bam_file.references)
dataset.metadata.reference_lengths = list(bam_file.lengths)
dataset.metadata.bam_header = dict(bam_file.header.items())
dataset.metadata.read_groups = [read_group['ID'] for read_group in dataset.metadata.bam_header.get('RG', []) if 'ID' in read_group]
dataset.metadata.sort_order = dataset.metadata.bam_header.get('HD', {}).get('SO', None)
dataset.metadata.bam_version = dataset.metadata.bam_header.get('HD', {}).get('VN', None)
except Exception:
# Per Dan, don't log here because doing so will cause datasets that
# fail metadata to end in the error state
pass
class BamNative(CompressedArchive, _BamOrSam):
"""Class describing a BAM binary file that is not necessarily sorted"""
edam_format = "format_2572"
edam_data = "data_0863"
file_ext = "unsorted.bam"
sort_flag: Optional[str] = None
MetadataElement(name="columns", default=12, desc="Number of columns", readonly=True, visible=False, no_value=0)
MetadataElement(name="column_types", default=['str', 'int', 'str', 'int', 'int', 'str', 'str', 'int', 'int', 'str', 'str', 'str'], desc="Column types", param=metadata.ColumnTypesParameter, readonly=True, visible=False, no_value=[])
MetadataElement(name="column_names", default=['QNAME', 'FLAG', 'RNAME', 'POS', 'MAPQ', 'CIGAR', 'MRNM', 'MPOS', 'ISIZE', 'SEQ', 'QUAL', 'OPT'], desc="Column names", readonly=True, visible=False, optional=True, no_value=[])
MetadataElement(name="bam_version", default=None, desc="BAM Version", param=MetadataParameter, readonly=True, visible=False, optional=True, no_value=None)
MetadataElement(name="sort_order", default=None, desc="Sort Order", param=MetadataParameter, readonly=True, visible=False, optional=True, no_value=None)
MetadataElement(name="read_groups", default=[], desc="Read Groups", param=MetadataParameter, readonly=True, visible=False, optional=True, no_value=[])
MetadataElement(name="reference_names", default=[], desc="Chromosome Names", param=MetadataParameter, readonly=True, visible=False, optional=True, no_value=[])
MetadataElement(name="reference_lengths", default=[], desc="Chromosome Lengths", param=MetadataParameter, readonly=True, visible=False, optional=True, no_value=[])
MetadataElement(name="bam_header", default={}, desc="Dictionary of BAM Headers", param=MetadataParameter, readonly=True, visible=False, optional=True, no_value={})
def set_meta(self, dataset, overwrite=True, **kwd):
_BamOrSam().set_meta(dataset)
@staticmethod
def merge(split_files, output_file):
"""
Merges BAM files
:param split_files: List of bam file paths to merge
:param output_file: Write merged bam file to this location
"""
pysam.merge('-O', 'BAM', output_file, *split_files)
def init_meta(self, dataset, copy_from=None):
Binary.init_meta(self, dataset, copy_from=copy_from)
def sniff(self, filename):
return BamNative.is_bam(filename)
@classmethod
def is_bam(cls, filename):
# BAM is compressed in the BGZF format, and must not be uncompressed in Galaxy.
# The first 4 bytes of any BAM file are 'BAM\1', and the file is binary.
try:
header = gzip.open(filename).read(4)
if header == b'BAM\1':
return True
return False
except Exception:
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Binary bam alignments file"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"Binary bam alignments file ({nice_size(dataset.get_size())})"
def to_archive(self, dataset, name=""):
rel_paths = []
file_paths = []
rel_paths.append(f"{name or dataset.file_name}.{dataset.extension}")
file_paths.append(dataset.file_name)
rel_paths.append(f"{name or dataset.file_name}.{dataset.extension}.bai")
file_paths.append(dataset.metadata.bam_index.file_name)
return zip(file_paths, rel_paths)
def groom_dataset_content(self, file_name):
"""
Ensures that the BAM file contents are coordinate-sorted. This function is called
on an output dataset after the content is initially generated.
"""
# Use pysam to sort the BAM file
# This command may also create temporary files <out.prefix>.%d.bam when the
# whole alignment cannot fit into memory.
# Do this in a unique temp directory, because of the possible <out.prefix>.%d.bam temp files.
if not self.dataset_content_needs_grooming(file_name):
# Don't re-sort if already sorted
return
tmp_dir = tempfile.mkdtemp()
tmp_sorted_dataset_file_name_prefix = os.path.join(tmp_dir, 'sorted')
sorted_file_name = f"{tmp_sorted_dataset_file_name_prefix}.bam"
slots = os.environ.get('GALAXY_SLOTS', 1)
sort_args = []
if self.sort_flag:
sort_args = [self.sort_flag]
sort_args.extend([f"-@{slots}", file_name, '-T', tmp_sorted_dataset_file_name_prefix, '-O', 'BAM', '-o', sorted_file_name])
try:
pysam.sort(*sort_args)
except Exception:
shutil.rmtree(tmp_dir, ignore_errors=True)
raise
# Move the temporary sorted file to our output dataset location
shutil.move(sorted_file_name, file_name)
# Remove the now-empty temporary directory
os.rmdir(tmp_dir)
def get_chunk(self, trans, dataset, offset=0, ck_size=None):
if not offset == -1:
try:
with pysam.AlignmentFile(dataset.file_name, "rb") as bamfile:
ck_size = 300 # 300 lines
ck_data = ""
header_line_count = 0
if offset == 0:
ck_data = bamfile.text.replace('\t', ' ')
header_line_count = bamfile.text.count('\n')
else:
bamfile.seek(offset)
for line_number, alignment in enumerate(bamfile):
# return only Header lines if 'header_line_count' exceeds 'ck_size'
# FIXME: Can be problematic if bam has million lines of header
offset = bamfile.tell()
if (line_number + header_line_count) > ck_size:
break
else:
bamline = alignment.tostring(bamfile)
# Galaxy would display each tag as a separate column because 'tostring()' puts a tab
# between each tag of the tags column. The code below collapses the tags back into a
# single space-separated column.
bamline_modified = ('\t').join(bamline.split()[:11] + [(' ').join(bamline.split()[11:])])
ck_data = f"{ck_data}\n{bamline_modified}"
else:
# Nothing to enumerate; we've either offset to the end
# of the bamfile, or there is no data. (possible with
# header-only bams)
offset = -1
except Exception as e:
offset = -1
ck_data = f"Could not display BAM file, error was:\n{e}"
else:
ck_data = ''
offset = -1
return dumps({'ck_data': util.unicodify(ck_data),
'offset': offset})
def display_data(self, trans, dataset, preview=False, filename=None, to_ext=None, offset=None, ck_size=None, **kwd):
preview = util.string_as_bool(preview)
if offset is not None:
return self.get_chunk(trans, dataset, offset, ck_size)
elif to_ext or not preview:
return super().display_data(trans, dataset, preview, filename, to_ext, **kwd)
else:
column_names = dataset.metadata.column_names
if not column_names:
column_names = []
column_types = dataset.metadata.column_types
if not column_types:
column_types = []
column_number = dataset.metadata.columns
if column_number is None:
column_number = 1
return trans.fill_template("/dataset/tabular_chunked.mako",
dataset=dataset,
chunk=self.get_chunk(trans, dataset, 0),
column_number=column_number,
column_names=column_names,
column_types=column_types)
def validate(self, dataset, **kwd):
if not BamNative.is_bam(dataset.file_name):
return DatatypeValidation.invalid("This dataset does not appear to a BAM file.")
elif self.dataset_content_needs_grooming(dataset.file_name):
return DatatypeValidation.invalid("This BAM file does not appear to have the correct sorting for declared datatype.")
return DatatypeValidation.validated()
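# A minimal sketch (hypothetical helper) of the check in BamNative.is_bam above:
# BAM files are BGZF (gzip-compatible) compressed, so the b'BAM\1' magic only
# appears after decompressing the first few bytes.
def _example_looks_like_bam(path):
    """Return True if the gzip-decompressed prefix of *path* starts with the BAM magic."""
    import gzip
    try:
        with gzip.open(path, 'rb') as handle:
            return handle.read(4) == b'BAM\1'
    except OSError:
        # Not gzip/BGZF compressed, so it cannot be a valid BAM file.
        return False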
@dataproviders.decorators.has_dataproviders
class Bam(BamNative):
"""Class describing a BAM binary file"""
edam_format = "format_2572"
edam_data = "data_0863"
file_ext = "bam"
track_type = "ReadTrack"
data_sources = {"data": "bai", "index": "bigwig"}
MetadataElement(name="bam_index", desc="BAM Index File", param=metadata.FileParameter, file_ext="bai", readonly=True, no_value=None, visible=False, optional=True)
MetadataElement(name="bam_csi_index", desc="BAM CSI Index File", param=metadata.FileParameter, file_ext="bam.csi", readonly=True, no_value=None, visible=False, optional=True)
def get_index_flag(self, file_name):
"""
Return pysam flag for bai index (default) or csi index (contig size > (2**29 - 1) )
"""
index_flag = '-b' # bai index
try:
with pysam.AlignmentFile(file_name) as alignment_file:
if max(alignment_file.header.lengths) > (2 ** 29) - 1:
index_flag = '-c' # csi index
except Exception:
# File may not have a header, that's OK
pass
return index_flag
def dataset_content_needs_grooming(self, file_name):
"""
Check if file_name is a coordinate-sorted BAM file
"""
# The best way to ensure that BAM files are coordinate-sorted and indexable
# is to actually index them.
index_flag = self.get_index_flag(file_name)
index_name = tempfile.NamedTemporaryFile(prefix="bam_index").name
try:
# If pysam fails to index a file it will write to stderr,
# and this causes the set_meta script to fail. So instead
# we start another process and discard stderr.
if index_flag == '-b':
# IOError: No such file or directory: '-b' if index_flag is set to -b (pysam 0.15.4)
cmd = ['python', '-c', f"import pysam; pysam.set_verbosity(0); pysam.index('{file_name}', '{index_name}')"]
else:
cmd = ['python', '-c', f"import pysam; pysam.set_verbosity(0); pysam.index('{index_flag}', '{file_name}', '{index_name}')"]
with open(os.devnull, 'w') as devnull:
subprocess.check_call(cmd, stderr=devnull, shell=False)
needs_sorting = False
except subprocess.CalledProcessError:
needs_sorting = True
try:
os.unlink(index_name)
except Exception:
pass
return needs_sorting
def set_meta(self, dataset, overwrite=True, **kwd):
# These metadata values are not accessible by users, always overwrite
super().set_meta(dataset=dataset, overwrite=overwrite, **kwd)
index_flag = self.get_index_flag(dataset.file_name)
if index_flag == '-b':
spec_key = 'bam_index'
index_file = dataset.metadata.bam_index
else:
spec_key = 'bam_csi_index'
index_file = dataset.metadata.bam_csi_index
if not index_file:
index_file = dataset.metadata.spec[spec_key].param.new_file(dataset=dataset)
if index_flag == '-b':
# IOError: No such file or directory: '-b' if index_flag is set to -b (pysam 0.15.4)
pysam.index(dataset.file_name, index_file.file_name)
else:
pysam.index(index_flag, dataset.file_name, index_file.file_name)
dataset.metadata.bam_index = index_file
def sniff(self, file_name):
return super().sniff(file_name) and not self.dataset_content_needs_grooming(file_name)
# ------------- Dataproviders
# pipe through samtools view
# ALSO: (as Sam)
# bam does not use '#' to indicate comments/headers - we need to strip out those headers from the std. providers
# TODO:?? seems like there should be an easier way to do/inherit this - metadata.comment_char?
# TODO: incorporate samtools options to control output: regions first, then flags, etc.
@dataproviders.decorators.dataprovider_factory('line', dataproviders.line.FilteredLineDataProvider.settings)
def line_dataprovider(self, dataset, **settings):
samtools_source = dataproviders.dataset.SamtoolsDataProvider(dataset)
settings['comment_char'] = '@'
return dataproviders.line.FilteredLineDataProvider(samtools_source, **settings)
@dataproviders.decorators.dataprovider_factory('regex-line', dataproviders.line.RegexLineDataProvider.settings)
def regex_line_dataprovider(self, dataset, **settings):
samtools_source = dataproviders.dataset.SamtoolsDataProvider(dataset)
settings['comment_char'] = '@'
return dataproviders.line.RegexLineDataProvider(samtools_source, **settings)
@dataproviders.decorators.dataprovider_factory('column', dataproviders.column.ColumnarDataProvider.settings)
def column_dataprovider(self, dataset, **settings):
samtools_source = dataproviders.dataset.SamtoolsDataProvider(dataset)
settings['comment_char'] = '@'
return dataproviders.column.ColumnarDataProvider(samtools_source, **settings)
@dataproviders.decorators.dataprovider_factory('dict', dataproviders.column.DictDataProvider.settings)
def dict_dataprovider(self, dataset, **settings):
samtools_source = dataproviders.dataset.SamtoolsDataProvider(dataset)
settings['comment_char'] = '@'
return dataproviders.column.DictDataProvider(samtools_source, **settings)
# these can't be used directly - may need BamColumn, BamDict (Bam metadata -> column/dict)
# OR - see genomic_region_dataprovider
# @dataproviders.decorators.dataprovider_factory('dataset-column', dataproviders.column.ColumnarDataProvider.settings)
# def dataset_column_dataprovider(self, dataset, **settings):
# settings['comment_char'] = '@'
# return super().dataset_column_dataprovider(dataset, **settings)
# @dataproviders.decorators.dataprovider_factory('dataset-dict', dataproviders.column.DictDataProvider.settings)
# def dataset_dict_dataprovider(self, dataset, **settings):
# settings['comment_char'] = '@'
# return super().dataset_dict_dataprovider(dataset, **settings)
@dataproviders.decorators.dataprovider_factory('header', dataproviders.line.RegexLineDataProvider.settings)
def header_dataprovider(self, dataset, **settings):
# in this case we can use an option of samtools view to provide just what we need (w/o regex)
samtools_source = dataproviders.dataset.SamtoolsDataProvider(dataset, '-H')
return dataproviders.line.RegexLineDataProvider(samtools_source, **settings)
@dataproviders.decorators.dataprovider_factory('id-seq-qual', dataproviders.column.DictDataProvider.settings)
def id_seq_qual_dataprovider(self, dataset, **settings):
settings['indeces'] = [0, 9, 10]
settings['column_types'] = ['str', 'str', 'str']
settings['column_names'] = ['id', 'seq', 'qual']
return self.dict_dataprovider(dataset, **settings)
@dataproviders.decorators.dataprovider_factory('genomic-region', dataproviders.column.ColumnarDataProvider.settings)
def genomic_region_dataprovider(self, dataset, **settings):
# GenomicRegionDataProvider currently requires a dataset as source - may not be necc.
# TODO:?? consider (at least) the possible use of a kwarg: metadata_source (def. to source.dataset),
# or remove altogether...
# samtools_source = dataproviders.dataset.SamtoolsDataProvider(dataset)
# return dataproviders.dataset.GenomicRegionDataProvider(samtools_source, metadata_source=dataset,
# 2, 3, 3, **settings)
# instead, set manually and use in-class column gen
settings['indeces'] = [2, 3, 3]
settings['column_types'] = ['str', 'int', 'int']
return self.column_dataprovider(dataset, **settings)
@dataproviders.decorators.dataprovider_factory('genomic-region-dict', dataproviders.column.DictDataProvider.settings)
def genomic_region_dict_dataprovider(self, dataset, **settings):
settings['indeces'] = [2, 3, 3]
settings['column_types'] = ['str', 'int', 'int']
settings['column_names'] = ['chrom', 'start', 'end']
return self.dict_dataprovider(dataset, **settings)
@dataproviders.decorators.dataprovider_factory('samtools')
def samtools_dataprovider(self, dataset, **settings):
"""Generic samtools interface - all options available through settings."""
dataset_source = dataproviders.dataset.DatasetDataProvider(dataset)
return dataproviders.dataset.SamtoolsDataProvider(dataset_source, **settings)
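# A minimal sketch (hypothetical helper) of the bai/csi decision made in
# Bam.get_index_flag above: reference sequences longer than 2**29 - 1 bases
# cannot be addressed by a .bai index, so a .csi index ('-c') is needed instead.
def _example_choose_bam_index_flag(path):
    """Return '-b' (bai) or '-c' (csi) for the BAM file at *path*."""
    import pysam
    with pysam.AlignmentFile(path) as alignment_file:
        longest_reference = max(alignment_file.header.lengths, default=0)
    return '-c' if longest_reference > (2 ** 29) - 1 else '-b'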
class ProBam(Bam):
"""Class describing a BAM binary file - extended for proteomics data"""
edam_format = "format_3826"
edam_data = "data_0863"
file_ext = "probam"
class BamInputSorted(BamNative):
"""
A class for BAM files that can formally be unsorted or queryname sorted.
Alignments are either ordered based on the order with which the queries appear when producing the alignment,
or ordered by their queryname.
This notably keeps alignments produced by paired-end sequencing adjacent.
"""
sort_flag = '-n'
file_ext = 'qname_input_sorted.bam'
def sniff(self, file_name):
# We never want to sniff to this datatype
return False
def dataset_content_needs_grooming(self, file_name):
"""
Groom if the file is coordinate sorted
"""
# Check the sort order recorded in the @HD line of the header rather than indexing
# the file; for this datatype grooming is needed only if the file is coordinate-sorted.
with pysam.AlignmentFile(filename=file_name) as f:
# The only sure thing we know here is that the sort order can't be coordinate
return f.header.get('HD', {}).get('SO') == 'coordinate'
class BamQuerynameSorted(BamInputSorted):
"""A class for queryname sorted BAM files."""
sort_flag = '-n'
file_ext = "qname_sorted.bam"
def sniff(self, file_name):
return BamNative().sniff(file_name) and not self.dataset_content_needs_grooming(file_name)
def dataset_content_needs_grooming(self, file_name):
"""
Check if file_name is a queryname-sorted BAM file
"""
# Check the sort order recorded in the @HD line of the header; grooming is needed
# whenever the file is not queryname-sorted.
with pysam.AlignmentFile(filename=file_name) as f:
return f.header.get('HD', {}).get('SO') != 'queryname'
class CRAM(Binary):
file_ext = "cram"
edam_format = "format_3462"
edam_data = "data_0863"
MetadataElement(name="cram_version", default=None, desc="CRAM Version", param=MetadataParameter, readonly=True, visible=False, optional=False, no_value=None)
MetadataElement(name="cram_index", desc="CRAM Index File", param=metadata.FileParameter, file_ext="crai", readonly=True, no_value=None, visible=False, optional=True)
def set_meta(self, dataset, overwrite=True, **kwd):
major_version, minor_version = self.get_cram_version(dataset.file_name)
if major_version != -1:
dataset.metadata.cram_version = f"{str(major_version)}.{str(minor_version)}"
if not dataset.metadata.cram_index:
index_file = dataset.metadata.spec['cram_index'].param.new_file(dataset=dataset)
if self.set_index_file(dataset, index_file):
dataset.metadata.cram_index = index_file
def get_cram_version(self, filename):
try:
with open(filename, "rb") as fh:
header = bytearray(fh.read(6))
return header[4], header[5]
except Exception as exc:
log.warning('%s, get_cram_version Exception: %s', self, exc)
return -1, -1
def set_index_file(self, dataset, index_file):
try:
pysam.index(dataset.file_name, index_file.file_name)
return True
except Exception as exc:
log.warning('%s, set_index_file Exception: %s', self, exc)
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = 'CRAM binary alignment file'
dataset.blurb = 'binary data'
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def sniff(self, filename):
try:
header = open(filename, 'rb').read(4)
if header == b"CRAM":
return True
return False
except Exception:
return False
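# A minimal sketch (hypothetical helper) of the header layout used by
# CRAM.get_cram_version above: bytes 0-3 are the ASCII magic b'CRAM' and
# bytes 4-5 hold the major and minor format version.
def _example_read_cram_version(path):
    """Return (major, minor) for a CRAM file, or (-1, -1) if *path* is not CRAM."""
    with open(path, 'rb') as handle:
        header = handle.read(6)
    if len(header) < 6 or header[:4] != b'CRAM':
        return -1, -1
    return header[4], header[5]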
class BaseBcf(CompressedArchive):
edam_format = "format_3020"
edam_data = "data_3498"
class Bcf(BaseBcf):
"""
Class describing a (BGZF-compressed) BCF file
"""
file_ext = "bcf"
MetadataElement(name="bcf_index", desc="BCF Index File", param=metadata.FileParameter, file_ext="csi", readonly=True, no_value=None, visible=False, optional=True)
def sniff(self, filename):
# BCF is compressed in the BGZF format, and must not be uncompressed in Galaxy.
try:
header = gzip.open(filename).read(3)
# The first 3 bytes of any BCF file are 'BCF', and the file is binary.
if header == b'BCF':
return True
return False
except Exception:
return False
def set_meta(self, dataset, overwrite=True, **kwd):
""" Creates the index for the BCF file. """
# These metadata values are not accessible by users, always overwrite
index_file = dataset.metadata.bcf_index
if not index_file:
index_file = dataset.metadata.spec['bcf_index'].param.new_file(dataset=dataset)
# Create the bcf index
dataset_symlink = os.path.join(os.path.dirname(index_file.file_name),
'__dataset_%d_%s' % (dataset.id, os.path.basename(index_file.file_name)))
os.symlink(dataset.file_name, dataset_symlink)
try:
cmd = ['python', '-c', f"import pysam.bcftools; pysam.bcftools.index('{dataset_symlink}')"]
subprocess.check_call(cmd)
shutil.move(f"{dataset_symlink}.csi", index_file.file_name)
except Exception as e:
raise Exception(f'Error setting BCF metadata: {util.unicodify(e)}')
finally:
# Remove temp file and symlink
os.remove(dataset_symlink)
dataset.metadata.bcf_index = index_file
class BcfUncompressed(BaseBcf):
"""
Class describing an uncompressed BCF file
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('1.bcf_uncompressed')
>>> BcfUncompressed().sniff(fname)
True
>>> fname = get_test_fname('1.bcf')
>>> BcfUncompressed().sniff(fname)
False
"""
file_ext = "bcf_uncompressed"
def sniff(self, filename):
try:
header = open(filename, mode='rb').read(3)
# The first 3 bytes of any BCF file are 'BCF', and the file is binary.
if header == b'BCF':
return True
return False
except Exception:
return False
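# A minimal sketch (hypothetical helper) contrasting the Bcf and BcfUncompressed
# sniffers above: both look for the b'BCF' magic, but the compressed variant has
# to be read through gzip/BGZF first while the uncompressed variant is read as-is.
def _example_classify_bcf(path):
    """Return 'bcf', 'bcf_uncompressed' or None for the file at *path*."""
    import gzip
    with open(path, 'rb') as handle:
        if handle.read(3) == b'BCF':
            return 'bcf_uncompressed'
    try:
        with gzip.open(path, 'rb') as handle:
            if handle.read(3) == b'BCF':
                return 'bcf'
    except OSError:
        pass
    return None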
class H5(Binary):
"""
Class describing an HDF5 file
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('test.mz5')
>>> H5().sniff(fname)
True
>>> fname = get_test_fname('interval.interval')
>>> H5().sniff(fname)
False
"""
file_ext = "h5"
edam_format = "format_3590"
def __init__(self, **kwd):
super().__init__(**kwd)
self._magic = binascii.unhexlify("894844460d0a1a0a")
def sniff(self, filename):
# The first 8 bytes of any hdf5 file are 0x894844460d0a1a0a
try:
header = open(filename, 'rb').read(8)
if header == self._magic:
return True
return False
except Exception:
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Binary HDF5 file"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"Binary HDF5 file ({nice_size(dataset.get_size())})"
class Loom(H5):
"""
Class describing a Loom file: http://loompy.org/
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('test.loom')
>>> Loom().sniff(fname)
True
>>> fname = get_test_fname('test.mz5')
>>> Loom().sniff(fname)
False
"""
file_ext = "loom"
edam_format = "format_3590"
MetadataElement(name="title", default="", desc="title", readonly=True, visible=True, no_value="")
MetadataElement(name="description", default="", desc="description", readonly=True, visible=True, no_value="")
MetadataElement(name="url", default="", desc="url", readonly=True, visible=True, no_value="")
MetadataElement(name="doi", default="", desc="doi", readonly=True, visible=True, no_value="")
MetadataElement(name="loom_spec_version", default="", desc="loom_spec_version", readonly=True, visible=True, no_value="")
MetadataElement(name="creation_date", default=None, desc="creation_date", readonly=True, visible=True, no_value=None)
MetadataElement(name="shape", default=(), desc="shape", param=metadata.ListParameter, readonly=True, visible=True, no_value=())
MetadataElement(name="layers_count", default=0, desc="layers_count", readonly=True, visible=True, no_value=0)
MetadataElement(name="layers_names", desc="layers_names", default=[], param=metadata.SelectParameter, multiple=True, readonly=True, no_value=None)
MetadataElement(name="row_attrs_count", default=0, desc="row_attrs_count", readonly=True, visible=True, no_value=0)
MetadataElement(name="row_attrs_names", desc="row_attrs_names", default=[], param=metadata.SelectParameter, multiple=True, readonly=True, no_value=None)
MetadataElement(name="col_attrs_count", default=0, desc="col_attrs_count", readonly=True, visible=True, no_value=0)
MetadataElement(name="col_attrs_names", desc="col_attrs_names", default=[], param=metadata.SelectParameter, multiple=True, readonly=True, no_value=None)
MetadataElement(name="col_graphs_count", default=0, desc="col_graphs_count", readonly=True, visible=True, no_value=0)
MetadataElement(name="col_graphs_names", desc="col_graphs_names", default=[], param=metadata.SelectParameter, multiple=True, readonly=True, no_value=None)
MetadataElement(name="row_graphs_count", default=0, desc="row_graphs_count", readonly=True, visible=True, no_value=0)
MetadataElement(name="row_graphs_names", desc="row_graphs_names", default=[], param=metadata.SelectParameter, multiple=True, readonly=True, no_value=None)
def sniff(self, filename):
if super().sniff(filename):
with h5py.File(filename, 'r') as loom_file:
# Check the optional but distinctive LOOM_SPEC_VERSION attribute
if bool(loom_file.attrs.get('LOOM_SPEC_VERSION')):
return True
# Check some mandatory H5 datasets and groups
for el in ('matrix', 'row_attrs', 'col_attrs'):
if loom_file.get(el) is None:
return False
else:
return True
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Binary Loom file"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"Binary Loom file ({nice_size(dataset.get_size())})"
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
try:
with h5py.File(dataset.file_name, 'r') as loom_file:
dataset.metadata.title = loom_file.attrs.get('title')
dataset.metadata.description = loom_file.attrs.get('description')
dataset.metadata.url = loom_file.attrs.get('url')
dataset.metadata.doi = loom_file.attrs.get('doi')
loom_spec_version = loom_file.attrs.get('LOOM_SPEC_VERSION')
if isinstance(loom_spec_version, np.ndarray):
loom_spec_version = loom_spec_version[0]
if isinstance(loom_spec_version, bytes):
loom_spec_version = loom_spec_version.decode()
dataset.metadata.loom_spec_version = loom_spec_version
dataset.creation_date = loom_file.attrs.get('creation_date')
dataset.metadata.shape = tuple(loom_file['matrix'].shape)
tmp = list(loom_file.get('layers', {}).keys())
dataset.metadata.layers_count = len(tmp)
dataset.metadata.layers_names = tmp
tmp = list(loom_file['row_attrs'].keys())
dataset.metadata.row_attrs_count = len(tmp)
dataset.metadata.row_attrs_names = tmp
tmp = list(loom_file['col_attrs'].keys())
dataset.metadata.col_attrs_count = len(tmp)
dataset.metadata.col_attrs_names = tmp
# According to the Loom file format specification, col_graphs
# and row_graphs are mandatory groups, but files created by
# Bioconductor LoomExperiment do not always have them:
# https://github.com/Bioconductor/LoomExperiment/issues/7
tmp = list(loom_file.get('col_graphs', {}).keys())
dataset.metadata.col_graphs_count = len(tmp)
dataset.metadata.col_graphs_names = tmp
tmp = list(loom_file.get('row_graphs', {}).keys())
dataset.metadata.row_graphs_count = len(tmp)
dataset.metadata.row_graphs_names = tmp
except Exception as e:
log.warning('%s, set_meta Exception: %s', self, e)
class Anndata(H5):
"""
Class describing an HDF5 AnnData file: http://anndata.rtfd.io
>>> from galaxy.datatypes.sniff import get_test_fname
>>> Anndata().sniff(get_test_fname('pbmc3k_tiny.h5ad'))
True
>>> Anndata().sniff(get_test_fname('test.mz5'))
False
>>> Anndata().sniff(get_test_fname('import.loom.krumsiek11.h5ad'))
True
>>> Anndata().sniff(get_test_fname('adata_0_6_small2.h5ad'))
True
>>> Anndata().sniff(get_test_fname('adata_0_6_small.h5ad'))
True
>>> Anndata().sniff(get_test_fname('adata_0_7_4_small2.h5ad'))
True
>>> Anndata().sniff(get_test_fname('adata_0_7_4_small.h5ad'))
True
>>> Anndata().sniff(get_test_fname('adata_unk2.h5ad'))
True
>>> Anndata().sniff(get_test_fname('adata_unk.h5ad'))
True
"""
file_ext = 'h5ad'
MetadataElement(name="title", default="", desc="title", readonly=True, visible=True, no_value="")
MetadataElement(name="description", default="", desc="description", readonly=True, visible=True, no_value="")
MetadataElement(name="url", default="", desc="url", readonly=True, visible=True, no_value="")
MetadataElement(name="doi", default="", desc="doi", readonly=True, visible=True, no_value="")
MetadataElement(name="anndata_spec_version", default="", desc="anndata_spec_version", readonly=True, visible=True, no_value="")
MetadataElement(name="creation_date", default=None, desc="creation_date", readonly=True, visible=True, no_value=None)
MetadataElement(name="layers_count", default=0, desc="layers_count", readonly=True, visible=True, no_value=0)
MetadataElement(name="layers_names", desc="layers_names", default=[], param=metadata.SelectParameter, multiple=True, readonly=True, no_value=None)
MetadataElement(name="row_attrs_count", default=0, desc="row_attrs_count", readonly=True, visible=True, no_value=0)
# obs_names: Cell1, Cell2, Cell3,...
# obs_layers: louvain, leiden, isBcell
# obs_count: number of obs_layers
# obs_size: number of obs_names
MetadataElement(name="obs_names", desc="obs_names", default=[], multiple=True, readonly=True, no_value=None)
MetadataElement(name="obs_layers", desc="obs_layers", default=[], param=metadata.SelectParameter, multiple=True, readonly=True, no_value=None)
MetadataElement(name="obs_count", default=0, desc="obs_count", readonly=True, visible=True, no_value=0)
MetadataElement(name="obs_size", default=-1, desc="obs_size", readonly=True, visible=True, no_value=0)
MetadataElement(name="obsm_layers", desc="obsm_layers", default=[], param=metadata.SelectParameter, multiple=True, readonly=True, no_value=None)
MetadataElement(name="obsm_count", default=0, desc="obsm_count", readonly=True, visible=True, no_value=0)
MetadataElement(name="raw_var_layers", desc="raw_var_layers", default=[], param=metadata.SelectParameter, multiple=True, readonly=True, no_value=None)
MetadataElement(name="raw_var_count", default=0, desc="raw_var_count", readonly=True, visible=True, no_value=0)
MetadataElement(name="raw_var_size", default=0, desc="raw_var_size", readonly=True, visible=True, no_value=0)
MetadataElement(name="var_layers", desc="var_layers", default=[], param=metadata.SelectParameter, multiple=True, readonly=True, no_value=None)
MetadataElement(name="var_count", default=0, desc="var_count", readonly=True, visible=True, no_value=0)
MetadataElement(name="var_size", default=-1, desc="var_size", readonly=True, visible=True, no_value=0)
MetadataElement(name="varm_layers", desc="varm_layers", default=[], param=metadata.SelectParameter, multiple=True, readonly=True, no_value=None)
MetadataElement(name="varm_count", default=0, desc="varm_count", readonly=True, visible=True, no_value=0)
MetadataElement(name="uns_layers", desc="uns_layers", default=[], param=metadata.SelectParameter, multiple=True, readonly=True, no_value=None)
MetadataElement(name="uns_count", default=0, desc="uns_count", readonly=True, visible=True, no_value=0)
MetadataElement(name="shape", default=(-1, -1), desc="shape", param=metadata.ListParameter, readonly=True, visible=True, no_value=(0, 0))
def sniff(self, filename):
if super().sniff(filename):
try:
with h5py.File(filename, 'r') as f:
return all(attr in f for attr in ['X', 'obs', 'var'])
except Exception:
return False
return False
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
with h5py.File(dataset.file_name, 'r') as anndata_file:
dataset.metadata.title = anndata_file.attrs.get('title')
dataset.metadata.description = anndata_file.attrs.get('description')
dataset.metadata.url = anndata_file.attrs.get('url')
dataset.metadata.doi = anndata_file.attrs.get('doi')
dataset.creation_date = anndata_file.attrs.get('creation_date')
dataset.metadata.shape = anndata_file.attrs.get('shape', dataset.metadata.shape)
# none of the above appear to work in any dataset tested, but could be useful for
# future AnnData datasets
dataset.metadata.layers_count = len(anndata_file)
dataset.metadata.layers_names = list(anndata_file.keys())
def _layercountsize(tmp, lennames=0):
"From TMP and LENNAMES, return layers, their number, and the length of one of the layers (all equal)."
if hasattr(tmp, 'dtype'):
layers = list(tmp.dtype.names)
count = len(tmp.dtype)
size = int(tmp.size)
else:
layers = list(tmp.keys())
count = len(layers)
size = lennames
return (layers, count, size)
if 'obs' in dataset.metadata.layers_names:
tmp = anndata_file["obs"]
obs_index = None
if "index" in tmp:
obs_index = "index"
elif "_index" in tmp:
obs_index = "_index"
# Determine cell labels
if obs_index:
dataset.metadata.obs_names = list(tmp[obs_index])
elif hasattr(tmp, 'dtype'):
if "index" in tmp.dtype.names:
# Yes, we call tmp["index"], and not tmp.dtype["index"]
# here, despite the above tests.
dataset.metadata.obs_names = list(tmp["index"])
elif "_index" in tmp.dtype.names:
dataset.metadata.obs_names = list(tmp["_index"])
else:
log.warning("Could not determine cell labels for %s", self)
else:
log.warning("Could not determine observation index for %s", self)
x, y, z = _layercountsize(tmp, len(dataset.metadata.obs_names))
dataset.metadata.obs_layers = x
dataset.metadata.obs_count = y
dataset.metadata.obs_size = z
if 'obsm' in dataset.metadata.layers_names:
tmp = anndata_file["obsm"]
dataset.metadata.obsm_layers, dataset.metadata.obsm_count, _ = _layercountsize(tmp)
if 'raw.var' in dataset.metadata.layers_names:
tmp = anndata_file["raw.var"]
# full set of genes would never need to be previewed
# dataset.metadata.raw_var_names = tmp["index"]
x, y, z = _layercountsize(tmp, len(tmp["index"]))
dataset.metadata.raw_var_layers = x
dataset.metadata.raw_var_count = y
dataset.metadata.raw_var_size = z
if 'var' in dataset.metadata.layers_names:
tmp = anndata_file["var"]
var_index = None
if "index" in tmp:
var_index = "index"
elif "_index" in tmp:
var_index = "_index"
# We never use var_names
# dataset.metadata.var_names = tmp[var_index]
if var_index:
x, y, z = _layercountsize(tmp, len(tmp[var_index]))
else:
# failing to detect a var_index is not an indicator
# that the dataset is empty
x, y, z = _layercountsize(tmp)
dataset.metadata.var_layers = x
dataset.metadata.var_count = y
dataset.metadata.var_size = z
if 'varm' in dataset.metadata.layers_names:
tmp = anndata_file["varm"]
dataset.metadata.varm_layers, dataset.metadata.varm_count, _ = _layercountsize(tmp)
if 'uns' in dataset.metadata.layers_names:
tmp = anndata_file["uns"]
dataset.metadata.uns_layers, dataset.metadata.uns_count, _ = _layercountsize(tmp)
# Resolving the problematic shape parameter
if 'X' in dataset.metadata.layers_names:
# We determine the shape here because of the non-standard representation of the 'X' dimensions
shape = anndata_file['X'].attrs.get("shape")
if shape is not None:
dataset.metadata.shape = tuple(shape)
elif hasattr(anndata_file['X'], 'shape'):
dataset.metadata.shape = tuple(anndata_file['X'].shape)
if dataset.metadata.shape is None:
dataset.metadata.shape = (int(dataset.metadata.obs_size), int(dataset.metadata.var_size))
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
tmp = dataset.metadata
def _makelayerstrings(layer, count, names):
"Format the layers."
if layer in tmp.layers_names:
return "\n[%s]: %d %s\n %s" % (
layer,
count,
"layer" if count == 1 else "layers",
', '.join(sorted(names))
)
return ""
peekstr = "[n_obs x n_vars]\n %d x %d" % tuple(tmp.shape)
peekstr += _makelayerstrings("obs", tmp.obs_count, tmp.obs_layers)
peekstr += _makelayerstrings("var", tmp.var_count, tmp.var_layers)
peekstr += _makelayerstrings("obsm", tmp.obsm_count, tmp.obsm_layers)
peekstr += _makelayerstrings("varm", tmp.varm_count, tmp.varm_layers)
peekstr += _makelayerstrings("uns", tmp.uns_count, tmp.uns_layers)
dataset.peek = peekstr
dataset.blurb = f"Anndata file ({nice_size(dataset.get_size())})"
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"Binary Anndata file ({nice_size(dataset.get_size())})"
@build_sniff_from_prefix
class GmxBinary(Binary):
"""
Base class for GROMACS binary files - xtc, trr, cpt
"""
magic_number: Optional[int] = None # variables to be overwritten in the child class
file_ext = ""
def sniff_prefix(self, sniff_prefix):
# The first 4 bytes of any GROMACS binary file contain the magic number
return sniff_prefix.magic_header('>1i') == self.magic_number
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = f"Binary GROMACS {self.file_ext} file"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"Binary GROMACS {self.file_ext} trajectory file ({nice_size(dataset.get_size())})"
class Trr(GmxBinary):
"""
Class describing a trr file from the GROMACS suite
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('md.trr')
>>> Trr().sniff(fname)
True
>>> fname = get_test_fname('interval.interval')
>>> Trr().sniff(fname)
False
"""
file_ext = "trr"
magic_number = 1993 # magic number reference: https://github.com/gromacs/gromacs/blob/cec211b2c835ba6e8ea849fb1bf67d7fc19693a4/src/gromacs/fileio/trrio.cpp
class Cpt(GmxBinary):
"""
Class describing a checkpoint (.cpt) file from the GROMACS suite
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('md.cpt')
>>> Cpt().sniff(fname)
True
>>> fname = get_test_fname('md.trr')
>>> Cpt().sniff(fname)
False
"""
file_ext = "cpt"
magic_number = 171817 # magic number reference: https://github.com/gromacs/gromacs/blob/cec211b2c835ba6e8ea849fb1bf67d7fc19693a4/src/gromacs/fileio/checkpoint.cpp
class Xtc(GmxBinary):
"""
Class describing an xtc file from the GROMACS suite
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('md.xtc')
>>> Xtc().sniff(fname)
True
>>> fname = get_test_fname('md.trr')
>>> Xtc().sniff(fname)
False
"""
file_ext = "xtc"
magic_number = 1995 # reference: https://github.com/gromacs/gromacs/blob/cec211b2c835ba6e8ea849fb1bf67d7fc19693a4/src/gromacs/fileio/xtcio.cpp
class Edr(GmxBinary):
"""
Class describing an edr file from the GROMACS suite
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('md.edr')
>>> Edr().sniff(fname)
True
>>> fname = get_test_fname('md.trr')
>>> Edr().sniff(fname)
False
"""
file_ext = "edr"
magic_number = -55555 # reference: https://github.com/gromacs/gromacs/blob/cec211b2c835ba6e8ea849fb1bf67d7fc19693a4/src/gromacs/fileio/enxio.cpp
class Biom2(H5):
"""
Class describing a biom2 file (http://biom-format.org/documentation/biom_format.html)
"""
MetadataElement(name="id", default=None, desc="table id", readonly=True, visible=True, no_value=None)
MetadataElement(name="format_url", default=None, desc="format-url", readonly=True, visible=True, no_value=None)
MetadataElement(name="format_version", default=None, desc="format-version", readonly=True, visible=True, no_value=None)
MetadataElement(name="format", default=None, desc="format", readonly=True, visible=True, no_value=None)
MetadataElement(name="type", default=None, desc="table type", readonly=True, visible=True, no_value=None)
MetadataElement(name="generated_by", default=None, desc="generated by", readonly=True, visible=True, no_value=None)
MetadataElement(name="creation_date", default=None, desc="creation date", readonly=True, visible=True, no_value=None)
MetadataElement(name="nnz", default=-1, desc="nnz: The number of non-zero elements in the table", readonly=True, visible=True, no_value=-1)
MetadataElement(name="shape", default=(), desc="shape: The number of rows and columns in the dataset", readonly=True, visible=True, no_value=())
file_ext = "biom2"
edam_format = "format_3746"
def sniff(self, filename):
"""
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('biom2_sparse_otu_table_hdf5.biom2')
>>> Biom2().sniff(fname)
True
>>> fname = get_test_fname('test.mz5')
>>> Biom2().sniff(fname)
False
>>> fname = get_test_fname('wiggle.wig')
>>> Biom2().sniff(fname)
False
"""
if super().sniff(filename):
with h5py.File(filename, 'r') as f:
required_fields = {'id', 'format-url', 'type', 'generated-by', 'creation-date', 'nnz', 'shape'}
return required_fields.issubset(f.attrs.keys())
return False
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
try:
with h5py.File(dataset.file_name, 'r') as f:
attributes = f.attrs
dataset.metadata.id = util.unicodify(attributes['id'])
dataset.metadata.format_url = util.unicodify(attributes['format-url'])
if 'format-version' in attributes: # biom 2.1
dataset.metadata.format_version = '.'.join(str(_) for _ in attributes['format-version'])
elif 'format' in attributes: # biom 2.0
dataset.metadata.format = util.unicodify(attributes['format'])
dataset.metadata.type = util.unicodify(attributes['type'])
dataset.metadata.shape = tuple(int(_) for _ in attributes['shape'])
dataset.metadata.generated_by = util.unicodify(attributes['generated-by'])
dataset.metadata.creation_date = util.unicodify(attributes['creation-date'])
dataset.metadata.nnz = int(attributes['nnz'])
except Exception as e:
log.warning('%s, set_meta Exception: %s', self, util.unicodify(e))
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
lines = ['Biom2 (HDF5) file']
try:
with h5py.File(dataset.file_name) as f:
for k, v in f.attrs.items():
lines.append(f'{k}: {util.unicodify(v)}')
except Exception as e:
log.warning('%s, set_peek Exception: %s', self, util.unicodify(e))
dataset.peek = '\n'.join(lines)
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"Biom2 (HDF5) file ({nice_size(dataset.get_size())})"
class Cool(H5):
"""
Class describing the cool format (https://github.com/mirnylab/cooler)
"""
file_ext = "cool"
def sniff(self, filename):
"""
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('matrix.cool')
>>> Cool().sniff(fname)
True
>>> fname = get_test_fname('test.mz5')
>>> Cool().sniff(fname)
False
>>> fname = get_test_fname('wiggle.wig')
>>> Cool().sniff(fname)
False
>>> fname = get_test_fname('biom2_sparse_otu_table_hdf5.biom2')
>>> Cool().sniff(fname)
False
"""
MAGIC = "HDF5::Cooler"
URL = "https://github.com/mirnylab/cooler"
if super().sniff(filename):
keys = ['chroms', 'bins', 'pixels', 'indexes']
with h5py.File(filename, 'r') as handle:
fmt = util.unicodify(handle.attrs.get('format'))
url = util.unicodify(handle.attrs.get('format-url'))
if fmt == MAGIC or url == URL:
if not all(name in handle.keys() for name in keys):
return False
return True
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Cool (HDF5) file for storing genomic interaction data."
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"Cool (HDF5) file ({nice_size(dataset.get_size())})."
class MCool(H5):
"""
Class describing the multi-resolution cool format (https://github.com/mirnylab/cooler)
"""
file_ext = "mcool"
def sniff(self, filename):
"""
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('matrix.mcool')
>>> MCool().sniff(fname)
True
>>> fname = get_test_fname('matrix.cool')
>>> MCool().sniff(fname)
False
>>> fname = get_test_fname('test.mz5')
>>> MCool().sniff(fname)
False
>>> fname = get_test_fname('wiggle.wig')
>>> MCool().sniff(fname)
False
>>> fname = get_test_fname('biom2_sparse_otu_table_hdf5.biom2')
>>> MCool().sniff(fname)
False
"""
MAGIC = "HDF5::Cooler"
URL = "https://github.com/mirnylab/cooler"
if super().sniff(filename):
keys0 = ['resolutions']
with h5py.File(filename, 'r') as handle:
if not all(name in handle.keys() for name in keys0):
return False
res0 = next(iter(handle['resolutions'].keys()))
keys = ['chroms', 'bins', 'pixels', 'indexes']
fmt = util.unicodify(handle['resolutions'][res0].attrs.get('format'))
url = util.unicodify(handle['resolutions'][res0].attrs.get('format-url'))
if fmt == MAGIC or url == URL:
if not all(name in handle['resolutions'][res0].keys() for name in keys):
return False
return True
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Multi-resolution Cool (HDF5) file for storing genomic interaction data."
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"MCool (HDF5) file ({nice_size(dataset.get_size())})."
class H5MLM(H5):
"""
Machine learning model generated by Galaxy-ML.
"""
file_ext = "h5mlm"
URL = "https://github.com/goeckslab/Galaxy-ML"
max_peek_size = 1000 # 1 KB
max_preview_size = 1000000 # 1 MB
MetadataElement(name="hyper_params", desc="Hyperparameter File", param=FileParameter, file_ext="tabular", readonly=True, no_value=None, visible=False, optional=True)
def set_meta(self, dataset, overwrite=True, **kwd):
try:
spec_key = "hyper_params"
params_file = dataset.metadata.hyper_params
if not params_file:
params_file = dataset.metadata.spec[spec_key].param.new_file(dataset=dataset)
with h5py.File(dataset.file_name, "r") as handle:
hyper_params = handle["-model_hyperparameters-"][()]
hyper_params = json.loads(util.unicodify(hyper_params))
with open(params_file.file_name, "w") as f:
f.write("\tParameter\tValue\n")
for p in hyper_params:
f.write("\t".join(p) + "\n")
dataset.metadata.hyper_params = params_file
except Exception as e:
log.warning('%s, set_meta Exception: %s', self, e)
def sniff(self, filename):
if super().sniff(filename):
keys = ["-model_config-"]
with h5py.File(filename, "r") as handle:
if not all(name in handle.keys() for name in keys):
return False
url = util.unicodify(handle.attrs.get("-URL-"))
if url == self.URL:
return True
return False
def get_repr(self, filename):
try:
with h5py.File(filename, "r") as handle:
repr_ = util.unicodify(handle.attrs.get("-repr-"))
return repr_
except Exception as e:
log.warning('%s, get_repr Exception: %s', self, e)
return ""
def get_config_string(self, filename):
try:
with h5py.File(filename, "r") as handle:
config = util.unicodify(handle["-model_config-"][()])
return config
except Exception as e:
log.warning('%s, get model configuration Exception: %s', self, e)
return ""
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
repr_ = self.get_repr(dataset.file_name)
dataset.peek = repr_[:self.max_peek_size]
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return "HDF5 Model (%s)" % (nice_size(dataset.get_size()))
def display_data(self, trans, dataset, preview=False, filename=None, to_ext=None, **kwd):
preview = util.string_as_bool(preview)
if to_ext or not preview:
to_ext = to_ext or dataset.extension
return self._serve_raw(trans, dataset, to_ext, **kwd)
rval = {}
try:
with h5py.File(dataset.file_name, "r") as handle:
rval['Attributes'] = {}
attributes = handle.attrs
for k in (set(attributes.keys()) - {'-URL-', '-repr-'}):
rval['Attributes'][k] = util.unicodify(attributes.get(k))
except Exception as e:
log.warning(e)
config = self.get_config_string(dataset.file_name)
rval['Config'] = json.loads(config) if config else ''
rval = json.dumps(rval, sort_keys=True, indent=2)
rval = rval[:self.max_preview_size]
repr_ = self.get_repr(dataset.file_name)
return f"<pre>{repr_}</pre><pre>{rval}</pre>"
class HexrdMaterials(H5):
"""
Class describing a Hexrd Materials file: https://github.com/HEXRD/hexrd
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('hexrd.materials.h5')
>>> HexrdMaterials().sniff(fname)
True
>>> fname = get_test_fname('test.loom')
>>> HexrdMaterials().sniff(fname)
False
"""
file_ext = "hexrd.materials.h5"
edam_format = "format_3590"
MetadataElement(name="materials", desc="materials", default=[], param=metadata.SelectParameter, multiple=True, readonly=True, no_value=None)
MetadataElement(name="SpaceGroupNumber", default={}, param=DictParameter, desc="SpaceGroupNumber", readonly=True, visible=True, no_value={})
MetadataElement(name="LatticeParameters", default={}, param=DictParameter, desc="LatticeParameters", readonly=True, visible=True, no_value={})
def sniff(self, filename):
if super().sniff(filename):
req = {'AtomData', 'Atomtypes', 'CrystalSystem', 'LatticeParameters'}
with h5py.File(filename, 'r') as mat_file:
for k in mat_file.keys():
if isinstance(mat_file[k], h5py._hl.group.Group) and set(mat_file[k].keys()) >= req:
return True
return False
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
try:
with h5py.File(dataset.file_name, 'r') as mat_file:
dataset.metadata.materials = list(mat_file.keys())
sgn = dict()
lp = dict()
for m in mat_file.keys():
if 'SpaceGroupNumber' in mat_file[m] and len(mat_file[m]['SpaceGroupNumber']) > 0:
sgn[m] = mat_file[m]['SpaceGroupNumber'][0].item()
if 'LatticeParameters' in mat_file[m]:
lp[m] = mat_file[m]['LatticeParameters'][0:].tolist()
dataset.metadata.SpaceGroupNumber = sgn
dataset.metadata.LatticeParameters = lp
except Exception as e:
log.warning('%s, set_meta Exception: %s', self, e)
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
lines = ['Material SpaceGroup Lattice']
if dataset.metadata.materials:
for m in dataset.metadata.materials:
try:
lines.append(f'{m} {dataset.metadata.SpaceGroupNumber[m]} {dataset.metadata.LatticeParameters[m]}')
except Exception:
continue
dataset.peek = '\n'.join(lines)
dataset.blurb = f"Materials: {' '.join(dataset.metadata.materials)}"
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
class Scf(Binary):
"""Class describing an scf binary sequence file"""
edam_format = "format_1632"
edam_data = "data_0924"
file_ext = "scf"
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Binary scf sequence file"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"Binary scf sequence file ({nice_size(dataset.get_size())})"
@build_sniff_from_prefix
class Sff(Binary):
""" Standard Flowgram Format (SFF) """
edam_format = "format_3284"
edam_data = "data_0924"
file_ext = "sff"
def sniff_prefix(self, sniff_prefix):
# The first 4 bytes of any sff file are '.sff', and the file is binary. For details
# about the format, see http://www.ncbi.nlm.nih.gov/Traces/trace.cgi?cmd=show&f=formats&m=doc&s=format
return sniff_prefix.startswith_bytes(b'.sff')
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Binary sff file"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"Binary sff file ({nice_size(dataset.get_size())})"
@build_sniff_from_prefix
class BigWig(Binary):
"""
Accessing binary BigWig files from UCSC.
The supplemental info in the paper has the binary details:
http://bioinformatics.oxfordjournals.org/cgi/content/abstract/btq351v1
"""
edam_format = "format_3006"
edam_data = "data_3002"
file_ext = "bigwig"
track_type = "LineTrack"
data_sources = {"data_standalone": "bigwig"}
def __init__(self, **kwd):
super().__init__(**kwd)
self._magic = 0x888FFC26
self._name = "BigWig"
def sniff_prefix(self, sniff_prefix):
return sniff_prefix.magic_header("I") == self._magic
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = f"Binary UCSC {self._name} file"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"Binary UCSC {self._name} file ({nice_size(dataset.get_size())})"
class BigBed(BigWig):
"""BigBed support from UCSC."""
edam_format = "format_3004"
edam_data = "data_3002"
file_ext = "bigbed"
data_sources = {"data_standalone": "bigbed"}
def __init__(self, **kwd):
Binary.__init__(self, **kwd)
self._magic = 0x8789F2EB
self._name = "BigBed"
@build_sniff_from_prefix
class TwoBit(Binary):
"""Class describing a TwoBit format nucleotide file"""
edam_format = "format_3009"
edam_data = "data_0848"
file_ext = "twobit"
def sniff_prefix(self, sniff_prefix):
magic = sniff_prefix.magic_header(">L")
return magic == TWOBIT_MAGIC_NUMBER or magic == TWOBIT_MAGIC_NUMBER_SWAP
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Binary TwoBit format nucleotide file"
dataset.blurb = nice_size(dataset.get_size())
else:
return super().set_peek(dataset)
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"Binary TwoBit format nucleotide file ({nice_size(dataset.get_size())})"
@dataproviders.decorators.has_dataproviders
class SQlite(Binary):
"""Class describing a Sqlite database """
MetadataElement(name="tables", default=[], param=ListParameter, desc="Database Tables", readonly=True, visible=True, no_value=[])
MetadataElement(name="table_columns", default={}, param=DictParameter, desc="Database Table Columns", readonly=True, visible=True, no_value={})
MetadataElement(name="table_row_count", default={}, param=DictParameter, desc="Database Table Row Count", readonly=True, visible=True, no_value={})
file_ext = "sqlite"
edam_format = "format_3621"
def init_meta(self, dataset, copy_from=None):
Binary.init_meta(self, dataset, copy_from=copy_from)
def set_meta(self, dataset, overwrite=True, **kwd):
try:
tables = []
columns = dict()
rowcounts = dict()
conn = sqlite.connect(dataset.file_name)
c = conn.cursor()
tables_query = "SELECT name,sql FROM sqlite_master WHERE type='table' ORDER BY name"
rslt = c.execute(tables_query).fetchall()
for table, _ in rslt:
tables.append(table)
try:
col_query = f'SELECT * FROM {table} LIMIT 0'
cur = conn.cursor().execute(col_query)
cols = [col[0] for col in cur.description]
columns[table] = cols
except Exception as exc:
log.warning('%s, set_meta Exception: %s', self, exc)
for table in tables:
try:
row_query = f"SELECT count(*) FROM {table}"
rowcounts[table] = c.execute(row_query).fetchone()[0]
except Exception as exc:
log.warning('%s, set_meta Exception: %s', self, exc)
dataset.metadata.tables = tables
dataset.metadata.table_columns = columns
dataset.metadata.table_row_count = rowcounts
except Exception as exc:
log.warning('%s, set_meta Exception: %s', self, exc)
def sniff(self, filename):
# The first 16 bytes of any SQLite3 database file are 'SQLite format 3\0', and the file is binary. For details
# about the format, see http://www.sqlite.org/fileformat.html
try:
header = open(filename, 'rb').read(16)
if header == b'SQLite format 3\0':
return True
return False
except Exception:
return False
def sniff_table_names(self, filename, table_names):
# All table names should be in the schema
try:
conn = sqlite.connect(filename)
c = conn.cursor()
tables_query = "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name"
result = c.execute(tables_query).fetchall()
result = [_[0] for _ in result]
for table_name in table_names:
if table_name not in result:
return False
return True
except Exception as e:
log.warning('%s, sniff Exception: %s', self, e)
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "SQLite Database"
lines = ['SQLite Database']
if dataset.metadata.tables:
for table in dataset.metadata.tables:
try:
lines.append(f'{table} [{dataset.metadata.table_row_count[table]}]')
except Exception:
continue
dataset.peek = '\n'.join(lines)
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"SQLite Database ({nice_size(dataset.get_size())})"
@dataproviders.decorators.dataprovider_factory('sqlite', dataproviders.dataset.SQliteDataProvider.settings)
def sqlite_dataprovider(self, dataset, **settings):
dataset_source = dataproviders.dataset.DatasetDataProvider(dataset)
return dataproviders.dataset.SQliteDataProvider(dataset_source, **settings)
@dataproviders.decorators.dataprovider_factory('sqlite-table', dataproviders.dataset.SQliteDataTableProvider.settings)
def sqlite_datatableprovider(self, dataset, **settings):
dataset_source = dataproviders.dataset.DatasetDataProvider(dataset)
return dataproviders.dataset.SQliteDataTableProvider(dataset_source, **settings)
@dataproviders.decorators.dataprovider_factory('sqlite-dict', dataproviders.dataset.SQliteDataDictProvider.settings)
def sqlite_datadictprovider(self, dataset, **settings):
dataset_source = dataproviders.dataset.DatasetDataProvider(dataset)
return dataproviders.dataset.SQliteDataDictProvider(dataset_source, **settings)
class GeminiSQLite(SQlite):
"""Class describing a Gemini Sqlite database """
MetadataElement(name="gemini_version", default='0.10.0', param=MetadataParameter, desc="Gemini Version",
readonly=True, visible=True, no_value='0.10.0')
file_ext = "gemini.sqlite"
edam_format = "format_3622"
edam_data = "data_3498"
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
try:
conn = sqlite.connect(dataset.file_name)
c = conn.cursor()
tables_query = "SELECT version FROM version"
result = c.execute(tables_query).fetchall()
for version, in result:
dataset.metadata.gemini_version = version
# TODO: Can/should we detect even more attributes, such as use of PED file, what was input annotation type, etc.
except Exception as e:
log.warning('%s, set_meta Exception: %s', self, e)
def sniff(self, filename):
if super().sniff(filename):
table_names = ["gene_detailed", "gene_summary", "resources", "sample_genotype_counts",
"sample_genotypes", "samples", "variant_impacts", "variants", "version"]
return self.sniff_table_names(filename, table_names)
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Gemini SQLite Database, version %s" % (dataset.metadata.gemini_version or 'unknown')
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return "Gemini SQLite Database, version %s" % (dataset.metadata.gemini_version or 'unknown')
class ChiraSQLite(SQlite):
"""Class describing a ChiRAViz Sqlite database """
file_ext = "chira.sqlite"
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
def sniff(self, filename):
if super().sniff(filename):
            return self.sniff_table_names(filename, ['Chimeras'])
return False
class CuffDiffSQlite(SQlite):
"""Class describing a CuffDiff SQLite database """
MetadataElement(name="cuffdiff_version", default='2.2.1', param=MetadataParameter, desc="CuffDiff Version",
readonly=True, visible=True, no_value='2.2.1')
MetadataElement(name="genes", default=[], param=MetadataParameter, desc="Genes",
readonly=True, visible=True, no_value=[])
MetadataElement(name="samples", default=[], param=MetadataParameter, desc="Samples",
readonly=True, visible=True, no_value=[])
file_ext = "cuffdiff.sqlite"
# TODO: Update this when/if there is a specific EDAM format for CuffDiff SQLite data.
edam_format = "format_3621"
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
try:
genes = []
samples = []
conn = sqlite.connect(dataset.file_name)
c = conn.cursor()
tables_query = "SELECT value FROM runInfo where param = 'version'"
result = c.execute(tables_query).fetchall()
for version, in result:
dataset.metadata.cuffdiff_version = version
genes_query = 'SELECT gene_id, gene_short_name FROM genes ORDER BY gene_short_name'
result = c.execute(genes_query).fetchall()
for gene_id, gene_name in result:
if gene_name is None:
continue
gene = f'{gene_id}: {gene_name}'
if gene not in genes:
genes.append(gene)
samples_query = 'SELECT DISTINCT(sample_name) as sample_name FROM samples ORDER BY sample_name'
result = c.execute(samples_query).fetchall()
for sample_name, in result:
if sample_name not in samples:
samples.append(sample_name)
dataset.metadata.genes = genes
dataset.metadata.samples = samples
except Exception as e:
log.warning('%s, set_meta Exception: %s', self, e)
def sniff(self, filename):
if super().sniff(filename):
# These tables should be in any CuffDiff SQLite output.
table_names = ['CDS', 'genes', 'isoforms', 'replicates', 'runInfo', 'samples', 'TSS']
return self.sniff_table_names(filename, table_names)
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "CuffDiff SQLite Database, version %s" % (dataset.metadata.cuffdiff_version or 'unknown')
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return "CuffDiff SQLite Database, version %s" % (dataset.metadata.cuffdiff_version or 'unknown')
class MzSQlite(SQlite):
"""Class describing a Proteomics Sqlite database """
file_ext = "mz.sqlite"
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
def sniff(self, filename):
if super().sniff(filename):
table_names = ["DBSequence", "Modification", "Peaks", "Peptide", "PeptideEvidence",
"Score", "SearchDatabase", "Source", "SpectraData", "Spectrum", "SpectrumIdentification"]
return self.sniff_table_names(filename, table_names)
return False
class PQP(SQlite):
"""
Class describing a Peptide query parameters file
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('test.pqp')
>>> PQP().sniff(fname)
True
>>> fname = get_test_fname('test.osw')
>>> PQP().sniff(fname)
False
"""
file_ext = "pqp"
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
def sniff(self, filename):
"""
table definition according to https://github.com/grosenberger/OpenMS/blob/develop/src/openms/source/ANALYSIS/OPENSWATH/TransitionPQPFile.cpp#L264
        for now VERSION, GENE and PEPTIDE_GENE_MAPPING are excluded, since
        there is test data without these tables; see also https://github.com/OpenMS/OpenMS/issues/4365
"""
if not super().sniff(filename):
return False
table_names = ['COMPOUND', 'PEPTIDE', 'PEPTIDE_PROTEIN_MAPPING', 'PRECURSOR',
'PRECURSOR_COMPOUND_MAPPING', 'PRECURSOR_PEPTIDE_MAPPING', 'PROTEIN',
'TRANSITION', 'TRANSITION_PEPTIDE_MAPPING', 'TRANSITION_PRECURSOR_MAPPING']
osw_table_names = ['FEATURE', 'FEATURE_MS1', 'FEATURE_MS2', 'FEATURE_TRANSITION', 'RUN']
return self.sniff_table_names(filename, table_names) and not self.sniff_table_names(filename, osw_table_names)
class OSW(SQlite):
"""
Class describing OpenSwath output
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('test.osw')
>>> OSW().sniff(fname)
True
>>> fname = get_test_fname('test.sqmass')
>>> OSW().sniff(fname)
False
"""
file_ext = "osw"
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
def sniff(self, filename):
# osw seems to be an extension of pqp (few tables are added)
# see also here https://github.com/OpenMS/OpenMS/issues/4365
if not super().sniff(filename):
return False
table_names = ['COMPOUND', 'PEPTIDE', 'PEPTIDE_PROTEIN_MAPPING', 'PRECURSOR',
'PRECURSOR_COMPOUND_MAPPING', 'PRECURSOR_PEPTIDE_MAPPING', 'PROTEIN',
'TRANSITION', 'TRANSITION_PEPTIDE_MAPPING', 'TRANSITION_PRECURSOR_MAPPING',
'FEATURE', 'FEATURE_MS1', 'FEATURE_MS2', 'FEATURE_TRANSITION', 'RUN']
return self.sniff_table_names(filename, table_names)
class SQmass(SQlite):
"""
Class describing a Sqmass database
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('test.sqmass')
>>> SQmass().sniff(fname)
True
>>> fname = get_test_fname('test.pqp')
>>> SQmass().sniff(fname)
False
"""
file_ext = "sqmass"
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
def sniff(self, filename):
if super().sniff(filename):
table_names = ["CHROMATOGRAM", "PRECURSOR", "RUN", "SPECTRUM", "DATA", "PRODUCT", "RUN_EXTRA"]
return self.sniff_table_names(filename, table_names)
return False
class BlibSQlite(SQlite):
"""Class describing a Proteomics Spectral Library Sqlite database """
MetadataElement(name="blib_version", default='1.8', param=MetadataParameter, desc="Blib Version",
readonly=True, visible=True, no_value='1.8')
file_ext = "blib"
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
try:
conn = sqlite.connect(dataset.file_name)
c = conn.cursor()
tables_query = "SELECT majorVersion,minorVersion FROM LibInfo"
(majorVersion, minorVersion) = c.execute(tables_query).fetchall()[0]
dataset.metadata.blib_version = f'{majorVersion}.{minorVersion}'
except Exception as e:
log.warning('%s, set_meta Exception: %s', self, e)
def sniff(self, filename):
if super().sniff(filename):
table_names = ['IonMobilityTypes', 'LibInfo', 'Modifications', 'RefSpectra',
'RefSpectraPeakAnnotations', 'RefSpectraPeaks', 'ScoreTypes', 'SpectrumSourceFiles']
return self.sniff_table_names(filename, table_names)
return False
class DlibSQlite(SQlite):
"""
Class describing a Proteomics Spectral Library Sqlite database
DLIBs only have the "entries", "metadata", and "peptidetoprotein" tables populated.
ELIBs have the rest of the tables populated too, such as "peptidequants" or "peptidescores".
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('test.dlib')
>>> DlibSQlite().sniff(fname)
True
>>> fname = get_test_fname('interval.interval')
>>> DlibSQlite().sniff(fname)
False
"""
MetadataElement(name="dlib_version", default='1.8', param=MetadataParameter, desc="Dlib Version",
readonly=True, visible=True, no_value='1.8')
file_ext = "dlib"
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
try:
conn = sqlite.connect(dataset.file_name)
c = conn.cursor()
tables_query = "SELECT Value FROM metadata WHERE Key = 'version'"
            version = c.execute(tables_query).fetchone()[0]
            dataset.metadata.dlib_version = f'{version}'
except Exception as e:
log.warning('%s, set_meta Exception: %s', self, e)
def sniff(self, filename):
if super().sniff(filename):
table_names = ['entries', 'metadata', 'peptidetoprotein']
return self.sniff_table_names(filename, table_names)
return False
class ElibSQlite(SQlite):
"""
    Class describing a Proteomics Chromatogram Library Sqlite database
DLIBs only have the "entries", "metadata", and "peptidetoprotein" tables populated.
ELIBs have the rest of the tables populated too, such as "peptidequants" or "peptidescores".
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('test.elib')
>>> ElibSQlite().sniff(fname)
True
>>> fname = get_test_fname('test.dlib')
>>> ElibSQlite().sniff(fname)
False
"""
MetadataElement(name="version", default='0.1.14', param=MetadataParameter, desc="Elib Version",
readonly=True, visible=True, no_value='0.1.14')
file_ext = "elib"
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
try:
conn = sqlite.connect(dataset.file_name)
c = conn.cursor()
tables_query = "SELECT Value FROM metadata WHERE Key = 'version'"
            version = c.execute(tables_query).fetchone()[0]
            dataset.metadata.version = f'{version}'
except Exception as e:
log.warning('%s, set_meta Exception: %s', self, e)
def sniff(self, filename):
if super().sniff(filename):
table_names = ['entries', 'fragmentquants', 'metadata', 'peptidelocalizations', 'peptidequants',
'peptidescores', 'peptidetoprotein', 'proteinscores', 'retentiontimes']
if self.sniff_table_names(filename, table_names):
try:
conn = sqlite.connect(filename)
c = conn.cursor()
row_query = "SELECT count(*) FROM peptidescores"
count = c.execute(row_query).fetchone()[0]
return int(count) > 0
except Exception as e:
log.warning('%s, sniff Exception: %s', self, e)
return False
class IdpDB(SQlite):
"""
Class describing an IDPicker 3 idpDB (sqlite) database
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('test.idpdb')
>>> IdpDB().sniff(fname)
True
>>> fname = get_test_fname('interval.interval')
>>> IdpDB().sniff(fname)
False
"""
file_ext = "idpdb"
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
def sniff(self, filename):
if super().sniff(filename):
table_names = ["About", "Analysis", "AnalysisParameter", "PeptideSpectrumMatch",
"Spectrum", "SpectrumSource"]
return self.sniff_table_names(filename, table_names)
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "IDPickerDB SQLite file"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"IDPickerDB SQLite file ({nice_size(dataset.get_size())})"
class GAFASQLite(SQlite):
"""Class describing a GAFA SQLite database"""
MetadataElement(name='gafa_schema_version', default='0.3.0', param=MetadataParameter, desc='GAFA schema version',
readonly=True, visible=True, no_value='0.3.0')
file_ext = 'gafa.sqlite'
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
try:
conn = sqlite.connect(dataset.file_name)
c = conn.cursor()
version_query = 'SELECT version FROM meta'
results = c.execute(version_query).fetchall()
if len(results) == 0:
raise Exception('version not found in meta table')
elif len(results) > 1:
raise Exception('Multiple versions found in meta table')
dataset.metadata.gafa_schema_version = results[0][0]
except Exception as e:
log.warning("%s, set_meta Exception: %s", self, e)
def sniff(self, filename):
if super().sniff(filename):
table_names = frozenset({'gene', 'gene_family', 'gene_family_member', 'meta', 'transcript'})
return self.sniff_table_names(filename, table_names)
return False
class NcbiTaxonomySQlite(SQlite):
"""Class describing the NCBI Taxonomy database stored in SQLite as done by rust-ncbitaxonomy"""
MetadataElement(name='ncbitaxonomy_schema_version', default='20200501095116', param=MetadataParameter, desc='ncbitaxonomy schema version',
readonly=True, visible=True, no_value='20200501095116')
MetadataElement(name="taxon_count", default=[], param=MetadataParameter, desc="Count of taxa in the taxonomy",
readonly=True, visible=True, no_value=[])
file_ext = 'ncbitaxonomy.sqlite'
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
try:
conn = sqlite.connect(dataset.file_name)
c = conn.cursor()
version_query = 'SELECT version FROM __diesel_schema_migrations ORDER BY run_on DESC LIMIT 1'
results = c.execute(version_query).fetchall()
if len(results) == 0:
raise Exception('version not found in __diesel_schema_migrations table')
dataset.metadata.ncbitaxonomy_schema_version = results[0][0]
taxons_query = 'SELECT count(name) FROM taxonomy'
results = c.execute(taxons_query).fetchall()
if len(results) == 0:
raise Exception('could not count size of taxonomy table')
dataset.metadata.taxon_count = results[0][0]
except Exception as e:
log.warning("%s, set_meta Exception: %s", self, e)
def sniff(self, filename):
if super().sniff(filename):
table_names = frozenset({'__diesel_schema_migrations', 'taxonomy'})
return self.sniff_table_names(filename, table_names)
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "NCBI Taxonomy SQLite Database, version {} ({} taxons)".format(
getattr(dataset.metadata, "ncbitaxonomy_schema_version", "unknown"),
getattr(dataset.metadata, "taxon_count", "unknown")
)
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return "NCBI Taxonomy SQLite Database, version {} ({} taxons)".format(
getattr(dataset.metadata, "ncbitaxonomy_schema_version", "unknown"),
getattr(dataset.metadata, "taxon_count", "unknown")
)
class Xlsx(Binary):
"""Class for Excel 2007 (xlsx) files"""
file_ext = "xlsx"
compressed = True
def sniff(self, filename):
# Xlsx is compressed in zip format and must not be uncompressed in Galaxy.
try:
if zipfile.is_zipfile(filename):
tempzip = zipfile.ZipFile(filename)
if "[Content_Types].xml" in tempzip.namelist() and tempzip.read("[Content_Types].xml").find(b'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml') != -1:
return True
return False
except Exception:
return False
class ExcelXls(Binary):
"""Class describing an Excel (xls) file"""
file_ext = "excel.xls"
edam_format = "format_3468"
def sniff(self, filename):
mime_type = subprocess.check_output(['file', '--mime-type', filename])
return b"application/vnd.ms-excel" in mime_type
def get_mime(self):
"""Returns the mime type of the datatype"""
return 'application/vnd.ms-excel'
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Microsoft Excel XLS file"
dataset.blurb = data.nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"Microsoft Excel XLS file ({data.nice_size(dataset.get_size())})"
@build_sniff_from_prefix
class Sra(Binary):
""" Sequence Read Archive (SRA) datatype originally from mdshw5/sra-tools-galaxy"""
file_ext = 'sra'
def sniff_prefix(self, sniff_prefix):
""" The first 8 bytes of any NCBI sra file is 'NCBI.sra', and the file is binary.
For details about the format, see http://www.ncbi.nlm.nih.gov/books/n/helpsra/SRA_Overview_BK/#SRA_Overview_BK.4_SRA_Data_Structure
"""
return sniff_prefix.startswith_bytes(b'NCBI.sra')
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = 'Binary sra file'
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f'Binary sra file ({nice_size(dataset.get_size())})'
class RData(Binary):
"""Generic R Data file datatype implementation"""
file_ext = 'rdata'
    def sniff(self, filename):
        rdata_header = b'RDX2\nX\n'
        try:
            with open(filename, 'rb') as fh:
                if fh.read(7) == rdata_header:
                    return True
            with gzip.open(filename) as fh:
                if fh.read(7) == rdata_header:
                    return True
            return False
        except Exception:
            return False
class OxliBinary(Binary):
@staticmethod
def _sniff(filename, oxlitype):
try:
with open(filename, 'rb') as fileobj:
header = fileobj.read(4)
if header == b'OXLI':
fileobj.read(1) # skip the version number
ftype = fileobj.read(1)
if binascii.hexlify(ftype) == oxlitype:
return True
return False
except OSError:
return False
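# Note: the Oxli type byte checked by OxliBinary._sniff above distinguishes the
# subclasses defined below: 0x01 count graph, 0x02 node graph, 0x03 tag set,
# 0x04 stop tags, 0x05 subset/partition map, 0x06 graph labels.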
class OxliCountGraph(OxliBinary):
"""
OxliCountGraph starts with "OXLI" + one byte version number +
8-bit binary '1'
Test file generated via::
load-into-counting.py --n_tables 1 --max-tablesize 1 \\
oxli_countgraph.oxlicg khmer/tests/test-data/100-reads.fq.bz2
using khmer 2.0
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('sequence.csfasta')
>>> OxliCountGraph().sniff(fname)
False
>>> fname = get_test_fname("oxli_countgraph.oxlicg")
>>> OxliCountGraph().sniff(fname)
True
"""
file_ext = 'oxlicg'
def sniff(self, filename):
return OxliBinary._sniff(filename, b"01")
class OxliNodeGraph(OxliBinary):
"""
OxliNodeGraph starts with "OXLI" + one byte version number +
8-bit binary '2'
Test file generated via::
load-graph.py --n_tables 1 --max-tablesize 1 oxli_nodegraph.oxling \\
khmer/tests/test-data/100-reads.fq.bz2
using khmer 2.0
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('sequence.csfasta')
>>> OxliNodeGraph().sniff(fname)
False
>>> fname = get_test_fname("oxli_nodegraph.oxling")
>>> OxliNodeGraph().sniff(fname)
True
"""
file_ext = 'oxling'
def sniff(self, filename):
return OxliBinary._sniff(filename, b"02")
class OxliTagSet(OxliBinary):
"""
OxliTagSet starts with "OXLI" + one byte version number +
8-bit binary '3'
Test file generated via::
load-graph.py --n_tables 1 --max-tablesize 1 oxli_nodegraph.oxling \\
khmer/tests/test-data/100-reads.fq.bz2;
mv oxli_nodegraph.oxling.tagset oxli_tagset.oxlits
using khmer 2.0
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('sequence.csfasta')
>>> OxliTagSet().sniff(fname)
False
>>> fname = get_test_fname("oxli_tagset.oxlits")
>>> OxliTagSet().sniff(fname)
True
"""
file_ext = 'oxlits'
def sniff(self, filename):
return OxliBinary._sniff(filename, b"03")
class OxliStopTags(OxliBinary):
"""
OxliStopTags starts with "OXLI" + one byte version number +
8-bit binary '4'
Test file adapted from khmer 2.0's
"khmer/tests/test-data/goodversion-k32.stoptags"
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('sequence.csfasta')
>>> OxliStopTags().sniff(fname)
False
>>> fname = get_test_fname("oxli_stoptags.oxlist")
>>> OxliStopTags().sniff(fname)
True
"""
file_ext = 'oxlist'
def sniff(self, filename):
return OxliBinary._sniff(filename, b"04")
class OxliSubset(OxliBinary):
"""
OxliSubset starts with "OXLI" + one byte version number +
8-bit binary '5'
Test file generated via::
load-graph.py -k 20 example tests/test-data/random-20-a.fa;
partition-graph.py example;
mv example.subset.0.pmap oxli_subset.oxliss
using khmer 2.0
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('sequence.csfasta')
>>> OxliSubset().sniff(fname)
False
>>> fname = get_test_fname("oxli_subset.oxliss")
>>> OxliSubset().sniff(fname)
True
"""
file_ext = 'oxliss'
def sniff(self, filename):
return OxliBinary._sniff(filename, b"05")
class OxliGraphLabels(OxliBinary):
"""
OxliGraphLabels starts with "OXLI" + one byte version number +
8-bit binary '6'
Test file generated via::
python -c "from khmer import GraphLabels; \\
gl = GraphLabels(20, 1e7, 4); \\
gl.consume_fasta_and_tag_with_labels('tests/test-data/test-labels.fa'); \\
gl.save_labels_and_tags('oxli_graphlabels.oxligl')"
using khmer 2.0
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('sequence.csfasta')
>>> OxliGraphLabels().sniff(fname)
False
>>> fname = get_test_fname("oxli_graphlabels.oxligl")
>>> OxliGraphLabels().sniff(fname)
True
"""
file_ext = 'oxligl'
def sniff(self, filename):
return OxliBinary._sniff(filename, b"06")
class PostgresqlArchive(CompressedArchive):
"""
Class describing a Postgresql database packed into a tar archive
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('postgresql_fake.tar.bz2')
>>> PostgresqlArchive().sniff(fname)
True
>>> fname = get_test_fname('test.fast5.tar')
>>> PostgresqlArchive().sniff(fname)
False
"""
MetadataElement(name="version", default=None, param=MetadataParameter, desc="PostgreSQL database version",
readonly=True, visible=True, no_value=None)
file_ext = "postgresql"
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
try:
if dataset and tarfile.is_tarfile(dataset.file_name):
with tarfile.open(dataset.file_name, 'r') as temptar:
pg_version_file = temptar.extractfile('postgresql/db/PG_VERSION')
dataset.metadata.version = util.unicodify(pg_version_file.read()).strip()
except Exception as e:
log.warning('%s, set_meta Exception: %s', self, util.unicodify(e))
def sniff(self, filename):
if filename and tarfile.is_tarfile(filename):
with tarfile.open(filename, 'r') as temptar:
return 'postgresql/db/PG_VERSION' in temptar.getnames()
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = f"PostgreSQL Archive ({nice_size(dataset.get_size())})"
dataset.blurb = "PostgreSQL version %s" % (dataset.metadata.version or 'unknown')
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"PostgreSQL Archive ({nice_size(dataset.get_size())})"
class Fast5Archive(CompressedArchive):
"""
Class describing a FAST5 archive
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('test.fast5.tar')
>>> Fast5Archive().sniff(fname)
True
"""
MetadataElement(name="fast5_count", default='0', param=MetadataParameter, desc="Read Count",
readonly=True, visible=True, no_value=None)
file_ext = "fast5.tar"
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
try:
if dataset and tarfile.is_tarfile(dataset.file_name):
with tarfile.open(dataset.file_name, 'r') as temptar:
dataset.metadata.fast5_count = sum(
1 for f in temptar if f.name.endswith('.fast5')
)
except Exception as e:
log.warning('%s, set_meta Exception: %s', self, e)
def sniff(self, filename):
try:
if filename and tarfile.is_tarfile(filename):
with tarfile.open(filename, 'r') as temptar:
for f in temptar:
if not f.isfile():
continue
if f.name.endswith('.fast5'):
return True
else:
return False
except Exception as e:
log.warning('%s, sniff Exception: %s', self, e)
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = f"FAST5 Archive ({nice_size(dataset.get_size())})"
dataset.blurb = "%s sequences" % (dataset.metadata.fast5_count or 'unknown')
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"FAST5 Archive ({nice_size(dataset.get_size())})"
class Fast5ArchiveGz(Fast5Archive):
"""
Class describing a gzip-compressed FAST5 archive
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('test.fast5.tar.gz')
>>> Fast5ArchiveGz().sniff(fname)
True
>>> fname = get_test_fname('test.fast5.tar.bz2')
>>> Fast5ArchiveGz().sniff(fname)
False
>>> fname = get_test_fname('test.fast5.tar')
>>> Fast5ArchiveGz().sniff(fname)
False
"""
file_ext = "fast5.tar.gz"
def sniff(self, filename):
if not is_gzip(filename):
return False
return Fast5Archive.sniff(self, filename)
class Fast5ArchiveBz2(Fast5Archive):
"""
Class describing a bzip2-compressed FAST5 archive
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('test.fast5.tar.bz2')
>>> Fast5ArchiveBz2().sniff(fname)
True
>>> fname = get_test_fname('test.fast5.tar.gz')
>>> Fast5ArchiveBz2().sniff(fname)
False
>>> fname = get_test_fname('test.fast5.tar')
>>> Fast5ArchiveBz2().sniff(fname)
False
"""
file_ext = "fast5.tar.bz2"
def sniff(self, filename):
if not is_bz2(filename):
return False
return Fast5Archive.sniff(self, filename)
class SearchGuiArchive(CompressedArchive):
"""Class describing a SearchGUI archive """
MetadataElement(name="searchgui_version", default='1.28.0', param=MetadataParameter, desc="SearchGui Version",
readonly=True, visible=True, no_value=None)
MetadataElement(name="searchgui_major_version", default='1', param=MetadataParameter, desc="SearchGui Major Version",
readonly=True, visible=True, no_value=None)
file_ext = "searchgui_archive"
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
try:
if dataset and zipfile.is_zipfile(dataset.file_name):
with zipfile.ZipFile(dataset.file_name) as tempzip:
if 'searchgui.properties' in tempzip.namelist():
with tempzip.open('searchgui.properties') as fh:
for line in io.TextIOWrapper(fh):
if line.startswith('searchgui.version'):
version = line.split('=')[1].strip()
dataset.metadata.searchgui_version = version
dataset.metadata.searchgui_major_version = version.split('.')[0]
except Exception as e:
log.warning('%s, set_meta Exception: %s', self, e)
def sniff(self, filename):
try:
if filename and zipfile.is_zipfile(filename):
with zipfile.ZipFile(filename, 'r') as tempzip:
is_searchgui = 'searchgui.properties' in tempzip.namelist()
return is_searchgui
except Exception as e:
log.warning('%s, sniff Exception: %s', self, e)
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "SearchGUI Archive, version %s" % (dataset.metadata.searchgui_version or 'unknown')
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return "SearchGUI Archive, version %s" % (dataset.metadata.searchgui_version or 'unknown')
@build_sniff_from_prefix
class NetCDF(Binary):
"""Binary data in netCDF format"""
file_ext = "netcdf"
edam_format = "format_3650"
edam_data = "data_0943"
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Binary netCDF file"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"Binary netCDF file ({nice_size(dataset.get_size())})"
def sniff_prefix(self, sniff_prefix):
return sniff_prefix.startswith_bytes(b'CDF')
class Dcd(Binary):
"""
Class describing a dcd file from the CHARMM molecular simulation program
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('test_glucose_vacuum.dcd')
>>> Dcd().sniff(fname)
True
>>> fname = get_test_fname('interval.interval')
>>> Dcd().sniff(fname)
False
"""
file_ext = "dcd"
edam_data = "data_3842"
def __init__(self, **kwd):
super().__init__(**kwd)
self._magic_number = b'CORD'
def sniff(self, filename):
# Match the keyword 'CORD' at position 4 or 8 - intsize dependent
# Not checking for endianness
try:
with open(filename, 'rb') as header:
intsize = 4
header.seek(intsize)
if header.read(intsize) == self._magic_number:
return True
else:
intsize = 8
header.seek(intsize)
if header.read(intsize) == self._magic_number:
return True
return False
except Exception:
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Binary CHARMM/NAMD dcd file"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"Binary CHARMM/NAMD dcd file ({nice_size(dataset.get_size())})"
class Vel(Binary):
"""
Class describing a velocity file from the CHARMM molecular simulation program
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('test_charmm.vel')
>>> Vel().sniff(fname)
True
>>> fname = get_test_fname('interval.interval')
>>> Vel().sniff(fname)
False
"""
file_ext = "vel"
def __init__(self, **kwd):
super().__init__(**kwd)
self._magic_number = b'VELD'
def sniff(self, filename):
# Match the keyword 'VELD' at position 4 or 8 - intsize dependent
# Not checking for endianness
try:
with open(filename, 'rb') as header:
intsize = 4
header.seek(intsize)
if header.read(intsize) == self._magic_number:
return True
else:
intsize = 8
header.seek(intsize)
if header.read(intsize) == self._magic_number:
return True
return False
except Exception:
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Binary CHARMM velocity file"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"Binary CHARMM velocity file ({nice_size(dataset.get_size())})"
@build_sniff_from_prefix
class DAA(Binary):
"""
    Class describing a DAA (diamond alignment archive) file
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('diamond.daa')
>>> DAA().sniff(fname)
True
>>> fname = get_test_fname('interval.interval')
>>> DAA().sniff(fname)
False
"""
file_ext = "daa"
def __init__(self, **kwd):
super().__init__(**kwd)
self._magic = binascii.unhexlify("6be33e6d47530e3c")
def sniff_prefix(self, sniff_prefix):
# The first 8 bytes of any daa file are 0x3c0e53476d3ee36b
return sniff_prefix.startswith_bytes(self._magic)
@build_sniff_from_prefix
class RMA6(Binary):
"""
Class describing an RMA6 (MEGAN6 read-match archive) file
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('diamond.rma6')
>>> RMA6().sniff(fname)
True
>>> fname = get_test_fname('interval.interval')
>>> RMA6().sniff(fname)
False
"""
file_ext = "rma6"
def __init__(self, **kwd):
super().__init__(**kwd)
self._magic = binascii.unhexlify("000003f600000006")
def sniff_prefix(self, sniff_prefix):
return sniff_prefix.startswith_bytes(self._magic)
@build_sniff_from_prefix
class DMND(Binary):
"""
    Class describing a DMND file
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('diamond_db.dmnd')
>>> DMND().sniff(fname)
True
>>> fname = get_test_fname('interval.interval')
>>> DMND().sniff(fname)
False
"""
file_ext = "dmnd"
def __init__(self, **kwd):
super().__init__(**kwd)
self._magic = binascii.unhexlify("6d18ee15a4f84a02")
def sniff_prefix(self, sniff_prefix):
# The first 8 bytes of any dmnd file are 0x24af8a415ee186d
return sniff_prefix.startswith_bytes(self._magic)
class ICM(Binary):
"""
Class describing an ICM (interpolated context model) file, used by Glimmer
"""
file_ext = "icm"
edam_data = "data_0950"
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Binary ICM (interpolated context model) file"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
    def sniff(self, filename):
        try:
            with open(filename) as fh:
                line = fh.read(100)
        except Exception:
            return False
        return ('>ver = ' in line and 'len = ' in line and 'depth = ' in line
                and 'periodicity =' in line and 'nodes = ' in line)
@build_sniff_from_prefix
class Parquet(Binary):
"""
Class describing Apache Parquet file (https://parquet.apache.org/)
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('example.parquet')
>>> Parquet().sniff(fname)
True
>>> fname = get_test_fname('test.mz5')
>>> Parquet().sniff(fname)
False
"""
file_ext = "parquet"
def __init__(self, **kwd):
super().__init__(**kwd)
self._magic = b"PAR1" # Defined at https://parquet.apache.org/documentation/latest/
def sniff_prefix(self, sniff_prefix):
return sniff_prefix.startswith_bytes(self._magic)
class BafTar(CompressedArchive):
"""
Base class for common behavior of tar files of directory-based raw file formats
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('brukerbaf.d.tar')
>>> BafTar().sniff(fname)
True
>>> fname = get_test_fname('test.fast5.tar')
>>> BafTar().sniff(fname)
False
"""
edam_data = "data_2536" # mass spectrometry data
edam_format = "format_3712" # TODO: add more raw formats to EDAM?
file_ext = "brukerbaf.d.tar"
def get_signature_file(self):
return "analysis.baf"
def sniff(self, filename):
if tarfile.is_tarfile(filename):
with tarfile.open(filename) as rawtar:
return self.get_signature_file() in [os.path.basename(f).lower() for f in rawtar.getnames()]
return False
def get_type(self):
return "Bruker BAF directory archive"
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = self.get_type()
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"{self.get_type()} ({nice_size(dataset.get_size())})"
class YepTar(BafTar):
""" A tar'd up .d directory containing Agilent/Bruker YEP format data """
file_ext = "agilentbrukeryep.d.tar"
def get_signature_file(self):
return "analysis.yep"
def get_type(self):
return "Agilent/Bruker YEP directory archive"
class TdfTar(BafTar):
""" A tar'd up .d directory containing Bruker TDF format data """
file_ext = "brukertdf.d.tar"
def get_signature_file(self):
return "analysis.tdf"
def get_type(self):
return "Bruker TDF directory archive"
class MassHunterTar(BafTar):
""" A tar'd up .d directory containing Agilent MassHunter format data """
file_ext = "agilentmasshunter.d.tar"
def get_signature_file(self):
return "msscan.bin"
def get_type(self):
return "Agilent MassHunter directory archive"
class MassLynxTar(BafTar):
""" A tar'd up .d directory containing Waters MassLynx format data """
file_ext = "watersmasslynx.raw.tar"
def get_signature_file(self):
return "_func001.dat"
def get_type(self):
return "Waters MassLynx RAW directory archive"
class WiffTar(BafTar):
"""
A tar'd up .wiff/.scan pair containing Sciex WIFF format data
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('some.wiff.tar')
>>> WiffTar().sniff(fname)
True
>>> fname = get_test_fname('brukerbaf.d.tar')
>>> WiffTar().sniff(fname)
False
>>> fname = get_test_fname('test.fast5.tar')
>>> WiffTar().sniff(fname)
False
"""
file_ext = "wiff.tar"
def sniff(self, filename):
if tarfile.is_tarfile(filename):
with tarfile.open(filename) as rawtar:
return ".wiff" in [os.path.splitext(os.path.basename(f).lower())[1] for f in rawtar.getnames()]
return False
def get_type(self):
return "Sciex WIFF/SCAN archive"
@build_sniff_from_prefix
class Pretext(Binary):
"""
PretextMap contact map file
Try to guess if the file is a Pretext file.
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('sample.pretext')
>>> Pretext().sniff(fname)
True
"""
file_ext = "pretext"
def sniff_prefix(self, sniff_prefix):
# The first 4 bytes of any pretext file is 'pstm', and the rest of the
# file contains binary data.
return sniff_prefix.startswith_bytes(b'pstm')
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Binary pretext file"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return "Binary pretext file (%s)" % (nice_size(dataset.get_size()))
class JP2(Binary):
"""
JPEG 2000 binary image format
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('test.jp2')
>>> JP2().sniff(fname)
True
>>> fname = get_test_fname('interval.interval')
>>> JP2().sniff(fname)
False
"""
file_ext = "jp2"
def __init__(self, **kwd):
super().__init__(**kwd)
self._magic = binascii.unhexlify("0000000C6A5020200D0A870A")
def sniff(self, filename):
# The first 12 bytes of any jp2 file are 0000000C6A5020200D0A870A
try:
header = open(filename, 'rb').read(12)
if header == self._magic:
return True
return False
except Exception:
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Binary JPEG 2000 file"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return "Binary JPEG 2000 file (%s)" % (nice_size(dataset.get_size()))
class Npz(CompressedArchive):
"""
    Class describing a Numpy NPZ file
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('hexrd.images.npz')
>>> Npz().sniff(fname)
True
>>> fname = get_test_fname('interval.interval')
>>> Npz().sniff(fname)
False
"""
file_ext = "npz"
# edam_format = "format_4003"
MetadataElement(name="nfiles", default=0, desc="nfiles", readonly=True, visible=True, no_value=0)
MetadataElement(name="files", default=[], desc="files", readonly=True, visible=True, no_value=[])
def __init__(self, **kwd):
super().__init__(**kwd)
def sniff(self, filename):
try:
npz = np.load(filename)
if isinstance(npz, np.lib.npyio.NpzFile):
for f in npz.files:
if isinstance(npz[f], np.ndarray):
return True
except Exception:
return False
return False
def set_meta(self, dataset, **kwd):
try:
with np.load(dataset.file_name) as npz:
dataset.metadata.nfiles = len(npz.files)
dataset.metadata.files = npz.files
except Exception as e:
log.warning('%s, set_meta Exception: %s', self, e)
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = f"Binary Numpy npz {dataset.metadata.nfiles} files ({nice_size(dataset.get_size())})"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return "Binary Numpy npz file (%s)" % (nice_size(dataset.get_size()))
class HexrdImagesNpz(Npz):
"""
Class describing an HEXRD Images Numpy NPZ file
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('hexrd.images.npz')
>>> HexrdImagesNpz().sniff(fname)
True
>>> fname = get_test_fname('eta_ome.npz')
>>> HexrdImagesNpz().sniff(fname)
False
"""
file_ext = "hexrd.images.npz"
MetadataElement(name="panel_id", default='', desc="Detector Panel ID", param=MetadataParameter, readonly=True, visible=True, optional=True, no_value='')
MetadataElement(name="shape", default=(), desc="shape", param=metadata.ListParameter, readonly=True, visible=True, no_value=())
MetadataElement(name="nframes", default=0, desc="nframes", readonly=True, visible=True, no_value=0)
MetadataElement(name="omegas", desc="has omegas", default="False", visible=False)
def __init__(self, **kwd):
super().__init__(**kwd)
def sniff(self, filename):
if super().sniff(filename):
try:
req_files = {'0_row', '0_col', '0_data', 'shape', 'nframes', 'dtype'}
with np.load(filename) as npz:
return set(npz.files) >= req_files
except Exception as e:
log.warning('%s, sniff Exception: %s', self, e)
return False
return False
def set_meta(self, dataset, **kwd):
super().set_meta(dataset, **kwd)
try:
with np.load(dataset.file_name) as npz:
if 'panel_id' in npz.files:
dataset.metadata.panel_id = str(npz['panel_id'])
if 'omega' in npz.files:
dataset.metadata.omegas = "True"
dataset.metadata.shape = npz['shape'].tolist()
dataset.metadata.nframes = npz['nframes'].tolist()
except Exception as e:
log.warning('%s, set_meta Exception: %s', self, e)
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
lines = [f"Binary Hexrd Image npz {dataset.metadata.nfiles} files ({nice_size(dataset.get_size())})",
f"Panel: {dataset.metadata.panel_id} Frames: {dataset.metadata.nframes} Shape: {dataset.metadata.shape}"]
dataset.peek = '\n'.join(lines)
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return "Binary Numpy npz file (%s)" % (nice_size(dataset.get_size()))
class HexrdEtaOmeNpz(Npz):
"""
Class describing an HEXRD Eta-Ome Numpy NPZ file
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('hexrd.eta_ome.npz')
>>> HexrdEtaOmeNpz().sniff(fname)
True
>>> fname = get_test_fname('hexrd.images.npz')
>>> HexrdEtaOmeNpz().sniff(fname)
False
"""
file_ext = "hexrd.eta_ome.npz"
MetadataElement(name="HKLs", default=(), desc="HKLs", param=metadata.ListParameter, readonly=True, visible=True, no_value=())
MetadataElement(name="nframes", default=0, desc="nframes", readonly=True, visible=True, no_value=0)
def __init__(self, **kwd):
super().__init__(**kwd)
def sniff(self, filename):
if super().sniff(filename):
try:
req_files = {'dataStore', 'etas', 'etaEdges', 'iHKLList', 'omegas', 'omeEdges', 'planeData_hkls'}
with np.load(filename) as npz:
return set(npz.files) >= req_files
except Exception as e:
log.warning('%s, sniff Exception: %s', self, e)
return False
return False
def set_meta(self, dataset, **kwd):
super().set_meta(dataset, **kwd)
try:
with np.load(dataset.file_name) as npz:
dataset.metadata.HKLs = npz['iHKLList'].tolist()
dataset.metadata.nframes = len(npz['omegas'])
except Exception as e:
log.warning('%s, set_meta Exception: %s', self, e)
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
lines = [f"Binary Hexrd Eta-Ome npz {dataset.metadata.nfiles} files ({nice_size(dataset.get_size())})",
f"Eta-Ome HKLs: {dataset.metadata.HKLs} Frames: {dataset.metadata.nframes}"]
dataset.peek = '\n'.join(lines)
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return "Binary Numpy npz file (%s)" % (nice_size(dataset.get_size()))
if __name__ == '__main__':
import doctest
doctest.testmod(sys.modules[__name__])
|
the-stack_0_9897 | import os
import sfepy
from sfepy.base.base import load_classes, insert_static_method
from solvers import *
from eigen import eig
solver_files = sfepy.get_paths('sfepy/solvers/*.py')
remove = ['setup.py', 'solvers.py', 'petsc_worker.py']
solver_files = [name for name in solver_files
if os.path.basename(name) not in remove]
solver_table = load_classes(solver_files,
[LinearSolver, NonlinearSolver,
TimeSteppingSolver, EigenvalueSolver,
OptimizationSolver], package_name='sfepy.solvers')
def register_solver(cls):
"""
Register a custom solver.
"""
solver_table[cls.name] = cls
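# Illustrative sketch (not part of sfepy itself): a hypothetical custom solver class
# with a ``name`` attribute can be registered so that any_from_conf() below can
# instantiate it from a configuration whose ``kind`` matches that name, e.g.:
#
#     class MyLinearSolver(LinearSolver):
#         name = 'ls.my_solver'
#         ...
#
#     register_solver(MyLinearSolver)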
def any_from_conf(conf, **kwargs):
"""Create an instance of a solver class according to the configuration."""
return solver_table[conf.kind](conf, **kwargs)
insert_static_method(Solver, any_from_conf)
del any_from_conf
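# Note: insert_static_method() above attaches the factory to the Solver class, so it
# can be called as Solver.any_from_conf(conf, **kwargs); the module-level name is then
# deleted to avoid keeping a duplicate reference.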
del sfepy
|
the-stack_0_9898 | import datetime
import os
from cryptography.x509.oid import NameOID
from cryptography.hazmat.primitives.serialization import Encoding
from .logger import logger
class StorageEngineCertificateConflict(Exception):
"""
Raise when a StorageEngine implementation is asked to persist a certificate
with a serial number that already exists or CommonName that is already in
use by another non-expired/revoked certificate
"""
class StorageEngineMissing(Exception):
"""
Raise when a StorageEngine type is missing.
"""
class UpdateCertException(Exception):
"""
Raise when attempting to update a cert and parameters are missing.
"""
class SqlStorageEngine:
"""
A Base SQL Storage Engine implementation.
"""
def close(self):
return self.conn.close()
class SQLiteStorageEngine(SqlStorageEngine):
"""
A StorageEngine implementation that persists data to a SQLite3 database
"""
def __init__(self, config):
import sqlite3
db_path = config.get(
"storage.sqlite3",
"db_path",
os.path.join(os.getcwd(), "mtls-server.db"),
)
self.conn = sqlite3.connect(db_path, check_same_thread=False)
def init_db(self):
cur = self.conn.cursor()
cur.execute(
"""
CREATE TABLE IF NOT EXISTS certs (
serial_number text,
common_name text,
not_valid_after datetime,
cert blob,
revoked boolean,
fingerprint text
)
"""
)
self.conn.commit()
def save_cert(self, cert, fingerprint):
if self.__conflicting_cert_exists(cert, fingerprint):
raise StorageEngineCertificateConflict
common_name = cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME)
common_name = common_name[0].value
cur = self.conn.cursor()
cur.execute(
"""
INSERT INTO certs (
serial_number,
common_name,
not_valid_after,
cert,
revoked,
fingerprint
)
VALUES (?, ?, ?, ?, ?, ?)
""",
[
str(cert.serial_number),
common_name,
cert.not_valid_after,
cert.public_bytes(Encoding.PEM).decode("UTF-8"),
False,
fingerprint,
],
)
self.conn.commit()
def revoke_cert(self, serial_number):
cur = self.conn.cursor()
logger.info(
"Revoking certificate {serial_number}".format(
serial_number=serial_number
)
)
cur.execute(
"UPDATE certs SET revoked=1 WHERE serial_number=?",
[str(serial_number)],
)
self.conn.commit()
def update_cert(self, serial_number=None, cert=None):
if not serial_number or not cert:
logger.error("A serial number and cert are required to update.")
raise UpdateCertException
cur = self.conn.cursor()
logger.info(
"Updating certificate {serial_number}".format(
serial_number=serial_number
)
)
cur.execute(
"""
UPDATE
certs
SET
cert=?,
not_valid_after=?
WHERE
serial_number=?
""",
[
cert.public_bytes(Encoding.PEM).decode("UTF-8"),
cert.not_valid_after,
str(serial_number),
],
)
self.conn.commit()
def get_cert(
self,
serial_number=None,
common_name=None,
fingerprint=None,
show_revoked=False,
):
cur = self.conn.cursor()
value = None
query = "SELECT cert FROM certs WHERE"
if serial_number is not None:
query += " serial_number=?"
value = str(serial_number)
elif fingerprint is not None:
query += " fingerprint=?"
value = str(fingerprint)
elif common_name is not None:
query += " common_name=?"
value = str(common_name)
else:
return None
if show_revoked:
query += " AND revoked=1"
else:
query += " AND revoked=0"
cur.execute(query, [str(value)])
rows = cur.fetchall()
certs = []
for row in rows:
certs.append(row[0])
return certs
def get_revoked_certs(self):
cur = self.conn.cursor()
now = str(datetime.datetime.utcnow())
cur.execute(
"SELECT cert FROM certs WHERE revoked=1 AND not_valid_after>?",
[now],
)
rows = cur.fetchall()
certs = []
for row in rows:
certs.append(row[0])
return certs
def __conflicting_cert_exists(self, cert, fingerprint):
common_name = cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME)
common_name = common_name[0].value
cur = self.conn.cursor()
cur.execute(
"""
SELECT count(*) FROM certs
WHERE serial_number=?
OR (
common_name=?
AND revoked=0
)
""",
[str(cert.serial_number), common_name],
)
conflicts = cur.fetchone()[0]
return conflicts > 0
class PostgresqlStorageEngine(SqlStorageEngine):
"""
A StorageEngine implementation that persists data to a Postgresql database
"""
def __init__(self, config):
import psycopg2
self.conn = psycopg2.connect(
dbname=config.get("storage.postgres", "database"),
user=config.get("storage.postgres", "user"),
password=config.get("storage.postgres", "password"),
host=config.get("storage.postgres", "host", "localhost"),
port=config.get_int("storage.postgres", "port", 5432),
)
def init_db(self):
cur = self.conn.cursor()
cur.execute(
"""
CREATE TABLE IF NOT EXISTS certs (
serial_number text,
common_name text,
not_valid_after timestamp,
cert text,
revoked boolean,
fingerprint text
)
"""
)
self.conn.commit()
def save_cert(self, cert, fingerprint):
if self.__conflicting_cert_exists(cert, fingerprint):
raise StorageEngineCertificateConflict
common_name = cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME)
common_name = common_name[0].value
cur = self.conn.cursor()
cur.execute(
"""
INSERT INTO certs (
serial_number,
common_name,
not_valid_after,
cert,
revoked,
fingerprint
)
VALUES (%s, %s, %s, %s, %s, %s)
""",
(
str(cert.serial_number),
common_name,
cert.not_valid_after,
cert.public_bytes(Encoding.PEM).decode("UTF-8"),
False,
fingerprint,
),
)
self.conn.commit()
def get_cert(
self,
serial_number=None,
common_name=None,
fingerprint=None,
show_revoked=False,
):
cur = self.conn.cursor()
value = None
query = "SELECT cert FROM certs WHERE"
if serial_number is not None:
query += " serial_number = %s"
value = str(serial_number)
elif fingerprint is not None:
query += " fingerprint = %s"
value = fingerprint
elif common_name is not None:
query += " common_name = %s"
value = common_name
else:
return None
query += " AND revoked = %s"
cur.execute(query, (value, show_revoked))
rows = cur.fetchall()
certs = []
for row in rows:
certs.append(row[0])
return certs
def revoke_cert(self, serial_number):
cur = self.conn.cursor()
logger.info(
"Revoking certificate {serial_number}".format(
serial_number=serial_number
)
)
cur.execute(
"UPDATE certs SET revoked=true WHERE serial_number = %s",
(str(serial_number),),
)
self.conn.commit()
def update_cert(self, serial_number=None, cert=None):
if not serial_number or not cert:
logger.error("A serial number and cert are required to update.")
raise UpdateCertException
cur = self.conn.cursor()
logger.info(
"Updating certificate {serial_number}".format(
serial_number=serial_number
)
)
cur.execute(
"""
UPDATE
certs
SET
cert = %s,
not_valid_after = %s
WHERE
serial_number = %s
""",
(
cert.public_bytes(Encoding.PEM).decode("UTF-8"),
cert.not_valid_after,
str(serial_number),
),
)
self.conn.commit()
def get_revoked_certs(self):
cur = self.conn.cursor()
now = datetime.datetime.utcnow()
not_valid_after = now.strftime("%Y-%m-%d %H:%M:%S")
cur.execute(
"SELECT cert FROM certs WHERE revoked = true AND "
+ "not_valid_after > %s",
(str(not_valid_after),),
)
rows = cur.fetchall()
certs = []
for row in rows:
certs.append(row[0])
return certs
def __conflicting_cert_exists(self, cert, fingerprint):
common_name = cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME)
common_name = common_name[0].value
cur = self.conn.cursor()
cur.execute(
"""
SELECT count(*) FROM certs
WHERE serial_number = %s
OR (
common_name = %s
AND fingerprint = %s
AND revoked=false
)
""",
(str(cert.serial_number), common_name, fingerprint),
)
conflicts = cur.fetchone()[0]
return conflicts > 0
class StorageEngineNotSupportedError(Exception):
"""
Raise when a StorageEngine implementation cannot be created from the
provided configuration
"""
class StorageEngine:
"""
StorageEngine is a factory that returns a concrete engine implementation
depending on the configuration
"""
def __new__(cls, config):
engine = config.get("storage", "engine", None)
if engine is None:
raise StorageEngineMissing()
if engine == "sqlite3":
return SQLiteStorageEngine(config)
elif engine == "postgres":
return PostgresqlStorageEngine(config)
else:
raise StorageEngineNotSupportedError(engine)
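# Illustrative sketch (assumes a config object exposing the get()/get_int() interface
# used by the engines above; the values shown are hypothetical):
#
#     engine = StorageEngine(config)   # SQLiteStorageEngine or PostgresqlStorageEngine
#     engine.init_db()
#     engine.save_cert(cert, fingerprint)
#     matches = engine.get_cert(common_name="alice")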
|
the-stack_0_9899 | import datetime
from docxtpl import DocxTemplate
from docxtpl import InlineImage
from docx.shared import Cm
from docxtpl import DocxTemplate, InlineImage
def get_context(brand, model, fuel_consumption, price):
return {
'brand': brand,
'model': model,
'fuel_consumption': fuel_consumption,
'price': price
}
def from_template(brand, model, fuel_consumption, price, template, signature):
template = DocxTemplate(template)
context = get_context(brand, model, fuel_consumption, price)
# Задаём параметры картинки
img_size = Cm(15)
acc = InlineImage(template, signature, img_size)
    # Populate the template with the supplied information
context['acc'] = acc
template.render(context)
    # Save the rendered file with the filled-in information
template.save(brand + '_' + str(datetime.datetime.now().date()) + '_data.docx')
def generate_report(brand, model, fuel_consumption, price):
template = 'report.docx'
signature = 'skoda.jpeg'
document = from_template(brand, model, fuel_consumption, price, template, signature)
def to_fixed(num_obj, digits=0):
return f"{num_obj:.{digits}f}"
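# For example, to_fixed(3.14159, 2) returns '3.14' and to_fixed(7) returns '7'.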
generate_report('Skoda', 'Octavia', '9 l/100 km', '1 500 000 RUB')
|
the-stack_0_9901 | """
Python 3.9 function for running the neural network training process
File name: train_c4.py
Version: 0.1
Author: Andrej Marinchenko
Date: 2021-12-20
"""
#!/usr/bin/env python
from alpha_net_c4 import ConnectNet, AlphaLoss, board_data
import os
import pickle
import datetime
import numpy as np
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.nn.utils import clip_grad_norm_
import matplotlib.pyplot as plt
import logging
logging.basicConfig(format='%(asctime)s [%(levelname)s]: %(message)s', \
datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.INFO)
logger = logging.getLogger(__file__)
def save_as_pickle(filename, data):
completeName = os.path.join("./model_data/",\
filename)
with open(completeName, 'wb') as output:
pickle.dump(data, output)
def load_pickle(filename):
completeName = os.path.join("./model_data/",\
filename)
with open(completeName, 'rb') as pkl_file:
data = pickle.load(pkl_file)
return data
def load_state(net, optimizer, scheduler, args, iteration, new_optim_state=True):
""" Loads saved model and optimizer states if exists """
base_path = "./model_data/"
checkpoint_path = os.path.join(base_path, "%s_iter%d.pth.tar" % (args.neural_net_name, iteration))
start_epoch, checkpoint = 0, None
if os.path.isfile(checkpoint_path):
checkpoint = torch.load(checkpoint_path)
if checkpoint != None:
if (len(checkpoint) == 1) or (new_optim_state == True):
net.load_state_dict(checkpoint['state_dict'])
logger.info("Loaded checkpoint model %s." % checkpoint_path)
else:
start_epoch = checkpoint['epoch']
net.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
scheduler.load_state_dict(checkpoint['scheduler'])
logger.info("Loaded checkpoint model %s, and optimizer, scheduler." % checkpoint_path)
return start_epoch
def load_results(iteration):
""" Loads saved results if exists """
losses_path = "./model_data/losses_per_epoch_iter%d.pkl" % iteration
if os.path.isfile(losses_path):
losses_per_epoch = load_pickle("losses_per_epoch_iter%d.pkl" % iteration)
logger.info("Loaded results buffer")
else:
losses_per_epoch = []
return losses_per_epoch
def train(net, dataset, optimizer, scheduler, start_epoch, cpu, args, iteration):
torch.manual_seed(cpu)
cuda = torch.cuda.is_available()
net.train()
criterion = AlphaLoss()
train_set = board_data(dataset)
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, num_workers=0, pin_memory=False)
losses_per_epoch = load_results(iteration + 1)
logger.info("Starting training process...")
update_size = len(train_loader)//10
print("Update step size: %d" % update_size)
for epoch in range(start_epoch, args.num_epochs):
total_loss = 0.0
losses_per_batch = []
for i,data in enumerate(train_loader,0):
state, policy, value = data
state, policy, value = state.float(), policy.float(), value.float()
if cuda:
state, policy, value = state.cuda(), policy.cuda(), value.cuda()
policy_pred, value_pred = net(state) # policy_pred = torch.Size([batch, 4672]) value_pred = torch.Size([batch, 1])
loss = criterion(value_pred[:,0], value, policy_pred, policy)
loss = loss/args.gradient_acc_steps
loss.backward()
clip_grad_norm_(net.parameters(), args.max_norm)
if (epoch % args.gradient_acc_steps) == 0:
optimizer.step()
optimizer.zero_grad()
total_loss += loss.item()
if i % update_size == (update_size - 1): # print every update_size-d mini-batches of size = batch_size
losses_per_batch.append(args.gradient_acc_steps*total_loss/update_size)
print('[Iteration %d] Process ID: %d [Epoch: %d, %5d/ %d points] total loss per batch: %.3f' %
(iteration, os.getpid(), epoch + 1, (i + 1)*args.batch_size, len(train_set), losses_per_batch[-1]))
print("Policy (actual, predicted):",policy[0].argmax().item(),policy_pred[0].argmax().item())
print("Policy data:", policy[0]); print("Policy pred:", policy_pred[0])
print("Value (actual, predicted):", value[0].item(), value_pred[0,0].item())
#print("Conv grad: %.7f" % net.conv.conv1.weight.grad.mean().item())
#print("Res18 grad %.7f:" % net.res_18.conv1.weight.grad.mean().item())
print(" ")
total_loss = 0.0
scheduler.step()
if len(losses_per_batch) >= 1:
losses_per_epoch.append(sum(losses_per_batch)/len(losses_per_batch))
if (epoch % 2) == 0:
save_as_pickle("losses_per_epoch_iter%d.pkl" % (iteration + 1), losses_per_epoch)
torch.save({
'epoch': epoch + 1,\
'state_dict': net.state_dict(),\
'optimizer' : optimizer.state_dict(),\
'scheduler' : scheduler.state_dict(),\
}, os.path.join("./model_data/",\
"%s_iter%d.pth.tar" % (args.neural_net_name, (iteration + 1))))
'''
# Early stopping
if len(losses_per_epoch) > 50:
if abs(sum(losses_per_epoch[-4:-1])/3-sum(losses_per_epoch[-16:-13])/3) <= 0.00017:
break
'''
logger.info("Finished Training!")
fig = plt.figure()
ax = fig.add_subplot(222)
ax.scatter([e for e in range(start_epoch, (len(losses_per_epoch) + start_epoch))], losses_per_epoch)
ax.set_xlabel("Epoch")
ax.set_ylabel("Loss per batch")
ax.set_title("Loss vs Epoch")
plt.savefig(os.path.join("./model_data/", "Loss_vs_Epoch_iter%d_%s.png" % ((iteration + 1), datetime.datetime.today().strftime("%Y-%m-%d"))))
plt.show()
def train_connectnet(args, iteration, new_optim_state):
# gather data
logger.info("Loading training data...")
data_path="./datasets/iter_%d/" % iteration
datasets = []
for idx,file in enumerate(os.listdir(data_path)):
filename = os.path.join(data_path,file)
with open(filename, 'rb') as fo:
datasets.extend(pickle.load(fo, encoding='bytes'))
datasets = np.array(datasets)
logger.info("Loaded data from %s." % data_path)
# train net
net = ConnectNet()
cuda = torch.cuda.is_available()
if cuda:
net.cuda()
optimizer = optim.Adam(net.parameters(), lr=args.lr, betas=(0.8, 0.999))
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[50,100,150,200,250,300,400], gamma=0.77)
start_epoch = load_state(net, optimizer, scheduler, args, iteration, new_optim_state)
train(net, datasets, optimizer, scheduler, start_epoch, 0, args, iteration)
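# Example usage (a minimal sketch; the hyperparameter values below are assumptions,
# and ./datasets/iter_0/ plus ./model_data/ must already exist):
if __name__ == "__main__":
    from argparse import Namespace
    example_args = Namespace(neural_net_name="cc4_current_net", batch_size=32,
                             num_epochs=300, lr=0.001, gradient_acc_steps=1,
                             max_norm=1.0)
    train_connectnet(example_args, iteration=0, new_optim_state=True)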
|
the-stack_0_9902 | import unittest
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal
from pyrolite.util.plot.density import (
percentile_contour_values_from_meshz,
plot_Z_percentiles,
)
from pyrolite.util.plot.legend import proxy_line
from matplotlib.lines import _get_dash_pattern, _scale_dashes
import matplotlib.colors
class TestPercentileContourValuesFromMeshZ(unittest.TestCase):
def setUp(self):
x, y = np.mgrid[-1:1:100j, -1:1:100j]
pos = np.empty(x.shape + (2,))
pos[:, :, 0] = x
pos[:, :, 1] = y
self.z = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]]).pdf(pos)
def test_default(self):
percentile_contour_values_from_meshz(self.z)
def test_percentiles(self):
for ps in [[1.0], [0.001], np.linspace(0.001, 1, 10), [0.95, 0.10]]:
with self.subTest(ps=ps):
pc, cs = percentile_contour_values_from_meshz(self.z, percentiles=ps)
def test_resolution(self):
for res in [10, 100, 1000, 10000]:
with self.subTest(res=res):
pc, cs = percentile_contour_values_from_meshz(self.z, resolution=res)
def test_ask_below_minimum(self):
for ps in [[0.0001], [0.000001]]:
with self.subTest(ps=ps):
pc, cs = percentile_contour_values_from_meshz(
self.z, percentiles=ps, resolution=5
)
self.assertIn("min", pc)
class TestPlotZPercentiles(unittest.TestCase):
def setUp(self):
x, y = np.mgrid[-1:1:100j, -1:1:100j]
pos = np.empty(x.shape + (2,))
pos[:, :, 0] = x
pos[:, :, 1] = y
self.xi, self.yi = x, y
self.zi = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]]).pdf(pos)
def test_default(self):
plot_Z_percentiles(self.xi, self.yi, zi=self.zi)
def test_specified_contour_labels(self):
contour_labels = ["95th", "66th", "33rd"]
cs = plot_Z_percentiles(
self.xi, self.yi, zi=self.zi, contour_labels=contour_labels
)
for contour_label, label in zip(contour_labels, cs.labelTextsList):
label = label.get_text()
self.assertTrue(contour_label == label)
def test_styling_specified(self):
fig, ax = plt.subplots(1)
colors = [matplotlib.colors.to_rgba(c) for c in ["g", "b", "k"]]
linestyles = [_get_dash_pattern(d) for d in ["-", "--", "-."]]
linewidths = [1, 2, 3]
cs = plot_Z_percentiles(
self.xi,
self.yi,
zi=self.zi,
ax=ax,
percentiles=[0.95, 0.66, 0.33],
colors=colors,
linestyles=linestyles,
linewidths=linewidths,
)
for contour, color, ls, lw in zip(
cs.collections, colors, linestyles, linewidths
):
self.assertTrue((contour.get_color() == color).all())
self.assertEqual(contour.get_linestyle(), [_scale_dashes(*ls, lw)])
self.assertEqual(contour.get_linewidth(), lw)
def test_linestyles_specified(self):
plot_Z_percentiles(
self.xi,
self.yi,
zi=self.zi,
percentiles=[0.95, 0.66, 0.33],
)
def test_percentiles(self):
for ps in [[1.0], [0.01], np.linspace(0.001, 1, 10), [0.95, 0.10]]:
with self.subTest(ps=ps):
plot_Z_percentiles(self.xi, self.yi, zi=self.zi, percentiles=ps)
def test_external_ax(self):
fig, ax = plt.subplots(1)
plot_Z_percentiles(self.xi, self.yi, zi=self.zi, ax=ax)
def test_extent(self):
for extent in [[-1, 1, -1, 1], [-0.01, 0.99, -1.01, -0.01], [-2, 2, -2, -2]]:
with self.subTest(extent=extent):
plot_Z_percentiles(self.xi, self.yi, zi=self.zi, extent=extent)
def tearDown(self):
plt.close("all")
if __name__ == "__main__":
unittest.main()
|
the-stack_0_9904 | import numpy as np # linear algebra
import skimage.io
import os
import sys
np.random.seed(1234)
import scipy.misc
import skimage.morphology as mph
from skimage import color
dd = sys.argv[1]
STAGE1_TRAIN = "../inputs/"+dd
STAGE1_TRAIN_IMAGE_PATTERN = "%s/{}/images/{}.png" % STAGE1_TRAIN
STAGE1_TRAIN_MASK_PATTERN = "%s/{}/masks/*.png" % STAGE1_TRAIN
# Get image names
def image_ids_in(root_dir, ignore=['.DS_Store', 'summary.csv', 'stage1_train_labels.csv', 'vsamples.csv', 'stage1_solution.csv', 'samples.csv']):
ids = []
for id in os.listdir(root_dir):
if id in ignore:
print('Skipping ID:', id)
else:
ids.append(id)
return ids
# read in images
def read_image(image_id, space="rgb"):
print(image_id)
image_file = STAGE1_TRAIN_IMAGE_PATTERN.format(image_id, image_id)
image = skimage.io.imread(image_file)
# Drop alpha which is not used
image = image[:, :, :3]
if space == "hsv":
image = skimage.color.rgb2hsv(image)
return image
# Get image width, height and combine masks available.
def read_image_labels(image_id, space="rgb"):
image = read_image(image_id, space = space)
mask_file = STAGE1_TRAIN_MASK_PATTERN.format(image_id)
masks = skimage.io.imread_collection(mask_file).concatenate()
mkk = []
for i in masks:
mask = i/255
selem = mph.disk(1)
mask = mph.erosion(mask, selem)
mkk.append(mask)
mkk = np.asarray(mkk)
height, width, _ = image.shape
num_masks = masks.shape[0]
labels = np.zeros((height, width), np.uint16)
for index in range(0, num_masks):
labels[mkk[index] > 0] = 1
try:
os.mkdir(STAGE1_TRAIN+'/'+image_id+'/label')
except:
pass
scipy.misc.imsave(STAGE1_TRAIN+'/'+image_id+'/label/ER_Combined.png', labels)
return labels
train_image_ids = image_ids_in(STAGE1_TRAIN)
for im in train_image_ids:
read_image_labels(im)
|
the-stack_0_9905 | # @Time : 12/07/21 1:05 PM
# @Author : Fabrice Harel-Canada
# @File : rick_and_morty_stories.py
import torch
from transformers import pipeline, set_seed
from transformers.pipelines import TextGenerationPipeline
class RickAndMortyStories:
def __init__(self, mask_bad_words=True):
self.pipeline = pipeline("text-generation", model="e-tony/gpt2-rnm")
if self.pipeline.tokenizer.pad_token is None:
self.pipeline.tokenizer.pad_token = self.pipeline.tokenizer.eos_token
self.pipeline.model.config.pad_token_id = self.pipeline.model.config.eos_token_id
self.mask_bad_words = mask_bad_words
self.bad_words = self.load_bad_words()
def load_bad_words(self):
import urllib
bad_words = []
try:
file = urllib.request.urlopen(
"https://raw.githubusercontent.com/RobertJGabriel/Google-profanity-words/master/list.txt"
)
for line in file:
dline = line.decode("utf-8")
bad_words.append(dline.split("\n")[0])
except:
print("Failed to load bad words list.")
return bad_words
def tokens2text(self, tokens):
return self.pipeline.tokenizer.decode(tokens)
def generate(self, inputs, max_length=250):
outputs = self.pipeline(
inputs,
do_sample=True,
max_length=len(inputs) + max_length,
top_k=50,
top_p=0.95,
num_return_sequences=1,
)
output_text = self._mask_bad_words(outputs[0]["generated_text"])
return output_text
def _mask_bad_words(self, text):
explicit = False
res_text = text.lower()
for word in self.bad_words:
if word in res_text:
print(word)
res_text = res_text.replace(word, word[0] + "*" * len(word[1:]))
explicit = True
if explicit:
output_text = ""
for oword, rword in zip(text.split(" "), res_text.split(" ")):
if oword.lower() == rword:
output_text += oword + " "
else:
output_text += rword + " "
text = output_text
return text
if __name__ == "__main__":
rm_story_generator = RickAndMortyStories()
STARTERS = {
0: "Rick: Morty, quick! Get in the car!\nMorty: Oh no, I can't do it Rick! Please not this again.\nRick: You don't have a choice! The crystal demons are going to eat you if you don't get in!",
1: "Elon: Oh, you think you're all that Rick? Fight me in a game of space squash!\nRick: Let's go, you wanna-be genius!\nElon: SpaceX fleet, line up!",
2: "Morty: I love Jessica, I want us to get married on Octopulon 300 and have octopus babies.\nRick: Shut up, Morty! You're not going to Octopulon 300!",
3: "Rick: Hey there, Jerry! What a nice day for taking these anti-gravity shoes for a spin!\nJerry: Wow, Rick! You would let me try out one of your crazy gadgets?\nRick: Of course, Jerry! That's how much I respect you.",
4: "Rick: Come on, flip the pickle, Morty. You're not gonna regret it. The payoff is huge.",
5: "Rick: I turned myself into a pickle, Morty! Boom! Big reveal - I'm a pickle. What do you think about that? I turned myself into a pickle!",
6: "Rick: Come on, flip the pickle, Morty. You're not gonna regret it. The payoff is huge.\nMorty: What? Where are you?\nRick: Morty, just do it! [laughing] Just flip the pickle!",
}
for i, starter_text in STARTERS.items():
print("starter_text:", starter_text)
        # generate() returns the (optionally masked) story directly as a string
        output_text = rm_story_generator.generate(starter_text)
        print(output_text)
|
the-stack_0_9906 | # NASBench 301 stuff here
import sys
from pathlib import Path
sys.path.append('./darts/cnn')
lib_dir = (Path(__file__).parent / 'darts' / 'cnn').resolve()
if str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir))
import genotypes
from model_search import Network, NetworkNB
import utils
import time
import math
import copy
import random
import logging
import os
import gc
import numpy as np
import torch
from torch.autograd import Variable
import torchvision.datasets as dset
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from typing import *
from collections import namedtuple
Genotype_tuple = namedtuple('Genotype_tuple', 'normal normal_concat reduce reduce_concat')
class Genotype:
def __init__(self, normal, normal_concat, reduce, reduce_concat) -> None:
self.normal = normal
self.normal_concat = normal_concat
self.reduce = reduce
self.reduce_concat = reduce_concat
self.genotype_tuple = Genotype_tuple(normal, normal_concat, reduce, reduce_concat)
def tostr(self):
return str(self.genotype_tuple)
def __hash__(self):
return hash(str(self.genotype_tuple))
def __repr__(self):
return str(self.genotype_tuple)
def __getitem__(self, k):
return getattr(self, k)
def get_DARTS_randomNAS(discrete=True, layers=8):
criterion = nn.CrossEntropyLoss()
criterion = criterion.cuda()
model = NetworkNB(C=16, num_classes=10, layers=layers, criterion=criterion, multiplier=4, stem_multiplier=3, discrete=discrete)
print(f"Instantiated DARTS model with discrete={discrete}")
model = model.cuda()
return model
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
class DartsWrapper:
def __init__(self, save_path, seed, batch_size, grad_clip, epochs, resume_iter=None, init_channels=16):
args = {}
args['data'] = r'C:\Users\miros\Documents\Oxford\thesis\liamcli_darts\darts\data'
args['epochs'] = epochs
args['learning_rate'] = 0.025
args['batch_size'] = batch_size
args['learning_rate_min'] = 0.001
args['momentum'] = 0.9
args['weight_decay'] = 3e-4
args['init_channels'] = init_channels
args['layers'] = 8
args['drop_path_prob'] = 0.3
args['grad_clip'] = grad_clip
args['train_portion'] = 0.5
args['seed'] = seed
args['log_interval'] = 50
args['save'] = save_path
args['gpu'] = 0
args['cuda'] = True
args['cutout'] = False
args['cutout_length'] = 16
args['report_freq'] = 50
args = AttrDict(args)
self.args = args
self.seed = seed
np.random.seed(args.seed)
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.set_device(args.gpu)
cudnn.benchmark = False
cudnn.enabled=True
cudnn.deterministic=True
torch.cuda.manual_seed_all(args.seed)
train_transform, valid_transform = utils._data_transforms_cifar10(args)
train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
num_train = len(train_data)
indices = list(range(num_train))
split = int(np.floor(args.train_portion * num_train))
self.train_queue = torch.utils.data.DataLoader(
train_data, batch_size=args.batch_size,
sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
pin_memory=True, num_workers=0, worker_init_fn=np.random.seed(args.seed))
self.valid_queue = torch.utils.data.DataLoader(
train_data, batch_size=args.batch_size,
sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]),
pin_memory=True, num_workers=0, worker_init_fn=np.random.seed(args.seed))
self.train_iter = iter(self.train_queue)
self.valid_iter = iter(self.valid_queue)
self.steps = 0
self.epochs = 0
self.total_loss = 0
self.start_time = time.time()
criterion = nn.CrossEntropyLoss()
criterion = criterion.cuda()
self.criterion = criterion
model = Network(args.init_channels, 10, args.layers, self.criterion)
model = model.cuda()
self.model = model
try:
self.load()
logging.info('loaded previously saved weights')
except Exception as e:
print(e)
logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
optimizer = torch.optim.SGD(
self.model.parameters(),
args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay)
self.optimizer = optimizer
self.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, float(args.epochs), eta_min=args.learning_rate_min)
if resume_iter is not None:
self.steps = resume_iter
self.epochs = int(resume_iter / len(self.train_queue))
logging.info("Resuming from epoch %d" % self.epochs)
self.objs = utils.AvgrageMeter()
self.top1 = utils.AvgrageMeter()
self.top5 = utils.AvgrageMeter()
for i in range(self.epochs):
self.scheduler.step()
size = 0
for p in model.parameters():
size += p.nelement()
logging.info('param size: {}'.format(size))
total_params = sum(x.data.nelement() for x in model.parameters())
logging.info('Args: {}'.format(args))
logging.info('Model total parameters: {}'.format(total_params))
def train_batch(self, arch):
args = self.args
if self.steps % len(self.train_queue) == 0:
self.scheduler.step()
self.objs = utils.AvgrageMeter()
self.top1 = utils.AvgrageMeter()
self.top5 = utils.AvgrageMeter()
lr = self.scheduler.get_lr()[0]
weights = self.get_weights_from_arch(arch)
self.set_model_weights(weights)
step = self.steps % len(self.train_queue)
input, target = next(self.train_iter)
self.model.train()
n = input.size(0)
input = Variable(input, requires_grad=False).cuda()
target = Variable(target, requires_grad=False).cuda()
# get a random minibatch from the search queue with replacement
self.optimizer.zero_grad()
logits = self.model(input, discrete=True)
if type(logits) is tuple:
_, logits = logits
loss = self.criterion(logits, target)
loss.backward()
        nn.utils.clip_grad_norm_(self.model.parameters(), args.grad_clip)
self.optimizer.step()
prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
self.objs.update(loss.item(), n)
self.top1.update(prec1.item(), n)
self.top5.update(prec5.item(), n)
if step % args.report_freq == 0:
logging.info('train %03d %e %f %f', step, self.objs.avg, self.top1.avg, self.top5.avg)
self.steps += 1
if self.steps % len(self.train_queue) == 0:
self.epochs += 1
self.train_iter = iter(self.train_queue)
valid_err = self.evaluate(arch)
logging.info('epoch %d | train_acc %f | valid_acc %f' % (self.epochs, self.top1.avg, 1-valid_err))
self.save()
def evaluate(self, arch, split=None):
# Return error since we want to minimize obj val
logging.info(arch)
objs = utils.AvgrageMeter()
top1 = utils.AvgrageMeter()
top5 = utils.AvgrageMeter()
weights = self.get_weights_from_arch(arch)
self.set_model_weights(weights)
self.model.eval()
if split is None:
n_batches = 10
else:
n_batches = len(self.valid_queue)
for step in range(n_batches):
try:
input, target = next(self.valid_iter)
except Exception as e:
logging.info('looping back over valid set')
self.valid_iter = iter(self.valid_queue)
input, target = next(self.valid_iter)
with torch.no_grad():
input = Variable(input).cuda()
target = Variable(target).cuda()
logits = self.model(input, discrete=True)
if type(logits) is tuple:
_, logits = logits
loss = self.criterion(logits, target)
prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
n = input.size(0)
objs.update(loss.item(), n)
top1.update(prec1.item(), n)
top5.update(prec5.item(), n)
if step % self.args.report_freq == 0:
logging.info('valid %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
return 1-top1.avg
def save(self):
utils.save(self.model, os.path.join(self.args.save, 'weights.pt'))
def load(self):
utils.load(self.model, os.path.join(self.args.save, 'weights.pt'))
def get_weights_from_arch(self, arch):
k = sum(1 for i in range(self.model._steps) for n in range(2+i))
num_ops = len(genotypes.PRIMITIVES)
n_nodes = self.model._steps
alphas_normal = Variable(torch.zeros(k, num_ops).cuda(), requires_grad=False)
alphas_reduce = Variable(torch.zeros(k, num_ops).cuda(), requires_grad=False)
offset = 0
for i in range(n_nodes):
normal1 = arch[0][2*i]
normal2 = arch[0][2*i+1]
reduce1 = arch[1][2*i]
reduce2 = arch[1][2*i+1]
alphas_normal[offset+normal1[0], normal1[1]] = 1
alphas_normal[offset+normal2[0], normal2[1]] = 1
alphas_reduce[offset+reduce1[0], reduce1[1]] = 1
alphas_reduce[offset+reduce2[0], reduce2[1]] = 1
offset += (i+2)
arch_parameters = [
alphas_normal,
alphas_reduce,
]
return arch_parameters
def set_model_weights(self, weights):
self.model.alphas_normal = weights[0]
self.model.alphas_reduce = weights[1]
self.model.arch_normal_parameters = weights[0]
self.model.arch_reduce_parameters = weights[1]
self.model._arch_parameters = [self.model.alphas_normal, self.model.alphas_reduce]
self.model.dynamic_cell = Genotype(normal=self.model.alphas_normal, reduce = self.model.alphas_reduce, normal_concat=[2,3,4,5], reduce_concat=[2,3,4,5])
def sample_arch(self):
k = sum(1 for i in range(self.model._steps) for n in range(2+i))
num_ops = len(genotypes.PRIMITIVES)
n_nodes = self.model._steps
normal = []
reduction = []
for i in range(n_nodes):
ops = np.random.choice(range(num_ops), 4)
nodes_in_normal = np.random.choice(range(i+2), 2, replace=False)
nodes_in_reduce = np.random.choice(range(i+2), 2, replace=False)
normal.extend([(nodes_in_normal[0], ops[0]), (nodes_in_normal[1], ops[1])])
reduction.extend([(nodes_in_reduce[0], ops[2]), (nodes_in_reduce[1], ops[3])])
return (normal, reduction)
def perturb_arch(self, arch):
new_arch = copy.deepcopy(arch)
num_ops = len(genotypes.PRIMITIVES)
cell_ind = np.random.choice(2)
step_ind = np.random.choice(self.model._steps)
nodes_in = np.random.choice(step_ind+2, 2, replace=False)
ops = np.random.choice(range(num_ops), 2)
new_arch[cell_ind][2*step_ind] = (nodes_in[0], ops[0])
new_arch[cell_ind][2*step_ind+1] = (nodes_in[1], ops[1])
return new_arch
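# Example usage (an illustrative sketch: the save path and hyperparameters are
# assumptions, and running it needs a CUDA device plus the CIFAR-10 data path
# hard-coded in __init__):
if __name__ == '__main__':
    wrapper = DartsWrapper(save_path='./darts_run', seed=0, batch_size=64,
                           grad_clip=5, epochs=50)
    arch = wrapper.sample_arch()
    wrapper.train_batch(arch)
    print('validation error:', wrapper.evaluate(arch))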
|
the-stack_0_9907 | # -*- coding: utf-8 -*-
"""DNACenterAPI non_fabric_wireless API fixtures and tests.
Copyright (c) 2019-2020 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import pytest
from tests.environment import DNA_CENTER_VERSION
pytestmark = pytest.mark.skipif(DNA_CENTER_VERSION != '1.2.10', reason='version does not match')
def is_valid_delete_and_provision_ssid(json_schema_validate, obj):
json_schema_validate('jsd_cca098344a489dfa_v1_2_10').validate(obj)
return True
def delete_and_provision_ssid(api):
endpoint_result = api.non_fabric_wireless.delete_and_provision_ssid(
managed_aplocations='string',
ssid_name='string'
)
return endpoint_result
@pytest.mark.non_fabric_wireless
def test_delete_and_provision_ssid(api, validator):
assert is_valid_delete_and_provision_ssid(
validator,
delete_and_provision_ssid(api)
)
def delete_and_provision_ssid_default(api):
endpoint_result = api.non_fabric_wireless.delete_and_provision_ssid(
managed_aplocations='string',
ssid_name='string'
)
return endpoint_result
@pytest.mark.non_fabric_wireless
def test_delete_and_provision_ssid_default(api, validator):
try:
assert is_valid_delete_and_provision_ssid(
validator,
delete_and_provision_ssid_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_create_enterprise_ssid(json_schema_validate, obj):
return True if obj else False
def create_enterprise_ssid(api):
endpoint_result = api.non_fabric_wireless.create_enterprise_ssid(
active_validation=True,
enableBroadcastSSID=True,
enableFastLane=True,
enableMACFiltering=True,
fastTransition='Adaptive',
name='********************************',
passphrase='********',
payload=None,
radioPolicy='Dual band operation (2.4GHz and 5GHz)',
securityLevel='WPA2_ENTERPRISE',
trafficType='voicedata'
)
return endpoint_result
@pytest.mark.non_fabric_wireless
def test_create_enterprise_ssid(api, validator):
assert is_valid_create_enterprise_ssid(
validator,
create_enterprise_ssid(api)
)
def create_enterprise_ssid_default(api):
endpoint_result = api.non_fabric_wireless.create_enterprise_ssid(
active_validation=True,
enableBroadcastSSID=None,
enableFastLane=None,
enableMACFiltering=None,
fastTransition=None,
name=None,
passphrase=None,
payload=None,
radioPolicy=None,
securityLevel=None,
trafficType=None
)
return endpoint_result
@pytest.mark.non_fabric_wireless
def test_create_enterprise_ssid_default(api, validator):
try:
assert is_valid_create_enterprise_ssid(
validator,
create_enterprise_ssid_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_create_and_provision_ssid(json_schema_validate, obj):
json_schema_validate('jsd_db9f997f4e59aec1_v1_2_10').validate(obj)
return True
def create_and_provision_ssid(api):
endpoint_result = api.non_fabric_wireless.create_and_provision_ssid(
active_validation=True,
enableFabric=True,
flexConnect={'enableFlexConnect': True, 'localToVlan': 0},
managedAPLocations=['string'],
payload=None,
ssidDetails={'name': 'string', 'securityLevel': 'WPA2_ENTERPRISE', 'enableFastLane': True, 'passphrase': 'string', 'trafficType': 'data', 'enableBroadcastSSID': True, 'radioPolicy': 'Dual band operation (2.4GHz and 5GHz)', 'enableMACFiltering': True, 'fastTransition': 'Adaptive', 'webAuthURL': 'string'},
ssidType='Guest',
vlanAndDynamicInterfaceDetails={'managedAPLocation': {'interfaceIPAddress': 'string', 'interfaceNetmaskInCIDR': 0, 'interfaceGateway': 'string', 'lagOrPortNumber': 0}, 'vlanId': 0, 'vlanName': 'string'}
)
return endpoint_result
@pytest.mark.non_fabric_wireless
def test_create_and_provision_ssid(api, validator):
assert is_valid_create_and_provision_ssid(
validator,
create_and_provision_ssid(api)
)
def create_and_provision_ssid_default(api):
endpoint_result = api.non_fabric_wireless.create_and_provision_ssid(
active_validation=True,
enableFabric=None,
flexConnect=None,
managedAPLocations=None,
payload=None,
ssidDetails=None,
ssidType=None,
vlanAndDynamicInterfaceDetails=None
)
return endpoint_result
@pytest.mark.non_fabric_wireless
def test_create_and_provision_ssid_default(api, validator):
try:
assert is_valid_create_and_provision_ssid(
validator,
create_and_provision_ssid_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_delete_enterprise_ssid(json_schema_validate, obj):
json_schema_validate('jsd_c7a6592b4b98a369_v1_2_10').validate(obj)
return True
def delete_enterprise_ssid(api):
endpoint_result = api.non_fabric_wireless.delete_enterprise_ssid(
ssid_name='string'
)
return endpoint_result
@pytest.mark.non_fabric_wireless
def test_delete_enterprise_ssid(api, validator):
assert is_valid_delete_enterprise_ssid(
validator,
delete_enterprise_ssid(api)
)
def delete_enterprise_ssid_default(api):
endpoint_result = api.non_fabric_wireless.delete_enterprise_ssid(
ssid_name='string'
)
return endpoint_result
@pytest.mark.non_fabric_wireless
def test_delete_enterprise_ssid_default(api, validator):
try:
assert is_valid_delete_enterprise_ssid(
validator,
delete_enterprise_ssid_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_enterprise_ssid(json_schema_validate, obj):
json_schema_validate('jsd_cca519ba45ebb423_v1_2_10').validate(obj)
return True
def get_enterprise_ssid(api):
endpoint_result = api.non_fabric_wireless.get_enterprise_ssid(
ssid_name='string'
)
return endpoint_result
@pytest.mark.non_fabric_wireless
def test_get_enterprise_ssid(api, validator):
assert is_valid_get_enterprise_ssid(
validator,
get_enterprise_ssid(api)
)
def get_enterprise_ssid_default(api):
endpoint_result = api.non_fabric_wireless.get_enterprise_ssid(
ssid_name=None
)
return endpoint_result
@pytest.mark.non_fabric_wireless
def test_get_enterprise_ssid_default(api, validator):
try:
assert is_valid_get_enterprise_ssid(
validator,
get_enterprise_ssid_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
|
the-stack_0_9908 | # coding=utf-8
from __future__ import absolute_import, print_function
import os
import numpy as np
from suanpan.app import app
from suanpan.storage import storage
from suanpan.utils import image
from arguments import Images
@app.input(Images(key="inputImage"))
@app.output(Images(key="outputImage"))
def SPRemoveWatermark(context):
args = context.args
images = args.inputImage
alpha = 2.0
beta = -160
for i, img in enumerate(images):
new = alpha * img + beta
new = np.clip(new, 0, 255).astype(np.uint8)
image.save(
os.path.join(
args.outputImage,
storage.delimiter.join(images.images[i].split(storage.delimiter)[8:]),
),
new,
)
return args.outputImage
if __name__ == "__main__":
SPRemoveWatermark()
|
the-stack_0_9909 | #!/usr/bin/env python3
"""
Prompt:
Loop through all numbers from 1 to 100. If the number is divisible by 3, print
out "Fizz" instead. If the number is divisible by 5, print out "Buzz" instead.
"""
from typing import Iterable
from typing import Union
def fizz_buzz(n: int) -> Iterable[Union[int, str]]:
for i in range(n):
if i%3 == 0: yield 'Fizz'
elif i%5 == 0: yield 'Buzz'
else: yield i
if __name__ == '__main__':
print(list(fizz_buzz(17)))
|
the-stack_0_9910 | from gi.repository import Gtk, Gdk
css = """
#top GtkComboBox {
background-color: #000000;
}
GtkWindow {
color: black;
background: black;
background-color: black;
}
GtkComboBox {
color: black;
background: black;
background-color: black;
}
"""
class ComboBoxWindow(Gtk.Window):
def __init__(self):
style_provider = Gtk.CssProvider()
style_provider.load_from_data(css)
Gtk.StyleContext.add_provider_for_screen(
Gdk.Screen.get_default(),
style_provider,
Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION
)
Gtk.Window.__init__(self, title="ComboBox Example")
self.set_border_width(10)
name_store = Gtk.ListStore(int, str)
name_store.append([1, "Billy Bob"])
name_store.append([11, "Billy Bob Junior"])
name_store.append([12, "Sue Bob"])
name_store.append([2, "Joey Jojo"])
name_store.append([3, "Rob McRoberts"])
name_store.append([31, "Xavier McRoberts"])
vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)
name_combo = Gtk.ComboBox.new_with_model_and_entry(name_store)
name_combo.connect("changed", self.on_name_combo_changed)
name_combo.set_entry_text_column(1)
vbox.pack_start(name_combo, False, False, 0)
country_store = Gtk.ListStore(str)
countries = ["Austria", "Brazil", "Belgium", "France", "Germany",
"Switzerland", "United Kingdom", "United States of America",
"Uruguay"]
for country in countries:
country_store.append([country])
country_combo = Gtk.ComboBox.new_with_model(country_store)
country_combo.connect("changed", self.on_country_combo_changed)
renderer_text = Gtk.CellRendererText()
country_combo.pack_start(renderer_text, True)
country_combo.add_attribute(renderer_text, "text", 0)
vbox.pack_start(country_combo, False, False, True)
currencies = ["Euro", "US Dollars", "British Pound", "Japanese Yen",
"Russian Ruble", "Mexican peso", "Swiss franc"]
currency_combo = Gtk.ComboBoxText()
currency_combo.set_entry_text_column(0)
currency_combo.connect("changed", self.on_currency_combo_changed)
for currency in currencies:
currency_combo.append_text(currency)
vbox.pack_start(currency_combo, False, False, 0)
self.add(vbox)
def on_name_combo_changed(self, combo):
tree_iter = combo.get_active_iter()
if tree_iter != None:
model = combo.get_model()
row_id, name = model[tree_iter][:2]
print("Selected: ID=%d, name=%s" % (row_id, name))
else:
entry = combo.get_child()
print("Entered: %s" % entry.get_text())
def on_country_combo_changed(self, combo):
tree_iter = combo.get_active_iter()
if tree_iter != None:
model = combo.get_model()
country = model[tree_iter][0]
print("Selected: country=%s" % country)
def on_currency_combo_changed(self, combo):
text = combo.get_active_text()
if text != None:
print("Selected: currency=%s" % text)
win = ComboBoxWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main() |
the-stack_0_9913 | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import os
class Templight(CMakePackage):
"""Templight is a Clang-based tool to profile the time and memory
consumption of template instantiations and to perform interactive
debugging sessions to gain introspection into the template
instantiation process."""
homepage = "https://github.com/mikael-s-persson/templight"
git = "https://github.com/mikael-s-persson/templight.git"
llvm_svn = "http://llvm.org/svn/llvm-project/{0}/trunk"
family = 'compiler' # Used by lmod
# Templight is a patch to clang, so we have three versions to care about:
# - The one that will be used in Spack specifications
# - The git branch that we need to fetch from in the templight repo
# - The svn tag that we need to fetch from in the LLVM repos
version('develop', branch='master')
resource(name='llvm-trunk',
svn=llvm_svn.format('llvm'),
destination='.',
placement='llvm',
when='@develop')
resource(name='clang-trunk',
svn=llvm_svn.format('cfe'),
destination='llvm/tools',
placement='clang',
when='@develop')
# Templight has no stable release yet, and is supposed to be built against
# the LLVM trunk. As this is a brittle combination, I decided to
# artificially create stable releases based on what works today. Please
# feel free to remove these versions once templight has stabilized.
version('2019.01.09', commit='0899a4345607f1bb244cae477214f274ad2c52cc')
resource(name='llvm-r350726',
svn=llvm_svn.format('llvm'),
revision=350726,
destination='.',
placement='llvm',
when='@2019.01.09')
resource(name='clang-r350726',
svn=llvm_svn.format('cfe'),
revision=350726,
destination='llvm/tools',
placement='clang',
when='@2019.01.09')
version('2018.07.20', commit='91589f95427620dd0a2346bd69ba922f374aa42a')
resource(name='llvm-r337566',
svn=llvm_svn.format('llvm'),
revision=337566,
destination='.',
placement='llvm',
when='@2018.07.20')
resource(name='clang-r337566',
svn=llvm_svn.format('cfe'),
revision=337566,
destination='llvm/tools',
placement='clang',
when='@2018.07.20')
patch('develop-20180720.patch', when='@2018.07.20')
# Clang debug builds can be _huge_ (20+ GB), make sure you know what you
# are doing before switching to them
variant('build_type', default='Release',
description='CMake build type',
values=('Debug', 'Release', 'RelWithDebInfo', 'MinSizeRel'))
# NOTE: LLVM has many configurable tweaks and optional tools/extensions.
# I did not think that propagating all of these to a debugging and
# performance analysis tool was worth the maintenance burden. But
# if you disagree, the llvm package can be used for inspiration.
depends_on('[email protected]:', type='build')
depends_on('python')
depends_on('py-lit', type=('build', 'run'))
def patch(self):
# We start with the templight source tree and an "llvm" subdir.
# But we actually need an llvm source tree with a "templight" subdir.
# Let's flip the directory organization around
templight_files = os.listdir('.')
templight_files.remove('llvm')
templight_dir = 'llvm/tools/clang/tools/templight'
os.mkdir(templight_dir)
for name in templight_files:
os.rename(name, os.path.join(templight_dir, name))
for name in os.listdir('llvm'):
os.rename(os.path.join('llvm', name), name)
os.rmdir('llvm')
# Tell the clang build system that it needs to build templight
with open("tools/clang/tools/CMakeLists.txt", "a") as cmake_lists:
cmake_lists.write("add_clang_subdirectory(templight)")
def setup_environment(self, spack_env, run_env):
spack_env.append_flags('CXXFLAGS', self.compiler.cxx11_flag)
run_env.set('CC', join_path(self.spec.prefix.bin, 'templight'))
run_env.set('CXX', join_path(self.spec.prefix.bin, 'templight++'))
def cmake_args(self):
spec = self.spec
# Templight is a debugging tool, not a production compiler, so we only
# need a very bare-bones build of clang
#
# Minimal build config ideas were taken from the llvm package, with
# the templight-specific assumption that we will always be building
# for LLVM / Clang 5.0+ and can safely ignore older tricks.
#
cmake_args = [
'-DLLVM_REQUIRES_RTTI:BOOL=ON',
'-DCLANG_DEFAULT_OPENMP_RUNTIME:STRING=libomp',
'-DPYTHON_EXECUTABLE:PATH={0}'.format(spec['python'].command.path),
'-DLLVM_EXTERNAL_POLLY_BUILD:Bool=OFF',
'-DLLVM_TOOL_POLLY_BUILD:Bool=OFF',
'-DLLVM_POLLY_BUILD:Bool=OFF',
'-DLLVM_POLLY_LINK_INTO_TOOLS:Bool=OFF',
'-DLLVM_EXTERNAL_LLDB_BUILD:Bool=OFF',
'-DLLVM_TOOL_LLDB_BUILD:Bool=OFF',
'-DLLVM_TOOL_LLD_BUILD:Bool=OFF',
'-DLLVM_EXTERNAL_LIBUNWIND_BUILD:Bool=OFF',
'-DLLVM_EXTERNAL_LIBCXX_BUILD:Bool=OFF',
'-DLLVM_EXTERNAL_LIBCXXABI_BUILD:Bool=OFF',
'-DLLVM_EXTERNAL_COMPILER_RT_BUILD:Bool=OFF',
]
targets = ['NVPTX', 'AMDGPU']
if 'x86' in spec.architecture.target.lower():
targets.append('X86')
elif 'arm' in spec.architecture.target.lower():
targets.append('ARM')
elif 'aarch64' in spec.architecture.target.lower():
targets.append('AArch64')
elif 'sparc' in spec.architecture.target.lower():
targets.append('Sparc')
elif ('ppc' in spec.architecture.target.lower() or
'power' in spec.architecture.target.lower()):
targets.append('PowerPC')
cmake_args.append(
'-DLLVM_TARGETS_TO_BUILD:Bool=' + ';'.join(targets))
if spec.satisfies('platform=linux'):
cmake_args.append('-DCMAKE_BUILD_WITH_INSTALL_RPATH=1')
return cmake_args
@run_after('install')
def post_install(self):
with working_dir(self.build_directory):
install_tree('bin', self.prefix.libexec.llvm)
|
the-stack_0_9916 | from __future__ import print_function, division
import sys, os
sys.path.append(os.path.abspath("."))
from problems.problem import *
from helper.pom3 import pom3
__author__ = 'panzer'
class POM3BSansComp(Problem):
"""
POM 3B without Completion
"""
def __init__(self):
Problem.__init__(self)
self.name = POM3BSansComp.__name__
names = ["Culture", "Criticality", "Criticality Modifier", "Initial Known", "Inter-Dependency", "Dynamism",
"Size", "Plan", "Team Size"]
lows = [0.10, 0.82, 80, 0.40, 0, 1, 0, 0, 1]
ups = [0.90, 1.26, 95, 0.70, 100, 50, 2, 5, 20]
self.decisions = [Decision(names[i], lows[i], ups[i]) for i in range(len(names))]
self.objectives = [Objective("Cost", True, 0), Objective("Score", False, 0, 1),
Objective("Idle", True, 0, 1)]
def evaluate(self, decisions):
p = pom3()
output = p.simulate(decisions)
return [output[0], output[1], output[3]]
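# Example usage (illustrative only; the nine decision values are mid-range
# picks within the bounds declared in __init__):
if __name__ == '__main__':
    problem = POM3BSansComp()
    decisions = [0.5, 1.0, 87, 0.55, 50, 25, 1, 2, 10]
    print(problem.evaluate(decisions))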
|
the-stack_0_9918 | def dobro(num, formatado=False):
if formatado:
return f'R${num * 2:.2f}'
else:
return num * 2
def metade(num, formatado=False):
if formatado:
return f'R${num / 2:.2f}'
else:
return num / 2
def adicionar(num, index, formatado=False):
if formatado:
return f'R${num + (num / 100 * index):.2f}'
else:
return num + (num / 100 * index)
def descontar(num, index, formatado=False):
if formatado:
return f'R${num - (num / 100 * index):.2f}'
else:
return num - (num / 100 * index)
def cifrar(num):
return f'R${num:.2f}'
def resumo(valor, aumento, desconto):
print('-' * 40)
print('{:^40}'.format('RESUMO DO VALOR'))
print('¯' * 40)
print('Preço analisado:', end='')
print(f'{(cifrar(valor)):>24}')
print('Dobro do Preço:', end='')
print(f'{dobro(valor, True):>25}')
print(f'Metado do Preço', end='')
print(f'{metade(valor, True):>25}')
print(f'{aumento}% de aumento:', end='')
print(f'{adicionar(valor, aumento, True):>25}')
print(f'{desconto}% de desconto:', end='')
print(f'{descontar(valor, desconto, True):>24}')
print('¯' * 40)
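# Example usage (illustrative values: price 100, 10% increase, 15% discount):
if __name__ == '__main__':
    resumo(100, 10, 15)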
|
the-stack_0_9920 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
exporters
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import sys
import os
import itertools
import six
import inspect
import abc
import logging
import numpy as np
import paddle.fluid as F
import paddle.fluid.layers as L
from propeller.paddle.train import Saver
from propeller.types import InferenceSpec
from propeller.train.model import Model
from propeller.paddle.train.trainer import _build_net
from propeller.paddle.train.trainer import _build_model_fn
from propeller.types import RunMode
from propeller.types import ProgramPair
log = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class Exporter(object):
"""base exporter"""
@abc.abstractmethod
def export(self, exe, program, eval_result, state):
"""export"""
raise NotImplementedError()
class BestExporter(Exporter):
"""export saved model accordingto `cmp_fn`"""
def __init__(self, export_dir, cmp_fn):
"""doc"""
self._export_dir = export_dir
self._best = None
self.cmp_fn = cmp_fn
def export(self, exe, program, eval_model_spec, eval_result, state):
"""doc"""
log.debug('New evaluate result: %s \nold: %s' %
(repr(eval_result), repr(self._best)))
if self._best is None or self.cmp_fn(old=self._best, new=eval_result):
log.debug('[Best Exporter]: export to %s' % self._export_dir)
eval_program = program.train_program
# FIXME: all eval datasets has same name/types/shapes now!!! so every eval program are the smae
saver = Saver(
self._export_dir,
exe,
program=eval_program,
max_ckpt_to_keep=1)
saver.save(state)
self._best = eval_result
else:
log.debug('[Best Exporter]: skip step %s' % state.gstep)
class BestInferenceModelExporter(Exporter):
"""export inference model accordingto `cmp_fn`"""
def __init__(self,
export_dir,
cmp_fn,
model_class_or_model_fn=None,
hparams=None,
dataset=None):
"""doc"""
self._export_dir = export_dir
self._best = None
self.cmp_fn = cmp_fn
self.model_class_or_model_fn = model_class_or_model_fn
self.hparams = hparams
self.dataset = dataset
def export(self, exe, program, eval_model_spec, eval_result, state):
"""doc"""
if self.model_class_or_model_fn is not None and self.hparams is not None \
and self.dataset is not None:
log.info('Building program by user defined model function')
if issubclass(self.model_class_or_model_fn, Model):
_model_fn = _build_model_fn(self.model_class_or_model_fn)
elif inspect.isfunction(self.model_class_or_model_fn):
_model_fn = self.model_class_or_model_fn
else:
raise ValueError('unknown model %s' %
self.model_class_or_model_fn)
# build net
infer_program = F.Program()
startup_prog = F.Program()
with F.program_guard(infer_program, startup_prog):
#share var with Train net
with F.unique_name.guard():
log.info('Building Infer Graph')
infer_fea = self.dataset.features()
# run_config is None
self.model_spec = _build_net(_model_fn, infer_fea,
RunMode.PREDICT, self.hparams,
None)
log.info('Done')
infer_program = infer_program.clone(for_test=True)
self.program = ProgramPair(
train_program=infer_program, startup_program=startup_prog)
else:
self.program = program
self.model_spec = eval_model_spec
log.debug('New evaluate result: %s \nold: %s' %
(repr(eval_result), repr(self._best)))
if self._best is None or self.cmp_fn(old=self._best, new=eval_result):
log.debug('[Best Exporter]: export to %s' % self._export_dir)
if self.model_spec.inference_spec is None:
raise ValueError('model_fn didnt return InferenceSpec')
inf_spec_dict = self.model_spec.inference_spec
if not isinstance(inf_spec_dict, dict):
inf_spec_dict = {'inference': inf_spec_dict}
for inf_spec_name, inf_spec in six.iteritems(inf_spec_dict):
if not isinstance(inf_spec, InferenceSpec):
raise ValueError('unknow inference spec type: %s' %
inf_spec)
save_dir = os.path.join(self._export_dir, inf_spec_name)
log.debug('[Best Exporter]: save inference model: "%s" to %s' %
(inf_spec_name, save_dir))
feed_var = [i.name for i in inf_spec.inputs]
fetch_var = inf_spec.outputs
infer_program = self.program.train_program
startup_prog = F.Program()
F.io.save_inference_model(
save_dir,
feed_var,
fetch_var,
exe,
main_program=infer_program)
self._best = eval_result
else:
log.debug('[Best Exporter]: skip step %s' % state.gstep)
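# Illustrative sketch (not part of the original module): both exporters take a
# `cmp_fn(old=..., new=...)` deciding whether the new evaluation result beats
# the best one seen so far.  The 'acc' key below is an assumed metric name;
# the real keys depend on the user's model_fn metrics.
def _example_best_exporter():
    return BestExporter(
        export_dir='./best_model',
        cmp_fn=lambda old, new: new['acc'] > old['acc'])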
|
the-stack_0_9925 | # Django settings for demo project.
import os
settings_path, settings_module = os.path.split(__file__)
import sys
sys.path.append('../../')
DEBUG = True
#TEMPLATE_DEBUG = DEBUG
USE_TZ=True
#TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
SECRET_KEY = '8(o*lht586wqr9hp5env&n!h!gu@t5g4*$$uupbyd*f+61!xjh'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'mydatabase',
}
}
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
# 'django.contrib.admin',
)
MIDDLEWARE_CLASSES = (
)
ROOT_URLCONF = 'urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(settings_path, 'templates')],
}
]
#EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
#EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_BACKEND = 'postmark.django_backend.EmailBackend'
#Supply your own API KEY
POSTMARK_API_KEY = ''
assert len(POSTMARK_API_KEY) != 0
#Use the sender set up in your postmark account
POSTMARK_SENDER = ''
assert len(POSTMARK_SENDER) != 0
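# Example (illustrative): with the API key and sender filled in above, mail sent
# through Django's standard API goes out via Postmark, e.g.:
#
#   from django.core.mail import send_mail
#   send_mail('Subject', 'Body text', POSTMARK_SENDER, ['[email protected]'])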
|
the-stack_0_9931 | import fire
import os
import tensorflow as tf
import matplotlib.pyplot as plt
import os
import time
import datetime
from dcgan.discriminator import make_discriminator_model, discriminator_loss
from dcgan.generator import make_generator_model, generator_loss
from dcgan.dataset import make_dataset
from dcgan.utils import *
from dcgan.metrics import *
from dcgan import CHECKPOINT_DIR, MODEL_DIR
try:
from IPython import display
except:
pass
@tf.function
def train_step(
images,
epoch,
summary_writer,
generator,
discriminator,
generator_optimizer,
discriminator_optimizer,
):
noise = tf.random.normal([256, 100])
with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
generated_images = generator(noise, training=True)
real_output = discriminator(images, training=True)
fake_output = discriminator(generated_images, training=True)
gen_loss = generator_loss(fake_output)
# gen_loss = tf.vectorized_map(generator_loss, fake_output)
# disc_loss = tf.vectorized_map(discriminator_loss, fake_output)
disc_loss = discriminator_loss(real_output, fake_output)
gen_loss_metric.update_state(gen_loss)
disc_loss_metric.update_state(disc_loss)
fake_out.update_state(fake_output[0])
real_out.update_state(real_output[0])
gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
gradients_of_discriminator = disc_tape.gradient(
disc_loss, discriminator.trainable_variables
)
generator_optimizer.apply_gradients(
zip(gradients_of_generator, generator.trainable_variables)
)
discriminator_optimizer.apply_gradients(
zip(gradients_of_discriminator, discriminator.trainable_variables)
)
record_metrics(epoch, summary_writer)
def train(epochs, logname, channels=1, batch_size=256, data_folder=None):
tf.profiler.experimental.server.start(6009)
generator = make_generator_model(32, channels)
discriminator = make_discriminator_model(32, channels)
generator_optimizer = tf.keras.optimizers.Adam(1e-04, beta_1=0.5)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-04, beta_1=0.5)
checkpoint = tf.train.Checkpoint(
step=tf.Variable(1),
generator_optimizer=generator_optimizer,
discriminator_optimizer=discriminator_optimizer,
generator=generator,
discriminator=discriminator,
)
manager = tf.train.CheckpointManager(checkpoint, CHECKPOINT_DIR, max_to_keep=3)
summary_writer = make_summary_writer(logname)
dataset = make_dataset(32, data_folder, channels)
show_dataset(dataset, 16, summary_writer)
checkpoint.restore(manager.latest_checkpoint)
if manager.latest_checkpoint:
print("Restored from {}".format(manager.latest_checkpoint))
current_step = int(checkpoint.step.numpy())
print(
f"Continuing from epoch {current_step} + {epochs} -> {epochs + current_step}"
)
epochs = range(current_step, epochs + current_step)
else:
epochs = range(epochs)
print("Initializing from scratch.")
for epoch in epochs:
seed = tf.random.normal([16, 100])
start = time.time()
fake_out.reset_states()
real_out.reset_states()
gen_loss_metric.reset_states()
disc_loss_metric.reset_states()
for step, img_batch in enumerate(dataset.take(256)):
train_step(img_batch, epoch, summary_writer, generator, discriminator, generator_optimizer, discriminator_optimizer)
display.clear_output(wait=True)
generate_and_save_images(generator, epoch + 1, seed, summary_writer)
checkpoint.step.assign_add(1)
if int(checkpoint.step) % 15 == 0:
save_path = manager.save()
print(
"Saved checkpoint for step {}: {}".format(
int(checkpoint.step), save_path
)
)
# Produce images for the GIF as we go
print("Time for epoch {} is {} sec".format(epoch + 1, time.time() - start))
# Generate after the final epoch
display.clear_output(wait=True)
generate_and_save_images(generator, epochs, seed, summary_writer)
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
generator.save(os.path.join(MODEL_DIR, f"gen_trained_{current_time}"))
discriminator.save(os.path.join(MODEL_DIR, f"disc_trained_{current_time}"))
def fire_():
fire.Fire(train)
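# Example CLI usage via the fire_ entry point (an illustrative sketch; the
# script name and flag values are assumptions):
#
#   python train.py --epochs 50 --logname baseline --channels 1 --data_folder ./data
#
if __name__ == "__main__":
    fire_()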
|
the-stack_0_9932 | # -*- coding: utf-8 -*-
import logging
import re
from urllib.parse import quote_plus
from requests.exceptions import RequestException
from flexget import plugin
from flexget.components.sites.utils import normalize_scene, torrent_availability
from flexget.config_schema import one_or_more
from flexget.entry import Entry
from flexget.event import event
from flexget.plugin import PluginError
from flexget.utils.requests import Session as RequestSession
from flexget.utils.soup import get_soup
from flexget.utils.tools import parse_filesize
log = logging.getLogger('fuzer')
requests = RequestSession()
CATEGORIES = {
# Movies
'HD Movies': 9,
'XviD': 7,
'BRRip': 59,
'Israeli HD Movies': 61,
'Israeli Movies': 60,
'DVDR': 58,
'Dubbed Movies': 83,
# TV
'HD Shows': 10,
'Shows': 8,
'Israeli HD Shows': 63,
'Israeli Shows': 62,
'Dubbed Shows': 84,
# Anime
'Anime': 65,
# FuzePacks
'Movie Packs': 73,
'Shows Packs': 76,
}
class UrlRewriteFuzer:
schema = {
'type': 'object',
'properties': {
'cookie_password': {'type': 'string'},
'user_id': {'type': 'integer'},
'rss_key': {'type': 'string'},
'category': one_or_more(
{'oneOf': [{'type': 'string', 'enum': list(CATEGORIES)}, {'type': 'integer'}]}
),
},
'required': ['user_id', 'cookie_password', 'rss_key'],
'additionalProperties': False,
}
def get_fuzer_soup(self, search_term, categories_list):
params = {'matchquery': 'any', 'ref_': 'advanced'}
query = '{}&{}'.format(search_term, '&'.join(categories_list))
try:
page = requests.get(
'https://www.fuzer.me/browse.php?query={}'.format(query),
params=params,
cookies=self.cookies,
)
except RequestException as e:
raise PluginError('Could not connect to Fuzer: {}'.format(e))
if 'login' in page.url:
raise PluginError('Could not fetch results from Fuzer. Check config')
log.debug('Using %s as fuzer search url', page.url)
return get_soup(page.content)
def extract_entry_from_soup(self, soup):
table = soup.find('div', {'id': 'main_table'})
if table is None:
raise PluginError('Could not fetch results table from Fuzer, aborting')
log.trace('fuzer results table: %s', table)
table = table.find('table', {'class': 'table_info'})
if len(table.find_all('tr')) == 1:
log.debug('No search results were returned from Fuzer, continuing')
return []
entries = []
for tr in table.find_all("tr"):
if not tr.get('class') or 'colhead_dark' in tr.get('class'):
continue
name = tr.find('div', {'class': 'main_title'}).find('a').text
torrent_name = re.search(
'\\n(.*)', tr.find('div', {'style': 'float: right;'}).find('a')['title']
).group(1)
attachment_link = tr.find('div', {'style': 'float: right;'}).find('a')['href']
attachment_id = re.search(r'attachmentid=(\d+)', attachment_link).group(1)
raw_size = tr.find_all('td', {'class': 'inline_info'})[0].text.strip()
seeders = int(tr.find_all('td', {'class': 'inline_info'})[2].text)
leechers = int(tr.find_all('td', {'class': 'inline_info'})[3].text)
e = Entry()
e['title'] = name
final_url = 'https://www.fuzer.me/rss/torrent.php/{}/{}/{}/{}'.format(
attachment_id, self.user_id, self.rss_key, torrent_name
)
log.debug('RSS-ified download link: %s', final_url)
e['url'] = final_url
e['torrent_seeds'] = seeders
e['torrent_leeches'] = leechers
e['torrent_availability'] = torrent_availability(
e['torrent_seeds'], e['torrent_leeches']
)
size = re.search(r'(\d+(?:[.,]\d+)*)\s?([KMGTP]B)', raw_size)
e['content_size'] = parse_filesize(size.group(0))
entries.append(e)
return entries
@plugin.internet(log)
def search(self, task, entry, config=None):
"""
Search for name from fuzer.
"""
self.rss_key = config['rss_key']
self.user_id = config['user_id']
self.cookies = {
'fzr2lastactivity': '0',
'fzr2lastvisit': '',
'fzr2password': config['cookie_password'],
'fzr2sessionhash': '',
'fzr2userid': str(self.user_id),
}
category = config.get('category', [0])
# Make sure categories is a list
if not isinstance(category, list):
category = [category]
# If there are any text categories, turn them into their id number
categories = [c if isinstance(c, int) else CATEGORIES[c] for c in category]
c_list = ['c{}={}'.format(quote_plus('[]'), c) for c in categories]
entries = []
if entry.get('imdb_id'):
log.debug("imdb_id '%s' detected, using in search.", entry['imdb_id'])
soup = self.get_fuzer_soup(entry['imdb_id'], c_list)
entries = self.extract_entry_from_soup(soup)
if entries:
for e in list(entries):
e['imdb_id'] = entry.get('imdb_id')
else:
for search_string in entry.get('search_strings', [entry['title']]):
query = normalize_scene(search_string)
text = quote_plus(query.encode('windows-1255'))
soup = self.get_fuzer_soup(text, c_list)
entries += self.extract_entry_from_soup(soup)
return (
sorted(entries, reverse=True, key=lambda x: x.get('torrent_availability'))
if entries
else []
)
@event('plugin.register')
def register_plugin():
plugin.register(UrlRewriteFuzer, 'fuzer', interfaces=['search'], api_ver=2)
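# Example FlexGet task configuration (illustrative; the credentials must come
# from your own Fuzer account, and the surrounding `discover` wiring is just
# one common way to drive a search plugin):
#
#   tasks:
#     fuzer-tv:
#       discover:
#         what:
#           - next_series_episodes: yes
#         from:
#           - fuzer:
#               user_id: 123456
#               cookie_password: '<value of the fzr2password cookie>'
#               rss_key: '<rss key>'
#               category: ['HD Shows', 'Israeli HD Shows']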
|
the-stack_0_9933 | """Progress bars for SDGym compatible with logging and dask."""
import io
import logging
from datetime import datetime, timedelta
LOGGER = logging.getLogger(__name__)
class TqdmLogger(io.StringIO):
_buffer = ''
def write(self, buf):
self._buffer = buf.strip('\r\n\t ')
def flush(self):
LOGGER.info(self._buffer)
def progress(*futures):
"""Track progress of dask computation in a remote cluster.
LogProgressBar is defined inside here to avoid having to import
its dependencies if not used.
"""
# Import distributed only when used
from distributed.client import futures_of # pylint: disable=C0415
from distributed.diagnostics.progressbar import TextProgressBar # pylint: disable=c0415
class LogProgressBar(TextProgressBar):
"""Dask progress bar based on logging instead of stdout."""
last = 0
logger = logging.getLogger('distributed')
def _draw_bar(self, remaining, all, **kwargs): # pylint: disable=W0221,W0622
done = all - remaining
frac = (done / all) if all else 0
if frac > self.last + 0.01:
self.last = int(frac * 100) / 100
bar = "#" * int(self.width * frac)
percent = int(100 * frac)
time_per_task = self.elapsed / (all - remaining)
remaining_time = timedelta(seconds=time_per_task * remaining)
eta = datetime.utcnow() + remaining_time
elapsed = timedelta(seconds=self.elapsed)
msg = "[{0:<{1}}] | {2}/{3} ({4}%) Completed | {5} | {6} | {7}".format(
bar, self.width, done, all, percent, elapsed, remaining_time, eta
)
self.logger.info(msg)
LOGGER.info(msg)
def _draw_stop(self, **kwargs):
pass
futures = futures_of(futures)
if not isinstance(futures, (set, list)):
futures = [futures]
LogProgressBar(futures)
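# Example usage (an illustrative sketch that assumes a dask ``distributed``
# cluster is reachable from this process):
def _example_progress():
    from distributed import Client
    client = Client()  # connect to (or start) a local cluster
    futures = client.map(pow, range(100), range(100))
    progress(*futures)  # progress is logged through the 'distributed' logger
    # TqdmLogger can likewise be handed to tqdm to route its bar to logging:
    #   from tqdm import tqdm
    #   for _ in tqdm(range(10), file=TqdmLogger()):
    #       ...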
|
the-stack_0_9935 | import pickle
import itertools
import os
import math
from sklearn.preprocessing import normalize
import re
from operator import add
import matplotlib.pyplot as plt
#%matplotlib inline
import numpy as np
import argparse
import pylab as pl
import random
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def normalize_by_row(arr):
row_sums = np.sqrt((arr*arr).sum(axis=1))
new_arr = arr / row_sums[:, np.newaxis]
return new_arr
def grep(pat, txt, ind):
r = re.search(pat, txt)
return int(r.group(1))
def compute_embds_matrix(path, M):
pkls = []
for root, dirs, files in os.walk(path):
if len(files) != 0:
pkls.extend([os.path.join(root, file) for file in files if file.endswith('.pkl')])
pkls.sort(key=lambda txt: grep(r"(\d+)_(\d+)\.pkl", txt, 1))
#print(pkls)
A_lst = []
for pkl in pkls:
print(pkl)
with open(pkl, 'rb') as handle:
samples = pickle.load(handle)
#keys = list(samples.keys())
#keys.sort(key=lambda txt: grep(r"(\d+)\.png", txt, 1))
#samples = [samples[key] for key in keys]
chunks = [normalize(np.asarray(samples[i:i+M]), axis=1, norm='l2') for i in range(0, len(samples), M)]
#print(chunks[0].shape)
#print(len(chunks))
A_lst.extend(chunks)
return A_lst
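# Note: ``compute_embds_matrix`` expects ``path`` to contain pickle files named
# like ``<start>_<end>.pkl`` (e.g. ``0_10000.pkl``), each holding a list of
# embedding vectors; the vectors are L2-normalised and split into M-row chunks.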
def monte_carlo(A_lst, I0, N, d):
Count = 0
for i in range(N):
#print('i={}'.format(i))
Ai = A_lst[i]
#print(I0)
AiT = np.transpose(Ai)
#print(np.matmul(I0, AiT))
theta_mat = np.arccos(np.matmul(I0, AiT)) / math.pi
theta_mat = theta_mat - np.ones(theta_mat.shape)*d
Count += np.sum(theta_mat <= 0)
#Pr += np.sum(np.exp(1-np.arccos(np.matmul(I0, AiT)) / math.pi))
return Count
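# ``monte_carlo`` counts how many embeddings (rows of the N tiled matrices) lie
# within normalised angular distance ``d`` of the unit-norm anchor ``I0``, where
# the distance between unit vectors u, v is arccos(u . v) / pi in [0, 1].
# Tiny illustrative check (hypothetical 2-D data):
def _monte_carlo_example(): # pragma: no cover - illustrative only
    anchor = np.array([[1.0, 0.0]])
    tiles = [np.array([[1.0, 0.0], [0.0, 1.0]]), np.array([[-1.0, 0.0]])]
    # distances are 0.0, 0.5 and 1.0, so with d=0.6 two rows are counted
    assert monte_carlo(tiles, anchor, 2, 0.6) == 2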
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Sampling nearest neighbors')
parser.add_argument('--start', required=True, help='Start of the distance hard threshold', type=float)
parser.add_argument('--end', required=True, help='End of the distance hard threshold', type=float)
parser.add_argument('--step_size', required=True, help='Step size of the distance hard threshold', type=float)
parser.add_argument('--job_id', required=True, help='The id of the submitted job', type=str)
parser.add_argument('--sampling_path', required=True, help='The path of the saved embeddings', type=str)
parser.add_argument('--M', default=10000, help='The dimension of the tiled matrix', type=int)
parser.add_argument('--N', default=100, help='The number of tiled matrix', type=int)
parser.add_argument('--K', default=100, help='The number of anchor points', type=int)
parser.add_argument('--random_anchor', required=True, help='Whether we should get the anchor points by randomly sampling', type=str2bool)
args, other_args = parser.parse_known_args()
M = args.M
N = args.N
path = os.path.join(args.sampling_path, 'embds')
anchor_pt_dct = {}
if args.random_anchor:
indices = random.sample(range(M * N), args.K)
else:
with open(os.path.join(args.sampling_path, 'neighbors', 'clustered_indices.pkl'), 'rb') as handle:
indices = pickle.load(handle)
print('Loading indices from saved pickle file')
print(indices)
for i in indices:
pkl_dir = os.path.join(path, '{}_{}'.format((i // 1000000)*1000000, (i // 1000000 + 1)*1000000),
'{}_{}.pkl'.format((i // 10000)*10000, (i // 10000 + 1)*10000))
with open(pkl_dir, 'rb') as handle:
pkl = pickle.load(handle)
vec = pkl[i % 10000]
anchor_pt_dct[i] = vec / np.linalg.norm(vec)
ripley_dir = os.path.join(args.sampling_path, 'ripley')
if not os.path.exists(ripley_dir):
os.makedirs(ripley_dir)
A_lst = compute_embds_matrix(path, M)
file = open(os.path.join(ripley_dir, 'ripley_{}.txt'.format(args.job_id)), 'w')
    # pylab.frange has been removed from matplotlib; emulate it with np.arange
    # (frange included the end point, hence the half-step padding).
    for d in list(np.arange(args.start, args.end + args.step_size / 2.0, args.step_size)):
for k,v in anchor_pt_dct.items():
print(d)
v = v / np.linalg.norm(v)
v = v[np.newaxis,:]
count = monte_carlo(A_lst, v, N, d)
#Pr = (monte_carlo(A_lst, v, N)-10000000)/((np.e-1)*10000000)
result = '{}:\t{}:{}'.format(k, d, count)
print(result)
file.write(result+'\n')
file.close()
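# Example invocation (the script name and paths are placeholders):
#   python compute_ripley.py --start 0.1 --end 0.5 --step_size 0.05 \
#       --job_id 0 --sampling_path /path/to/sampling --random_anchor yes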
|
the-stack_0_9937 | #
# This is Seisflows
#
# See LICENCE file
#
###############################################################################
# Import system modules
import system
import subprocess
from glob import glob
from os.path import join
import sys
# Import Numpy
import numpy as np
# Local imports
import seisflows.plugins.solver.specfem3d_globe as solvertools
from seisflows.tools.seismic import getpar, setpar, Minmax
# loadbin/savebin/copybin are used by save() and check_mesh_properties() below
from seisflows.plugins.io import loadbypar, copybin, loadbin, savebin
from seisflows.tools import unix
from seisflows.tools.seismic import call_solver
from seisflows.tools.tools import Struct, exists
from seisflows.config import ParameterError, custom_import
try:
PAR = sys.modules['seisflows_parameters']
PATH = sys.modules['seisflows_paths']
except KeyError:
print("Check parameters and paths.")
class specfem3d_globe(custom_import('solver', 'base')):
""" Python interface for SPECFEM3D_GLOBE
See base class for method descriptions
"""
try:
if PAR.MATERIALS in ['Isotropic']:
parameters = []
parameters += ['vp']
parameters += ['vs']
else:
parameters = []
parameters += ['vpv']
parameters += ['vph']
parameters += ['vsv']
parameters += ['vsh']
parameters += ['eta']
except:
print("Check parameters and paths.")
def check(self):
""" Checks parameters and paths
"""
super(specfem3d_globe, self).check()
if 'CHANNELS' not in PAR:
setattr(PAR, 'CHANNELS', 'ENZ')
# check data format
if 'FORMAT' not in PAR:
raise Exception()
def generate_data(self, **model_kwargs):
""" Generates data
"""
self.generate_mesh(**model_kwargs)
unix.cd(self.cwd)
setpar('SIMULATION_TYPE', '1')
setpar('SAVE_FORWARD', '.true.')
call_solver(system.mpiexec(), 'bin/xspecfem3D')
if PAR.FORMAT in ['ASCII', 'ascii']:
src = glob('OUTPUT_FILES/*.sem.ascii')
dst = 'traces/obs'
unix.mv(src, dst)
if PAR.SAVETRACES:
self.export_traces(PATH.OUTPUT+'/'+'traces/obs')
def generate_mesh(self, model_path=None, model_name=None,
model_type='gll'):
""" Performs meshing and database generation
"""
assert(model_name)
assert(model_type)
self.initialize_solver_directories()
unix.cd(self.cwd)
if model_type == 'gll':
assert (exists(model_path))
self.check_mesh_properties(model_path)
unix.cp(glob(model_path + '/' + '*'), self.model_databases)
call_solver(system.mpiexec(), 'bin/xmeshfem3D')
if self.taskid == 0:
self.export_model(PATH.OUTPUT + '/' + model_name)
else:
raise NotImplementedError
# Model input/output
def load(self, path, prefix='reg1_', suffix='', verbose=False):
""" reads SPECFEM model or kernel
Models are stored in Fortran binary format and separated into
multiple files according to material parameter and processor rank.
"""
        raise NotImplementedError
        # NOTE: the code below is unreachable reference code; it relies on a
        # ``Model`` container that is not defined in this module.
        model = Model(self.parameters)
minmax = Minmax(self.parameters)
for iproc in range(self.mesh_properties.nproc):
# read database files
keys, vals = loadbypar(path, self.parameters, iproc, prefix,
suffix)
for key, val in zip(keys, vals):
model[key] += [val]
minmax.update(keys, vals)
if verbose:
minmax.write(path, logpath=PATH.SUBMIT)
return model
def save(self, path, model, prefix='reg1_', suffix=''):
""" writes SPECFEM3D_GLOBE transerverly isotropic model
"""
unix.mkdir(path)
for iproc in range(self.mesh_properties.nproc):
for key in ['vpv', 'vph', 'vsv', 'vsh', 'eta']:
if key in self.parameters:
savebin(model[key][iproc], path, iproc, prefix+key+suffix)
elif 'kernel' in suffix:
pass
else:
src = PATH.OUTPUT + '/' + 'model_init'
dst = path
copybin(src, dst, iproc, prefix+key+suffix)
if 'rho' in self.parameters:
savebin(model['rho'][iproc], path, iproc, prefix+'rho'+suffix)
elif 'kernel' in suffix:
pass
else:
src = PATH.OUTPUT + '/' + 'model_init'
dst = path
copybin(src, dst, iproc, prefix+'rho'+suffix)
# Low-level solver interface
def forward(self, path='traces/syn'):
""" Calls SPECFEM3D_GLOBE forward solver
"""
solvertools.setpar('SIMULATION_TYPE', '1')
solvertools.setpar('SAVE_FORWARD', '.true.')
call_solver(system.mpiexec(), 'bin/xspecfem3D')
if PAR.FORMAT in ['ASCII', 'ascii']:
src = glob('OUTPUT_FILES/*.sem.ascii')
dst = path
unix.mv(src, dst)
def adjoint(self):
""" Calls SPECFEM3D_GLOBE adjoint solver
"""
solvertools.setpar('SIMULATION_TYPE', '3')
solvertools.setpar('SAVE_FORWARD', '.false.')
unix.rm('SEM')
unix.ln('traces/adj', 'SEM')
call_solver(system.mpiexec(), 'bin/xspecfem3D')
def check_mesh_properties(self, path=None, parameters=None):
if not hasattr(self, '_mesh_properties'):
if not path:
path = PATH.MODEL_INIT
if not parameters:
parameters = self.parameters
nproc = 0
ngll = []
while True:
dummy = loadbin(path, nproc, 'reg1_'+parameters[0])
ngll += [len(dummy)]
nproc += 1
if not exists('%s/proc%06d_reg1_%s.bin' % (path, nproc,
parameters[0])):
break
self._mesh_properties = Struct([
['nproc', nproc],
['ngll', ngll]])
return self._mesh_properties
def rename_data(self):
""" Works around conflicting data filename conventions
"""
files = glob(self.cwd + '/' + 'traces/adj/*sem.ascii')
unix.rename('sem.ascii', 'sem.ascii.adj', files)
def initialize_adjoint_traces(self):
super(specfem3d_globe, self).initialize_adjoint_traces()
        # workaround for SPECFEM3D_GLOBE's use of different name conventions
        # for regular traces and 'adjoint' traces
if PAR.FORMAT in ['ASCII', 'ascii']:
files = glob(self.cwd + '/' + 'traces/adj/*sem.ascii')
unix.rename('sem.ascii', 'adj', files)
# Miscellaneous
@property
def data_filenames(self):
unix.cd(self.cwd)
unix.cd('traces/obs')
if PAR.FORMAT in ['ASCII', 'ascii']:
filenames = []
for channel in PAR.CHANNELS:
filenames += glob('*.??%s.sem.ascii' % channel)
return [filenames]
@property
def kernel_databases(self):
return join(self.cwd, 'OUTPUT_FILES/DATABASES_MPI')
@property
def model_databases(self):
return join(self.cwd, 'OUTPUT_FILES/DATABASES_MPI')
@property
def source_prefix(self):
return 'CMTSOLUTION'
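# Usage sketch (illustrative only; in practice SeisFlows instantiates the
# solver itself once parameters and paths have been loaded):
#
#     solver = specfem3d_globe()
#     solver.check()
#     solver.generate_data(model_path=PATH.MODEL_INIT,
#                          model_name='model_init', model_type='gll')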
|
the-stack_0_9938 | """
DataFrame
---------
An efficient 2D container for potentially mixed-type time series or other
labeled data series.
Similar to its R counterpart, data.frame, except providing automatic data
alignment and a host of useful data manipulation methods having to do with the
labeling information
"""
import collections
from collections import abc
from io import StringIO
import itertools
import sys
from textwrap import dedent
from typing import (
IO,
TYPE_CHECKING,
Any,
FrozenSet,
Hashable,
Iterable,
List,
Optional,
Sequence,
Set,
Tuple,
Type,
Union,
cast,
)
import warnings
import numpy as np
import numpy.ma as ma
from pandas._config import get_option
from pandas._libs import algos as libalgos, lib
from pandas._typing import Axes, Axis, Dtype, FilePathOrBuffer, Level, Renamer
from pandas.compat import PY37
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender,
Substitution,
deprecate_kwarg,
rewrite_axis_style_signature,
)
from pandas.util._validators import (
validate_axis_style_args,
validate_bool_kwarg,
validate_percentile,
)
from pandas.core.dtypes.cast import (
cast_scalar_to_array,
coerce_to_dtypes,
find_common_type,
infer_dtype_from_scalar,
invalidate_string_dtypes,
maybe_cast_to_datetime,
maybe_convert_platform,
maybe_downcast_to_dtype,
maybe_infer_to_datetimelike,
maybe_upcast,
maybe_upcast_putmask,
)
from pandas.core.dtypes.common import (
ensure_float64,
ensure_int64,
ensure_platform_int,
infer_dtype_from_object,
is_bool_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_float_dtype,
is_hashable,
is_integer,
is_integer_dtype,
is_iterator,
is_list_like,
is_named_tuple,
is_object_dtype,
is_scalar,
is_sequence,
needs_i8_conversion,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCIndexClass,
ABCMultiIndex,
ABCSeries,
)
from pandas.core.dtypes.missing import isna, notna
from pandas.core import algorithms, common as com, nanops, ops
from pandas.core.accessor import CachedAccessor
from pandas.core.arrays import Categorical, ExtensionArray
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin as DatetimeLikeArray
from pandas.core.arrays.sparse import SparseFrameAccessor
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.indexes import base as ibase
from pandas.core.indexes.api import Index, ensure_index, ensure_index_from_sequences
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.multi import maybe_droplevels
from pandas.core.indexes.period import PeriodIndex
from pandas.core.indexing import check_bool_indexer, convert_to_index_sliceable
from pandas.core.internals import BlockManager
from pandas.core.internals.construction import (
arrays_to_mgr,
get_names_from_index,
init_dict,
init_ndarray,
masked_rec_array_to_mgr,
reorder_arrays,
sanitize_index,
to_arrays,
)
from pandas.core.ops.missing import dispatch_fill_zeros
from pandas.core.series import Series
from pandas.io.common import get_filepath_or_buffer
from pandas.io.formats import console, format as fmt
from pandas.io.formats.printing import pprint_thing
import pandas.plotting
if TYPE_CHECKING:
from pandas.core.groupby.generic import DataFrameGroupBy
from pandas.io.formats.style import Styler
# ---------------------------------------------------------------------
# Docstring templates
_shared_doc_kwargs = dict(
axes="index, columns",
klass="DataFrame",
axes_single_arg="{0 or 'index', 1 or 'columns'}",
axis="""axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index': apply function to each column.
If 1 or 'columns': apply function to each row.""",
optional_by="""
by : str or list of str
Name or list of names to sort by.
- if `axis` is 0 or `'index'` then `by` may contain index
levels and/or column labels.
- if `axis` is 1 or `'columns'` then `by` may contain column
levels and/or index labels.
.. versionchanged:: 0.23.0
Allow specifying index or column level names.""",
versionadded_to_excel="",
optional_labels="""labels : array-like, optional
New labels / index to conform the axis specified by 'axis' to.""",
optional_axis="""axis : int or str, optional
Axis to target. Can be either the axis name ('index', 'columns')
or number (0, 1).""",
)
_numeric_only_doc = """numeric_only : boolean, default None
Include only float, int, boolean data. If None, will attempt to use
everything, then use only numeric data
"""
_merge_doc = """
Merge DataFrame or named Series objects with a database-style join.
The join is done on columns or indexes. If joining columns on
columns, the DataFrame indexes *will be ignored*. Otherwise if joining indexes
on indexes or indexes on a column or columns, the index will be passed on.
Parameters
----------%s
right : DataFrame or named Series
Object to merge with.
how : {'left', 'right', 'outer', 'inner'}, default 'inner'
Type of merge to be performed.
* left: use only keys from left frame, similar to a SQL left outer join;
preserve key order.
* right: use only keys from right frame, similar to a SQL right outer join;
preserve key order.
* outer: use union of keys from both frames, similar to a SQL full outer
join; sort keys lexicographically.
* inner: use intersection of keys from both frames, similar to a SQL inner
join; preserve the order of the left keys.
on : label or list
Column or index level names to join on. These must be found in both
DataFrames. If `on` is None and not merging on indexes then this defaults
to the intersection of the columns in both DataFrames.
left_on : label or list, or array-like
Column or index level names to join on in the left DataFrame. Can also
be an array or list of arrays of the length of the left DataFrame.
These arrays are treated as if they are columns.
right_on : label or list, or array-like
Column or index level names to join on in the right DataFrame. Can also
be an array or list of arrays of the length of the right DataFrame.
These arrays are treated as if they are columns.
left_index : bool, default False
Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index
or a number of columns) must match the number of levels.
right_index : bool, default False
Use the index from the right DataFrame as the join key. Same caveats as
left_index.
sort : bool, default False
Sort the join keys lexicographically in the result DataFrame. If False,
the order of the join keys depends on the join type (how keyword).
suffixes : tuple of (str, str), default ('_x', '_y')
Suffix to apply to overlapping column names in the left and right
side, respectively. To raise an exception on overlapping columns use
(False, False).
copy : bool, default True
If False, avoid copy if possible.
indicator : bool or str, default False
If True, adds a column to output DataFrame called "_merge" with
information on the source of each row.
If string, column with information on source of each row will be added to
output DataFrame, and column will be named value of string.
Information column is Categorical-type and takes on a value of "left_only"
for observations whose merge key only appears in 'left' DataFrame,
"right_only" for observations whose merge key only appears in 'right'
DataFrame, and "both" if the observation's merge key is found in both.
validate : str, optional
If specified, checks if merge is of specified type.
* "one_to_one" or "1:1": check if merge keys are unique in both
left and right datasets.
* "one_to_many" or "1:m": check if merge keys are unique in left
dataset.
* "many_to_one" or "m:1": check if merge keys are unique in right
dataset.
* "many_to_many" or "m:m": allowed, but does not result in checks.
.. versionadded:: 0.21.0
Returns
-------
DataFrame
A DataFrame of the two merged objects.
See Also
--------
merge_ordered : Merge with optional filling/interpolation.
merge_asof : Merge on nearest keys.
DataFrame.join : Similar method using indices.
Notes
-----
Support for specifying index levels as the `on`, `left_on`, and
`right_on` parameters was added in version 0.23.0
Support for merging named Series objects was added in version 0.24.0
Examples
--------
>>> df1 = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]})
>>> df2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [5, 6, 7, 8]})
>>> df1
lkey value
0 foo 1
1 bar 2
2 baz 3
3 foo 5
>>> df2
rkey value
0 foo 5
1 bar 6
2 baz 7
3 foo 8
Merge df1 and df2 on the lkey and rkey columns. The value columns have
the default suffixes, _x and _y, appended.
>>> df1.merge(df2, left_on='lkey', right_on='rkey')
lkey value_x rkey value_y
0 foo 1 foo 5
1 foo 1 foo 8
2 foo 5 foo 5
3 foo 5 foo 8
4 bar 2 bar 6
5 baz 3 baz 7
Merge DataFrames df1 and df2 with specified left and right suffixes
appended to any overlapping columns.
>>> df1.merge(df2, left_on='lkey', right_on='rkey',
... suffixes=('_left', '_right'))
lkey value_left rkey value_right
0 foo 1 foo 5
1 foo 1 foo 8
2 foo 5 foo 5
3 foo 5 foo 8
4 bar 2 bar 6
5 baz 3 baz 7
Merge DataFrames df1 and df2, but raise an exception if the DataFrames have
any overlapping columns.
>>> df1.merge(df2, left_on='lkey', right_on='rkey', suffixes=(False, False))
Traceback (most recent call last):
...
ValueError: columns overlap but no suffix specified:
Index(['value'], dtype='object')
"""
# -----------------------------------------------------------------------
# DataFrame class
class DataFrame(NDFrame):
"""
Two-dimensional, size-mutable, potentially heterogeneous tabular data.
Data structure also contains labeled axes (rows and columns).
Arithmetic operations align on both row and column labels. Can be
thought of as a dict-like container for Series objects. The primary
pandas data structure.
Parameters
----------
data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame
Dict can contain Series, arrays, constants, or list-like objects.
.. versionchanged:: 0.23.0
If data is a dict, column order follows insertion-order for
Python 3.6 and later.
.. versionchanged:: 0.25.0
If data is a list of dicts, column order follows insertion-order
for Python 3.6 and later.
index : Index or array-like
Index to use for resulting frame. Will default to RangeIndex if
no indexing information part of input data and no index provided.
columns : Index or array-like
Column labels to use for resulting frame. Will default to
RangeIndex (0, 1, 2, ..., n) if no column labels are provided.
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer.
copy : bool, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input.
See Also
--------
DataFrame.from_records : Constructor from tuples, also record arrays.
DataFrame.from_dict : From dicts of Series, arrays, or dicts.
read_csv
read_table
read_clipboard
Examples
--------
Constructing DataFrame from a dictionary.
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = pd.DataFrame(data=d)
>>> df
col1 col2
0 1 3
1 2 4
Notice that the inferred dtype is int64.
>>> df.dtypes
col1 int64
col2 int64
dtype: object
To enforce a single dtype:
>>> df = pd.DataFrame(data=d, dtype=np.int8)
>>> df.dtypes
col1 int8
col2 int8
dtype: object
Constructing DataFrame from numpy ndarray:
>>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
... columns=['a', 'b', 'c'])
>>> df2
a b c
0 1 2 3
1 4 5 6
2 7 8 9
"""
_typ = "dataframe"
@property
def _constructor(self) -> Type["DataFrame"]:
return DataFrame
_constructor_sliced: Type[Series] = Series
_deprecations: FrozenSet[str] = NDFrame._deprecations | frozenset([])
_accessors: Set[str] = {"sparse"}
@property
def _constructor_expanddim(self):
raise NotImplementedError("Not supported for DataFrames!")
# ----------------------------------------------------------------------
# Constructors
def __init__(
self,
data=None,
index: Optional[Axes] = None,
columns: Optional[Axes] = None,
dtype: Optional[Dtype] = None,
copy: bool = False,
):
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
if isinstance(data, DataFrame):
data = data._data
if isinstance(data, BlockManager):
mgr = self._init_mgr(
data, axes=dict(index=index, columns=columns), dtype=dtype, copy=copy
)
elif isinstance(data, dict):
mgr = init_dict(data, index, columns, dtype=dtype)
elif isinstance(data, ma.MaskedArray):
import numpy.ma.mrecords as mrecords
# masked recarray
if isinstance(data, mrecords.MaskedRecords):
mgr = masked_rec_array_to_mgr(data, index, columns, dtype, copy)
# a masked array
else:
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = maybe_upcast(data, copy=True)
data.soften_mask() # set hardmask False if it was True
data[mask] = fill_value
else:
data = data.copy()
mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)
elif isinstance(data, (np.ndarray, Series, Index)):
if data.dtype.names:
data_columns = list(data.dtype.names)
data = {k: data[k] for k in data_columns}
if columns is None:
columns = data_columns
mgr = init_dict(data, index, columns, dtype=dtype)
elif getattr(data, "name", None) is not None:
mgr = init_dict({data.name: data}, index, columns, dtype=dtype)
else:
mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)
# For data is list-like, or Iterable (will consume into list)
elif isinstance(data, abc.Iterable) and not isinstance(data, (str, bytes)):
if not isinstance(data, (abc.Sequence, ExtensionArray)):
data = list(data)
if len(data) > 0:
if is_list_like(data[0]) and getattr(data[0], "ndim", 1) == 1:
if is_named_tuple(data[0]) and columns is None:
columns = data[0]._fields
arrays, columns = to_arrays(data, columns, dtype=dtype)
columns = ensure_index(columns)
# set the index
if index is None:
if isinstance(data[0], Series):
index = get_names_from_index(data)
elif isinstance(data[0], Categorical):
index = ibase.default_index(len(data[0]))
else:
index = ibase.default_index(len(data))
mgr = arrays_to_mgr(arrays, columns, index, columns, dtype=dtype)
else:
mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)
else:
mgr = init_dict({}, index, columns, dtype=dtype)
else:
try:
arr = np.array(data, dtype=dtype, copy=copy)
except (ValueError, TypeError) as e:
exc = TypeError(
"DataFrame constructor called with "
f"incompatible data and dtype: {e}"
)
raise exc from e
if arr.ndim == 0 and index is not None and columns is not None:
values = cast_scalar_to_array(
(len(index), len(columns)), data, dtype=dtype
)
mgr = init_ndarray(
values, index, columns, dtype=values.dtype, copy=False
)
else:
raise ValueError("DataFrame constructor not properly called!")
NDFrame.__init__(self, mgr, fastpath=True)
# ----------------------------------------------------------------------
@property
def axes(self) -> List[Index]:
"""
Return a list representing the axes of the DataFrame.
It has the row axis labels and column axis labels as the only members.
They are returned in that order.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.axes
[RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'],
dtype='object')]
"""
return [self.index, self.columns]
@property
def shape(self) -> Tuple[int, int]:
"""
Return a tuple representing the dimensionality of the DataFrame.
See Also
--------
ndarray.shape
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.shape
(2, 2)
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4],
... 'col3': [5, 6]})
>>> df.shape
(2, 3)
"""
return len(self.index), len(self.columns)
@property
def _is_homogeneous_type(self) -> bool:
"""
Whether all the columns in a DataFrame have the same type.
Returns
-------
bool
See Also
--------
Index._is_homogeneous_type : Whether the object has a single
dtype.
MultiIndex._is_homogeneous_type : Whether all the levels of a
MultiIndex have the same dtype.
Examples
--------
>>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type
True
>>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type
False
Items with the same type but different sizes are considered
different types.
>>> DataFrame({
... "A": np.array([1, 2], dtype=np.int32),
... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type
False
"""
if self._data.any_extension_types:
return len({block.dtype for block in self._data.blocks}) == 1
else:
return not self._data.is_mixed_type
# ----------------------------------------------------------------------
# Rendering Methods
def _repr_fits_vertical_(self) -> bool:
"""
Check length against max_rows.
"""
max_rows = get_option("display.max_rows")
return len(self) <= max_rows
def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool:
"""
Check if full repr fits in horizontal boundaries imposed by the display
options width and max_columns.
        In case of a non-interactive session, no boundaries apply.
        `ignore_width` is here so ipynb+HTML output can behave the way
users expect. display.max_columns remains in effect.
GH3541, GH3573
"""
width, height = console.get_console_size()
max_columns = get_option("display.max_columns")
nb_columns = len(self.columns)
# exceed max columns
if (max_columns and nb_columns > max_columns) or (
(not ignore_width) and width and nb_columns > (width // 2)
):
return False
# used by repr_html under IPython notebook or scripts ignore terminal
# dims
if ignore_width or not console.in_interactive_session():
return True
if get_option("display.width") is not None or console.in_ipython_frontend():
# check at least the column row for excessive width
max_rows = 1
else:
max_rows = get_option("display.max_rows")
# when auto-detecting, so width=None and not in ipython front end
# check whether repr fits horizontal by actually checking
# the width of the rendered repr
buf = StringIO()
# only care about the stuff we'll actually print out
# and to_string on entire frame may be expensive
d = self
        if max_rows is not None:
            # max_rows is set: truncate to at most max_rows rows before
            # measuring the rendered width; the else branch means unlimited rows.
            d = d.iloc[: min(max_rows, len(d))]
else:
return True
d.to_string(buf=buf)
value = buf.getvalue()
repr_width = max(len(l) for l in value.split("\n"))
return repr_width < width
def _info_repr(self) -> bool:
"""
True if the repr should show the info view.
"""
info_repr_option = get_option("display.large_repr") == "info"
return info_repr_option and not (
self._repr_fits_horizontal_() and self._repr_fits_vertical_()
)
def __repr__(self) -> str:
"""
Return a string representation for a particular DataFrame.
"""
buf = StringIO("")
if self._info_repr():
self.info(buf=buf)
return buf.getvalue()
max_rows = get_option("display.max_rows")
min_rows = get_option("display.min_rows")
max_cols = get_option("display.max_columns")
max_colwidth = get_option("display.max_colwidth")
show_dimensions = get_option("display.show_dimensions")
if get_option("display.expand_frame_repr"):
width, _ = console.get_console_size()
else:
width = None
self.to_string(
buf=buf,
max_rows=max_rows,
min_rows=min_rows,
max_cols=max_cols,
line_width=width,
max_colwidth=max_colwidth,
show_dimensions=show_dimensions,
)
return buf.getvalue()
def _repr_html_(self) -> Optional[str]:
"""
Return a html representation for a particular DataFrame.
Mainly for IPython notebook.
"""
if self._info_repr():
buf = StringIO("")
self.info(buf=buf)
# need to escape the <class>, should be the first line.
val = buf.getvalue().replace("<", r"<", 1)
val = val.replace(">", r">", 1)
return "<pre>" + val + "</pre>"
if get_option("display.notebook_repr_html"):
max_rows = get_option("display.max_rows")
min_rows = get_option("display.min_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
formatter = fmt.DataFrameFormatter(
self,
columns=None,
col_space=None,
na_rep="NaN",
formatters=None,
float_format=None,
sparsify=None,
justify=None,
index_names=True,
header=True,
index=True,
bold_rows=True,
escape=True,
max_rows=max_rows,
min_rows=min_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=".",
table_id=None,
render_links=False,
)
return formatter.to_html(notebook=True)
else:
return None
@Substitution(
header_type="bool or sequence",
header="Write out the column names. If a list of strings "
"is given, it is assumed to be aliases for the "
"column names",
col_space_type="int",
col_space="The minimum width of each column",
)
@Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)
def to_string(
self,
buf: Optional[FilePathOrBuffer[str]] = None,
columns: Optional[Sequence[str]] = None,
col_space: Optional[int] = None,
header: Union[bool, Sequence[str]] = True,
index: bool = True,
na_rep: str = "NaN",
formatters: Optional[fmt.formatters_type] = None,
float_format: Optional[fmt.float_format_type] = None,
sparsify: Optional[bool] = None,
index_names: bool = True,
justify: Optional[str] = None,
max_rows: Optional[int] = None,
min_rows: Optional[int] = None,
max_cols: Optional[int] = None,
show_dimensions: bool = False,
decimal: str = ".",
line_width: Optional[int] = None,
max_colwidth: Optional[int] = None,
encoding: Optional[str] = None,
) -> Optional[str]:
"""
Render a DataFrame to a console-friendly tabular output.
%(shared_params)s
line_width : int, optional
Width to wrap a line in characters.
max_colwidth : int, optional
Max width to truncate each column in characters. By default, no limit.
.. versionadded:: 1.0.0
encoding : str, default "utf-8"
Set character encoding.
.. versionadded:: 1.0
%(returns)s
See Also
--------
to_html : Convert DataFrame to HTML.
Examples
--------
>>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]}
>>> df = pd.DataFrame(d)
>>> print(df.to_string())
col1 col2
0 1 4
1 2 5
2 3 6
"""
from pandas import option_context
with option_context("display.max_colwidth", max_colwidth):
formatter = fmt.DataFrameFormatter(
self,
columns=columns,
col_space=col_space,
na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify,
justify=justify,
index_names=index_names,
header=header,
index=index,
min_rows=min_rows,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=decimal,
line_width=line_width,
)
return formatter.to_string(buf=buf, encoding=encoding)
# ----------------------------------------------------------------------
@property
def style(self) -> "Styler":
"""
Returns a Styler object.
        Contains methods for building a styled HTML representation of the DataFrame.
See Also
--------
io.formats.style.Styler
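        Examples
        --------
        Illustrative only (building the Styler requires the optional
        ``jinja2`` dependency):
        >>> df = pd.DataFrame({'A': [1, 2, 3]})
        >>> df.style  # doctest: +SKIP
        <pandas.io.formats.style.Styler object ...>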
"""
from pandas.io.formats.style import Styler
return Styler(self)
_shared_docs[
"items"
] = r"""
Iterate over (column name, Series) pairs.
Iterates over the DataFrame columns, returning a tuple with
the column name and the content as a Series.
Yields
------
label : object
The column names for the DataFrame being iterated over.
content : Series
The column entries belonging to each label, as a Series.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as
(index, Series) pairs.
DataFrame.itertuples : Iterate over DataFrame rows as namedtuples
of the values.
Examples
--------
>>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'],
... 'population': [1864, 22000, 80000]},
... index=['panda', 'polar', 'koala'])
>>> df
species population
panda bear 1864
polar bear 22000
koala marsupial 80000
>>> for label, content in df.items():
... print('label:', label)
... print('content:', content, sep='\n')
...
label: species
content:
panda bear
polar bear
koala marsupial
Name: species, dtype: object
label: population
content:
panda 1864
polar 22000
koala 80000
Name: population, dtype: int64
"""
@Appender(_shared_docs["items"])
def items(self) -> Iterable[Tuple[Optional[Hashable], Series]]:
if self.columns.is_unique and hasattr(self, "_item_cache"):
for k in self.columns:
yield k, self._get_item_cache(k)
else:
for i, k in enumerate(self.columns):
yield k, self._ixs(i, axis=1)
@Appender(_shared_docs["items"])
def iteritems(self) -> Iterable[Tuple[Optional[Hashable], Series]]:
yield from self.items()
def iterrows(self) -> Iterable[Tuple[Optional[Hashable], Series]]:
"""
Iterate over DataFrame rows as (index, Series) pairs.
Yields
------
index : label or tuple of label
The index of the row. A tuple for a `MultiIndex`.
data : Series
The data of the row as a Series.
See Also
--------
DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values.
DataFrame.items : Iterate over (column name, Series) pairs.
Notes
-----
1. Because ``iterrows`` returns a Series for each row,
it does **not** preserve dtypes across the rows (dtypes are
preserved across columns for DataFrames). For example,
>>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float'])
>>> row = next(df.iterrows())[1]
>>> row
int 1.0
float 1.5
Name: 0, dtype: float64
>>> print(row['int'].dtype)
float64
>>> print(df['int'].dtype)
int64
To preserve dtypes while iterating over the rows, it is better
to use :meth:`itertuples` which returns namedtuples of the values
and which is generally faster than ``iterrows``.
2. You should **never modify** something you are iterating over.
This is not guaranteed to work in all cases. Depending on the
data types, the iterator returns a copy and not a view, and writing
to it will have no effect.
"""
columns = self.columns
klass = self._constructor_sliced
for k, v in zip(self.index, self.values):
s = klass(v, index=columns, name=k)
yield k, s
def itertuples(self, index=True, name="Pandas"):
"""
Iterate over DataFrame rows as namedtuples.
Parameters
----------
index : bool, default True
If True, return the index as the first element of the tuple.
name : str or None, default "Pandas"
The name of the returned namedtuples or None to return regular
tuples.
Returns
-------
iterator
An object to iterate over namedtuples for each row in the
DataFrame with the first field possibly being the index and
following fields being the column values.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as (index, Series)
pairs.
DataFrame.items : Iterate over (column name, Series) pairs.
Notes
-----
The column names will be renamed to positional names if they are
invalid Python identifiers, repeated, or start with an underscore.
On python versions < 3.7 regular tuples are returned for DataFrames
with a large number of columns (>254).
Examples
--------
>>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},
... index=['dog', 'hawk'])
>>> df
num_legs num_wings
dog 4 0
hawk 2 2
>>> for row in df.itertuples():
... print(row)
...
Pandas(Index='dog', num_legs=4, num_wings=0)
Pandas(Index='hawk', num_legs=2, num_wings=2)
By setting the `index` parameter to False we can remove the index
as the first element of the tuple:
>>> for row in df.itertuples(index=False):
... print(row)
...
Pandas(num_legs=4, num_wings=0)
Pandas(num_legs=2, num_wings=2)
With the `name` parameter set we set a custom name for the yielded
namedtuples:
>>> for row in df.itertuples(name='Animal'):
... print(row)
...
Animal(Index='dog', num_legs=4, num_wings=0)
Animal(Index='hawk', num_legs=2, num_wings=2)
"""
arrays = []
fields = list(self.columns)
if index:
arrays.append(self.index)
fields.insert(0, "Index")
# use integer indexing because of possible duplicate column names
arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))
# Python versions before 3.7 support at most 255 arguments to constructors
can_return_named_tuples = PY37 or len(self.columns) + index < 255
if name is not None and can_return_named_tuples:
itertuple = collections.namedtuple(name, fields, rename=True)
return map(itertuple._make, zip(*arrays))
# fallback to regular tuples
return zip(*arrays)
def __len__(self) -> int:
"""
Returns length of info axis, but here we use the index.
"""
return len(self.index)
def dot(self, other):
"""
Compute the matrix multiplication between the DataFrame and other.
This method computes the matrix product between the DataFrame and the
values of an other Series, DataFrame or a numpy array.
It can also be called using ``self @ other`` in Python >= 3.5.
Parameters
----------
other : Series, DataFrame or array-like
The other object to compute the matrix product with.
Returns
-------
Series or DataFrame
If other is a Series, return the matrix product between self and
            other as a Series. If other is a DataFrame or a numpy.array, return
            the matrix product of self and other in a DataFrame or a np.array.
See Also
--------
Series.dot: Similar method for Series.
Notes
-----
The dimensions of DataFrame and other must be compatible in order to
compute the matrix multiplication. In addition, the column names of
DataFrame and the index of other must contain the same values, as they
will be aligned prior to the multiplication.
The dot method for Series computes the inner product, instead of the
matrix product here.
Examples
--------
Here we multiply a DataFrame with a Series.
>>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])
>>> s = pd.Series([1, 1, 2, 1])
>>> df.dot(s)
0 -4
1 5
dtype: int64
Here we multiply a DataFrame with another DataFrame.
>>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]])
>>> df.dot(other)
0 1
0 1 4
1 2 2
        Note that the dot method gives the same result as @
>>> df @ other
0 1
0 1 4
1 2 2
        The dot method also works if other is a np.array.
>>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]])
>>> df.dot(arr)
0 1
0 1 4
1 2 2
Note how shuffling of the objects does not change the result.
>>> s2 = s.reindex([1, 0, 2, 3])
>>> df.dot(s2)
0 -4
1 5
dtype: int64
"""
if isinstance(other, (Series, DataFrame)):
common = self.columns.union(other.index)
if len(common) > len(self.columns) or len(common) > len(other.index):
raise ValueError("matrices are not aligned")
left = self.reindex(columns=common, copy=False)
right = other.reindex(index=common, copy=False)
lvals = left.values
rvals = right.values
else:
left = self
lvals = self.values
rvals = np.asarray(other)
if lvals.shape[1] != rvals.shape[0]:
raise ValueError(
f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}"
)
if isinstance(other, DataFrame):
return self._constructor(
np.dot(lvals, rvals), index=left.index, columns=other.columns
)
elif isinstance(other, Series):
return Series(np.dot(lvals, rvals), index=left.index)
elif isinstance(rvals, (np.ndarray, Index)):
result = np.dot(lvals, rvals)
if result.ndim == 2:
return self._constructor(result, index=left.index)
else:
return Series(result, index=left.index)
else: # pragma: no cover
raise TypeError(f"unsupported type: {type(other)}")
def __matmul__(self, other):
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
"""
return self.dot(other)
def __rmatmul__(self, other):
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
"""
return self.T.dot(np.transpose(other)).T
# ----------------------------------------------------------------------
# IO methods (to / from other formats)
@classmethod
def from_dict(cls, data, orient="columns", dtype=None, columns=None) -> "DataFrame":
"""
Construct DataFrame from dict of array-like or dicts.
Creates DataFrame object from dictionary by columns or by index
allowing dtype specification.
Parameters
----------
data : dict
Of the form {field : array-like} or {field : dict}.
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the passed dict
should be the columns of the resulting DataFrame, pass 'columns'
(default). Otherwise if the keys should be rows, pass 'index'.
dtype : dtype, default None
Data type to force, otherwise infer.
columns : list, default None
Column labels to use when ``orient='index'``. Raises a ValueError
if used with ``orient='columns'``.
.. versionadded:: 0.23.0
Returns
-------
DataFrame
See Also
--------
DataFrame.from_records : DataFrame from ndarray (structured
dtype), list of tuples, dict, or DataFrame.
DataFrame : DataFrame object creation using constructor.
Examples
--------
By default the keys of the dict become the DataFrame columns:
>>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}
>>> pd.DataFrame.from_dict(data)
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
Specify ``orient='index'`` to create the DataFrame using dictionary
keys as rows:
>>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']}
>>> pd.DataFrame.from_dict(data, orient='index')
0 1 2 3
row_1 3 2 1 0
row_2 a b c d
When using the 'index' orientation, the column names can be
specified manually:
>>> pd.DataFrame.from_dict(data, orient='index',
... columns=['A', 'B', 'C', 'D'])
A B C D
row_1 3 2 1 0
row_2 a b c d
"""
index = None
orient = orient.lower()
if orient == "index":
if len(data) > 0:
# TODO speed up Series case
if isinstance(list(data.values())[0], (Series, dict)):
data = _from_nested_dict(data)
else:
data, index = list(data.values()), list(data.keys())
elif orient == "columns":
if columns is not None:
raise ValueError("cannot use columns parameter with orient='columns'")
else: # pragma: no cover
raise ValueError("only recognize index or columns for orient")
return cls(data, index=index, columns=columns, dtype=dtype)
def to_numpy(self, dtype=None, copy=False) -> np.ndarray:
"""
Convert the DataFrame to a NumPy array.
.. versionadded:: 0.24.0
By default, the dtype of the returned array will be the common NumPy
dtype of all types in the DataFrame. For example, if the dtypes are
        ``float16`` and ``float32``, the resulting dtype will be ``float32``.
This may require copying data and coercing values, which may be
expensive.
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to pass to :meth:`numpy.asarray`.
copy : bool, default False
            Whether to ensure that the returned value is not a view on
            another array. Note that ``copy=False`` does not *ensure* that
            ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensures that
a copy is made, even if not strictly necessary.
Returns
-------
numpy.ndarray
See Also
--------
Series.to_numpy : Similar method for Series.
Examples
--------
>>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to
be used.
>>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]})
>>> df.to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will
have object dtype.
>>> df['C'] = pd.date_range('2000', periods=2)
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
"""
result = np.array(self.values, dtype=dtype, copy=copy)
return result
def to_dict(self, orient="dict", into=dict):
"""
Convert the DataFrame to a dictionary.
The type of the key-value pairs can be customized with the parameters
(see below).
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- 'dict' (default) : dict like {column -> {index -> value}}
- 'list' : dict like {column -> [values]}
- 'series' : dict like {column -> Series(values)}
- 'split' : dict like
{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
into : class, default dict
The collections.abc.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
.. versionadded:: 0.21.0
Returns
-------
dict, list or collections.abc.Mapping
Return a collections.abc.Mapping object representing the DataFrame.
The resulting transformation depends on the `orient` parameter.
See Also
--------
DataFrame.from_dict: Create a DataFrame from a dictionary.
DataFrame.to_json: Convert a DataFrame to JSON format.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2],
... 'col2': [0.5, 0.75]},
... index=['row1', 'row2'])
>>> df
col1 col2
row1 1 0.50
row2 2 0.75
>>> df.to_dict()
{'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}}
You can specify the return orientation.
>>> df.to_dict('series')
{'col1': row1 1
row2 2
Name: col1, dtype: int64,
'col2': row1 0.50
row2 0.75
Name: col2, dtype: float64}
>>> df.to_dict('split')
{'index': ['row1', 'row2'], 'columns': ['col1', 'col2'],
'data': [[1, 0.5], [2, 0.75]]}
>>> df.to_dict('records')
[{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}]
>>> df.to_dict('index')
{'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}}
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])),
('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd)
[defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}),
defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})]
"""
if not self.columns.is_unique:
warnings.warn(
"DataFrame columns are not unique, some columns will be omitted.",
UserWarning,
stacklevel=2,
)
# GH16122
into_c = com.standardize_mapping(into)
if orient.lower().startswith("d"):
return into_c((k, v.to_dict(into)) for k, v in self.items())
elif orient.lower().startswith("l"):
return into_c((k, v.tolist()) for k, v in self.items())
elif orient.lower().startswith("sp"):
return into_c(
(
("index", self.index.tolist()),
("columns", self.columns.tolist()),
(
"data",
[
list(map(com.maybe_box_datetimelike, t))
for t in self.itertuples(index=False, name=None)
],
),
)
)
elif orient.lower().startswith("s"):
return into_c((k, com.maybe_box_datetimelike(v)) for k, v in self.items())
elif orient.lower().startswith("r"):
columns = self.columns.tolist()
rows = (
dict(zip(columns, row))
for row in self.itertuples(index=False, name=None)
)
return [
into_c((k, com.maybe_box_datetimelike(v)) for k, v in row.items())
for row in rows
]
elif orient.lower().startswith("i"):
if not self.index.is_unique:
raise ValueError("DataFrame index must be unique for orient='index'.")
return into_c(
(t[0], dict(zip(self.columns, t[1:])))
for t in self.itertuples(name=None)
)
else:
raise ValueError(f"orient '{orient}' not understood")
def to_gbq(
self,
destination_table,
project_id=None,
chunksize=None,
reauth=False,
if_exists="fail",
auth_local_webserver=False,
table_schema=None,
location=None,
progress_bar=True,
credentials=None,
) -> None:
"""
Write a DataFrame to a Google BigQuery table.
This function requires the `pandas-gbq package
<https://pandas-gbq.readthedocs.io>`__.
See the `How to authenticate with Google BigQuery
<https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__
guide for authentication instructions.
Parameters
----------
destination_table : str
Name of table to be written, in the form ``dataset.tablename``.
project_id : str, optional
Google BigQuery Account project ID. Optional when available from
the environment.
chunksize : int, optional
Number of rows to be inserted in each chunk from the dataframe.
Set to ``None`` to load the whole dataframe at once.
reauth : bool, default False
Force Google BigQuery to re-authenticate the user. This is useful
if multiple accounts are used.
if_exists : str, default 'fail'
Behavior when the destination table exists. Value can be one of:
``'fail'``
If table exists raise pandas_gbq.gbq.TableCreationError.
``'replace'``
If table exists, drop it, recreate it, and insert data.
``'append'``
If table exists, insert data. Create if does not exist.
auth_local_webserver : bool, default False
Use the `local webserver flow`_ instead of the `console flow`_
when getting user credentials.
.. _local webserver flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
.. _console flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
*New in version 0.2.0 of pandas-gbq*.
table_schema : list of dicts, optional
            List of BigQuery table fields to which the DataFrame columns
            conform, e.g. ``[{'name': 'col1', 'type':
'STRING'},...]``. If schema is not provided, it will be
generated according to dtypes of DataFrame columns. See
BigQuery API documentation on available names of a field.
*New in version 0.3.1 of pandas-gbq*.
location : str, optional
Location where the load job should run. See the `BigQuery locations
documentation
<https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
list of available locations. The location must match that of the
target dataset.
*New in version 0.5.0 of pandas-gbq*.
progress_bar : bool, default True
Use the library `tqdm` to show the progress bar for the upload,
chunk by chunk.
*New in version 0.5.0 of pandas-gbq*.
credentials : google.auth.credentials.Credentials, optional
Credentials for accessing Google APIs. Use this parameter to
override default credentials, such as to use Compute Engine
:class:`google.auth.compute_engine.Credentials` or Service
Account :class:`google.oauth2.service_account.Credentials`
directly.
*New in version 0.8.0 of pandas-gbq*.
.. versionadded:: 0.24.0
See Also
--------
pandas_gbq.to_gbq : This function in the pandas-gbq library.
read_gbq : Read a DataFrame from Google BigQuery.
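        Examples
        --------
        A minimal sketch; the table name, project id and destination dataset
        below are placeholders and the call needs valid Google credentials:
        >>> df = pd.DataFrame({"my_string": ["a", "b", "c"]})
        >>> df.to_gbq("my_dataset.my_table",
        ...           project_id="my-project",
        ...           if_exists="replace")  # doctest: +SKIP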
"""
from pandas.io import gbq
gbq.to_gbq(
self,
destination_table,
project_id=project_id,
chunksize=chunksize,
reauth=reauth,
if_exists=if_exists,
auth_local_webserver=auth_local_webserver,
table_schema=table_schema,
location=location,
progress_bar=progress_bar,
credentials=credentials,
)
@classmethod
def from_records(
cls,
data,
index=None,
exclude=None,
columns=None,
coerce_float=False,
nrows=None,
) -> "DataFrame":
"""
Convert structured or record ndarray to DataFrame.
Parameters
----------
data : ndarray (structured dtype), list of tuples, dict, or DataFrame
index : str, list of fields, array-like
Field of array to use as the index, alternately a specific set of
input labels to use.
exclude : sequence, default None
Columns or fields to exclude.
columns : sequence, default None
Column names to use. If the passed data do not have names
associated with them, this argument provides names for the
columns. Otherwise this argument indicates the order of the columns
in the result (any names not found in the data will become all-NA
columns).
coerce_float : bool, default False
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets.
nrows : int, default None
Number of rows to read if data is an iterator.
Returns
-------
DataFrame
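        Examples
        --------
        A small illustrative example using a structured ndarray (values are
        arbitrary):
        >>> data = np.array([(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')],
        ...                 dtype=[('col_1', 'i4'), ('col_2', 'U1')])
        >>> pd.DataFrame.from_records(data)
           col_1 col_2
        0      3     a
        1      2     b
        2      1     c
        3      0     d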
"""
# Make a copy of the input columns so we can modify it
if columns is not None:
columns = ensure_index(columns)
if is_iterator(data):
if nrows == 0:
return cls()
try:
first_row = next(data)
except StopIteration:
return cls(index=index, columns=columns)
dtype = None
if hasattr(first_row, "dtype") and first_row.dtype.names:
dtype = first_row.dtype
values = [first_row]
if nrows is None:
values += data
else:
values.extend(itertools.islice(data, nrows - 1))
if dtype is not None:
data = np.array(values, dtype=dtype)
else:
data = values
if isinstance(data, dict):
if columns is None:
columns = arr_columns = ensure_index(sorted(data))
arrays = [data[k] for k in columns]
else:
arrays = []
arr_columns = []
for k, v in data.items():
if k in columns:
arr_columns.append(k)
arrays.append(v)
arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns)
elif isinstance(data, (np.ndarray, DataFrame)):
arrays, columns = to_arrays(data, columns)
if columns is not None:
columns = ensure_index(columns)
arr_columns = columns
else:
arrays, arr_columns = to_arrays(data, columns, coerce_float=coerce_float)
arr_columns = ensure_index(arr_columns)
if columns is not None:
columns = ensure_index(columns)
else:
columns = arr_columns
if exclude is None:
exclude = set()
else:
exclude = set(exclude)
result_index = None
if index is not None:
if isinstance(index, str) or not hasattr(index, "__iter__"):
i = columns.get_loc(index)
exclude.add(index)
if len(arrays) > 0:
result_index = Index(arrays[i], name=index)
else:
result_index = Index([], name=index)
else:
try:
index_data = [arrays[arr_columns.get_loc(field)] for field in index]
except (KeyError, TypeError):
# raised by get_loc, see GH#29258
result_index = index
else:
result_index = ensure_index_from_sequences(index_data, names=index)
exclude.update(index)
if any(exclude):
arr_exclude = [x for x in exclude if x in arr_columns]
to_remove = [arr_columns.get_loc(col) for col in arr_exclude]
arrays = [v for i, v in enumerate(arrays) if i not in to_remove]
arr_columns = arr_columns.drop(arr_exclude)
columns = columns.drop(exclude)
mgr = arrays_to_mgr(arrays, arr_columns, result_index, columns)
return cls(mgr)
def to_records(
self, index=True, column_dtypes=None, index_dtypes=None
) -> np.recarray:
"""
Convert DataFrame to a NumPy record array.
Index will be included as the first field of the record array if
requested.
Parameters
----------
index : bool, default True
Include index in resulting record array, stored in 'index'
field or using the index label, if set.
column_dtypes : str, type, dict, default None
.. versionadded:: 0.24.0
If a string or type, the data type to store all columns. If
a dictionary, a mapping of column names and indices (zero-indexed)
to specific data types.
index_dtypes : str, type, dict, default None
.. versionadded:: 0.24.0
If a string or type, the data type to store all index levels. If
a dictionary, a mapping of index level names and indices
(zero-indexed) to specific data types.
This mapping is applied only if `index=True`.
Returns
-------
numpy.recarray
NumPy ndarray with the DataFrame labels as fields and each row
of the DataFrame as entries.
See Also
--------
DataFrame.from_records: Convert structured or record ndarray
to DataFrame.
numpy.recarray: An ndarray that allows field access using
attributes, analogous to typed columns in a
spreadsheet.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},
... index=['a', 'b'])
>>> df
A B
a 1 0.50
b 2 0.75
>>> df.to_records()
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])
If the DataFrame index has no label then the recarray field name
is set to 'index'. If the index has a label then this is used as the
field name:
>>> df.index = df.index.rename("I")
>>> df.to_records()
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')])
The index can be excluded from the record array:
>>> df.to_records(index=False)
rec.array([(1, 0.5 ), (2, 0.75)],
dtype=[('A', '<i8'), ('B', '<f8')])
Data types can be specified for the columns:
>>> df.to_records(column_dtypes={"A": "int32"})
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')])
As well as for the index:
>>> df.to_records(index_dtypes="<S2")
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')])
>>> index_dtypes = f"<S{df.index.str.len().max()}"
>>> df.to_records(index_dtypes=index_dtypes)
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')])
"""
if index:
if isinstance(self.index, ABCMultiIndex):
# array of tuples to numpy cols. copy copy copy
ix_vals = list(map(np.array, zip(*self.index.values)))
else:
ix_vals = [self.index.values]
arrays = ix_vals + [self[c]._internal_get_values() for c in self.columns]
count = 0
index_names = list(self.index.names)
if isinstance(self.index, ABCMultiIndex):
for i, n in enumerate(index_names):
if n is None:
index_names[i] = f"level_{count}"
count += 1
elif index_names[0] is None:
index_names = ["index"]
names = [str(name) for name in itertools.chain(index_names, self.columns)]
else:
arrays = [self[c]._internal_get_values() for c in self.columns]
names = [str(c) for c in self.columns]
index_names = []
index_len = len(index_names)
formats = []
for i, v in enumerate(arrays):
index = i
# When the names and arrays are collected, we
# first collect those in the DataFrame's index,
# followed by those in its columns.
#
# Thus, the total length of the array is:
# len(index_names) + len(DataFrame.columns).
#
# This check allows us to see whether we are
# handling a name / array in the index or column.
if index < index_len:
dtype_mapping = index_dtypes
name = index_names[index]
else:
index -= index_len
dtype_mapping = column_dtypes
name = self.columns[index]
# We have a dictionary, so we get the data type
# associated with the index or column (which can
# be denoted by its name in the DataFrame or its
# position in DataFrame's array of indices or
# columns, whichever is applicable).
if is_dict_like(dtype_mapping):
if name in dtype_mapping:
dtype_mapping = dtype_mapping[name]
elif index in dtype_mapping:
dtype_mapping = dtype_mapping[index]
else:
dtype_mapping = None
# If no mapping can be found, use the array's
# dtype attribute for formatting.
#
# A valid dtype must either be a type or
# string naming a type.
if dtype_mapping is None:
formats.append(v.dtype)
elif isinstance(dtype_mapping, (type, np.dtype, str)):
formats.append(dtype_mapping)
else:
element = "row" if i < index_len else "column"
msg = f"Invalid dtype {dtype_mapping} specified for {element} {name}"
raise ValueError(msg)
return np.rec.fromarrays(arrays, dtype={"names": names, "formats": formats})
@classmethod
def _from_arrays(cls, arrays, columns, index, dtype=None) -> "DataFrame":
mgr = arrays_to_mgr(arrays, columns, index, columns, dtype=dtype)
return cls(mgr)
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
def to_stata(
self,
path,
convert_dates=None,
write_index=True,
byteorder=None,
time_stamp=None,
data_label=None,
variable_labels=None,
version=114,
convert_strl=None,
):
"""
Export DataFrame object to Stata dta format.
Writes the DataFrame to a Stata dataset file.
"dta" files contain a Stata dataset.
Parameters
----------
path : str, buffer or path object
String, path object (pathlib.Path or py._path.local.LocalPath) or
object implementing a binary write() function. If using a buffer
then the buffer will not be automatically closed after the file
data has been written.
.. versionchanged:: 1.0.0
Previously this was "fname"
convert_dates : dict
Dictionary mapping columns containing datetime types to stata
internal format to use when writing the dates. Options are 'tc',
'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer
or a name. Datetime columns that do not have a conversion type
specified will be converted to 'tc'. Raises NotImplementedError if
a datetime column has timezone information.
write_index : bool
Write the index to Stata dataset.
byteorder : str
Can be ">", "<", "little", or "big". Default is `sys.byteorder`.
time_stamp : datetime
A datetime to use as file creation date. Default is the current
time.
data_label : str, optional
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as
values. Each label must be 80 characters or smaller.
version : {114, 117, 118, 119, None}, default 114
Version to use in the output dta file. Set to None to let pandas
decide between 118 or 119 formats depending on the number of
columns in the frame. Version 114 can be read by Stata 10 and
later. Version 117 can be read by Stata 13 or later. Version 118
is supported in Stata 14 and later. Version 119 is supported in
Stata 15 and later. Version 114 limits string variables to 244
characters or fewer while versions 117 and later allow strings
with lengths up to 2,000,000 characters. Versions 118 and 119
support Unicode characters, and version 119 supports more than
32,767 variables.
.. versionadded:: 0.23.0
.. versionchanged:: 1.0.0
Added support for formats 118 and 119.
convert_strl : list, optional
List of column names to convert to string columns to Stata StrL
format. Only available if version is 117. Storing strings in the
StrL format can produce smaller dta files if strings have more than
8 characters and values are repeated.
.. versionadded:: 0.23.0
Raises
------
NotImplementedError
* If datetimes contain timezone information
* Column dtype is not representable in Stata
ValueError
* Columns listed in convert_dates are neither datetime64[ns]
nor datetime.datetime
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
See Also
--------
read_stata : Import Stata data files.
io.stata.StataWriter : Low-level writer for Stata data files.
io.stata.StataWriter117 : Low-level writer for version 117 files.
Examples
--------
>>> df = pd.DataFrame({'animal': ['falcon', 'parrot', 'falcon',
... 'parrot'],
... 'speed': [350, 18, 361, 15]})
>>> df.to_stata('animals.dta') # doctest: +SKIP
"""
if version not in (114, 117, 118, 119, None):
raise ValueError("Only formats 114, 117, 118 and 119 are supported.")
if version == 114:
if convert_strl is not None:
raise ValueError("strl is not supported in format 114")
from pandas.io.stata import StataWriter as statawriter
elif version == 117:
from pandas.io.stata import StataWriter117 as statawriter
else: # versions 118 and 119
from pandas.io.stata import StataWriterUTF8 as statawriter
kwargs = {}
if version is None or version >= 117:
# strl conversion is only supported >= 117
kwargs["convert_strl"] = convert_strl
if version is None or version >= 118:
# Specifying the version is only supported for UTF8 (118 or 119)
kwargs["version"] = version
writer = statawriter(
path,
self,
convert_dates=convert_dates,
byteorder=byteorder,
time_stamp=time_stamp,
data_label=data_label,
write_index=write_index,
variable_labels=variable_labels,
**kwargs,
)
writer.write_file()
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
def to_feather(self, path) -> None:
"""
Write out the binary feather-format for DataFrames.
Parameters
----------
path : str
String file path.
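Examples
--------
A minimal usage sketch; the file name is illustrative and writing
feather files requires the optional ``pyarrow`` dependency.
>>> df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0]})
>>> df.to_feather("out.feather")  # doctest: +SKIP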
"""
from pandas.io.feather_format import to_feather
to_feather(self, path)
@Appender(
"""
Examples
--------
>>> df = pd.DataFrame(
... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}
... )
>>> print(df.to_markdown())
| | animal_1 | animal_2 |
|---:|:-----------|:-----------|
| 0 | elk | dog |
| 1 | pig | quetzal |
"""
)
@Substitution(klass="DataFrame")
@Appender(_shared_docs["to_markdown"])
def to_markdown(
self, buf: Optional[IO[str]] = None, mode: Optional[str] = None, **kwargs
) -> Optional[str]:
kwargs.setdefault("headers", "keys")
kwargs.setdefault("tablefmt", "pipe")
tabulate = import_optional_dependency("tabulate")
result = tabulate.tabulate(self, **kwargs)
if buf is None:
return result
buf, _, _, _ = get_filepath_or_buffer(buf, mode=mode)
assert buf is not None # Help mypy.
buf.writelines(result)
return None
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
def to_parquet(
self,
path,
engine="auto",
compression="snappy",
index=None,
partition_cols=None,
**kwargs,
) -> None:
"""
Write a DataFrame to the binary parquet format.
.. versionadded:: 0.21.0
This function writes the dataframe as a `parquet file
<https://parquet.apache.org/>`_. You can choose different parquet
backends, and have the option of compression. See
:ref:`the user guide <io.parquet>` for more details.
Parameters
----------
path : str
File path or Root Directory path. Will be used as Root Directory
path while writing a partitioned dataset.
.. versionchanged:: 1.0.0
Previously this was "fname"
engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
Parquet library to use. If 'auto', then the option
``io.parquet.engine`` is used. The default ``io.parquet.engine``
behavior is to try 'pyarrow', falling back to 'fastparquet' if
'pyarrow' is unavailable.
compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy'
Name of the compression to use. Use ``None`` for no compression.
index : bool, default None
If ``True``, include the dataframe's index(es) in the file output.
If ``False``, they will not be written to the file.
If ``None``, similar to ``True``, the dataframe's index(es)
will be saved. However, instead of being saved as values,
the RangeIndex will be stored as a range in the metadata so it
doesn't require much space and is faster. Other indexes will
be included as columns in the file output.
.. versionadded:: 0.24.0
partition_cols : list, optional, default None
Column names by which to partition the dataset.
Columns are partitioned in the order they are given.
.. versionadded:: 0.24.0
**kwargs
Additional arguments passed to the parquet library. See
:ref:`pandas io <io.parquet>` for more details.
See Also
--------
read_parquet : Read a parquet file.
DataFrame.to_csv : Write a csv file.
DataFrame.to_sql : Write to a sql table.
DataFrame.to_hdf : Write to hdf.
Notes
-----
This function requires either the `fastparquet
<https://pypi.org/project/fastparquet>`_ or `pyarrow
<https://arrow.apache.org/docs/python/>`_ library.
Examples
--------
>>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [3, 4]})
>>> df.to_parquet('df.parquet.gzip',
... compression='gzip') # doctest: +SKIP
>>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP
col1 col2
0 1 3
1 2 4
"""
from pandas.io.parquet import to_parquet
to_parquet(
self,
path,
engine,
compression=compression,
index=index,
partition_cols=partition_cols,
**kwargs,
)
@Substitution(
header_type="bool",
header="Whether to print column labels, default True",
col_space_type="str or int",
col_space="The minimum width of each column in CSS length "
"units. An int is assumed to be px units.\n\n"
" .. versionadded:: 0.25.0\n"
" Ability to use str",
)
@Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)
def to_html(
self,
buf=None,
columns=None,
col_space=None,
header=True,
index=True,
na_rep="NaN",
formatters=None,
float_format=None,
sparsify=None,
index_names=True,
justify=None,
max_rows=None,
max_cols=None,
show_dimensions=False,
decimal=".",
bold_rows=True,
classes=None,
escape=True,
notebook=False,
border=None,
table_id=None,
render_links=False,
encoding=None,
):
"""
Render a DataFrame as an HTML table.
%(shared_params)s
bold_rows : bool, default True
Make the row labels bold in the output.
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table.
escape : bool, default True
Convert the characters <, >, and & to HTML-safe sequences.
notebook : {True, False}, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.display.html.border``.
encoding : str, default "utf-8"
Set character encoding.
.. versionadded:: 1.0
table_id : str, optional
A css id is included in the opening `<table>` tag if specified.
.. versionadded:: 0.23.0
render_links : bool, default False
Convert URLs to HTML links.
.. versionadded:: 0.24.0
%(returns)s
See Also
--------
to_string : Convert DataFrame to a string.
"""
if justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS:
raise ValueError("Invalid value for justify parameter")
formatter = fmt.DataFrameFormatter(
self,
columns=columns,
col_space=col_space,
na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify,
justify=justify,
index_names=index_names,
header=header,
index=index,
bold_rows=bold_rows,
escape=escape,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=decimal,
table_id=table_id,
render_links=render_links,
)
# TODO: a generic formatter would belong in DataFrameFormatter
return formatter.to_html(
buf=buf,
classes=classes,
notebook=notebook,
border=border,
encoding=encoding,
)
# ----------------------------------------------------------------------
def info(
self, verbose=None, buf=None, max_cols=None, memory_usage=None, null_counts=None
) -> None:
"""
Print a concise summary of a DataFrame.
This method prints information about a DataFrame including
the index dtype and column dtypes, non-null values and memory usage.
Parameters
----------
verbose : bool, optional
Whether to print the full summary. By default, the setting in
``pandas.options.display.max_info_columns`` is followed.
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
max_cols : int, optional
When to switch from the verbose to the truncated output. If the
DataFrame has more than `max_cols` columns, the truncated output
is used. By default, the setting in
``pandas.options.display.max_info_columns`` is used.
memory_usage : bool, str, optional
Specifies whether total memory usage of the DataFrame
elements (including the index) should be displayed. By default,
this follows the ``pandas.options.display.memory_usage`` setting.
True always show memory usage. False never shows memory usage.
A value of 'deep' is equivalent to "True with deep introspection".
Memory usage is shown in human-readable units (base-2
representation). Without deep introspection a memory estimation is
made based on column dtype and number of rows assuming values
consume the same memory amount for corresponding dtypes. With deep
memory introspection, a real memory usage calculation is performed
at the cost of computational resources.
null_counts : bool, optional
Whether to show the non-null counts. By default, this is shown
only if the frame is smaller than
``pandas.options.display.max_info_rows`` and
``pandas.options.display.max_info_columns``. A value of True always
shows the counts, and False never shows the counts.
Returns
-------
None
This method prints a summary of a DataFrame and returns None.
See Also
--------
DataFrame.describe: Generate descriptive statistics of DataFrame
columns.
DataFrame.memory_usage: Memory usage of DataFrame columns.
Examples
--------
>>> int_values = [1, 2, 3, 4, 5]
>>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']
>>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
>>> df = pd.DataFrame({"int_col": int_values, "text_col": text_values,
... "float_col": float_values})
>>> df
int_col text_col float_col
0 1 alpha 0.00
1 2 beta 0.25
2 3 gamma 0.50
3 4 delta 0.75
4 5 epsilon 1.00
Prints information about all columns:
>>> df.info(verbose=True)
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 int_col 5 non-null int64
1 text_col 5 non-null object
2 float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 248.0+ bytes
Prints a summary of the column count and dtypes but no per-column
information:
>>> df.info(verbose=False)
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Columns: 3 entries, int_col to float_col
dtypes: float64(1), int64(1), object(1)
memory usage: 248.0+ bytes
Pipe the output of DataFrame.info to a buffer instead of sys.stdout, get
the buffer content and write it to a text file:
>>> import io
>>> buffer = io.StringIO()
>>> df.info(buf=buffer)
>>> s = buffer.getvalue()
>>> with open("df_info.txt", "w",
... encoding="utf-8") as f: # doctest: +SKIP
... f.write(s)
260
The `memory_usage` parameter allows deep introspection mode, especially
useful for big DataFrames and for fine-tuning memory optimization:
>>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6)
>>> df = pd.DataFrame({
... 'column_1': np.random.choice(['a', 'b', 'c'], 10 ** 6),
... 'column_2': np.random.choice(['a', 'b', 'c'], 10 ** 6),
... 'column_3': np.random.choice(['a', 'b', 'c'], 10 ** 6)
... })
>>> df.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 column_1 1000000 non-null object
1 column_2 1000000 non-null object
2 column_3 1000000 non-null object
dtypes: object(3)
memory usage: 22.9+ MB
>>> df.info(memory_usage='deep')
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 column_1 1000000 non-null object
1 column_2 1000000 non-null object
2 column_3 1000000 non-null object
dtypes: object(3)
memory usage: 188.8 MB
"""
if buf is None: # pragma: no cover
buf = sys.stdout
lines = []
lines.append(str(type(self)))
lines.append(self.index._summary())
if len(self.columns) == 0:
lines.append(f"Empty {type(self).__name__}")
fmt.buffer_put_lines(buf, lines)
return
cols = self.columns
col_count = len(self.columns)
# hack
if max_cols is None:
max_cols = get_option("display.max_info_columns", len(self.columns) + 1)
max_rows = get_option("display.max_info_rows", len(self) + 1)
if null_counts is None:
show_counts = (col_count <= max_cols) and (len(self) < max_rows)
else:
show_counts = null_counts
exceeds_info_cols = col_count > max_cols
def _verbose_repr():
lines.append(f"Data columns (total {len(self.columns)} columns):")
id_head = " # "
column_head = "Column"
col_space = 2
max_col = max(len(pprint_thing(k)) for k in cols)
len_column = len(pprint_thing(column_head))
space = max(max_col, len_column) + col_space
max_id = len(pprint_thing(col_count))
len_id = len(pprint_thing(id_head))
space_num = max(max_id, len_id) + col_space
counts = None
header = _put_str(id_head, space_num) + _put_str(column_head, space)
if show_counts:
counts = self.count()
if len(cols) != len(counts): # pragma: no cover
raise AssertionError(
f"Columns must equal counts ({len(cols)} != {len(counts)})"
)
count_header = "Non-Null Count"
len_count = len(count_header)
non_null = " non-null"
max_count = max(len(pprint_thing(k)) for k in counts) + len(non_null)
space_count = max(len_count, max_count) + col_space
count_temp = "{count}" + non_null
else:
count_header = ""
space_count = len(count_header)
len_count = space_count
count_temp = "{count}"
dtype_header = "Dtype"
len_dtype = len(dtype_header)
max_dtypes = max(len(pprint_thing(k)) for k in self.dtypes)
space_dtype = max(len_dtype, max_dtypes)
header += _put_str(count_header, space_count) + _put_str(
dtype_header, space_dtype
)
lines.append(header)
lines.append(
_put_str("-" * len_id, space_num)
+ _put_str("-" * len_column, space)
+ _put_str("-" * len_count, space_count)
+ _put_str("-" * len_dtype, space_dtype)
)
for i, col in enumerate(self.columns):
dtype = self.dtypes.iloc[i]
col = pprint_thing(col)
line_no = _put_str(f" {i}", space_num)
count = ""
if show_counts:
count = counts.iloc[i]
lines.append(
line_no
+ _put_str(col, space)
+ _put_str(count_temp.format(count=count), space_count)
+ _put_str(dtype, space_dtype)
)
def _non_verbose_repr():
lines.append(self.columns._summary(name="Columns"))
def _sizeof_fmt(num, size_qualifier):
# returns size in human readable format
for x in ["bytes", "KB", "MB", "GB", "TB"]:
if num < 1024.0:
return f"{num:3.1f}{size_qualifier} {x}"
num /= 1024.0
return f"{num:3.1f}{size_qualifier} PB"
if verbose:
_verbose_repr()
elif verbose is False:  # explicitly set to False, not just a falsey value like None
_non_verbose_repr()
else:
if exceeds_info_cols:
_non_verbose_repr()
else:
_verbose_repr()
counts = self._data.get_dtype_counts()
dtypes = [f"{k[0]}({k[1]:d})" for k in sorted(counts.items())]
lines.append(f"dtypes: {', '.join(dtypes)}")
if memory_usage is None:
memory_usage = get_option("display.memory_usage")
if memory_usage:
# append memory usage of df to display
size_qualifier = ""
if memory_usage == "deep":
deep = True
else:
# size_qualifier is just a best effort; not guaranteed to catch
# all cases (e.g., it misses categorical data even with object
# categories)
deep = False
if "object" in counts or self.index._is_memory_usage_qualified():
size_qualifier = "+"
mem_usage = self.memory_usage(index=True, deep=deep).sum()
lines.append(f"memory usage: {_sizeof_fmt(mem_usage, size_qualifier)}\n")
fmt.buffer_put_lines(buf, lines)
def memory_usage(self, index=True, deep=False) -> Series:
"""
Return the memory usage of each column in bytes.
The memory usage can optionally include the contribution of
the index and elements of `object` dtype.
This value is displayed in `DataFrame.info` by default. This can be
suppressed by setting ``pandas.options.display.memory_usage`` to False.
Parameters
----------
index : bool, default True
Specifies whether to include the memory usage of the DataFrame's
index in returned Series. If ``index=True``, the memory usage of
the index is the first item in the output.
deep : bool, default False
If True, introspect the data deeply by interrogating
`object` dtypes for system-level memory consumption, and include
it in the returned values.
Returns
-------
Series
A Series whose index is the original column names and whose values
are the memory usage of each column in bytes.
See Also
--------
numpy.ndarray.nbytes : Total bytes consumed by the elements of an
ndarray.
Series.memory_usage : Bytes consumed by a Series.
Categorical : Memory-efficient array for string values with
many repeated values.
DataFrame.info : Concise summary of a DataFrame.
Examples
--------
>>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool']
>>> data = dict([(t, np.ones(shape=5000).astype(t))
... for t in dtypes])
>>> df = pd.DataFrame(data)
>>> df.head()
int64 float64 complex128 object bool
0 1 1.0 1.000000+0.000000j 1 True
1 1 1.0 1.000000+0.000000j 1 True
2 1 1.0 1.000000+0.000000j 1 True
3 1 1.0 1.000000+0.000000j 1 True
4 1 1.0 1.000000+0.000000j 1 True
>>> df.memory_usage()
Index 128
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
>>> df.memory_usage(index=False)
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
The memory footprint of `object` dtype columns is ignored by default;
pass ``deep=True`` to include it:
>>> df.memory_usage(deep=True)
Index 128
int64 40000
float64 40000
complex128 80000
object 160000
bool 5000
dtype: int64
Use a Categorical for efficient storage of an object-dtype column with
many repeated values.
>>> df['object'].astype('category').memory_usage(deep=True)
5216
"""
result = Series(
[c.memory_usage(index=False, deep=deep) for col, c in self.items()],
index=self.columns,
)
if index:
result = Series(self.index.memory_usage(deep=deep), index=["Index"]).append(
result
)
return result
def transpose(self, *args, copy: bool = False) -> "DataFrame":
"""
Transpose index and columns.
Reflect the DataFrame over its main diagonal by writing rows as columns
and vice-versa. The property :attr:`.T` is an accessor to the method
:meth:`transpose`.
Parameters
----------
*args : tuple, optional
Accepted for compatibility with NumPy.
copy : bool, default False
Whether to copy the data after transposing, even for DataFrames
with a single dtype.
Note that a copy is always required for mixed dtype DataFrames,
or for DataFrames with any extension types.
Returns
-------
DataFrame
The transposed DataFrame.
See Also
--------
numpy.transpose : Permute the dimensions of a given array.
Notes
-----
Transposing a DataFrame with mixed dtypes will result in a homogeneous
DataFrame with the `object` dtype. In such a case, a copy of the data
is always made.
Examples
--------
**Square DataFrame with homogeneous dtype**
>>> d1 = {'col1': [1, 2], 'col2': [3, 4]}
>>> df1 = pd.DataFrame(data=d1)
>>> df1
col1 col2
0 1 3
1 2 4
>>> df1_transposed = df1.T # or df1.transpose()
>>> df1_transposed
0 1
col1 1 2
col2 3 4
When the dtype is homogeneous in the original DataFrame, we get a
transposed DataFrame with the same dtype:
>>> df1.dtypes
col1 int64
col2 int64
dtype: object
>>> df1_transposed.dtypes
0 int64
1 int64
dtype: object
**Non-square DataFrame with mixed dtypes**
>>> d2 = {'name': ['Alice', 'Bob'],
... 'score': [9.5, 8],
... 'employed': [False, True],
... 'kids': [0, 0]}
>>> df2 = pd.DataFrame(data=d2)
>>> df2
name score employed kids
0 Alice 9.5 False 0
1 Bob 8.0 True 0
>>> df2_transposed = df2.T # or df2.transpose()
>>> df2_transposed
0 1
name Alice Bob
score 9.5 8
employed False True
kids 0 0
When the DataFrame has mixed dtypes, we get a transposed DataFrame with
the `object` dtype:
>>> df2.dtypes
name object
score float64
employed bool
kids int64
dtype: object
>>> df2_transposed.dtypes
0 object
1 object
dtype: object
"""
nv.validate_transpose(args, dict())
# construct the args
dtypes = list(self.dtypes)
if self._is_homogeneous_type and dtypes and is_extension_array_dtype(dtypes[0]):
# We have EAs with the same dtype. We can preserve that dtype in transpose.
dtype = dtypes[0]
arr_type = dtype.construct_array_type()
values = self.values
new_values = [arr_type._from_sequence(row, dtype=dtype) for row in values]
result = self._constructor(
dict(zip(self.index, new_values)), index=self.columns
)
else:
new_values = self.values.T
if copy:
new_values = new_values.copy()
result = self._constructor(
new_values, index=self.columns, columns=self.index
)
return result.__finalize__(self)
T = property(transpose)
# ----------------------------------------------------------------------
# Indexing Methods
def _ixs(self, i: int, axis: int = 0):
"""
Parameters
----------
i : int
axis : int
Notes
-----
If slice passed, the resulting data will be a view.
"""
# irow
if axis == 0:
new_values = self._data.fast_xs(i)
# if we are a copy, mark as such
copy = isinstance(new_values, np.ndarray) and new_values.base is None
result = self._constructor_sliced(
new_values,
index=self.columns,
name=self.index[i],
dtype=new_values.dtype,
)
result._set_is_copy(self, copy=copy)
return result
# icol
else:
label = self.columns[i]
values = self._data.iget(i)
result = self._box_col_values(values, label)
# this is a cached value, mark it so
result._set_as_cached(label, self)
return result
def __getitem__(self, key):
key = lib.item_from_zerodim(key)
key = com.apply_if_callable(key, self)
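# Dispatch order below: hashable column label -> row slicer ->
# boolean DataFrame mask -> 1d boolean indexer -> single key or
# collection of keys against the columns.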
if is_hashable(key):
# shortcut if the key is in columns
if self.columns.is_unique and key in self.columns:
if self.columns.nlevels > 1:
return self._getitem_multilevel(key)
return self._get_item_cache(key)
# Do we have a slicer (on rows)?
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
# either we have a slice or we have a string that can be converted
# to a slice for partial-string date indexing
return self._slice(indexer, axis=0)
# Do we have a (boolean) DataFrame?
if isinstance(key, DataFrame):
return self.where(key)
# Do we have a (boolean) 1d indexer?
if com.is_bool_indexer(key):
return self._getitem_bool_array(key)
# We are left with two options: a single key, and a collection of keys,
# We interpret tuples as collections only for non-MultiIndex
is_single_key = isinstance(key, tuple) or not is_list_like(key)
if is_single_key:
if self.columns.nlevels > 1:
return self._getitem_multilevel(key)
indexer = self.columns.get_loc(key)
if is_integer(indexer):
indexer = [indexer]
else:
if is_iterator(key):
key = list(key)
indexer = self.loc._get_listlike_indexer(key, axis=1, raise_missing=True)[1]
# take() does not accept boolean indexers
if getattr(indexer, "dtype", None) == bool:
indexer = np.where(indexer)[0]
data = self.take(indexer, axis=1)
if is_single_key:
# What does looking for a single key in a non-unique index return?
# The behavior is inconsistent. It returns a Series, except when
# - the key itself is repeated (test on data.shape, #9519), or
# - we have a MultiIndex on columns (test on self.columns, #21309)
if data.shape[1] == 1 and not isinstance(self.columns, ABCMultiIndex):
data = data[key]
return data
def _getitem_bool_array(self, key):
# also raises Exception if object array with NA values
# warning here just in case -- previously __setitem__ was
# reindexing but __getitem__ was not; it seems more reasonable to
# go with the __setitem__ behavior since that is more consistent
# with all other indexing behavior
if isinstance(key, Series) and not key.index.equals(self.index):
warnings.warn(
"Boolean Series key will be reindexed to match DataFrame index.",
UserWarning,
stacklevel=3,
)
elif len(key) != len(self.index):
raise ValueError(
f"Item wrong length {len(key)} instead of {len(self.index)}."
)
# check_bool_indexer will throw exception if Series key cannot
# be reindexed to match DataFrame rows
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
return self.take(indexer, axis=0)
def _getitem_multilevel(self, key):
# self.columns is a MultiIndex
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
new_columns = self.columns[loc]
result_columns = maybe_droplevels(new_columns, key)
if self._is_mixed_type:
result = self.reindex(columns=new_columns)
result.columns = result_columns
else:
new_values = self.values[:, loc]
result = self._constructor(
new_values, index=self.index, columns=result_columns
)
result = result.__finalize__(self)
# If there is only one column being returned, and its name is
# either an empty string, or a tuple with an empty string as its
# first element, then treat the empty string as a placeholder
# and return the column as if the user had provided that empty
# string in the key. If the result is a Series, exclude the
# implied empty string from its name.
if len(result.columns) == 1:
top = result.columns[0]
if isinstance(top, tuple):
top = top[0]
if top == "":
result = result[""]
if isinstance(result, Series):
result = self._constructor_sliced(
result, index=self.index, name=key
)
result._set_is_copy(self)
return result
else:
return self._get_item_cache(key)
def _get_value(self, index, col, takeable: bool = False):
"""
Quickly retrieve single value at passed column and index.
Parameters
----------
index : row label
col : column label
takeable : bool, default False
Interpret the index/col as positional indexers.
Returns
-------
scalar
"""
if takeable:
series = self._iget_item_cache(col)
return com.maybe_box_datetimelike(series._values[index])
series = self._get_item_cache(col)
engine = self.index._engine
try:
return engine.get_value(series._values, index)
except KeyError:
# GH 20629
if self.index.nlevels > 1:
# partial indexing forbidden
raise
except (TypeError, ValueError):
pass
# we cannot handle direct indexing
# use positional
col = self.columns.get_loc(col)
index = self.index.get_loc(index)
return self._get_value(index, col, takeable=True)
def __setitem__(self, key, value):
key = com.apply_if_callable(key, self)
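# Dispatch order below: row slicer -> 2d/DataFrame boolean mask ->
# list-like key (boolean indexer or column labels) -> single column.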
# see if we can slice the rows
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
# either we have a slice or we have a string that can be converted
# to a slice for partial-string date indexing
return self._setitem_slice(indexer, value)
if isinstance(key, DataFrame) or getattr(key, "ndim", None) == 2:
self._setitem_frame(key, value)
elif isinstance(key, (Series, np.ndarray, list, Index)):
self._setitem_array(key, value)
else:
# set column
self._set_item(key, value)
def _setitem_slice(self, key, value):
self._check_setitem_copy()
self.loc[key] = value
def _setitem_array(self, key, value):
# also raises Exception if object array with NA values
if com.is_bool_indexer(key):
if len(key) != len(self.index):
raise ValueError(
f"Item wrong length {len(key)} instead of {len(self.index)}!"
)
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
self._check_setitem_copy()
self.loc._setitem_with_indexer(indexer, value)
else:
if isinstance(value, DataFrame):
if len(value.columns) != len(key):
raise ValueError("Columns must be same length as key")
for k1, k2 in zip(key, value.columns):
self[k1] = value[k2]
else:
indexer = self.loc._get_listlike_indexer(
key, axis=1, raise_missing=False
)[1]
self._check_setitem_copy()
self.loc._setitem_with_indexer((slice(None), indexer), value)
def _setitem_frame(self, key, value):
# support boolean setting with DataFrame input, e.g.
# df[df > df2] = 0
if isinstance(key, np.ndarray):
if key.shape != self.shape:
raise ValueError("Array conditional must be same shape as self")
key = self._constructor(key, **self._construct_axes_dict())
if key.values.size and not is_bool_dtype(key.values):
raise TypeError(
"Must pass DataFrame or 2-d ndarray with boolean values only"
)
self._check_inplace_setting(value)
self._check_setitem_copy()
self._where(-key, value, inplace=True)
def _set_item(self, key, value):
"""
Add series to DataFrame in specified column.
If series is a numpy array (not a Series/TimeSeries), it must be the
same length as the DataFrame's index or an error will be thrown.
Series/TimeSeries will be conformed to the DataFrame's index to
ensure homogeneity.
"""
self._ensure_valid_index(value)
value = self._sanitize_column(key, value)
NDFrame._set_item(self, key, value)
# check if we are modifying a copy
# try to set first as we want an invalid
# value exception to occur first
if len(self):
self._check_setitem_copy()
def _set_value(self, index, col, value, takeable: bool = False):
"""
Put single value at passed column and index.
Parameters
----------
index : row label
col : column label
value : scalar
takeable : bool, default False
Interpret the index/col as positional indexers.
Returns
-------
DataFrame
If label pair is contained, will be reference to calling DataFrame,
otherwise a new object.
"""
try:
if takeable is True:
series = self._iget_item_cache(col)
return series._set_value(index, value, takeable=True)
series = self._get_item_cache(col)
engine = self.index._engine
engine.set_value(series._values, index, value)
return self
except (KeyError, TypeError):
# set using a non-recursive method & reset the cache
if takeable:
self.iloc[index, col] = value
else:
self.loc[index, col] = value
self._item_cache.pop(col, None)
return self
def _ensure_valid_index(self, value):
"""
Ensure that if we don't have an index, that we can create one from the
passed value.
"""
# GH5632, make sure that we are a Series convertible
if not len(self.index) and is_list_like(value) and len(value):
try:
value = Series(value)
except (ValueError, NotImplementedError, TypeError):
raise ValueError(
"Cannot set a frame with no defined index "
"and a value that cannot be converted to a "
"Series"
)
self._data = self._data.reindex_axis(
value.index.copy(), axis=1, fill_value=np.nan
)
def _box_item_values(self, key, values):
items = self.columns[self.columns.get_loc(key)]
if values.ndim == 2:
return self._constructor(values.T, columns=items, index=self.index)
else:
return self._box_col_values(values, items)
def _box_col_values(self, values, items):
"""
Provide boxed values for a column.
"""
klass = self._constructor_sliced
return klass(values, index=self.index, name=items, fastpath=True)
# ----------------------------------------------------------------------
# Unsorted
def query(self, expr, inplace=False, **kwargs):
"""
Query the columns of a DataFrame with a boolean expression.
Parameters
----------
expr : str
The query string to evaluate.
You can refer to variables
in the environment by prefixing them with an '@' character like
``@a + b``.
You can refer to column names that contain spaces or operators by
surrounding them in backticks. This way you can also escape
names that start with a digit, or those that are a Python keyword.
In short, backticks are needed whenever the name is not a valid Python
identifier. See the Notes section below for more details.
For example, if one of your columns is called ``a a`` and you want
to sum it with ``b``, your query should be ```a a` + b``.
.. versionadded:: 0.25.0
Backtick quoting introduced.
.. versionadded:: 1.0.0
Backtick quoting extended to cover more than just names containing spaces.
inplace : bool
Whether the query should modify the data in place or return
a modified copy.
**kwargs
See the documentation for :func:`eval` for complete details
on the keyword arguments accepted by :meth:`DataFrame.query`.
Returns
-------
DataFrame
DataFrame resulting from the provided query expression.
See Also
--------
eval : Evaluate a string describing operations on
DataFrame columns.
DataFrame.eval : Evaluate a string describing operations on
DataFrame columns.
Notes
-----
The result of the evaluation of this expression is first passed to
:attr:`DataFrame.loc` and if that fails because of a
multidimensional key (e.g., a DataFrame) then the result will be passed
to :meth:`DataFrame.__getitem__`.
This method uses the top-level :func:`eval` function to
evaluate the passed query.
The :meth:`~pandas.DataFrame.query` method uses a slightly
modified Python syntax by default. For example, the ``&`` and ``|``
(bitwise) operators have the precedence of their boolean cousins,
:keyword:`and` and :keyword:`or`. This *is* syntactically valid Python,
however the semantics are different.
You can change the semantics of the expression by passing the keyword
argument ``parser='python'``. This enforces the same semantics as
evaluation in Python space. Likewise, you can pass ``engine='python'``
to evaluate an expression using Python itself as a backend. This is not
recommended as it is inefficient compared to using ``numexpr`` as the
engine.
The :attr:`DataFrame.index` and
:attr:`DataFrame.columns` attributes of the
:class:`~pandas.DataFrame` instance are placed in the query namespace
by default, which allows you to treat both the index and columns of the
frame as a column in the frame.
The identifier ``index`` is used for the frame index; you can also
use the name of the index to identify it in a query. Please note that
Python keywords may not be used as identifiers.
For further details and examples see the ``query`` documentation in
:ref:`indexing <indexing.query>`.
*Backtick quoted variables*
Backtick quoted variables are parsed as literal Python code and
are converted internally to a Python valid identifier.
This can lead to the following problems.
During parsing a number of disallowed characters inside the backtick
quoted string are replaced by strings that are allowed as a Python identifier.
These characters include all operators in Python, the space character, the
question mark, the exclamation mark, the dollar sign, and the euro sign.
For other characters that fall outside the ASCII range (U+0001..U+007F)
and those that are not further specified in PEP 3131,
the query parser will raise an error.
Whitespace characters other than the space, the hash character (which is
used for comments), and the backtick itself are not allowed (the backtick
cannot be escaped).
In a special case, quotes that make a pair around a backtick can
confuse the parser.
For example, ```it's` > `that's``` will raise an error,
as it forms a quoted string (``'s > `that'``) with a backtick inside.
See also the Python documentation about lexical analysis
(https://docs.python.org/3/reference/lexical_analysis.html)
in combination with the source code in :mod:`pandas.core.computation.parsing`.
Examples
--------
>>> df = pd.DataFrame({'A': range(1, 6),
... 'B': range(10, 0, -2),
... 'C C': range(10, 5, -1)})
>>> df
A B C C
0 1 10 10
1 2 8 9
2 3 6 8
3 4 4 7
4 5 2 6
>>> df.query('A > B')
A B C C
4 5 2 6
The previous expression is equivalent to
>>> df[df.A > df.B]
A B C C
4 5 2 6
For columns with spaces in their name, you can use backtick quoting.
>>> df.query('B == `C C`')
A B C C
0 1 10 10
The previous expression is equivalent to
>>> df[df.B == df['C C']]
A B C C
0 1 10 10
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if not isinstance(expr, str):
msg = f"expr must be a string to be evaluated, {type(expr)} given"
raise ValueError(msg)
kwargs["level"] = kwargs.pop("level", 0) + 1
kwargs["target"] = None
res = self.eval(expr, **kwargs)
try:
new_data = self.loc[res]
except ValueError:
# when res is multi-dimensional loc raises, but this is sometimes a
# valid query
new_data = self[res]
if inplace:
self._update_inplace(new_data)
else:
return new_data
def eval(self, expr, inplace=False, **kwargs):
"""
Evaluate a string describing operations on DataFrame columns.
Operates on columns only, not specific rows or elements. This allows
`eval` to run arbitrary code, which can make you vulnerable to code
injection if you pass user input to this function.
Parameters
----------
expr : str
The expression string to evaluate.
inplace : bool, default False
If the expression contains an assignment, whether to perform the
operation inplace and mutate the existing DataFrame. Otherwise,
a new DataFrame is returned.
**kwargs
See the documentation for :func:`eval` for complete details
on the keyword arguments accepted by
:meth:`~pandas.DataFrame.query`.
Returns
-------
ndarray, scalar, or pandas object
The result of the evaluation.
See Also
--------
DataFrame.query : Evaluates a boolean expression to query the columns
of a frame.
DataFrame.assign : Can evaluate an expression or function to create new
values for a column.
eval : Evaluate a Python expression as a string using various
backends.
Notes
-----
For more details see the API documentation for :func:`~eval`.
For detailed examples see :ref:`enhancing performance with eval
<enhancingperf.eval>`.
Examples
--------
>>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
>>> df.eval('A + B')
0 11
1 10
2 9
3 8
4 7
dtype: int64
Assignment is allowed though by default the original DataFrame is not
modified.
>>> df.eval('C = A + B')
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
Use ``inplace=True`` to modify the original DataFrame.
>>> df.eval('C = A + B', inplace=True)
>>> df
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
"""
from pandas.core.computation.eval import eval as _eval
inplace = validate_bool_kwarg(inplace, "inplace")
resolvers = kwargs.pop("resolvers", None)
kwargs["level"] = kwargs.pop("level", 0) + 1
if resolvers is None:
index_resolvers = self._get_index_resolvers()
column_resolvers = self._get_cleaned_column_resolvers()
resolvers = column_resolvers, index_resolvers
if "target" not in kwargs:
kwargs["target"] = self
kwargs["resolvers"] = kwargs.get("resolvers", ()) + tuple(resolvers)
return _eval(expr, inplace=inplace, **kwargs)
def select_dtypes(self, include=None, exclude=None) -> "DataFrame":
"""
Return a subset of the DataFrame's columns based on the column dtypes.
Parameters
----------
include, exclude : scalar or list-like
A selection of dtypes or strings to be included/excluded. At least
one of these parameters must be supplied.
Returns
-------
DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
* If ``include`` and ``exclude`` have overlapping elements
* If any kind of string dtype is passed in.
Notes
-----
* To select all *numeric* types, use ``np.number`` or ``'number'``
* To select strings you must use the ``object`` dtype, but note that
this will return *all* object dtype columns
* See the `numpy dtype hierarchy
<http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__
* To select datetimes, use ``np.datetime64``, ``'datetime'`` or
``'datetime64'``
* To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or
``'timedelta64'``
* To select Pandas categorical dtypes, use ``'category'``
* To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in
0.20.0) or ``'datetime64[ns, tz]'``
Examples
--------
>>> df = pd.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df
a b c
0 1 True 1.0
1 2 False 2.0
2 1 True 1.0
3 2 False 2.0
4 1 True 1.0
5 2 False 2.0
>>> df.select_dtypes(include='bool')
b
0 True
1 False
2 True
3 False
4 True
5 False
>>> df.select_dtypes(include=['float64'])
c
0 1.0
1 2.0
2 1.0
3 2.0
4 1.0
5 2.0
>>> df.select_dtypes(exclude=['int'])
b c
0 True 1.0
1 False 2.0
2 True 1.0
3 False 2.0
4 True 1.0
5 False 2.0
"""
if not is_list_like(include):
include = (include,) if include is not None else ()
if not is_list_like(exclude):
exclude = (exclude,) if exclude is not None else ()
selection = (frozenset(include), frozenset(exclude))
if not any(selection):
raise ValueError("at least one of include or exclude must be nonempty")
# convert the myriad valid dtypes object to a single representation
include = frozenset(infer_dtype_from_object(x) for x in include)
exclude = frozenset(infer_dtype_from_object(x) for x in exclude)
for dtypes in (include, exclude):
invalidate_string_dtypes(dtypes)
# can't both include AND exclude!
if not include.isdisjoint(exclude):
raise ValueError(f"include and exclude overlap on {(include & exclude)}")
# We raise when both include and exclude are empty
# Hence, we can just shrink the columns we want to keep
keep_these = np.full(self.shape[1], True)
def extract_unique_dtypes_from_dtypes_set(
dtypes_set: FrozenSet[Dtype], unique_dtypes: np.ndarray
) -> List[Dtype]:
extracted_dtypes = [
unique_dtype
for unique_dtype in unique_dtypes
if issubclass(unique_dtype.type, tuple(dtypes_set)) # type: ignore
]
return extracted_dtypes
unique_dtypes = self.dtypes.unique()
if include:
included_dtypes = extract_unique_dtypes_from_dtypes_set(
include, unique_dtypes
)
keep_these &= self.dtypes.isin(included_dtypes)
if exclude:
excluded_dtypes = extract_unique_dtypes_from_dtypes_set(
exclude, unique_dtypes
)
keep_these &= ~self.dtypes.isin(excluded_dtypes)
return self.iloc[:, keep_these.values]
def insert(self, loc, column, value, allow_duplicates=False) -> None:
"""
Insert column into DataFrame at specified location.
Raises a ValueError if `column` is already contained in the DataFrame,
unless `allow_duplicates` is set to True.
Parameters
----------
loc : int
Insertion index. Must verify 0 <= loc <= len(columns).
column : str, number, or hashable object
Label of the inserted column.
value : int, Series, or array-like
allow_duplicates : bool, default False
Whether to allow a duplicate column label to be created.
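Examples
--------
A small illustration; the column label and values are made up for the
example.
>>> df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
>>> df.insert(1, "new", [9, 9])
>>> df
   A  new  B
0  1    9  3
1  2    9  4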
"""
self._ensure_valid_index(value)
value = self._sanitize_column(column, value, broadcast=False)
self._data.insert(loc, column, value, allow_duplicates=allow_duplicates)
def assign(self, **kwargs) -> "DataFrame":
r"""
Assign new columns to a DataFrame.
Returns a new object with all original columns in addition to new ones.
Existing columns that are re-assigned will be overwritten.
Parameters
----------
**kwargs : dict of {str: callable or Series}
The column names are keywords. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change the input DataFrame (though pandas doesn't check it).
If the values are not callable, (e.g. a Series, scalar, or array),
they are simply assigned.
Returns
-------
DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Notes
-----
Assigning multiple columns within the same ``assign`` is possible.
Later items in '\*\*kwargs' may refer to newly created or modified
columns in 'df'; items are computed and assigned into 'df' in order.
.. versionchanged:: 0.23.0
Keyword argument order is maintained.
Examples
--------
>>> df = pd.DataFrame({'temp_c': [17.0, 25.0]},
... index=['Portland', 'Berkeley'])
>>> df
temp_c
Portland 17.0
Berkeley 25.0
Where the value is a callable, evaluated on `df`:
>>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
Alternatively, the same behavior can be achieved by directly
referencing an existing Series or sequence:
>>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
You can create multiple columns within the same assign where one
of the columns depends on another one defined within the same assign:
>>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32,
... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9)
temp_c temp_f temp_k
Portland 17.0 62.6 290.15
Berkeley 25.0 77.0 298.15
"""
data = self.copy()
for k, v in kwargs.items():
data[k] = com.apply_if_callable(v, data)
return data
def _sanitize_column(self, key, value, broadcast=True):
"""
Ensures new columns (which go into the BlockManager as new blocks) are
always copied and converted into an array.
Parameters
----------
key : object
value : scalar, Series, or array-like
broadcast : bool, default True
If ``key`` matches multiple duplicate column names in the
DataFrame, this parameter indicates whether ``value`` should be
tiled so that the returned array contains a (duplicated) column for
each occurrence of the key. If False, ``value`` will not be tiled.
Returns
-------
numpy.ndarray
"""
def reindexer(value):
# reindex if necessary
if value.index.equals(self.index) or not len(self.index):
value = value._values.copy()
else:
# GH 4107
try:
value = value.reindex(self.index)._values
except ValueError as err:
# raised in MultiIndex.from_tuples, see test_insert_error_msmgs
if not value.index.is_unique:
# duplicate axis
raise err
# other
raise TypeError(
"incompatible index of inserted column with frame index"
)
return value
if isinstance(value, Series):
value = reindexer(value)
elif isinstance(value, DataFrame):
# align right-hand-side columns if self.columns
# is multi-index and self[key] is a sub-frame
if isinstance(self.columns, ABCMultiIndex) and key in self.columns:
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
cols = maybe_droplevels(self.columns[loc], key)
if len(cols) and not cols.equals(value.columns):
value = value.reindex(cols, axis=1)
# now align rows
value = reindexer(value).T
elif isinstance(value, ExtensionArray):
# Explicitly copy here, instead of in sanitize_index,
# as sanitize_index won't copy an EA, even with copy=True
value = value.copy()
value = sanitize_index(value, self.index, copy=False)
elif isinstance(value, Index) or is_sequence(value):
# turn me into an ndarray
value = sanitize_index(value, self.index, copy=False)
if not isinstance(value, (np.ndarray, Index)):
if isinstance(value, list) and len(value) > 0:
value = maybe_convert_platform(value)
else:
value = com.asarray_tuplesafe(value)
elif value.ndim == 2:
value = value.copy().T
elif isinstance(value, Index):
value = value.copy(deep=True)
else:
value = value.copy()
# possibly infer to datetimelike
if is_object_dtype(value.dtype):
value = maybe_infer_to_datetimelike(value)
else:
# cast ignores pandas dtypes. so save the dtype first
infer_dtype, _ = infer_dtype_from_scalar(value, pandas_dtype=True)
# upcast
value = cast_scalar_to_array(len(self.index), value)
value = maybe_cast_to_datetime(value, infer_dtype)
# return internal types directly
if is_extension_array_dtype(value):
return value
# broadcast across multiple columns if necessary
if broadcast and key in self.columns and value.ndim == 1:
if not self.columns.is_unique or isinstance(self.columns, ABCMultiIndex):
existing_piece = self[key]
if isinstance(existing_piece, DataFrame):
value = np.tile(value, (len(existing_piece.columns), 1))
return np.atleast_2d(np.asarray(value))
@property
def _series(self):
return {
item: Series(self._data.iget(idx), index=self.index, name=item)
for idx, item in enumerate(self.columns)
}
def lookup(self, row_labels, col_labels) -> np.ndarray:
"""
Label-based "fancy indexing" function for DataFrame.
Given equal-length arrays of row and column labels, return an
array of the values corresponding to each (row, col) pair.
Parameters
----------
row_labels : sequence
The row labels to use for lookup.
col_labels : sequence
The column labels to use for lookup.
Returns
-------
numpy.ndarray
The found values as a 1-D array, with one entry per (row, col) pair.
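Examples
--------
A short sketch of label-based lookup; the data is made up for the
example.
>>> df = pd.DataFrame({"A": [1, 2], "B": [10, 20]}, index=["x", "y"])
>>> df.lookup(["x", "y"], ["B", "A"])
array([10,  2])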
"""
n = len(row_labels)
if n != len(col_labels):
raise ValueError("Row labels must have same size as column labels")
thresh = 1000
if not self._is_mixed_type or n > thresh:
values = self.values
ridx = self.index.get_indexer(row_labels)
cidx = self.columns.get_indexer(col_labels)
if (ridx == -1).any():
raise KeyError("One or more row labels was not found")
if (cidx == -1).any():
raise KeyError("One or more column labels was not found")
flat_index = ridx * len(self.columns) + cidx
result = values.flat[flat_index]
else:
result = np.empty(n, dtype="O")
for i, (r, c) in enumerate(zip(row_labels, col_labels)):
result[i] = self._get_value(r, c)
if is_object_dtype(result):
result = lib.maybe_convert_objects(result)
return result
# ----------------------------------------------------------------------
# Reindexing and alignment
def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy):
frame = self
columns = axes["columns"]
if columns is not None:
frame = frame._reindex_columns(
columns, method, copy, level, fill_value, limit, tolerance
)
index = axes["index"]
if index is not None:
frame = frame._reindex_index(
index, method, copy, level, fill_value, limit, tolerance
)
return frame
def _reindex_index(
self,
new_index,
method,
copy,
level,
fill_value=np.nan,
limit=None,
tolerance=None,
):
new_index, indexer = self.index.reindex(
new_index, method=method, level=level, limit=limit, tolerance=tolerance
)
return self._reindex_with_indexers(
{0: [new_index, indexer]},
copy=copy,
fill_value=fill_value,
allow_dups=False,
)
def _reindex_columns(
self,
new_columns,
method,
copy,
level,
fill_value=None,
limit=None,
tolerance=None,
):
new_columns, indexer = self.columns.reindex(
new_columns, method=method, level=level, limit=limit, tolerance=tolerance
)
return self._reindex_with_indexers(
{1: [new_columns, indexer]},
copy=copy,
fill_value=fill_value,
allow_dups=False,
)
def _reindex_multi(self, axes, copy, fill_value) -> "DataFrame":
"""
We are guaranteed non-Nones in the axes.
"""
new_index, row_indexer = self.index.reindex(axes["index"])
new_columns, col_indexer = self.columns.reindex(axes["columns"])
if row_indexer is not None and col_indexer is not None:
indexer = row_indexer, col_indexer
new_values = algorithms.take_2d_multi(
self.values, indexer, fill_value=fill_value
)
return self._constructor(new_values, index=new_index, columns=new_columns)
else:
return self._reindex_with_indexers(
{0: [new_index, row_indexer], 1: [new_columns, col_indexer]},
copy=copy,
fill_value=fill_value,
)
@Appender(_shared_docs["align"] % _shared_doc_kwargs)
def align(
self,
other,
join="outer",
axis=None,
level=None,
copy=True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
broadcast_axis=None,
) -> "DataFrame":
return super().align(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
broadcast_axis=broadcast_axis,
)
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.reindex.__doc__)
@rewrite_axis_style_signature(
"labels",
[
("method", None),
("copy", True),
("level", None),
("fill_value", np.nan),
("limit", None),
("tolerance", None),
],
)
def reindex(self, *args, **kwargs) -> "DataFrame":
axes = validate_axis_style_args(self, args, kwargs, "labels", "reindex")
kwargs.update(axes)
# Pop these, since the values are in `kwargs` under different names
kwargs.pop("axis", None)
kwargs.pop("labels", None)
return self._ensure_type(super().reindex(**kwargs))
def drop(
self,
labels=None,
axis=0,
index=None,
columns=None,
level=None,
inplace=False,
errors="raise",
):
"""
Drop specified labels from rows or columns.
Remove rows or columns by specifying label names and corresponding
axis, or by specifying directly index or column names. When using a
multi-index, labels on different levels can be removed by specifying
the level.
Parameters
----------
labels : single label or list-like
Index or column labels to drop.
axis : {0 or 'index', 1 or 'columns'}, default 0
Whether to drop labels from the index (0 or 'index') or
columns (1 or 'columns').
index : single label or list-like
Alternative to specifying axis (``labels, axis=0``
is equivalent to ``index=labels``).
.. versionadded:: 0.21.0
columns : single label or list-like
Alternative to specifying axis (``labels, axis=1``
is equivalent to ``columns=labels``).
.. versionadded:: 0.21.0
level : int or level name, optional
For MultiIndex, level from which the labels will be removed.
inplace : bool, default False
If True, do operation inplace and return None.
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and only existing labels are
dropped.
Returns
-------
DataFrame
DataFrame without the removed index or column labels.
Raises
------
KeyError
If any of the labels is not found in the selected axis.
See Also
--------
DataFrame.loc : Label-location based indexer for selection by label.
DataFrame.dropna : Return DataFrame with labels on given axis omitted
where (all or any) data are missing.
DataFrame.drop_duplicates : Return DataFrame with duplicate rows
removed, optionally only considering certain columns.
Series.drop : Return Series with specified index labels removed.
Examples
--------
>>> df = pd.DataFrame(np.arange(12).reshape(3, 4),
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 0 1 2 3
1 4 5 6 7
2 8 9 10 11
Drop columns
>>> df.drop(['B', 'C'], axis=1)
A D
0 0 3
1 4 7
2 8 11
>>> df.drop(columns=['B', 'C'])
A D
0 0 3
1 4 7
2 8 11
Drop a row by index
>>> df.drop([0, 1])
A B C D
2 8 9 10 11
Drop columns and/or rows of MultiIndex DataFrame
>>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> df = pd.DataFrame(index=midx, columns=['big', 'small'],
... data=[[45, 30], [200, 100], [1.5, 1], [30, 20],
... [250, 150], [1.5, 0.8], [320, 250],
... [1, 0.8], [0.3, 0.2]])
>>> df
big small
lama speed 45.0 30.0
weight 200.0 100.0
length 1.5 1.0
cow speed 30.0 20.0
weight 250.0 150.0
length 1.5 0.8
falcon speed 320.0 250.0
weight 1.0 0.8
length 0.3 0.2
>>> df.drop(index='cow', columns='small')
big
lama speed 45.0
weight 200.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
>>> df.drop(index='length', level=1)
big small
lama speed 45.0 30.0
weight 200.0 100.0
cow speed 30.0 20.0
weight 250.0 150.0
falcon speed 320.0 250.0
weight 1.0 0.8
"""
return super().drop(
labels=labels,
axis=axis,
index=index,
columns=columns,
level=level,
inplace=inplace,
errors=errors,
)
@rewrite_axis_style_signature(
"mapper",
[("copy", True), ("inplace", False), ("level", None), ("errors", "ignore")],
)
def rename(
self,
mapper: Optional[Renamer] = None,
*,
index: Optional[Renamer] = None,
columns: Optional[Renamer] = None,
axis: Optional[Axis] = None,
copy: bool = True,
inplace: bool = False,
level: Optional[Level] = None,
errors: str = "ignore",
) -> Optional["DataFrame"]:
"""
Alter axes labels.
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is. Extra labels listed don't throw an
error.
See the :ref:`user guide <basics.rename>` for more.
Parameters
----------
mapper : dict-like or function
Dict-like or functions transformations to apply to
that axis' values. Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index`` and
``columns``.
index : dict-like or function
Alternative to specifying axis (``mapper, axis=0``
is equivalent to ``index=mapper``).
columns : dict-like or function
Alternative to specifying axis (``mapper, axis=1``
is equivalent to ``columns=mapper``).
axis : int or str
Axis to target with ``mapper``. Can be either the axis name
('index', 'columns') or number (0, 1). The default is 'index'.
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Whether to return a new DataFrame. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
errors : {'ignore', 'raise'}, default 'ignore'
If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,
or `columns` contains labels that are not present in the Index
being transformed.
If 'ignore', existing keys will be renamed and extra keys will be
ignored.
Returns
-------
DataFrame
DataFrame with the renamed axis labels.
Raises
------
KeyError
If any of the labels is not found in the selected axis and
"errors='raise'".
See Also
--------
DataFrame.rename_axis : Set the name of the axis.
Examples
--------
``DataFrame.rename`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
Rename columns using a mapping:
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename(columns={"A": "a", "B": "c"})
a c
0 1 4
1 2 5
2 3 6
Rename index using a mapping:
>>> df.rename(index={0: "x", 1: "y", 2: "z"})
A B
x 1 4
y 2 5
z 3 6
Cast index labels to a different type:
>>> df.index
RangeIndex(start=0, stop=3, step=1)
>>> df.rename(index=str).index
Index(['0', '1', '2'], dtype='object')
>>> df.rename(columns={"A": "a", "B": "b", "C": "c"}, errors="raise")
Traceback (most recent call last):
KeyError: ['C'] not found in axis
Using axis-style parameters
>>> df.rename(str.lower, axis='columns')
a b
0 1 4
1 2 5
2 3 6
>>> df.rename({1: 2, 2: 4}, axis='index')
A B
0 1 4
2 2 5
4 3 6
"""
return super().rename(
mapper=mapper,
index=index,
columns=columns,
axis=axis,
copy=copy,
inplace=inplace,
level=level,
errors=errors,
)
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.fillna.__doc__)
def fillna(
self,
value=None,
method=None,
axis=None,
inplace=False,
limit=None,
downcast=None,
) -> Optional["DataFrame"]:
return super().fillna(
value=value,
method=method,
axis=axis,
inplace=inplace,
limit=limit,
downcast=downcast,
)
@Appender(_shared_docs["replace"] % _shared_doc_kwargs)
def replace(
self,
to_replace=None,
value=None,
inplace=False,
limit=None,
regex=False,
method="pad",
):
return super().replace(
to_replace=to_replace,
value=value,
inplace=inplace,
limit=limit,
regex=regex,
method=method,
)
@Appender(_shared_docs["shift"] % _shared_doc_kwargs)
def shift(self, periods=1, freq=None, axis=0, fill_value=None) -> "DataFrame":
return self._ensure_type(
super().shift(periods=periods, freq=freq, axis=axis, fill_value=fill_value)
)
def set_index(
self, keys, drop=True, append=False, inplace=False, verify_integrity=False
):
"""
Set the DataFrame index using existing columns.
Set the DataFrame index (row labels) using one or more existing
columns or arrays (of the correct length). The index can replace the
existing index or expand on it.
Parameters
----------
keys : label or array-like or list of labels/arrays
This parameter can be either a single column key, a single array of
the same length as the calling DataFrame, or a list containing an
arbitrary combination of column keys and arrays. Here, "array"
encompasses :class:`Series`, :class:`Index`, ``np.ndarray``, and
instances of :class:`~collections.abc.Iterator`.
drop : bool, default True
Delete columns to be used as the new index.
append : bool, default False
Whether to append columns to existing index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
verify_integrity : bool, default False
Check the new index for duplicates. Otherwise defer the check until
necessary. Setting to False will improve the performance of this
method.
Returns
-------
DataFrame
Changed row labels.
See Also
--------
DataFrame.reset_index : Opposite of set_index.
DataFrame.reindex : Change to new indices or expand indices.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
>>> df = pd.DataFrame({'month': [1, 4, 7, 10],
... 'year': [2012, 2014, 2013, 2014],
... 'sale': [55, 40, 84, 31]})
>>> df
month year sale
0 1 2012 55
1 4 2014 40
2 7 2013 84
3 10 2014 31
Set the index to become the 'month' column:
>>> df.set_index('month')
year sale
month
1 2012 55
4 2014 40
7 2013 84
10 2014 31
Create a MultiIndex using columns 'year' and 'month':
>>> df.set_index(['year', 'month'])
sale
year month
2012 1 55
2014 4 40
2013 7 84
2014 10 31
Create a MultiIndex using an Index and a column:
>>> df.set_index([pd.Index([1, 2, 3, 4]), 'year'])
month sale
year
1 2012 1 55
2 2014 4 40
3 2013 7 84
4 2014 10 31
Create a MultiIndex using two Series:
>>> s = pd.Series([1, 2, 3, 4])
>>> df.set_index([s, s**2])
month year sale
1 1 1 2012 55
2 4 4 2014 40
3 9 7 2013 84
4 16 10 2014 31
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if not isinstance(keys, list):
keys = [keys]
err_msg = (
'The parameter "keys" may be a column key, one-dimensional '
"array, or a list containing only valid column keys and "
"one-dimensional arrays."
)
missing: List[Optional[Hashable]] = []
for col in keys:
if isinstance(
col, (ABCIndexClass, ABCSeries, np.ndarray, list, abc.Iterator)
):
# arrays are fine as long as they are one-dimensional
# iterators get converted to list below
if getattr(col, "ndim", 1) != 1:
raise ValueError(err_msg)
else:
# everything else gets tried as a key; see GH 24969
try:
found = col in self.columns
except TypeError:
raise TypeError(f"{err_msg}. Received column of type {type(col)}")
else:
if not found:
missing.append(col)
if missing:
raise KeyError(f"None of {missing} are in the columns")
if inplace:
frame = self
else:
frame = self.copy()
arrays = []
names = []
if append:
names = list(self.index.names)
if isinstance(self.index, ABCMultiIndex):
for i in range(self.index.nlevels):
arrays.append(self.index._get_level_values(i))
else:
arrays.append(self.index)
to_remove: List[Optional[Hashable]] = []
for col in keys:
if isinstance(col, ABCMultiIndex):
for n in range(col.nlevels):
arrays.append(col._get_level_values(n))
names.extend(col.names)
elif isinstance(col, (ABCIndexClass, ABCSeries)):
# if Index then not MultiIndex (treated above)
arrays.append(col)
names.append(col.name)
elif isinstance(col, (list, np.ndarray)):
arrays.append(col)
names.append(None)
elif isinstance(col, abc.Iterator):
arrays.append(list(col))
names.append(None)
# from here, col can only be a column label
else:
arrays.append(frame[col]._values)
names.append(col)
if drop:
to_remove.append(col)
if len(arrays[-1]) != len(self):
# check newest element against length of calling frame, since
# ensure_index_from_sequences would not raise for append=False.
raise ValueError(
f"Length mismatch: Expected {len(self)} rows, "
f"received array of length {len(arrays[-1])}"
)
index = ensure_index_from_sequences(arrays, names)
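        # ensure_index_from_sequences returns a plain Index when a single array
        # was collected and a MultiIndex when several levels were collected.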
if verify_integrity and not index.is_unique:
duplicates = index[index.duplicated()].unique()
raise ValueError(f"Index has duplicate keys: {duplicates}")
# use set to handle duplicate column names gracefully in case of drop
for c in set(to_remove):
del frame[c]
# clear up memory usage
index._cleanup()
frame.index = index
if not inplace:
return frame
def reset_index(
self,
level: Optional[Union[Hashable, Sequence[Hashable]]] = None,
drop: bool = False,
inplace: bool = False,
col_level: Hashable = 0,
col_fill: Optional[Hashable] = "",
) -> Optional["DataFrame"]:
"""
Reset the index, or a level of it.
Reset the index of the DataFrame, and use the default one instead.
If the DataFrame has a MultiIndex, this method can remove one or more
levels.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default.
drop : bool, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
col_level : int or str, default 0
If the columns have multiple levels, determines which level the
labels are inserted into. By default it is inserted into the first
level.
col_fill : object, default ''
If the columns have multiple levels, determines how the other
levels are named. If None then the index name is repeated.
Returns
-------
DataFrame or None
DataFrame with the new index or None if ``inplace=True``.
See Also
--------
DataFrame.set_index : Opposite of reset_index.
DataFrame.reindex : Change to new indices or expand indices.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
>>> df = pd.DataFrame([('bird', 389.0),
... ('bird', 24.0),
... ('mammal', 80.5),
... ('mammal', np.nan)],
... index=['falcon', 'parrot', 'lion', 'monkey'],
... columns=('class', 'max_speed'))
>>> df
class max_speed
falcon bird 389.0
parrot bird 24.0
lion mammal 80.5
monkey mammal NaN
When we reset the index, the old index is added as a column, and a
new sequential index is used:
>>> df.reset_index()
index class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
We can use the `drop` parameter to avoid the old index being added as
a column:
>>> df.reset_index(drop=True)
class max_speed
0 bird 389.0
1 bird 24.0
2 mammal 80.5
3 mammal NaN
You can also use `reset_index` with `MultiIndex`.
>>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
... ('bird', 'parrot'),
... ('mammal', 'lion'),
... ('mammal', 'monkey')],
... names=['class', 'name'])
>>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),
... ('species', 'type')])
>>> df = pd.DataFrame([(389.0, 'fly'),
... ( 24.0, 'fly'),
... ( 80.5, 'run'),
... (np.nan, 'jump')],
... index=index,
... columns=columns)
>>> df
speed species
max type
class name
bird falcon 389.0 fly
parrot 24.0 fly
mammal lion 80.5 run
monkey NaN jump
If the index has multiple levels, we can reset a subset of them:
>>> df.reset_index(level='class')
class speed species
max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we are not dropping the index, by default, it is placed in the top
level. We can place it in another level:
>>> df.reset_index(level='class', col_level=1)
speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
When the index is inserted under another level, we can specify under
which one with the parameter `col_fill`:
>>> df.reset_index(level='class', col_level=1, col_fill='species')
species speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we specify a nonexistent level for `col_fill`, it is created:
>>> df.reset_index(level='class', col_level=1, col_fill='genus')
genus speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if inplace:
new_obj = self
else:
new_obj = self.copy()
def _maybe_casted_values(index, labels=None):
values = index._values
if not isinstance(index, (PeriodIndex, DatetimeIndex)):
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values)
# if we have the labels, extract the values with a mask
if labels is not None:
mask = labels == -1
# we can have situations where the whole mask is -1,
# meaning there is nothing found in labels, so make all nan's
if mask.all():
values = np.empty(len(mask))
values.fill(np.nan)
else:
values = values.take(labels)
# TODO(https://github.com/pandas-dev/pandas/issues/24206)
# Push this into maybe_upcast_putmask?
# We can't pass EAs there right now. Looks a bit
# complicated.
# So we unbox the ndarray_values, op, re-box.
values_type = type(values)
values_dtype = values.dtype
if issubclass(values_type, DatetimeLikeArray):
values = values._data
if mask.any():
values, _ = maybe_upcast_putmask(values, mask, np.nan)
if issubclass(values_type, DatetimeLikeArray):
values = values_type(values, dtype=values_dtype)
return values
new_index = ibase.default_index(len(new_obj))
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if len(level) < self.index.nlevels:
new_index = self.index.droplevel(level)
if not drop:
to_insert: Iterable[Tuple[Any, Optional[Any]]]
if isinstance(self.index, ABCMultiIndex):
names = [
(n if n is not None else f"level_{i}")
for i, n in enumerate(self.index.names)
]
to_insert = zip(self.index.levels, self.index.codes)
else:
default = "index" if "index" not in self else "level_0"
names = [default] if self.index.name is None else [self.index.name]
to_insert = ((self.index, None),)
multi_col = isinstance(self.columns, ABCMultiIndex)
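        # Iterate in reverse so that repeated ``insert(0, ...)`` calls leave the
        # outermost index level as the left-most inserted column.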
for i, (lev, lab) in reversed(list(enumerate(to_insert))):
if not (level is None or i in level):
continue
name = names[i]
if multi_col:
col_name = list(name) if isinstance(name, tuple) else [name]
if col_fill is None:
if len(col_name) not in (1, self.columns.nlevels):
raise ValueError(
"col_fill=None is incompatible "
f"with incomplete column name {name}"
)
col_fill = col_name[0]
lev_num = self.columns._get_level_number(col_level)
name_lst = [col_fill] * lev_num + col_name
missing = self.columns.nlevels - len(name_lst)
name_lst += [col_fill] * missing
name = tuple(name_lst)
# to ndarray and maybe infer different dtype
level_values = _maybe_casted_values(lev, lab)
new_obj.insert(0, name, level_values)
new_obj.index = new_index
if not inplace:
return new_obj
return None
# ----------------------------------------------------------------------
# Reindex-based selection methods
@Appender(_shared_docs["isna"] % _shared_doc_kwargs)
def isna(self) -> "DataFrame":
return super().isna()
@Appender(_shared_docs["isna"] % _shared_doc_kwargs)
def isnull(self) -> "DataFrame":
return super().isnull()
@Appender(_shared_docs["notna"] % _shared_doc_kwargs)
def notna(self) -> "DataFrame":
return super().notna()
@Appender(_shared_docs["notna"] % _shared_doc_kwargs)
def notnull(self) -> "DataFrame":
return super().notnull()
def dropna(self, axis=0, how="any", thresh=None, subset=None, inplace=False):
"""
Remove missing values.
See the :ref:`User Guide <missing_data>` for more on which values are
considered missing, and how to work with missing data.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
Determine if rows or columns which contain missing values are
removed.
* 0, or 'index' : Drop rows which contain missing values.
            * 1, or 'columns' : Drop columns which contain missing values.
.. versionchanged:: 1.0.0
               Passing a tuple or list to drop on multiple axes is no longer
               supported; only a single axis is allowed.
how : {'any', 'all'}, default 'any'
Determine if row or column is removed from DataFrame, when we have
at least one NA or all NA.
* 'any' : If any NA values are present, drop that row or column.
* 'all' : If all values are NA, drop that row or column.
thresh : int, optional
Require that many non-NA values.
subset : array-like, optional
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
DataFrame
DataFrame with NA entries dropped from it.
See Also
--------
DataFrame.isna: Indicate missing values.
DataFrame.notna : Indicate existing (non-missing) values.
DataFrame.fillna : Replace missing values.
Series.dropna : Drop missing values.
Index.dropna : Drop missing indices.
Examples
--------
>>> df = pd.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
... "toy": [np.nan, 'Batmobile', 'Bullwhip'],
... "born": [pd.NaT, pd.Timestamp("1940-04-25"),
... pd.NaT]})
>>> df
name toy born
0 Alfred NaN NaT
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Drop the rows where at least one element is missing.
>>> df.dropna()
name toy born
1 Batman Batmobile 1940-04-25
Drop the columns where at least one element is missing.
>>> df.dropna(axis='columns')
name
0 Alfred
1 Batman
2 Catwoman
Drop the rows where all elements are missing.
>>> df.dropna(how='all')
name toy born
0 Alfred NaN NaT
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Keep only the rows with at least 2 non-NA values.
>>> df.dropna(thresh=2)
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Define in which columns to look for missing values.
>>> df.dropna(subset=['name', 'born'])
name toy born
1 Batman Batmobile 1940-04-25
Keep the DataFrame with valid entries in the same variable.
>>> df.dropna(inplace=True)
>>> df
name toy born
1 Batman Batmobile 1940-04-25
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if isinstance(axis, (tuple, list)):
# GH20987
raise TypeError("supplying multiple axes to axis is no longer supported.")
axis = self._get_axis_number(axis)
agg_axis = 1 - axis
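        # Non-NA values are counted along the *other* axis: dropping rows
        # (axis=0) requires counting across each row's columns, and vice versa.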
agg_obj = self
if subset is not None:
ax = self._get_axis(agg_axis)
indices = ax.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
agg_obj = self.take(indices, axis=agg_axis)
count = agg_obj.count(axis=agg_axis)
if thresh is not None:
mask = count >= thresh
elif how == "any":
mask = count == len(agg_obj._get_axis(agg_axis))
elif how == "all":
mask = count > 0
else:
if how is not None:
raise ValueError(f"invalid how option: {how}")
else:
raise TypeError("must specify how or thresh")
result = self.loc(axis=axis)[mask]
if inplace:
self._update_inplace(result)
else:
return result
def drop_duplicates(
self,
subset: Optional[Union[Hashable, Sequence[Hashable]]] = None,
keep: Union[str, bool] = "first",
inplace: bool = False,
ignore_index: bool = False,
) -> Optional["DataFrame"]:
"""
Return DataFrame with duplicate rows removed.
Considering certain columns is optional. Indexes, including time indexes
are ignored.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns.
keep : {'first', 'last', False}, default 'first'
Determines which duplicates (if any) to keep.
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : bool, default False
Whether to drop duplicates in place or to return a copy.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.0.0
Returns
-------
DataFrame
DataFrame with duplicates removed or None if ``inplace=True``.
"""
        if self.empty:
            # Nothing to deduplicate; still honor ``inplace`` by returning None.
            return None if inplace else self.copy()
inplace = validate_bool_kwarg(inplace, "inplace")
duplicated = self.duplicated(subset, keep=keep)
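        # ``duplicated`` marks the rows to discard; the inverted mask below
        # selects the rows that are kept.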
if inplace:
            (inds,) = (~duplicated)._ndarray_values.nonzero()
new_data = self._data.take(inds)
if ignore_index:
new_data.axes[1] = ibase.default_index(len(inds))
self._update_inplace(new_data)
else:
            result = self[~duplicated]
if ignore_index:
result.index = ibase.default_index(len(result))
return result
return None
def duplicated(
self,
subset: Optional[Union[Hashable, Sequence[Hashable]]] = None,
keep: Union[str, bool] = "first",
) -> "Series":
"""
Return boolean Series denoting duplicate rows.
Considering certain columns is optional.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns.
keep : {'first', 'last', False}, default 'first'
Determines which duplicates (if any) to mark.
- ``first`` : Mark duplicates as ``True`` except for the first occurrence.
- ``last`` : Mark duplicates as ``True`` except for the last occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
Series
"""
from pandas.core.sorting import get_group_index
from pandas._libs.hashtable import duplicated_int64, _SIZE_HINT_LIMIT
if self.empty:
return Series(dtype=bool)
def f(vals):
labels, shape = algorithms.factorize(
vals, size_hint=min(len(self), _SIZE_HINT_LIMIT)
)
return labels.astype("i8", copy=False), len(shape)
if subset is None:
subset = self.columns
elif (
not np.iterable(subset)
or isinstance(subset, str)
or isinstance(subset, tuple)
and subset in self.columns
):
subset = (subset,)
# needed for mypy since can't narrow types using np.iterable
subset = cast(Iterable, subset)
# Verify all columns in subset exist in the queried dataframe
# Otherwise, raise a KeyError, same as if you try to __getitem__ with a
# key that doesn't exist.
diff = Index(subset).difference(self.columns)
if not diff.empty:
raise KeyError(diff)
vals = (col.values for name, col in self.items() if name in subset)
labels, shape = map(list, zip(*map(f, vals)))
ids = get_group_index(labels, shape, sort=False, xnull=False)
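        # Each row is reduced to a single integer group id built from the
        # per-column factorized codes, so duplicate rows share the same id.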
return Series(duplicated_int64(ids, keep), index=self.index)
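    # Minimal usage sketch (illustrative, not part of the original source):
    #
    #     df = pd.DataFrame({"a": [1, 1, 2]})
    #     df.duplicated()        # -> False, True, False
    #     df.drop_duplicates()   # keeps rows 0 and 2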
# ----------------------------------------------------------------------
# Sorting
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.sort_values.__doc__)
def sort_values(
self,
by,
axis=0,
ascending=True,
inplace=False,
kind="quicksort",
na_position="last",
ignore_index=False,
):
inplace = validate_bool_kwarg(inplace, "inplace")
axis = self._get_axis_number(axis)
if not isinstance(by, list):
by = [by]
if is_sequence(ascending) and len(by) != len(ascending):
raise ValueError(
f"Length of ascending ({len(ascending)}) != length of by ({len(by)})"
)
if len(by) > 1:
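            # Multi-key sort: build sortable codes for every 'by' key and compute
            # one lexicographic ordering over all of them at once.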
from pandas.core.sorting import lexsort_indexer
keys = [self._get_label_or_level_values(x, axis=axis) for x in by]
indexer = lexsort_indexer(keys, orders=ascending, na_position=na_position)
indexer = ensure_platform_int(indexer)
else:
from pandas.core.sorting import nargsort
by = by[0]
k = self._get_label_or_level_values(by, axis=axis)
if isinstance(ascending, (tuple, list)):
ascending = ascending[0]
indexer = nargsort(
k, kind=kind, ascending=ascending, na_position=na_position
)
new_data = self._data.take(
indexer, axis=self._get_block_manager_axis(axis), verify=False
)
if ignore_index:
new_data.axes[1] = ibase.default_index(len(indexer))
if inplace:
return self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
def sort_index(
self,
axis=0,
level=None,
ascending: bool = True,
inplace: bool = False,
kind: str = "quicksort",
na_position: str = "last",
sort_remaining: bool = True,
ignore_index: bool = False,
):
"""
Sort object by labels (along an axis).
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis along which to sort. The value 0 identifies the rows,
and 1 identifies the columns.
level : int or level name or list of ints or list of level names
If not None, sort on values in specified index level(s).
ascending : bool, default True
Sort ascending vs. descending.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
            Choice of sorting algorithm. See also :func:`numpy.sort` for more
            information. `mergesort` is the only stable algorithm. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
Puts NaNs at the beginning if `first`; `last` puts NaNs at the end.
Not implemented for MultiIndex.
sort_remaining : bool, default True
If True and sorting by level and index is multilevel, sort by other
levels too (in order) after sorting by specified level.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.0.0
Returns
-------
sorted_obj : DataFrame or None
DataFrame with sorted index if inplace=False, None otherwise.
"""
# TODO: this can be combined with Series.sort_index impl as
# almost identical
inplace = validate_bool_kwarg(inplace, "inplace")
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
# make sure that the axis is lexsorted to start
# if not we need to reconstruct to get the correct indexer
labels = labels._sort_levels_monotonic()
if level is not None:
new_axis, indexer = labels.sortlevel(
level, ascending=ascending, sort_remaining=sort_remaining
)
elif isinstance(labels, ABCMultiIndex):
from pandas.core.sorting import lexsort_indexer
indexer = lexsort_indexer(
labels._get_codes_for_sorting(),
orders=ascending,
na_position=na_position,
)
else:
from pandas.core.sorting import nargsort
# Check monotonic-ness before sort an index
# GH11080
if (ascending and labels.is_monotonic_increasing) or (
not ascending and labels.is_monotonic_decreasing
):
if inplace:
return
else:
return self.copy()
indexer = nargsort(
labels, kind=kind, ascending=ascending, na_position=na_position
)
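        # The block manager stores a DataFrame's axes in reverse order, so the
        # user-facing axis number is translated before taking along it.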
baxis = self._get_block_manager_axis(axis)
new_data = self._data.take(indexer, axis=baxis, verify=False)
# reconstruct axis if needed
new_data.axes[baxis] = new_data.axes[baxis]._sort_levels_monotonic()
if ignore_index:
new_data.axes[1] = ibase.default_index(len(indexer))
if inplace:
return self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
def nlargest(self, n, columns, keep="first") -> "DataFrame":
"""
Return the first `n` rows ordered by `columns` in descending order.
Return the first `n` rows with the largest values in `columns`, in
descending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=False).head(n)``, but more
performant.
Parameters
----------
n : int
Number of rows to return.
columns : label or list of labels
Column label(s) to order by.
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- `first` : prioritize the first occurrence(s)
- `last` : prioritize the last occurrence(s)
            - ``all`` : do not drop any duplicates, even if it means
              selecting more than `n` items.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
The first `n` rows ordered by the given columns in descending
order.
See Also
--------
DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in
ascending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Notes
-----
This function cannot be used with all column types. For example, when
specifying columns with `object` or `category` dtypes, ``TypeError`` is
raised.
Examples
--------
>>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
... 434000, 434000, 337000, 11300,
... 11300, 11300],
... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,
... 17036, 182, 38, 311],
... 'alpha-2': ["IT", "FR", "MT", "MV", "BN",
... "IS", "NR", "TV", "AI"]},
... index=["Italy", "France", "Malta",
... "Maldives", "Brunei", "Iceland",
... "Nauru", "Tuvalu", "Anguilla"])
>>> df
population GDP alpha-2
Italy 59000000 1937894 IT
France 65000000 2583560 FR
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
Iceland 337000 17036 IS
Nauru 11300 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
In the following example, we will use ``nlargest`` to select the three
rows having the largest values in column "population".
>>> df.nlargest(3, 'population')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nlargest(3, 'population', keep='last')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Brunei 434000 12128 BN
When using ``keep='all'``, all duplicate items are maintained:
>>> df.nlargest(3, 'population', keep='all')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
To order by the largest values in column "population" and then "GDP",
we can specify multiple columns like in the next example.
>>> df.nlargest(3, ['population', 'GDP'])
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Brunei 434000 12128 BN
"""
return algorithms.SelectNFrame(self, n=n, keep=keep, columns=columns).nlargest()
def nsmallest(self, n, columns, keep="first") -> "DataFrame":
"""
Return the first `n` rows ordered by `columns` in ascending order.
Return the first `n` rows with the smallest values in `columns`, in
ascending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=True).head(n)``, but more
performant.
Parameters
----------
n : int
Number of items to retrieve.
columns : list or str
Column name or names to order by.
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
            - ``all`` : do not drop any duplicates, even if it means
              selecting more than `n` items.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
See Also
--------
DataFrame.nlargest : Return the first `n` rows ordered by `columns` in
descending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Examples
--------
>>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
... 434000, 434000, 337000, 11300,
... 11300, 11300],
... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,
... 17036, 182, 38, 311],
... 'alpha-2': ["IT", "FR", "MT", "MV", "BN",
... "IS", "NR", "TV", "AI"]},
... index=["Italy", "France", "Malta",
... "Maldives", "Brunei", "Iceland",
... "Nauru", "Tuvalu", "Anguilla"])
>>> df
population GDP alpha-2
Italy 59000000 1937894 IT
France 65000000 2583560 FR
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
Iceland 337000 17036 IS
Nauru 11300 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
In the following example, we will use ``nsmallest`` to select the
        three rows having the smallest values in column "population".
>>> df.nsmallest(3, 'population')
population GDP alpha-2
Nauru 11300 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nsmallest(3, 'population', keep='last')
population GDP alpha-2
Anguilla 11300 311 AI
Tuvalu 11300 38 TV
Nauru 11300 182 NR
When using ``keep='all'``, all duplicate items are maintained:
>>> df.nsmallest(3, 'population', keep='all')
population GDP alpha-2
Nauru 11300 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
        To order by the smallest values in column "population" and then
        "GDP", we can specify multiple columns like in the next example.
>>> df.nsmallest(3, ['population', 'GDP'])
population GDP alpha-2
Tuvalu 11300 38 TV
Nauru 11300 182 NR
Anguilla 11300 311 AI
"""
return algorithms.SelectNFrame(
self, n=n, keep=keep, columns=columns
).nsmallest()
def swaplevel(self, i=-2, j=-1, axis=0) -> "DataFrame":
"""
Swap levels i and j in a MultiIndex on a particular axis.
Parameters
----------
i, j : int or str
            Levels of the indices to be swapped. Can pass level name as string.
        axis : {0 or 'index', 1 or 'columns'}, default 0
            The axis on which to swap levels.
Returns
-------
DataFrame
"""
result = self.copy()
axis = self._get_axis_number(axis)
if axis == 0:
result.index = result.index.swaplevel(i, j)
else:
result.columns = result.columns.swaplevel(i, j)
return result
def reorder_levels(self, order, axis=0) -> "DataFrame":
"""
Rearrange index levels using input order. May not drop or duplicate levels.
Parameters
----------
order : list of int or list of str
List representing new level order. Reference level by number
(position) or by key (label).
        axis : {0 or 'index', 1 or 'columns'}, default 0
            Where to reorder levels.
Returns
-------
DataFrame
"""
axis = self._get_axis_number(axis)
if not isinstance(self._get_axis(axis), ABCMultiIndex): # pragma: no cover
raise TypeError("Can only reorder levels on a hierarchical axis.")
result = self.copy()
if axis == 0:
result.index = result.index.reorder_levels(order)
else:
result.columns = result.columns.reorder_levels(order)
return result
# ----------------------------------------------------------------------
# Arithmetic / combination related
def _combine_frame(self, other, func, fill_value=None, level=None):
# at this point we have `self._indexed_same(other)`
if fill_value is None:
# since _arith_op may be called in a loop, avoid function call
# overhead if possible by doing this check once
_arith_op = func
else:
def _arith_op(left, right):
# for the mixed_type case where we iterate over columns,
# _arith_op(left, right) is equivalent to
# left._binop(right, func, fill_value=fill_value)
left, right = ops.fill_binop(left, right, fill_value)
return func(left, right)
if ops.should_series_dispatch(self, other, func):
# iterate over columns
new_data = ops.dispatch_to_series(self, other, _arith_op)
else:
with np.errstate(all="ignore"):
res_values = _arith_op(self.values, other.values)
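            # dispatch_fill_zeros post-processes division-like ops (div, mod,
            # divmod) so zero-division results follow pandas' fill rules.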
new_data = dispatch_fill_zeros(func, self.values, other.values, res_values)
return new_data
def _combine_match_index(self, other, func):
# at this point we have `self.index.equals(other.index)`
if ops.should_series_dispatch(self, other, func):
# operate column-wise; avoid costly object-casting in `.values`
new_data = ops.dispatch_to_series(self, other, func)
else:
# fastpath --> operate directly on values
with np.errstate(all="ignore"):
new_data = func(self.values.T, other.values).T
return new_data
def _construct_result(self, result) -> "DataFrame":
"""
Wrap the result of an arithmetic, comparison, or logical operation.
Parameters
----------
result : DataFrame
Returns
-------
DataFrame
"""
out = self._constructor(result, index=self.index, copy=False)
# Pin columns instead of passing to constructor for compat with
# non-unique columns case
out.columns = self.columns
return out
def combine(
self, other: "DataFrame", func, fill_value=None, overwrite=True
) -> "DataFrame":
"""
Perform column-wise combine with another DataFrame.
Combines a DataFrame with `other` DataFrame using `func`
to element-wise combine columns. The row and column indexes of the
resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
The DataFrame to merge column-wise.
func : function
Function that takes two series as inputs and return a Series or a
scalar. Used to merge the two dataframes column by columns.
fill_value : scalar value, default None
The value to fill NaNs with prior to passing any column to the
merge func.
overwrite : bool, default True
If True, columns in `self` that do not exist in `other` will be
overwritten with NaNs.
Returns
-------
DataFrame
Combination of the provided DataFrames.
See Also
--------
DataFrame.combine_first : Combine two DataFrame objects and default to
non-null values in frame calling the method.
Examples
--------
Combine using a simple function that chooses the smaller column.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2
>>> df1.combine(df2, take_smaller)
A B
0 0 3
1 0 3
Example using a true element-wise combine function.
>>> df1 = pd.DataFrame({'A': [5, 0], 'B': [2, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, np.minimum)
A B
0 1 2
1 0 3
Using `fill_value` fills Nones prior to passing the column to the
merge function.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 -5.0
1 0 4.0
However, if the same element in both dataframes is None, that None
is preserved
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [None, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 -5.0
1 0 3.0
Example that demonstrates the use of `overwrite` and behavior when
the axis differ between the dataframes.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1], }, index=[1, 2])
>>> df1.combine(df2, take_smaller)
A B C
0 NaN NaN NaN
1 NaN 3.0 -10.0
2 NaN 3.0 1.0
>>> df1.combine(df2, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 -10.0
2 NaN 3.0 1.0
Demonstrating the preference of the passed in dataframe.
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1], }, index=[1, 2])
>>> df2.combine(df1, take_smaller)
A B C
0 0.0 NaN NaN
1 0.0 3.0 NaN
2 NaN 3.0 NaN
>>> df2.combine(df1, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0
"""
other_idxlen = len(other.index) # save for compare
this, other = self.align(other, copy=False)
new_index = this.index
if other.empty and len(new_index) == len(self.index):
return self.copy()
if self.empty and len(other) == other_idxlen:
return other.copy()
# sorts if possible
new_columns = this.columns.union(other.columns)
do_fill = fill_value is not None
result = {}
for col in new_columns:
series = this[col]
otherSeries = other[col]
this_dtype = series.dtype
other_dtype = otherSeries.dtype
this_mask = isna(series)
other_mask = isna(otherSeries)
# don't overwrite columns unnecessarily
# DO propagate if this column is not in the intersection
if not overwrite and other_mask.all():
result[col] = this[col].copy()
continue
if do_fill:
series = series.copy()
otherSeries = otherSeries.copy()
series[this_mask] = fill_value
otherSeries[other_mask] = fill_value
if col not in self.columns:
                # col is missing from self (it only exists in other), so the
                # aligned series is all NaN; try to cast it to other's dtype.
new_dtype = other_dtype
try:
series = series.astype(new_dtype, copy=False)
except ValueError:
# e.g. new_dtype is integer types
pass
else:
# if we have different dtypes, possibly promote
new_dtype = find_common_type([this_dtype, other_dtype])
if not is_dtype_equal(this_dtype, new_dtype):
series = series.astype(new_dtype)
if not is_dtype_equal(other_dtype, new_dtype):
otherSeries = otherSeries.astype(new_dtype)
arr = func(series, otherSeries)
arr = maybe_downcast_to_dtype(arr, this_dtype)
result[col] = arr
# convert_objects just in case
return self._constructor(result, index=new_index, columns=new_columns)
def combine_first(self, other: "DataFrame") -> "DataFrame":
"""
Update null elements with value in the same location in `other`.
Combine two DataFrame objects by filling null values in one DataFrame
with non-null values from other DataFrame. The row and column indexes
of the resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
Provided DataFrame to use to fill null values.
Returns
-------
DataFrame
See Also
--------
DataFrame.combine : Perform series-wise operation on two DataFrames
using a given function.
Examples
--------
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine_first(df2)
A B
0 1.0 3.0
1 0.0 4.0
Null values still persist if the location of that null value
does not exist in `other`
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2])
>>> df1.combine_first(df2)
A B C
0 NaN 4.0 NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0
"""
import pandas.core.computation.expressions as expressions
def extract_values(arr):
# Does two things:
# 1. maybe gets the values from the Series / Index
# 2. convert datelike to i8
if isinstance(arr, (ABCIndexClass, ABCSeries)):
arr = arr._values
if needs_i8_conversion(arr):
if is_extension_array_dtype(arr.dtype):
arr = arr.asi8
else:
arr = arr.view("i8")
return arr
def combiner(x, y):
mask = isna(x)
if isinstance(mask, (ABCIndexClass, ABCSeries)):
mask = mask._values
x_values = extract_values(x)
y_values = extract_values(y)
# If the column y in other DataFrame is not in first DataFrame,
# just return y_values.
if y.name not in self.columns:
return y_values
return expressions.where(mask, y_values, x_values)
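        # ``expressions.where`` keeps x where it is non-missing and takes y
        # elsewhere, so ``combine`` with this combiner fills the NA cells of
        # ``self`` from ``other``.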
return self.combine(other, combiner, overwrite=False)
def update(
self, other, join="left", overwrite=True, filter_func=None, errors="ignore"
) -> None:
"""
Modify in place using non-NA values from another DataFrame.
Aligns on indices. There is no return value.
Parameters
----------
other : DataFrame, or object coercible into a DataFrame
Should have at least one matching index/column label
with the original DataFrame. If a Series is passed,
its name attribute must be set, and that will be
used as the column name to align with the original DataFrame.
join : {'left'}, default 'left'
Only left join is implemented, keeping the index and columns of the
original object.
overwrite : bool, default True
How to handle non-NA values for overlapping keys:
* True: overwrite original DataFrame's values
with values from `other`.
* False: only update values that are NA in
the original DataFrame.
filter_func : callable(1d-array) -> bool 1d-array, optional
Can choose to replace values other than NA. Return True for values
that should be updated.
errors : {'raise', 'ignore'}, default 'ignore'
If 'raise', will raise a ValueError if the DataFrame and `other`
both contain non-NA data in the same place.
.. versionchanged:: 0.24.0
Changed from `raise_conflict=False|True`
to `errors='ignore'|'raise'`.
Returns
-------
None : method directly changes calling object
Raises
------
ValueError
* When `errors='raise'` and there's overlapping non-NA data.
* When `errors` is not either `'ignore'` or `'raise'`
NotImplementedError
* If `join != 'left'`
See Also
--------
dict.update : Similar method for dictionaries.
DataFrame.merge : For column(s)-on-columns(s) operations.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, 5, 6],
... 'C': [7, 8, 9]})
>>> df.update(new_df)
>>> df
A B
0 1 4
1 2 5
2 3 6
The DataFrame's length does not increase as a result of the update,
only values at matching index/column labels are updated.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']})
>>> df.update(new_df)
>>> df
A B
0 a d
1 b e
2 c f
        For Series, its name attribute must be set.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2])
>>> df.update(new_column)
>>> df
A B
0 a d
1 b y
2 c e
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2])
>>> df.update(new_df)
>>> df
A B
0 a x
1 b d
2 c e
If `other` contains NaNs the corresponding values are not updated
in the original dataframe.
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, np.nan, 6]})
>>> df.update(new_df)
>>> df
A B
0 1 4.0
1 2 500.0
2 3 6.0
"""
import pandas.core.computation.expressions as expressions
# TODO: Support other joins
if join != "left": # pragma: no cover
raise NotImplementedError("Only left join is supported")
if errors not in ["ignore", "raise"]:
raise ValueError("The parameter errors must be either 'ignore' or 'raise'")
if not isinstance(other, DataFrame):
other = DataFrame(other)
other = other.reindex_like(self)
for col in self.columns:
this = self[col]._values
that = other[col]._values
if filter_func is not None:
with np.errstate(all="ignore"):
mask = ~filter_func(this) | isna(that)
else:
if errors == "raise":
mask_this = notna(that)
mask_that = notna(this)
if any(mask_this & mask_that):
raise ValueError("Data overlaps.")
if overwrite:
mask = isna(that)
else:
mask = notna(this)
# don't overwrite columns unnecessarily
if mask.all():
continue
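            # ``mask`` is True where the existing value should be kept, so the
            # assignment only pulls values from ``other`` where mask is False.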
self[col] = expressions.where(mask, this, that)
# ----------------------------------------------------------------------
# Data reshaping
@Appender(
"""
Examples
--------
>>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]})
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean()
Max Speed
Animal
Falcon 375.0
Parrot 25.0
**Hierarchical Indexes**
We can groupby different levels of a hierarchical index
using the `level` parameter:
>>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'],
... ['Captive', 'Wild', 'Captive', 'Wild']]
>>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type'))
>>> df = pd.DataFrame({'Max Speed': [390., 350., 30., 20.]},
... index=index)
>>> df
Max Speed
Animal Type
Falcon Captive 390.0
Wild 350.0
Parrot Captive 30.0
Wild 20.0
>>> df.groupby(level=0).mean()
Max Speed
Animal
Falcon 370.0
Parrot 25.0
>>> df.groupby(level="Type").mean()
Max Speed
Type
Captive 210.0
Wild 185.0
"""
)
@Appender(_shared_docs["groupby"] % _shared_doc_kwargs)
def groupby(
self,
by=None,
axis=0,
level=None,
as_index: bool = True,
sort: bool = True,
group_keys: bool = True,
squeeze: bool = False,
observed: bool = False,
) -> "DataFrameGroupBy":
from pandas.core.groupby.generic import DataFrameGroupBy
if level is None and by is None:
raise TypeError("You have to supply one of 'by' and 'level'")
axis = self._get_axis_number(axis)
return DataFrameGroupBy(
obj=self,
keys=by,
axis=axis,
level=level,
as_index=as_index,
sort=sort,
group_keys=group_keys,
squeeze=squeeze,
observed=observed,
)
_shared_docs[
"pivot"
] = """
Return reshaped DataFrame organized by given index / column values.
Reshape data (produce a "pivot" table) based on column values. Uses
unique values from specified `index` / `columns` to form axes of the
resulting DataFrame. This function does not support data
aggregation, multiple values will result in a MultiIndex in the
columns. See the :ref:`User Guide <reshaping>` for more on reshaping.
Parameters
----------%s
index : str or object, optional
Column to use to make new frame's index. If None, uses
existing index.
columns : str or object
Column to use to make new frame's columns.
values : str, object or a list of the previous, optional
Column(s) to use for populating new frame's values. If not
specified, all remaining columns will be used and the result will
have hierarchically indexed columns.
.. versionchanged:: 0.23.0
Also accept list of column names.
Returns
-------
DataFrame
Returns reshaped DataFrame.
Raises
------
ValueError:
When there are any `index`, `columns` combinations with multiple
            values. Use `DataFrame.pivot_table` when you need to aggregate.
See Also
--------
DataFrame.pivot_table : Generalization of pivot that can handle
duplicate values for one index/column pair.
DataFrame.unstack : Pivot based on the index values instead of a
column.
Notes
-----
For finer-tuned control, see hierarchical indexing documentation along
with the related stack/unstack methods.
Examples
--------
>>> df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two',
... 'two'],
... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
... 'baz': [1, 2, 3, 4, 5, 6],
... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
>>> df
foo bar baz zoo
0 one A 1 x
1 one B 2 y
2 one C 3 z
3 two A 4 q
4 two B 5 w
5 two C 6 t
>>> df.pivot(index='foo', columns='bar', values='baz')
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar')['baz']
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar', values=['baz', 'zoo'])
baz zoo
bar A B C A B C
foo
one 1 2 3 x y z
two 4 5 6 q w t
A ValueError is raised if there are any duplicates.
>>> df = pd.DataFrame({"foo": ['one', 'one', 'two', 'two'],
... "bar": ['A', 'A', 'B', 'C'],
... "baz": [1, 2, 3, 4]})
>>> df
foo bar baz
0 one A 1
1 one A 2
2 two B 3
3 two C 4
Notice that the first two rows are the same for our `index`
and `columns` arguments.
>>> df.pivot(index='foo', columns='bar', values='baz')
Traceback (most recent call last):
...
ValueError: Index contains duplicate entries, cannot reshape
"""
@Substitution("")
@Appender(_shared_docs["pivot"])
def pivot(self, index=None, columns=None, values=None) -> "DataFrame":
from pandas.core.reshape.pivot import pivot
return pivot(self, index=index, columns=columns, values=values)
_shared_docs[
"pivot_table"
] = """
Create a spreadsheet-style pivot table as a DataFrame.
The levels in the pivot table will be stored in MultiIndex objects
(hierarchical indexes) on the index and columns of the result DataFrame.
Parameters
----------%s
values : column to aggregate, optional
index : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table index. If an array is passed,
it is being used as the same manner as column values.
columns : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table column. If an array is passed,
it is being used as the same manner as column values.
aggfunc : function, list of functions, dict, default numpy.mean
If list of functions passed, the resulting pivot table will have
hierarchical columns whose top level are the function names
(inferred from the function objects themselves)
If dict is passed, the key is column to aggregate and value
is function or list of functions.
fill_value : scalar, default None
Value to replace missing values with.
margins : bool, default False
Add all row / columns (e.g. for subtotal / grand totals).
dropna : bool, default True
Do not include columns whose entries are all NaN.
margins_name : str, default 'All'
Name of the row / column that will contain the totals
when margins is True.
observed : bool, default False
This only applies if any of the groupers are Categoricals.
If True: only show observed values for categorical groupers.
If False: show all values for categorical groupers.
.. versionchanged:: 0.25.0
Returns
-------
DataFrame
An Excel style pivot table.
See Also
--------
DataFrame.pivot : Pivot without aggregation that can handle
non-numeric data.
Examples
--------
>>> df = pd.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo",
... "bar", "bar", "bar", "bar"],
... "B": ["one", "one", "one", "two", "two",
... "one", "one", "two", "two"],
... "C": ["small", "large", "large", "small",
... "small", "large", "small", "small",
... "large"],
... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
... "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]})
>>> df
A B C D E
0 foo one small 1 2
1 foo one large 2 4
2 foo one large 2 5
3 foo two small 3 5
4 foo two small 3 6
5 bar one large 4 6
6 bar one small 5 8
7 bar two small 6 9
8 bar two large 7 9
This first example aggregates values by taking the sum.
>>> table = pd.pivot_table(df, values='D', index=['A', 'B'],
... columns=['C'], aggfunc=np.sum)
>>> table
C large small
A B
bar one 4.0 5.0
two 7.0 6.0
foo one 4.0 1.0
two NaN 6.0
We can also fill missing values using the `fill_value` parameter.
>>> table = pd.pivot_table(df, values='D', index=['A', 'B'],
... columns=['C'], aggfunc=np.sum, fill_value=0)
>>> table
C large small
A B
bar one 4 5
two 7 6
foo one 4 1
two 0 6
The next example aggregates by taking the mean across multiple columns.
>>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'],
... aggfunc={'D': np.mean,
... 'E': np.mean})
>>> table
D E
A C
bar large 5.500000 7.500000
small 5.500000 8.500000
foo large 2.000000 4.500000
small 2.333333 4.333333
We can also calculate multiple types of aggregations for any given
value column.
>>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'],
... aggfunc={'D': np.mean,
... 'E': [min, max, np.mean]})
>>> table
D E
mean max mean min
A C
bar large 5.500000 9.0 7.500000 6.0
small 5.500000 9.0 8.500000 8.0
foo large 2.000000 5.0 4.500000 4.0
small 2.333333 6.0 4.333333 2.0
"""
@Substitution("")
@Appender(_shared_docs["pivot_table"])
def pivot_table(
self,
values=None,
index=None,
columns=None,
aggfunc="mean",
fill_value=None,
margins=False,
dropna=True,
margins_name="All",
observed=False,
) -> "DataFrame":
from pandas.core.reshape.pivot import pivot_table
return pivot_table(
self,
values=values,
index=index,
columns=columns,
aggfunc=aggfunc,
fill_value=fill_value,
margins=margins,
dropna=dropna,
margins_name=margins_name,
observed=observed,
)
def stack(self, level=-1, dropna=True):
"""
Stack the prescribed level(s) from columns to index.
Return a reshaped DataFrame or Series having a multi-level
index with one or more new inner-most levels compared to the current
DataFrame. The new inner-most levels are created by pivoting the
columns of the current dataframe:
- if the columns have a single level, the output is a Series;
- if the columns have multiple levels, the new index
level(s) is (are) taken from the prescribed level(s) and
the output is a DataFrame.
The new index levels are sorted.
Parameters
----------
level : int, str, list, default -1
Level(s) to stack from the column axis onto the index
axis, defined as one index or label, or a list of indices
or labels.
dropna : bool, default True
Whether to drop rows in the resulting Frame/Series with
missing values. Stacking a column level onto the index
axis can create combinations of index and column values
that are missing from the original dataframe. See Examples
section.
Returns
-------
DataFrame or Series
Stacked dataframe or series.
See Also
--------
DataFrame.unstack : Unstack prescribed level(s) from index axis
onto column axis.
DataFrame.pivot : Reshape dataframe from long format to wide
format.
DataFrame.pivot_table : Create a spreadsheet-style pivot table
as a DataFrame.
Notes
-----
The function is named by analogy with a collection of books
being reorganized from being side by side on a horizontal
position (the columns of the dataframe) to being stacked
vertically on top of each other (in the index of the
dataframe).
Examples
--------
**Single level columns**
>>> df_single_level_cols = pd.DataFrame([[0, 1], [2, 3]],
... index=['cat', 'dog'],
... columns=['weight', 'height'])
Stacking a dataframe with a single level column axis returns a Series:
>>> df_single_level_cols
weight height
cat 0 1
dog 2 3
>>> df_single_level_cols.stack()
cat weight 0
height 1
dog weight 2
height 3
dtype: int64
**Multi level columns: simple case**
>>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('weight', 'pounds')])
>>> df_multi_level_cols1 = pd.DataFrame([[1, 2], [2, 4]],
... index=['cat', 'dog'],
... columns=multicol1)
Stacking a dataframe with a multi-level column axis:
>>> df_multi_level_cols1
weight
kg pounds
cat 1 2
dog 2 4
>>> df_multi_level_cols1.stack()
weight
cat kg 1
pounds 2
dog kg 2
pounds 4
**Missing values**
>>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('height', 'm')])
>>> df_multi_level_cols2 = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]],
... index=['cat', 'dog'],
... columns=multicol2)
It is common to have missing values when stacking a dataframe
with multi-level columns, as the stacked dataframe typically
has more values than the original dataframe. Missing values
are filled with NaNs:
>>> df_multi_level_cols2
weight height
kg m
cat 1.0 2.0
dog 3.0 4.0
>>> df_multi_level_cols2.stack()
height weight
cat kg NaN 1.0
m 2.0 NaN
dog kg NaN 3.0
m 4.0 NaN
**Prescribing the level(s) to be stacked**
The first parameter controls which level or levels are stacked:
>>> df_multi_level_cols2.stack(0)
kg m
cat height NaN 2.0
weight 1.0 NaN
dog height NaN 4.0
weight 3.0 NaN
>>> df_multi_level_cols2.stack([0, 1])
cat height m 2.0
weight kg 1.0
dog height m 4.0
weight kg 3.0
dtype: float64
**Dropping missing values**
>>> df_multi_level_cols3 = pd.DataFrame([[None, 1.0], [2.0, 3.0]],
... index=['cat', 'dog'],
... columns=multicol2)
Note that rows where all values are missing are dropped by
default but this behaviour can be controlled via the dropna
keyword parameter:
>>> df_multi_level_cols3
weight height
kg m
cat NaN 1.0
dog 2.0 3.0
>>> df_multi_level_cols3.stack(dropna=False)
height weight
cat kg NaN NaN
m 1.0 NaN
dog kg NaN 2.0
m 3.0 NaN
>>> df_multi_level_cols3.stack(dropna=True)
height weight
cat m 1.0 NaN
dog kg NaN 2.0
m 3.0 NaN
"""
from pandas.core.reshape.reshape import stack, stack_multiple
if isinstance(level, (tuple, list)):
return stack_multiple(self, level, dropna=dropna)
else:
return stack(self, level, dropna=dropna)
def explode(self, column: Union[str, Tuple]) -> "DataFrame":
"""
Transform each element of a list-like to a row, replicating index values.
.. versionadded:: 0.25.0
Parameters
----------
column : str or tuple
Column to explode.
Returns
-------
DataFrame
Exploded lists to rows of the subset columns;
index will be duplicated for these rows.
Raises
------
ValueError :
if columns of the frame are not unique.
See Also
--------
DataFrame.unstack : Pivot a level of the (necessarily hierarchical)
index labels.
DataFrame.melt : Unpivot a DataFrame from wide format to long format.
Series.explode : Explode a DataFrame from list-like columns to long format.
Notes
-----
This routine will explode list-likes including lists, tuples,
Series, and np.ndarray. The result dtype of the subset rows will
be object. Scalars will be returned unchanged. Empty list-likes will
result in a np.nan for that row.
Examples
--------
>>> df = pd.DataFrame({'A': [[1, 2, 3], 'foo', [], [3, 4]], 'B': 1})
>>> df
A B
0 [1, 2, 3] 1
1 foo 1
2 [] 1
3 [3, 4] 1
>>> df.explode('A')
A B
0 1 1
0 2 1
0 3 1
1 foo 1
2 NaN 1
3 3 1
3 4 1
"""
if not (is_scalar(column) or isinstance(column, tuple)):
raise ValueError("column must be a scalar")
if not self.columns.is_unique:
raise ValueError("columns must be unique")
df = self.reset_index(drop=True)
# TODO: use overload to refine return type of reset_index
assert df is not None # needed for mypy
result = df[column].explode()
result = df.drop([column], axis=1).join(result)
result.index = self.index.take(result.index)
result = result.reindex(columns=self.columns, copy=False)
return result
def unstack(self, level=-1, fill_value=None):
"""
Pivot a level of the (necessarily hierarchical) index labels.
Returns a DataFrame having a new level of column labels whose inner-most level
consists of the pivoted index labels.
If the index is not a MultiIndex, the output will be a Series
(the analogue of stack when the columns are not a MultiIndex).
The level involved will automatically get sorted.
Parameters
----------
level : int, str, or list of these, default -1 (last level)
Level(s) of index to unstack, can pass level name.
fill_value : int, str or dict
Replace NaN with this value if the unstack produces missing values.
Returns
-------
Series or DataFrame
See Also
--------
DataFrame.pivot : Pivot a table based on column values.
DataFrame.stack : Pivot a level of the column labels (inverse operation
from `unstack`).
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
>>> s.unstack(level=-1)
a b
one 1.0 2.0
two 3.0 4.0
>>> s.unstack(level=0)
one two
a 1.0 3.0
b 2.0 4.0
>>> df = s.unstack(level=0)
>>> df.unstack()
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
"""
from pandas.core.reshape.reshape import unstack
return unstack(self, level, fill_value)
_shared_docs[
"melt"
] = """
Unpivot a DataFrame from wide to long format, optionally leaving identifiers set.
This function is useful to massage a DataFrame into a format where one
or more columns are identifier variables (`id_vars`), while all other
columns, considered measured variables (`value_vars`), are "unpivoted" to
the row axis, leaving just two non-identifier columns, 'variable' and
'value'.
%(versionadded)s
Parameters
----------
id_vars : tuple, list, or ndarray, optional
Column(s) to use as identifier variables.
value_vars : tuple, list, or ndarray, optional
Column(s) to unpivot. If not specified, uses all columns that
are not set as `id_vars`.
var_name : scalar
Name to use for the 'variable' column. If None it uses
``frame.columns.name`` or 'variable'.
value_name : scalar, default 'value'
Name to use for the 'value' column.
col_level : int or str, optional
If columns are a MultiIndex then use this level to melt.
Returns
-------
DataFrame
Unpivoted DataFrame.
See Also
--------
%(other)s
pivot_table
DataFrame.pivot
Series.explode
Examples
--------
>>> df = pd.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},
... 'B': {0: 1, 1: 3, 2: 5},
... 'C': {0: 2, 1: 4, 2: 6}})
>>> df
A B C
0 a 1 2
1 b 3 4
2 c 5 6
>>> %(caller)sid_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> %(caller)sid_vars=['A'], value_vars=['B', 'C'])
A variable value
0 a B 1
1 b B 3
2 c B 5
3 a C 2
4 b C 4
5 c C 6
The names of 'variable' and 'value' columns can be customized:
>>> %(caller)sid_vars=['A'], value_vars=['B'],
... var_name='myVarname', value_name='myValname')
A myVarname myValname
0 a B 1
1 b B 3
2 c B 5
If you have multi-index columns:
>>> df.columns = [list('ABC'), list('DEF')]
>>> df
A B C
D E F
0 a 1 2
1 b 3 4
2 c 5 6
>>> %(caller)scol_level=0, id_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> %(caller)sid_vars=[('A', 'D')], value_vars=[('B', 'E')])
(A, D) variable_0 variable_1 value
0 a B E 1
1 b B E 3
2 c B E 5
"""
@Appender(
_shared_docs["melt"]
% dict(
caller="df.melt(", versionadded=".. versionadded:: 0.20.0\n", other="melt"
)
)
def melt(
self,
id_vars=None,
value_vars=None,
var_name=None,
value_name="value",
col_level=None,
) -> "DataFrame":
from pandas.core.reshape.melt import melt
return melt(
self,
id_vars=id_vars,
value_vars=value_vars,
var_name=var_name,
value_name=value_name,
col_level=col_level,
)
# ----------------------------------------------------------------------
# Time series-related
def diff(self, periods=1, axis=0) -> "DataFrame":
"""
First discrete difference of element.
Calculates the difference of a DataFrame element compared with another
element in the DataFrame (default is the element in the same column
of the previous row).
Parameters
----------
periods : int, default 1
Periods to shift for calculating difference, accepts negative
values.
axis : {0 or 'index', 1 or 'columns'}, default 0
Take difference over rows (0) or columns (1).
Returns
-------
DataFrame
See Also
--------
Series.diff: First discrete difference for a Series.
DataFrame.pct_change: Percent change over given number of periods.
DataFrame.shift: Shift index by desired number of periods with an
optional time freq.
Examples
--------
Difference with previous row
>>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [1, 1, 2, 3, 5, 8],
... 'c': [1, 4, 9, 16, 25, 36]})
>>> df
a b c
0 1 1 1
1 2 1 4
2 3 2 9
3 4 3 16
4 5 5 25
5 6 8 36
>>> df.diff()
a b c
0 NaN NaN NaN
1 1.0 0.0 3.0
2 1.0 1.0 5.0
3 1.0 1.0 7.0
4 1.0 2.0 9.0
5 1.0 3.0 11.0
Difference with previous column
>>> df.diff(axis=1)
a b c
0 NaN 0.0 0.0
1 NaN -1.0 3.0
2 NaN -1.0 7.0
3 NaN -1.0 13.0
4 NaN 0.0 20.0
5 NaN 2.0 28.0
Difference with 3rd previous row
>>> df.diff(periods=3)
a b c
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 3.0 2.0 15.0
4 3.0 4.0 21.0
5 3.0 6.0 27.0
Difference with following row
>>> df.diff(periods=-1)
a b c
0 -1.0 0.0 -3.0
1 -1.0 -1.0 -5.0
2 -1.0 -1.0 -7.0
3 -1.0 -2.0 -9.0
4 -1.0 -3.0 -11.0
5 NaN NaN NaN
"""
bm_axis = self._get_block_manager_axis(axis)
new_data = self._data.diff(n=periods, axis=bm_axis)
return self._constructor(new_data)
# ----------------------------------------------------------------------
# Function application
def _gotitem(
self,
key: Union[str, List[str]],
ndim: int,
subset: Optional[Union[Series, ABCDataFrame]] = None,
) -> Union[Series, ABCDataFrame]:
"""
Sub-classes to define. Return a sliced object.
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
if subset is None:
subset = self
elif subset.ndim == 1: # is Series
return subset
# TODO: _shallow_copy(subset)?
return subset[key]
_agg_summary_and_see_also_doc = dedent(
"""
The aggregation operations are always performed over an axis, either the
index (default) or the column axis. This behavior is different from
`numpy` aggregation functions (`mean`, `median`, `prod`, `sum`, `std`,
`var`), where the default is to compute the aggregation of the flattened
array, e.g., ``numpy.mean(arr_2d)`` as opposed to
``numpy.mean(arr_2d, axis=0)``.
`agg` is an alias for `aggregate`. Use the alias.
See Also
--------
DataFrame.apply : Perform any type of operations.
DataFrame.transform : Perform transformation type operations.
core.groupby.GroupBy : Perform operations over groups.
core.resample.Resampler : Perform operations over resampled bins.
core.window.Rolling : Perform operations over rolling window.
core.window.Expanding : Perform operations over expanding window.
core.window.EWM : Perform operation over exponential weighted
window.
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame([[1, 2, 3],
... [4, 5, 6],
... [7, 8, 9],
... [np.nan, np.nan, np.nan]],
... columns=['A', 'B', 'C'])
Aggregate these functions over the rows.
>>> df.agg(['sum', 'min'])
A B C
sum 12.0 15.0 18.0
min 1.0 2.0 3.0
Different aggregations per column.
>>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})
A B
max NaN 8.0
min 1.0 2.0
sum 12.0 NaN
Aggregate over the columns.
>>> df.agg("mean", axis="columns")
0 2.0
1 5.0
2 8.0
3 NaN
dtype: float64
"""
)
@Substitution(
see_also=_agg_summary_and_see_also_doc,
examples=_agg_examples_doc,
versionadded="\n.. versionadded:: 0.20.0\n",
**_shared_doc_kwargs,
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, func, axis=0, *args, **kwargs):
axis = self._get_axis_number(axis)
result = None
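# _aggregate raises TypeError for functions it cannot dispatch; fall back to apply() below in that case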
try:
result, how = self._aggregate(func, axis=axis, *args, **kwargs)
except TypeError:
pass
if result is None:
return self.apply(func, axis=axis, args=args, **kwargs)
return result
def _aggregate(self, arg, axis=0, *args, **kwargs):
if axis == 1:
# NDFrame.aggregate returns a tuple, and we need to transpose
# only result
result, how = self.T._aggregate(arg, *args, **kwargs)
result = result.T if result is not None else result
return result, how
return super()._aggregate(arg, *args, **kwargs)
agg = aggregate
@Appender(_shared_docs["transform"] % _shared_doc_kwargs)
def transform(self, func, axis=0, *args, **kwargs) -> "DataFrame":
axis = self._get_axis_number(axis)
if axis == 1:
return self.T.transform(func, *args, **kwargs).T
return super().transform(func, *args, **kwargs)
def apply(self, func, axis=0, raw=False, result_type=None, args=(), **kwds):
"""
Apply a function along an axis of the DataFrame.
Objects passed to the function are Series objects whose index is
either the DataFrame's index (``axis=0``) or the DataFrame's columns
(``axis=1``). By default (``result_type=None``), the final return type
is inferred from the return type of the applied function. Otherwise,
it depends on the `result_type` argument.
Parameters
----------
func : function
Function to apply to each column or row.
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis along which the function is applied:
* 0 or 'index': apply function to each column.
* 1 or 'columns': apply function to each row.
raw : bool, default False
Determines if row or column is passed as a Series or ndarray object:
* ``False`` : passes each row or column as a Series to the
function.
* ``True`` : the passed function will receive ndarray objects
instead.
If you are just applying a NumPy reduction function this will
achieve much better performance.
result_type : {'expand', 'reduce', 'broadcast', None}, default None
These only act when ``axis=1`` (columns):
* 'expand' : list-like results will be turned into columns.
* 'reduce' : returns a Series if possible rather than expanding
list-like results. This is the opposite of 'expand'.
* 'broadcast' : results will be broadcast to the original shape
of the DataFrame, the original index and columns will be
retained.
The default behaviour (None) depends on the return value of the
applied function: list-like results will be returned as a Series
of those. However if the apply function returns a Series these
are expanded to columns.
.. versionadded:: 0.23.0
args : tuple
Positional arguments to pass to `func` in addition to the
array/series.
**kwds
Additional keyword arguments to pass as keywords arguments to
`func`.
Returns
-------
Series or DataFrame
Result of applying ``func`` along the given axis of the
DataFrame.
See Also
--------
DataFrame.applymap: For elementwise operations.
DataFrame.aggregate: Only perform aggregating type operations.
DataFrame.transform: Only perform transforming type operations.
Examples
--------
>>> df = pd.DataFrame([[4, 9]] * 3, columns=['A', 'B'])
>>> df
A B
0 4 9
1 4 9
2 4 9
Using a numpy universal function (in this case the same as
``np.sqrt(df)``):
>>> df.apply(np.sqrt)
A B
0 2.0 3.0
1 2.0 3.0
2 2.0 3.0
Using a reducing function on either axis
>>> df.apply(np.sum, axis=0)
A 12
B 27
dtype: int64
>>> df.apply(np.sum, axis=1)
0 13
1 13
2 13
dtype: int64
Returning a list-like will result in a Series
>>> df.apply(lambda x: [1, 2], axis=1)
0 [1, 2]
1 [1, 2]
2 [1, 2]
dtype: object
Passing result_type='expand' will expand list-like results
to columns of a Dataframe
>>> df.apply(lambda x: [1, 2], axis=1, result_type='expand')
0 1
0 1 2
1 1 2
2 1 2
Returning a Series inside the function is similar to passing
``result_type='expand'``. The resulting column names
will be the Series index.
>>> df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
foo bar
0 1 2
1 1 2
2 1 2
Passing ``result_type='broadcast'`` will ensure the same shape
result, whether list-like or scalar is returned by the function,
and broadcast it along the axis. The resulting column names will
be the originals.
>>> df.apply(lambda x: [1, 2], axis=1, result_type='broadcast')
A B
0 1 2
1 1 2
2 1 2
"""
from pandas.core.apply import frame_apply
op = frame_apply(
self,
func=func,
axis=axis,
raw=raw,
result_type=result_type,
args=args,
kwds=kwds,
)
return op.get_result()
def applymap(self, func) -> "DataFrame":
"""
Apply a function to a Dataframe elementwise.
This method applies a function that accepts and returns a scalar
to every element of a DataFrame.
Parameters
----------
func : callable
Python function, returns a single value from a single value.
Returns
-------
DataFrame
Transformed DataFrame.
See Also
--------
DataFrame.apply : Apply a function along input axis of DataFrame.
Notes
-----
In the current implementation applymap calls `func` twice on the
first column/row to decide whether it can take a fast or slow
code path. This can lead to unexpected behavior if `func` has
side-effects, as they will take effect twice for the first
column/row.
Examples
--------
>>> df = pd.DataFrame([[1, 2.12], [3.356, 4.567]])
>>> df
0 1
0 1.000 2.120
1 3.356 4.567
>>> df.applymap(lambda x: len(str(x)))
0 1
0 3 4
1 5 5
Note that a vectorized version of `func` often exists, which will
be much faster. You could square each number elementwise.
>>> df.applymap(lambda x: x**2)
0 1
0 1.000000 4.494400
1 11.262736 20.857489
But it's better to avoid applymap in that case.
>>> df ** 2
0 1
0 1.000000 4.494400
1 11.262736 20.857489
"""
# if we have a dtype == 'M8[ns]', provide boxed values
def infer(x):
if x.empty:
return lib.map_infer(x, func)
return lib.map_infer(x.astype(object).values, func)
return self.apply(infer)
# ----------------------------------------------------------------------
# Merging / joining methods
def append(
self, other, ignore_index=False, verify_integrity=False, sort=False
) -> "DataFrame":
"""
Append rows of `other` to the end of caller, returning a new object.
Columns in `other` that are not in the caller are added as new columns.
Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
ignore_index : bool, default False
If True, do not use the index labels.
verify_integrity : bool, default False
If True, raise ValueError on creating index with duplicates.
sort : bool, default False
Sort columns if the columns of `self` and `other` are not aligned.
.. versionadded:: 0.23.0
.. versionchanged:: 1.0.0
Changed to not sort by default.
Returns
-------
DataFrame
See Also
--------
concat : General function to concatenate DataFrame or Series objects.
Notes
-----
If a list of dict/series is passed and the keys are all contained in
the DataFrame's index, the order of the columns in the resulting
DataFrame will be unchanged.
Iteratively appending rows to a DataFrame can be more computationally
intensive than a single concatenate. A better solution is to append
those rows to a list and then concatenate the list with the original
DataFrame all at once.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df
A B
0 1 2
1 3 4
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'))
>>> df.append(df2)
A B
0 1 2
1 3 4
0 5 6
1 7 8
With `ignore_index` set to True:
>>> df.append(df2, ignore_index=True)
A B
0 1 2
1 3 4
2 5 6
3 7 8
The following examples, while not recommended methods for generating DataFrames,
show two ways to generate a DataFrame from multiple data sources.
Less efficient:
>>> df = pd.DataFrame(columns=['A'])
>>> for i in range(5):
... df = df.append({'A': i}, ignore_index=True)
>>> df
A
0 0
1 1
2 2
3 3
4 4
More efficient:
>>> pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)],
... ignore_index=True)
A
0 0
1 1
2 2
3 3
4 4
"""
if isinstance(other, (Series, dict)):
if isinstance(other, dict):
other = Series(other)
if other.name is None and not ignore_index:
raise TypeError(
"Can only append a Series if ignore_index=True "
"or if the Series has a name"
)
index = Index([other.name], name=self.index.name)
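# labels present in `other` but missing from the caller's columns become new columns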
idx_diff = other.index.difference(self.columns)
try:
combined_columns = self.columns.append(idx_diff)
except TypeError:
combined_columns = self.columns.astype(object).append(idx_diff)
other = (
other.reindex(combined_columns, copy=False)
.to_frame()
.T.infer_objects()
.rename_axis(index.names, copy=False)
)
if not self.columns.equals(combined_columns):
self = self.reindex(columns=combined_columns)
elif isinstance(other, list):
if not other:
pass
elif not isinstance(other[0], DataFrame):
other = DataFrame(other)
if (self.columns.get_indexer(other.columns) >= 0).all():
other = other.reindex(columns=self.columns)
from pandas.core.reshape.concat import concat
if isinstance(other, (list, tuple)):
to_concat = [self, *other]
else:
to_concat = [self, other]
return concat(
to_concat,
ignore_index=ignore_index,
verify_integrity=verify_integrity,
sort=sort,
)
def join(
self, other, on=None, how="left", lsuffix="", rsuffix="", sort=False
) -> "DataFrame":
"""
Join columns of another DataFrame.
Join columns with `other` DataFrame either on index or on a key
column. Efficiently join multiple DataFrame objects by index at once by
passing a list.
Parameters
----------
other : DataFrame, Series, or list of DataFrame
Index should be similar to one of the columns in this one. If a
Series is passed, its name attribute must be set, and that will be
used as the column name in the resulting joined DataFrame.
on : str, list of str, or array-like, optional
Column or index level name(s) in the caller to join on the index
in `other`, otherwise joins index-on-index. If multiple
values given, the `other` DataFrame must have a MultiIndex. Can
pass an array as the join key if it is not already contained in
the calling DataFrame. Like an Excel VLOOKUP operation.
how : {'left', 'right', 'outer', 'inner'}, default 'left'
How to handle the operation of the two objects.
* left: use calling frame's index (or column if on is specified)
* right: use `other`'s index.
* outer: form union of calling frame's index (or column if on is
specified) with `other`'s index, and sort it
lexicographically.
* inner: form intersection of calling frame's index (or column if
on is specified) with `other`'s index, preserving the order
of the calling's one.
lsuffix : str, default ''
Suffix to use from left frame's overlapping columns.
rsuffix : str, default ''
Suffix to use from right frame's overlapping columns.
sort : bool, default False
Order result DataFrame lexicographically by the join key. If False,
the order of the join key depends on the join type (how keyword).
Returns
-------
DataFrame
A dataframe containing columns from both the caller and `other`.
See Also
--------
DataFrame.merge : For column(s)-on-columns(s) operations.
Notes
-----
Parameters `on`, `lsuffix`, and `rsuffix` are not supported when
passing a list of `DataFrame` objects.
Support for specifying index levels as the `on` parameter was added
in version 0.23.0.
Examples
--------
>>> df = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],
... 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})
>>> df
key A
0 K0 A0
1 K1 A1
2 K2 A2
3 K3 A3
4 K4 A4
5 K5 A5
>>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],
... 'B': ['B0', 'B1', 'B2']})
>>> other
key B
0 K0 B0
1 K1 B1
2 K2 B2
Join DataFrames using their indexes.
>>> df.join(other, lsuffix='_caller', rsuffix='_other')
key_caller A key_other B
0 K0 A0 K0 B0
1 K1 A1 K1 B1
2 K2 A2 K2 B2
3 K3 A3 NaN NaN
4 K4 A4 NaN NaN
5 K5 A5 NaN NaN
If we want to join using the key columns, we need to set key to be
the index in both `df` and `other`. The joined DataFrame will have
key as its index.
>>> df.set_index('key').join(other.set_index('key'))
A B
key
K0 A0 B0
K1 A1 B1
K2 A2 B2
K3 A3 NaN
K4 A4 NaN
K5 A5 NaN
Another option to join using the key columns is to use the `on`
parameter. DataFrame.join always uses `other`'s index but we can use
any column in `df`. This method preserves the original DataFrame's
index in the result.
>>> df.join(other.set_index('key'), on='key')
key A B
0 K0 A0 B0
1 K1 A1 B1
2 K2 A2 B2
3 K3 A3 NaN
4 K4 A4 NaN
5 K5 A5 NaN
"""
return self._join_compat(
other, on=on, how=how, lsuffix=lsuffix, rsuffix=rsuffix, sort=sort
)
def _join_compat(
self, other, on=None, how="left", lsuffix="", rsuffix="", sort=False
):
from pandas.core.reshape.merge import merge
from pandas.core.reshape.concat import concat
if isinstance(other, Series):
if other.name is None:
raise ValueError("Other Series must have a name")
other = DataFrame({other.name: other})
if isinstance(other, DataFrame):
return merge(
self,
other,
left_on=on,
how=how,
left_index=on is None,
right_index=True,
suffixes=(lsuffix, rsuffix),
sort=sort,
)
else:
if on is not None:
raise ValueError(
"Joining multiple DataFrames only supported for joining on index"
)
frames = [self] + list(other)
can_concat = all(df.index.is_unique for df in frames)
# join indexes only using concat
if can_concat:
if how == "left":
res = concat(
frames, axis=1, join="outer", verify_integrity=True, sort=sort
)
return res.reindex(self.index, copy=False)
else:
return concat(
frames, axis=1, join=how, verify_integrity=True, sort=sort
)
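# indexes are not all unique, so fall back to merging the frames pairwise on index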
joined = frames[0]
for frame in frames[1:]:
joined = merge(
joined, frame, how=how, left_index=True, right_index=True
)
return joined
@Substitution("")
@Appender(_merge_doc, indents=2)
def merge(
self,
right,
how="inner",
on=None,
left_on=None,
right_on=None,
left_index=False,
right_index=False,
sort=False,
suffixes=("_x", "_y"),
copy=True,
indicator=False,
validate=None,
) -> "DataFrame":
from pandas.core.reshape.merge import merge
return merge(
self,
right,
how=how,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
sort=sort,
suffixes=suffixes,
copy=copy,
indicator=indicator,
validate=validate,
)
def round(self, decimals=0, *args, **kwargs) -> "DataFrame":
"""
Round a DataFrame to a variable number of decimal places.
Parameters
----------
decimals : int, dict, Series
Number of decimal places to round each column to. If an int is
given, round each column to the same number of places.
Otherwise dict and Series round to variable numbers of places.
Column names should be in the keys if `decimals` is a
dict-like, or in the index if `decimals` is a Series. Any
columns not included in `decimals` will be left as is. Elements
of `decimals` which are not columns of the input will be
ignored.
*args
Additional keywords have no effect but might be accepted for
compatibility with numpy.
**kwargs
Additional keywords have no effect but might be accepted for
compatibility with numpy.
Returns
-------
DataFrame
A DataFrame with the affected columns rounded to the specified
number of decimal places.
See Also
--------
numpy.around : Round a numpy array to the given number of decimals.
Series.round : Round a Series to the given number of decimals.
Examples
--------
>>> df = pd.DataFrame([(.21, .32), (.01, .67), (.66, .03), (.21, .18)],
... columns=['dogs', 'cats'])
>>> df
dogs cats
0 0.21 0.32
1 0.01 0.67
2 0.66 0.03
3 0.21 0.18
By providing an integer each column is rounded to the same number
of decimal places
>>> df.round(1)
dogs cats
0 0.2 0.3
1 0.0 0.7
2 0.7 0.0
3 0.2 0.2
With a dict, the number of places for specific columns can be
specified with the column names as key and the number of decimal
places as value
>>> df.round({'dogs': 1, 'cats': 0})
dogs cats
0 0.2 0.0
1 0.0 1.0
2 0.7 0.0
3 0.2 0.0
Using a Series, the number of places for specific columns can be
specified with the column names as index and the number of
decimal places as value
>>> decimals = pd.Series([0, 1], index=['cats', 'dogs'])
>>> df.round(decimals)
dogs cats
0 0.2 0.0
1 0.0 1.0
2 0.7 0.0
3 0.2 0.0
"""
from pandas.core.reshape.concat import concat
def _dict_round(df, decimals):
for col, vals in df.items():
try:
yield _series_round(vals, decimals[col])
except KeyError:
yield vals
def _series_round(s, decimals):
if is_integer_dtype(s) or is_float_dtype(s):
return s.round(decimals)
return s
nv.validate_round(args, kwargs)
if isinstance(decimals, (dict, Series)):
if isinstance(decimals, Series):
if not decimals.index.is_unique:
raise ValueError("Index of decimals must be unique")
new_cols = list(_dict_round(self, decimals))
elif is_integer(decimals):
# Dispatch to Series.round
new_cols = [_series_round(v, decimals) for _, v in self.items()]
else:
raise TypeError("decimals must be an integer, a dict-like or a Series")
if len(new_cols) > 0:
return self._constructor(
concat(new_cols, axis=1), index=self.index, columns=self.columns
)
else:
return self
# ----------------------------------------------------------------------
# Statistical methods, etc.
def corr(self, method="pearson", min_periods=1) -> "DataFrame":
"""
Compute pairwise correlation of columns, excluding NA/null values.
Parameters
----------
method : {'pearson', 'kendall', 'spearman'} or callable
Method of correlation:
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
* callable: callable with input two 1d ndarrays
and returning a float. Note that the returned matrix from corr
will have 1 along the diagonals and will be symmetric
regardless of the callable's behavior.
.. versionadded:: 0.24.0
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result. Currently only available for Pearson
and Spearman correlation.
Returns
-------
DataFrame
Correlation matrix.
See Also
--------
DataFrame.corrwith
Series.corr
Examples
--------
>>> def histogram_intersection(a, b):
... v = np.minimum(a, b).sum().round(decimals=1)
... return v
>>> df = pd.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.corr(method=histogram_intersection)
dogs cats
dogs 1.0 0.3
cats 0.3 1.0
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.values
if method == "pearson":
correl = libalgos.nancorr(ensure_float64(mat), minp=min_periods)
elif method == "spearman":
correl = libalgos.nancorr_spearman(ensure_float64(mat), minp=min_periods)
elif method == "kendall" or callable(method):
if min_periods is None:
min_periods = 1
mat = ensure_float64(mat).T
corrf = nanops.get_corr_func(method)
K = len(cols)
correl = np.empty((K, K), dtype=float)
mask = np.isfinite(mat)
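# only the upper triangle is computed; the result is mirrored because the matrix is symmetric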
for i, ac in enumerate(mat):
for j, bc in enumerate(mat):
if i > j:
continue
valid = mask[i] & mask[j]
if valid.sum() < min_periods:
c = np.nan
elif i == j:
c = 1.0
elif not valid.all():
c = corrf(ac[valid], bc[valid])
else:
c = corrf(ac, bc)
correl[i, j] = c
correl[j, i] = c
else:
raise ValueError(
"method must be either 'pearson', "
"'spearman', 'kendall', or a callable, "
f"'{method}' was supplied"
)
return self._constructor(correl, index=idx, columns=cols)
def cov(self, min_periods=None) -> "DataFrame":
"""
Compute pairwise covariance of columns, excluding NA/null values.
Compute the pairwise covariance among the series of a DataFrame.
The returned data frame is the `covariance matrix
<https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns
of the DataFrame.
Both NA and null values are automatically excluded from the
calculation. (See the note below about bias from missing values.)
A threshold can be set for the minimum number of
observations for each value created. Comparisons with observations
below this threshold will be returned as ``NaN``.
This method is generally used for the analysis of time series data to
understand the relationship between different measures
across time.
Parameters
----------
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
Returns
-------
DataFrame
The covariance matrix of the series of the DataFrame.
See Also
--------
Series.cov : Compute covariance with another Series.
core.window.EWM.cov: Exponential weighted sample covariance.
core.window.Expanding.cov : Expanding sample covariance.
core.window.Rolling.cov : Rolling sample covariance.
Notes
-----
Returns the covariance matrix of the DataFrame's time series.
The covariance is normalized by N-1.
For DataFrames that have Series that are missing data (assuming that
data is `missing at random
<https://en.wikipedia.org/wiki/Missing_data#Missing_at_random>`__)
the returned covariance matrix will be an unbiased estimate
of the variance and covariance between the member Series.
However, for many applications this estimate may not be acceptable
because the estimate covariance matrix is not guaranteed to be positive
semi-definite. This could lead to estimate correlations having
absolute values which are greater than one, and/or a non-invertible
covariance matrix. See `Estimation of covariance matrices
<http://en.wikipedia.org/w/index.php?title=Estimation_of_covariance_matrices>`__ for more details.
Examples
--------
>>> df = pd.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)],
... columns=['dogs', 'cats'])
>>> df.cov()
dogs cats
dogs 0.666667 -1.000000
cats -1.000000 1.666667
>>> np.random.seed(42)
>>> df = pd.DataFrame(np.random.randn(1000, 5),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df.cov()
a b c d e
a 0.998438 -0.020161 0.059277 -0.008943 0.014144
b -0.020161 1.059352 -0.008543 -0.024738 0.009826
c 0.059277 -0.008543 1.010670 -0.001486 -0.000271
d -0.008943 -0.024738 -0.001486 0.921297 -0.013692
e 0.014144 0.009826 -0.000271 -0.013692 0.977795
**Minimum number of periods**
This method also supports an optional ``min_periods`` keyword
that specifies the required minimum number of non-NA observations for
each column pair in order to have a valid result:
>>> np.random.seed(42)
>>> df = pd.DataFrame(np.random.randn(20, 3),
... columns=['a', 'b', 'c'])
>>> df.loc[df.index[:5], 'a'] = np.nan
>>> df.loc[df.index[5:10], 'b'] = np.nan
>>> df.cov(min_periods=12)
a b c
a 0.316741 NaN -0.150812
b NaN 1.248003 0.191417
c -0.150812 0.191417 0.895202
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.values
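# fast path when no values are missing: plain np.cov; otherwise use the pairwise NaN-aware routine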
if notna(mat).all():
if min_periods is not None and min_periods > len(mat):
baseCov = np.empty((mat.shape[1], mat.shape[1]))
baseCov.fill(np.nan)
else:
baseCov = np.cov(mat.T)
baseCov = baseCov.reshape((len(cols), len(cols)))
else:
baseCov = libalgos.nancorr(ensure_float64(mat), cov=True, minp=min_periods)
return self._constructor(baseCov, index=idx, columns=cols)
def corrwith(self, other, axis=0, drop=False, method="pearson") -> Series:
"""
Compute pairwise correlation.
Pairwise correlation is computed between rows or columns of
DataFrame with rows or columns of Series or DataFrame. DataFrames
are first aligned along both axes before computing the
correlations.
Parameters
----------
other : DataFrame, Series
Object with which to compute correlations.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' to compute column-wise, 1 or 'columns' for
row-wise.
drop : bool, default False
Drop missing indices from result.
method : {'pearson', 'kendall', 'spearman'} or callable
Method of correlation:
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
* callable: callable with input two 1d ndarrays
and returning a float.
.. versionadded:: 0.24.0
Returns
-------
Series
Pairwise correlations.
See Also
--------
DataFrame.corr
"""
axis = self._get_axis_number(axis)
this = self._get_numeric_data()
if isinstance(other, Series):
return this.apply(lambda x: other.corr(x, method=method), axis=axis)
other = other._get_numeric_data()
left, right = this.align(other, join="inner", copy=False)
if axis == 1:
left = left.T
right = right.T
if method == "pearson":
# mask missing values
left = left + right * 0
right = right + left * 0
# demeaned data
ldem = left - left.mean()
rdem = right - right.mean()
num = (ldem * rdem).sum()
dom = (left.count() - 1) * left.std() * right.std()
correl = num / dom
elif method in ["kendall", "spearman"] or callable(method):
def c(x):
return nanops.nancorr(x[0], x[1], method=method)
correl = Series(
map(c, zip(left.values.T, right.values.T)), index=left.columns
)
else:
raise ValueError(
f"Invalid method {method} was passed, "
"valid methods are: 'pearson', 'kendall', "
"'spearman', or callable"
)
if not drop:
# Find non-matching labels along the given axis
# and append missing correlations (GH 22375)
raxis = 1 if axis == 0 else 0
result_index = this._get_axis(raxis).union(other._get_axis(raxis))
idx_diff = result_index.difference(correl.index)
if len(idx_diff) > 0:
correl = correl.append(Series([np.nan] * len(idx_diff), index=idx_diff))
return correl
# ----------------------------------------------------------------------
# ndarray-like stats methods
def count(self, axis=0, level=None, numeric_only=False):
"""
Count non-NA cells for each column or row.
The values `None`, `NaN`, `NaT`, and optionally `numpy.inf` (depending
on `pandas.options.mode.use_inf_as_na`) are considered NA.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index' counts are generated for each column.
If 1 or 'columns' counts are generated for each **row**.
level : int or str, optional
If the axis is a `MultiIndex` (hierarchical), count along a
particular `level`, collapsing into a `DataFrame`.
A `str` specifies the level name.
numeric_only : bool, default False
Include only `float`, `int` or `boolean` data.
Returns
-------
Series or DataFrame
For each column/row the number of non-NA/null entries.
If `level` is specified returns a `DataFrame`.
See Also
--------
Series.count: Number of non-NA elements in a Series.
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = pd.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]})
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
Counts for each **row**:
>>> df.count(axis='columns')
0 3
1 2
2 3
3 3
4 3
dtype: int64
Counts for one level of a `MultiIndex`:
>>> df.set_index(["Person", "Single"]).count(level="Person")
Age
Person
John 2
Lewis 1
Myla 1
"""
axis = self._get_axis_number(axis)
if level is not None:
return self._count_level(level, axis=axis, numeric_only=numeric_only)
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
# GH #423
if len(frame._get_axis(axis)) == 0:
result = Series(0, index=frame._get_agg_axis(axis))
else:
if frame._is_mixed_type or frame._data.any_extension_types:
# the or any_extension_types is really only hit for single-
# column frames with an extension array
result = notna(frame).sum(axis=axis)
else:
# GH13407
series_counts = notna(frame).sum(axis=axis)
counts = series_counts.values
result = Series(counts, index=frame._get_agg_axis(axis))
return result.astype("int64")
def _count_level(self, level, axis=0, numeric_only=False):
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
count_axis = frame._get_axis(axis)
agg_axis = frame._get_agg_axis(axis)
if not isinstance(count_axis, ABCMultiIndex):
raise TypeError(
f"Can only count levels on hierarchical {self._get_axis_name(axis)}."
)
if frame._is_mixed_type:
# Since we have mixed types, calling notna(frame.values) might
# upcast everything to object
mask = notna(frame).values
else:
# But use the speedup when we have homogeneous dtypes
mask = notna(frame.values)
if axis == 1:
# We're transposing the mask rather than frame to avoid potential
# upcasts to object, which induces a ~20x slowdown
mask = mask.T
if isinstance(level, str):
level = count_axis._get_level_number(level)
level_name = count_axis._names[level]
level_index = count_axis.levels[level]._shallow_copy(name=level_name)
level_codes = ensure_int64(count_axis.codes[level])
counts = lib.count_level_2d(mask, level_codes, len(level_index), axis=0)
result = DataFrame(counts, index=level_index, columns=agg_axis)
if axis == 1:
# Undo our earlier transpose
return result.T
else:
return result
def _reduce(
self, op, name, axis=0, skipna=True, numeric_only=None, filter_type=None, **kwds
):
if axis is None and filter_type == "bool":
labels = None
constructor = None
else:
# TODO: Make other agg func handle axis=None properly
axis = self._get_axis_number(axis)
labels = self._get_agg_axis(axis)
constructor = self._constructor
def f(x):
return op(x, axis=axis, skipna=skipna, **kwds)
def _get_data(axis_matters):
if filter_type is None or filter_type == "numeric":
data = self._get_numeric_data()
elif filter_type == "bool":
if axis_matters:
# GH#25101, GH#24434
data = self._get_bool_data() if axis == 0 else self
else:
data = self._get_bool_data()
else: # pragma: no cover
msg = (
f"Generating numeric_only data with filter_type {filter_type} "
"not supported."
)
raise NotImplementedError(msg)
return data
if numeric_only is not None and axis in [0, 1]:
df = self
if numeric_only is True:
df = _get_data(axis_matters=True)
if axis == 1:
df = df.T
axis = 0
out_dtype = "bool" if filter_type == "bool" else None
# After possibly _get_data and transposing, we are now in the
# simple case where we can use BlockManager._reduce
res = df._data.reduce(op, axis=1, skipna=skipna, **kwds)
assert isinstance(res, dict)
if len(res):
assert len(res) == max(list(res.keys())) + 1, res.keys()
out = df._constructor_sliced(res, index=range(len(res)), dtype=out_dtype)
out.index = df.columns
return out
if numeric_only is None:
values = self.values
try:
result = f(values)
if filter_type == "bool" and is_object_dtype(values) and axis is None:
# work around https://github.com/numpy/numpy/issues/10489
# TODO: combine with hasattr(result, 'dtype') further down
# hard since we don't have `values` down there.
result = np.bool_(result)
except TypeError:
# e.g. in nanops trying to convert strs to float
# try by-column first
if filter_type is None and axis == 0:
# this can end up with a non-reduction
# but not always. if the types are mixed
# with datelike then need to make sure a series
# we only end up here if we have not specified
# numeric_only and yet we have tried a
# column-by-column reduction, where we have mixed type.
# So let's just do what we can
from pandas.core.apply import frame_apply
opa = frame_apply(
self, func=f, result_type="expand", ignore_failures=True
)
result = opa.get_result()
if result.ndim == self.ndim:
result = result.iloc[0]
return result
# TODO: why doesnt axis matter here?
data = _get_data(axis_matters=False)
with np.errstate(all="ignore"):
result = f(data.values)
labels = data._get_agg_axis(axis)
else:
if numeric_only:
data = _get_data(axis_matters=True)
values = data.values
labels = data._get_agg_axis(axis)
else:
values = self.values
result = f(values)
if hasattr(result, "dtype") and is_object_dtype(result.dtype):
try:
if filter_type is None or filter_type == "numeric":
result = result.astype(np.float64)
elif filter_type == "bool" and notna(result).all():
result = result.astype(np.bool_)
except (ValueError, TypeError):
# try to coerce to the original dtypes item by item if we can
if axis == 0:
result = coerce_to_dtypes(result, self.dtypes)
if constructor is not None:
result = Series(result, index=labels)
return result
def nunique(self, axis=0, dropna=True) -> Series:
"""
Count distinct observations over requested axis.
Return Series with number of distinct observations. Can ignore NaN
values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for
column-wise.
dropna : bool, default True
Don't include NaN in the counts.
Returns
-------
Series
See Also
--------
Series.nunique: Method nunique for Series.
DataFrame.count: Count non-NA cells for each column or row.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 1, 1]})
>>> df.nunique()
A 3
B 1
dtype: int64
>>> df.nunique(axis=1)
0 1
1 2
2 2
dtype: int64
"""
return self.apply(Series.nunique, axis=axis, dropna=dropna)
def idxmin(self, axis=0, skipna=True) -> Series:
"""
Return index of first occurrence of minimum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
Returns
-------
Series
Indexes of minima along the specified axis.
Raises
------
ValueError
* If the row/column is empty
See Also
--------
Series.idxmin
Notes
-----
This method is the DataFrame version of ``ndarray.argmin``.
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmin(self.values, axis=axis, skipna=skipna)
index = self._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return Series(result, index=self._get_agg_axis(axis))
def idxmax(self, axis=0, skipna=True) -> Series:
"""
Return index of first occurrence of maximum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
Returns
-------
Series
Indexes of maxima along the specified axis.
Raises
------
ValueError
* If the row/column is empty
See Also
--------
Series.idxmax
Notes
-----
This method is the DataFrame version of ``ndarray.argmax``.
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmax(self.values, axis=axis, skipna=skipna)
index = self._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return Series(result, index=self._get_agg_axis(axis))
def _get_agg_axis(self, axis_num):
"""
Let's be explicit about this.
"""
if axis_num == 0:
return self.columns
elif axis_num == 1:
return self.index
else:
raise ValueError(f"Axis must be 0 or 1 (got {repr(axis_num)})")
def mode(self, axis=0, numeric_only=False, dropna=True) -> "DataFrame":
"""
Get the mode(s) of each element along the selected axis.
The mode of a set of values is the value that appears most often.
It can be multiple values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to iterate over while searching for the mode:
* 0 or 'index' : get mode of each column
* 1 or 'columns' : get mode of each row.
numeric_only : bool, default False
If True, only apply to numeric columns.
dropna : bool, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
The modes of each column or row.
See Also
--------
Series.mode : Return the highest frequency value in a Series.
Series.value_counts : Return the counts of values in a Series.
Examples
--------
>>> df = pd.DataFrame([('bird', 2, 2),
... ('mammal', 4, np.nan),
... ('arthropod', 8, 0),
... ('bird', 2, np.nan)],
... index=('falcon', 'horse', 'spider', 'ostrich'),
... columns=('species', 'legs', 'wings'))
>>> df
species legs wings
falcon bird 2 2.0
horse mammal 4 NaN
spider arthropod 8 0.0
ostrich bird 2 NaN
By default, missing values are not considered, and the modes of wings
are both 0 and 2. The second row of species and legs contains ``NaN``
because those columns have only one mode, but the DataFrame has two rows.
>>> df.mode()
species legs wings
0 bird 2.0 0.0
1 NaN NaN 2.0
Setting ``dropna=False``, ``NaN`` values are considered and they can be
the mode (like for wings).
>>> df.mode(dropna=False)
species legs wings
0 bird 2 NaN
Setting ``numeric_only=True``, only the mode of numeric columns is
computed, and columns of other types are ignored.
>>> df.mode(numeric_only=True)
legs wings
0 2.0 0.0
1 NaN 2.0
To compute the mode over columns and not rows, use the axis parameter:
>>> df.mode(axis='columns', numeric_only=True)
0 1
falcon 2.0 NaN
horse 4.0 NaN
spider 0.0 8.0
ostrich 2.0 NaN
"""
data = self if not numeric_only else self._get_numeric_data()
def f(s):
return s.mode(dropna=dropna)
return data.apply(f, axis=axis)
def quantile(self, q=0.5, axis=0, numeric_only=True, interpolation="linear"):
"""
Return values at the given quantile over requested axis.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
Value between 0 <= q <= 1, the quantile(s) to compute.
axis : {0, 1, 'index', 'columns'} (default 0)
Equals 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
numeric_only : bool, default True
If False, the quantile of datetime and timedelta data will be
computed as well.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
Series or DataFrame
If ``q`` is an array, a DataFrame will be returned where the
index is ``q``, the columns are the columns of self, and the
values are the quantiles.
If ``q`` is a float, a Series will be returned where the
index is the columns of self and the values are the quantiles.
See Also
--------
core.window.Rolling.quantile: Rolling quantile.
numpy.percentile: Numpy function to compute the percentile.
Examples
--------
>>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),
... columns=['a', 'b'])
>>> df.quantile(.1)
a 1.3
b 3.7
Name: 0.1, dtype: float64
>>> df.quantile([.1, .5])
a b
0.1 1.3 3.7
0.5 2.5 55.0
Specifying `numeric_only=False` will also compute the quantile of
datetime and timedelta data.
>>> df = pd.DataFrame({'A': [1, 2],
... 'B': [pd.Timestamp('2010'),
... pd.Timestamp('2011')],
... 'C': [pd.Timedelta('1 days'),
... pd.Timedelta('2 days')]})
>>> df.quantile(0.5, numeric_only=False)
A 1.5
B 2010-07-02 12:00:00
C 1 days 12:00:00
Name: 0.5, dtype: object
"""
validate_percentile(q)
data = self._get_numeric_data() if numeric_only else self
axis = self._get_axis_number(axis)
is_transposed = axis == 1
if is_transposed:
data = data.T
if len(data.columns) == 0:
# GH#23925 _get_numeric_data may have dropped all columns
cols = Index([], name=self.columns.name)
if is_list_like(q):
return self._constructor([], index=q, columns=cols)
return self._constructor_sliced([], index=cols, name=q, dtype=np.float64)
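# delegate the actual quantile computation to the internal BlockManager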
result = data._data.quantile(
qs=q, axis=1, interpolation=interpolation, transposed=is_transposed
)
if result.ndim == 2:
result = self._constructor(result)
else:
result = self._constructor_sliced(result, name=q)
if is_transposed:
result = result.T
return result
def to_timestamp(self, freq=None, how="start", axis=0, copy=True) -> "DataFrame":
"""
Cast to DatetimeIndex of timestamps, at *beginning* of period.
Parameters
----------
freq : str, default frequency of PeriodIndex
Desired frequency.
how : {'s', 'e', 'start', 'end'}
Convention for converting period to timestamp; start of period
vs. end.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default).
copy : bool, default True
If False then underlying input data is not copied.
Returns
-------
DataFrame with DatetimeIndex
"""
new_data = self._data
if copy:
new_data = new_data.copy()
axis = self._get_axis_number(axis)
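# BlockManager axes are reversed relative to the DataFrame: axis 1 holds the index, axis 0 the columns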
if axis == 0:
new_data.set_axis(1, self.index.to_timestamp(freq=freq, how=how))
elif axis == 1:
new_data.set_axis(0, self.columns.to_timestamp(freq=freq, how=how))
else: # pragma: no cover
raise AssertionError(f"Axis must be 0 or 1. Got {axis}")
return self._constructor(new_data)
def to_period(self, freq=None, axis=0, copy=True) -> "DataFrame":
"""
Convert DataFrame from DatetimeIndex to PeriodIndex.
Convert DataFrame from DatetimeIndex to PeriodIndex with desired
frequency (inferred from index if not passed).
Parameters
----------
freq : str, optional
Frequency of the PeriodIndex.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default).
copy : bool, default True
If False then underlying input data is not copied.
Returns
-------
TimeSeries with PeriodIndex
"""
new_data = self._data
if copy:
new_data = new_data.copy()
axis = self._get_axis_number(axis)
if axis == 0:
new_data.set_axis(1, self.index.to_period(freq=freq))
elif axis == 1:
new_data.set_axis(0, self.columns.to_period(freq=freq))
else: # pragma: no cover
raise AssertionError(f"Axis must be 0 or 1. Got {axis}")
return self._constructor(new_data)
def isin(self, values) -> "DataFrame":
"""
Whether each element in the DataFrame is contained in values.
Parameters
----------
values : iterable, Series, DataFrame or dict
The result will only be true at a location if all the
labels match. If `values` is a Series, that's the index. If
`values` is a dict, the keys must be the column names,
which must match. If `values` is a DataFrame,
then both the index and column labels must match.
Returns
-------
DataFrame
DataFrame of booleans showing whether each element in the DataFrame
is contained in values.
See Also
--------
DataFrame.eq: Equality test for DataFrame.
Series.isin: Equivalent method on Series.
Series.str.contains: Test if pattern or regex is contained within a
string of a Series or Index.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},
... index=['falcon', 'dog'])
>>> df
num_legs num_wings
falcon 2 2
dog 4 0
When ``values`` is a list check whether every value in the DataFrame
is present in the list (which animals have 0 or 2 legs or wings)
>>> df.isin([0, 2])
num_legs num_wings
falcon True True
dog False True
When ``values`` is a dict, we can pass values to check for each
column separately:
>>> df.isin({'num_wings': [0, 3]})
num_legs num_wings
falcon False False
dog False True
When ``values`` is a Series or DataFrame the index and column must
match. Note that 'falcon' does not match based on the number of legs
in df2.
>>> other = pd.DataFrame({'num_legs': [8, 2], 'num_wings': [0, 2]},
... index=['spider', 'falcon'])
>>> df.isin(other)
num_legs num_wings
falcon True True
dog False False
"""
if isinstance(values, dict):
from pandas.core.reshape.concat import concat
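# columns not mentioned in the dict default to an empty list, so their values compare as False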
values = collections.defaultdict(list, values)
return self._ensure_type(
concat(
(
self.iloc[:, [i]].isin(values[col])
for i, col in enumerate(self.columns)
),
axis=1,
)
)
elif isinstance(values, Series):
if not values.index.is_unique:
raise ValueError("cannot compute isin with a duplicate axis.")
return self.eq(values.reindex_like(self), axis="index")
elif isinstance(values, DataFrame):
if not (values.columns.is_unique and values.index.is_unique):
raise ValueError("cannot compute isin with a duplicate axis.")
return self.eq(values.reindex_like(self))
else:
if not is_list_like(values):
raise TypeError(
"only list-like or dict-like objects are allowed "
"to be passed to DataFrame.isin(), "
f"you passed a {repr(type(values).__name__)}"
)
return DataFrame(
algorithms.isin(self.values.ravel(), values).reshape(self.shape),
self.index,
self.columns,
)
# ----------------------------------------------------------------------
# Add plotting methods to DataFrame
plot = CachedAccessor("plot", pandas.plotting.PlotAccessor)
hist = pandas.plotting.hist_frame
boxplot = pandas.plotting.boxplot_frame
sparse = CachedAccessor("sparse", SparseFrameAccessor)
DataFrame._setup_axes(
["index", "columns"],
docs={
"index": "The index (row labels) of the DataFrame.",
"columns": "The column labels of the DataFrame.",
},
)
DataFrame._add_numeric_operations()
DataFrame._add_series_or_dataframe_operations()
ops.add_flex_arithmetic_methods(DataFrame)
ops.add_special_arithmetic_methods(DataFrame)
def _from_nested_dict(data):
# TODO: this should be seriously cythonized
new_data = {}
for index, s in data.items():
for col, v in s.items():
new_data[col] = new_data.get(col, {})
new_data[col][index] = v
return new_data
def _put_str(s, space):
return str(s)[:space].ljust(space)
|
the-stack_0_9939 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, print_function
import frappe
import unittest, json, sys, os
import xmlrunner
import importlib
from frappe.modules import load_doctype_module, get_module_name
from frappe.utils import cstr
import frappe.utils.scheduler
import cProfile, pstats
from six import StringIO
from six.moves import reload_module
from frappe.model.naming import revert_series_if_last
unittest_runner = unittest.TextTestRunner
def xmlrunner_wrapper(output):
"""Convenience wrapper to keep method signature unchanged for XMLTestRunner and TextTestRunner"""
def _runner(*args, **kwargs):
kwargs['output'] = output
return xmlrunner.XMLTestRunner(*args, **kwargs)
return _runner
def main(app=None, module=None, doctype=None, verbose=False, tests=(),
force=False, profile=False, junit_xml_output=None, ui_tests=False,
doctype_list_path=None, skip_test_records=False, failfast=False):
global unittest_runner
if doctype_list_path:
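# doctype_list_path is given as '<app>/<path within the app>' pointing at a file with one DocType per line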
app, doctype_list_path = doctype_list_path.split(os.path.sep, 1)
with open(frappe.get_app_path(app, doctype_list_path), 'r') as f:
doctype = f.read().strip().splitlines()
xmloutput_fh = None
if junit_xml_output:
xmloutput_fh = open(junit_xml_output, 'wb')
unittest_runner = xmlrunner_wrapper(xmloutput_fh)
else:
unittest_runner = unittest.TextTestRunner
try:
frappe.flags.print_messages = verbose
frappe.flags.in_test = True
if not frappe.db:
frappe.connect()
# if not frappe.conf.get("db_name").startswith("test_"):
# raise Exception, 'db_name must start with "test_"'
# workaround! since there is no separate test db
frappe.clear_cache()
frappe.utils.scheduler.disable_scheduler()
set_test_email_config()
if not frappe.flags.skip_before_tests:
if verbose:
print('Running "before_tests" hooks')
for fn in frappe.get_hooks("before_tests", app_name=app):
frappe.get_attr(fn)()
if doctype:
ret = run_tests_for_doctype(doctype, verbose, tests, force, profile)
elif module:
ret = run_tests_for_module(module, verbose, tests, profile)
else:
ret = run_all_tests(app, verbose, profile, ui_tests, failfast=failfast)
frappe.db.commit()
# workaround! since there is no separate test db
frappe.clear_cache()
return ret
finally:
if xmloutput_fh:
xmloutput_fh.flush()
xmloutput_fh.close()
def set_test_email_config():
frappe.conf.update({
"auto_email_id": "[email protected]",
"mail_server": "smtp.example.com",
"mail_login": "[email protected]",
"mail_password": "test",
"admin_password": "admin"
})
def run_all_tests(app=None, verbose=False, profile=False, ui_tests=False, failfast=False):
import os
apps = [app] if app else frappe.get_installed_apps()
test_suite = unittest.TestSuite()
for app in apps:
for path, folders, files in os.walk(frappe.get_pymodule_path(app)):
for dontwalk in ('locals', '.git', 'public'):
if dontwalk in folders:
folders.remove(dontwalk)
# print path
for filename in files:
filename = cstr(filename)
if filename.startswith("test_") and filename.endswith(".py")\
and filename != 'test_runner.py':
# print filename[:-3]
_add_test(app, path, filename, verbose,
test_suite, ui_tests)
runner = unittest_runner(verbosity=1+(verbose and 1 or 0), failfast=failfast)
if profile:
pr = cProfile.Profile()
pr.enable()
out = runner.run(test_suite)
if profile:
pr.disable()
s = StringIO()
ps = pstats.Stats(pr, stream=s).sort_stats('cumulative')
ps.print_stats()
print(s.getvalue())
return out
def run_tests_for_doctype(doctypes, verbose=False, tests=(), force=False, profile=False):
modules = []
if not isinstance(doctypes, (list, tuple)):
doctypes = [doctypes]
for doctype in doctypes:
module = frappe.db.get_value("DocType", doctype, "module")
if not module:
print('Invalid doctype {0}'.format(doctype))
sys.exit(1)
test_module = get_module_name(doctype, module, "test_")
if force:
for name in frappe.db.sql_list("select name from `tab%s`" % doctype):
frappe.delete_doc(doctype, name, force=True)
make_test_records(doctype, verbose=verbose, force=force)
modules.append(importlib.import_module(test_module))
return _run_unittest(modules, verbose=verbose, tests=tests, profile=profile)
def run_tests_for_module(module, verbose=False, tests=(), profile=False):
module = importlib.import_module(module)
if hasattr(module, "test_dependencies"):
for doctype in module.test_dependencies:
make_test_records(doctype, verbose=verbose)
return _run_unittest(module, verbose=verbose, tests=tests, profile=profile)
def run_setup_wizard_ui_test(app=None, verbose=False, profile=False):
'''Run setup wizard UI test using test_test_runner'''
frappe.flags.run_setup_wizard_ui_test = 1
return run_ui_tests(app=app, test=None, verbose=verbose, profile=profile)
def run_ui_tests(app=None, test=None, test_list=None, verbose=False, profile=False):
'''Run a single unit test for UI using test_test_runner'''
module = importlib.import_module('frappe.tests.ui.test_test_runner')
frappe.flags.ui_test_app = app
if test_list:
frappe.flags.ui_test_list = test_list
else:
frappe.flags.ui_test_path = test
return _run_unittest(module, verbose=verbose, tests=(), profile=profile)
def _run_unittest(modules, verbose=False, tests=(), profile=False):
test_suite = unittest.TestSuite()
if not isinstance(modules, (list, tuple)):
modules = [modules]
for module in modules:
module_test_cases = unittest.TestLoader().loadTestsFromModule(module)
if tests:
for each in module_test_cases:
for test_case in each.__dict__["_tests"]:
if test_case.__dict__["_testMethodName"] in tests:
test_suite.addTest(test_case)
else:
test_suite.addTest(module_test_cases)
runner = unittest_runner(verbosity=1+(verbose and 1 or 0))
if profile:
pr = cProfile.Profile()
pr.enable()
frappe.flags.tests_verbose = verbose
out = runner.run(test_suite)
if profile:
pr.disable()
s = StringIO()
ps = pstats.Stats(pr, stream=s).sort_stats('cumulative')
ps.print_stats()
print(s.getvalue())
return out
def _add_test(app, path, filename, verbose, test_suite=None, ui_tests=False):
import os
if os.path.sep.join(["doctype", "doctype", "boilerplate"]) in path:
# in /doctype/doctype/boilerplate/
return
app_path = frappe.get_pymodule_path(app)
relative_path = os.path.relpath(path, app_path)
if relative_path=='.':
module_name = app
else:
module_name = '{app}.{relative_path}.{module_name}'.format(app=app,
relative_path=relative_path.replace('/', '.'), module_name=filename[:-3])
module = importlib.import_module(module_name)
if hasattr(module, "test_dependencies"):
for doctype in module.test_dependencies:
make_test_records(doctype, verbose=verbose)
	is_ui_test = hasattr(module, 'TestDriver')
if is_ui_test != ui_tests:
return
if not test_suite:
test_suite = unittest.TestSuite()
if os.path.basename(os.path.dirname(path))=="doctype":
txt_file = os.path.join(path, filename[5:].replace(".py", ".json"))
with open(txt_file, 'r') as f:
doc = json.loads(f.read())
doctype = doc["name"]
make_test_records(doctype, verbose)
test_suite.addTest(unittest.TestLoader().loadTestsFromModule(module))
def make_test_records(doctype, verbose=0, force=False):
if not frappe.db:
frappe.connect()
if frappe.flags.skip_test_records:
return
for options in get_dependencies(doctype):
if options == "[Select]":
continue
		if options not in frappe.local.test_objects:
frappe.local.test_objects[options] = []
make_test_records(options, verbose, force)
make_test_records_for_doctype(options, verbose, force)
def get_modules(doctype):
module = frappe.db.get_value("DocType", doctype, "module")
try:
test_module = load_doctype_module(doctype, module, "test_")
if test_module:
reload_module(test_module)
except ImportError:
test_module = None
return module, test_module
def get_dependencies(doctype):
module, test_module = get_modules(doctype)
meta = frappe.get_meta(doctype)
link_fields = meta.get_link_fields()
for df in meta.get_table_fields():
link_fields.extend(frappe.get_meta(df.options).get_link_fields())
options_list = [df.options for df in link_fields] + [doctype]
if hasattr(test_module, "test_dependencies"):
options_list += test_module.test_dependencies
options_list = list(set(options_list))
if hasattr(test_module, "test_ignore"):
for doctype_name in test_module.test_ignore:
if doctype_name in options_list:
options_list.remove(doctype_name)
return options_list
def make_test_records_for_doctype(doctype, verbose=0, force=False):
if not force and doctype in get_test_record_log():
return
module, test_module = get_modules(doctype)
if verbose:
print("Making for " + doctype)
if hasattr(test_module, "_make_test_records"):
frappe.local.test_objects[doctype] += test_module._make_test_records(verbose)
elif hasattr(test_module, "test_records"):
frappe.local.test_objects[doctype] += make_test_objects(doctype, test_module.test_records, verbose, force)
else:
test_records = frappe.get_test_records(doctype)
if test_records:
frappe.local.test_objects[doctype] += make_test_objects(doctype, test_records, verbose, force)
elif verbose:
print_mandatory_fields(doctype)
add_to_test_record_log(doctype)
def make_test_objects(doctype, test_records=None, verbose=None, reset=False):
'''Make test objects from given list of `test_records` or from `test_records.json`'''
records = []
def revert_naming(d):
if getattr(d, 'naming_series', None):
revert_series_if_last(d.naming_series, d.name)
if test_records is None:
test_records = frappe.get_test_records(doctype)
for doc in test_records:
if not doc.get("doctype"):
doc["doctype"] = doctype
d = frappe.copy_doc(doc)
if d.meta.get_field("naming_series"):
if not d.naming_series:
d.naming_series = "_T-" + d.doctype + "-"
if doc.get('name'):
d.name = doc.get('name')
else:
d.set_new_name()
if frappe.db.exists(d.doctype, d.name) and not reset:
frappe.db.rollback()
# do not create test records, if already exists
continue
# submit if docstatus is set to 1 for test record
docstatus = d.docstatus
d.docstatus = 0
try:
d.run_method("before_test_insert")
d.insert()
if docstatus == 1:
d.submit()
except frappe.NameError:
revert_naming(d)
except Exception as e:
if d.flags.ignore_these_exceptions_in_test and e.__class__ in d.flags.ignore_these_exceptions_in_test:
revert_naming(d)
else:
raise
records.append(d.name)
frappe.db.commit()
return records
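# Illustrative sketch (not part of the original file): `make_test_objects`
# also accepts plain dicts and fills in the doctype itself, so a test can
# create ad-hoc fixtures. The field value below is invented and assumes an
# initialised test site where the ToDo doctype is available.
#
#     names = make_test_objects("ToDo", [{"description": "check fixtures"}])
#     # -> list of names of the inserted (or skipped) test documents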
def print_mandatory_fields(doctype):
print("Please setup make_test_records for: " + doctype)
print("-" * 60)
meta = frappe.get_meta(doctype)
print("Autoname: " + (meta.autoname or ""))
print("Mandatory Fields: ")
for d in meta.get("fields", {"reqd":1}):
print(d.parent + ":" + d.fieldname + " | " + d.fieldtype + " | " + (d.options or ""))
print()
def add_to_test_record_log(doctype):
'''Add `doctype` to site/.test_log
`.test_log` is a cache of all doctypes for which test records are created'''
test_record_log = get_test_record_log()
	if doctype not in test_record_log:
frappe.flags.test_record_log.append(doctype)
with open(frappe.get_site_path('.test_log'), 'w') as f:
f.write('\n'.join(filter(None, frappe.flags.test_record_log)))
def get_test_record_log():
'''Return the list of doctypes for which test records have been created'''
if 'test_record_log' not in frappe.flags:
if os.path.exists(frappe.get_site_path('.test_log')):
with open(frappe.get_site_path('.test_log'), 'r') as f:
frappe.flags.test_record_log = f.read().splitlines()
else:
frappe.flags.test_record_log = []
	return frappe.flags.test_record_log
 |
the-stack_0_9941 | """Functions for converting between color spaces.
The "central" color space in this module is RGB, more specifically the linear
sRGB color space using D65 as a white-point [1]_. This represents a
standard monitor (w/o gamma correction). For a good FAQ on color spaces see
[2]_.
The API consists of functions to convert to and from RGB as defined above, as
well as a generic function to convert to and from any supported color space
(which is done through RGB in most cases).
Supported color spaces
----------------------
* RGB : Red Green Blue.
Here the sRGB standard [1]_.
* HSV : Hue, Saturation, Value.
Uniquely defined when related to sRGB [3]_.
* RGB CIE : Red Green Blue.
The original RGB CIE standard from 1931 [4]_. Primary colors are 700 nm
(red), 546.1 nm (blue) and 435.8 nm (green).
* XYZ CIE : XYZ
Derived from the RGB CIE color space. Chosen such that
``x == y == z == 1/3`` at the whitepoint, and all color matching
functions are greater than zero everywhere.
* LAB CIE : Lightness, a, b
Colorspace derived from XYZ CIE that is intended to be more
perceptually uniform
* LUV CIE : Lightness, u, v
Colorspace derived from XYZ CIE that is intended to be more
perceptually uniform
* LCH CIE : Lightness, Chroma, Hue
Defined in terms of LAB CIE. C and H are the polar representation of
  a and b. The polar angle H is defined to be on ``(0, 2*pi)``
:author: Nicolas Pinto (rgb2hsv)
:author: Ralf Gommers (hsv2rgb)
:author: Travis Oliphant (XYZ and RGB CIE functions)
:author: Matt Terry (lab2lch)
:author: Alex Izvorski (yuv2rgb, rgb2yuv and related)
:license: modified BSD
References
----------
.. [1] Official specification of sRGB, IEC 61966-2-1:1999.
.. [2] http://www.poynton.com/ColorFAQ.html
.. [3] https://en.wikipedia.org/wiki/HSL_and_HSV
.. [4] https://en.wikipedia.org/wiki/CIE_1931_color_space
"""
from warnings import warn
import numpy as np
from scipy import linalg
from ..util import dtype, dtype_limits
def guess_spatial_dimensions(image):
"""Make an educated guess about whether an image has a channels dimension.
Parameters
----------
image : ndarray
The input image.
Returns
-------
spatial_dims : int or None
The number of spatial dimensions of `image`. If ambiguous, the value
is ``None``.
Raises
------
ValueError
If the image array has less than two or more than four dimensions.
"""
if image.ndim == 2:
return 2
if image.ndim == 3 and image.shape[-1] != 3:
return 3
if image.ndim == 3 and image.shape[-1] == 3:
return None
if image.ndim == 4 and image.shape[-1] == 3:
return 3
else:
raise ValueError("Expected 2D, 3D, or 4D array, got %iD." % image.ndim)
def convert_colorspace(arr, fromspace, tospace):
"""Convert an image array to a new color space.
Valid color spaces are:
'RGB', 'HSV', 'RGB CIE', 'XYZ', 'YUV', 'YIQ', 'YPbPr', 'YCbCr', 'YDbDr'
Parameters
----------
arr : array_like
The image to convert.
fromspace : valid color space
The color space to convert from. Can be specified in lower case.
tospace : valid color space
The color space to convert to. Can be specified in lower case.
Returns
-------
out : ndarray
The converted image.
Notes
-----
Conversion is performed through the "central" RGB color space,
i.e. conversion from XYZ to HSV is implemented as ``XYZ -> RGB -> HSV``
instead of directly.
Examples
--------
>>> from skimage import data
>>> img = data.astronaut()
>>> img_hsv = convert_colorspace(img, 'RGB', 'HSV')
"""
fromdict = {'rgb': lambda im: im, 'hsv': hsv2rgb, 'rgb cie': rgbcie2rgb,
'xyz': xyz2rgb, 'yuv': yuv2rgb, 'yiq': yiq2rgb,
'ypbpr': ypbpr2rgb, 'ycbcr': ycbcr2rgb, 'ydbdr': ydbdr2rgb}
todict = {'rgb': lambda im: im, 'hsv': rgb2hsv, 'rgb cie': rgb2rgbcie,
'xyz': rgb2xyz, 'yuv': rgb2yuv, 'yiq': rgb2yiq,
'ypbpr': rgb2ypbpr, 'ycbcr': rgb2ycbcr, 'ydbdr': rgb2ydbdr}
fromspace = fromspace.lower()
tospace = tospace.lower()
if fromspace not in fromdict:
msg = '`fromspace` has to be one of {}'.format(fromdict.keys())
raise ValueError(msg)
if tospace not in todict:
msg = '`tospace` has to be one of {}'.format(todict.keys())
raise ValueError(msg)
return todict[tospace](fromdict[fromspace](arr))
def _prepare_colorarray(arr):
"""Check the shape of the array and convert it to
floating point representation.
"""
arr = np.asanyarray(arr)
if arr.ndim not in [3, 4] or arr.shape[-1] != 3:
msg = ("the input array must be have a shape == (.., ..,[ ..,] 3)), " +
"got (" + (", ".join(map(str, arr.shape))) + ")")
raise ValueError(msg)
return dtype.img_as_float(arr)
def _prepare_rgba_array(arr):
"""Check the shape of the array to be RGBA and convert it to
floating point representation.
"""
arr = np.asanyarray(arr)
if arr.ndim not in [3, 4] or arr.shape[-1] != 4:
msg = ("the input array must have a shape == (.., ..,[ ..,] 4)), "
"got {0}".format(arr.shape))
raise ValueError(msg)
return dtype.img_as_float(arr)
def rgba2rgb(rgba, background=(1, 1, 1)):
"""RGBA to RGB conversion.
Parameters
----------
rgba : array_like
The image in RGBA format, in a 3-D array of shape ``(.., .., 4)``.
background : array_like
The color of the background to blend the image with. A tuple
containing 3 floats between 0 to 1 - the RGB value of the background.
Returns
-------
out : ndarray
The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.
Raises
------
ValueError
If `rgba` is not a 3-D array of shape ``(.., .., 4)``.
References
----------
.. [1] https://en.wikipedia.org/wiki/Alpha_compositing#Alpha_blending
Examples
--------
>>> from skimage import color
>>> from skimage import data
>>> img_rgba = data.logo()
>>> img_rgb = color.rgba2rgb(img_rgba)
"""
arr = _prepare_rgba_array(rgba)
if isinstance(background, tuple) and len(background) != 3:
raise ValueError('the background must be a tuple with 3 items - the '
'RGB color of the background. Got {0} items.'
.format(len(background)))
alpha = arr[..., -1]
channels = arr[..., :-1]
out = np.empty_like(channels)
for ichan in range(channels.shape[-1]):
out[..., ichan] = np.clip(
(1 - alpha) * background[ichan] + alpha * channels[..., ichan],
a_min=0, a_max=1)
return out
def rgb2hsv(rgb):
"""RGB to HSV color space conversion.
Parameters
----------
rgb : array_like
The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.
Returns
-------
out : ndarray
The image in HSV format, in a 3-D array of shape ``(.., .., 3)``.
Raises
------
ValueError
If `rgb` is not a 3-D array of shape ``(.., .., 3)``.
Notes
-----
Conversion between RGB and HSV color spaces results in some loss of
precision, due to integer arithmetic and rounding [1]_.
References
----------
.. [1] https://en.wikipedia.org/wiki/HSL_and_HSV
Examples
--------
>>> from skimage import color
>>> from skimage import data
>>> img = data.astronaut()
>>> img_hsv = color.rgb2hsv(img)
"""
arr = _prepare_colorarray(rgb)
out = np.empty_like(arr)
# -- V channel
out_v = arr.max(-1)
# -- S channel
delta = arr.ptp(-1)
# Ignore warning for zero divided by zero
old_settings = np.seterr(invalid='ignore')
out_s = delta / out_v
out_s[delta == 0.] = 0.
# -- H channel
# red is max
idx = (arr[:, :, 0] == out_v)
out[idx, 0] = (arr[idx, 1] - arr[idx, 2]) / delta[idx]
# green is max
idx = (arr[:, :, 1] == out_v)
out[idx, 0] = 2. + (arr[idx, 2] - arr[idx, 0]) / delta[idx]
# blue is max
idx = (arr[:, :, 2] == out_v)
out[idx, 0] = 4. + (arr[idx, 0] - arr[idx, 1]) / delta[idx]
out_h = (out[:, :, 0] / 6.) % 1.
out_h[delta == 0.] = 0.
np.seterr(**old_settings)
# -- output
out[:, :, 0] = out_h
out[:, :, 1] = out_s
out[:, :, 2] = out_v
# remove NaN
out[np.isnan(out)] = 0
return out
def hsv2rgb(hsv):
"""HSV to RGB color space conversion.
Parameters
----------
hsv : array_like
The image in HSV format, in a 3-D array of shape ``(.., .., 3)``.
Returns
-------
out : ndarray
The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.
Raises
------
ValueError
If `hsv` is not a 3-D array of shape ``(.., .., 3)``.
Notes
-----
Conversion between RGB and HSV color spaces results in some loss of
precision, due to integer arithmetic and rounding [1]_.
References
----------
.. [1] https://en.wikipedia.org/wiki/HSL_and_HSV
Examples
--------
>>> from skimage import data
>>> img = data.astronaut()
>>> img_hsv = rgb2hsv(img)
>>> img_rgb = hsv2rgb(img_hsv)
"""
arr = _prepare_colorarray(hsv)
hi = np.floor(arr[:, :, 0] * 6)
f = arr[:, :, 0] * 6 - hi
p = arr[:, :, 2] * (1 - arr[:, :, 1])
q = arr[:, :, 2] * (1 - f * arr[:, :, 1])
t = arr[:, :, 2] * (1 - (1 - f) * arr[:, :, 1])
v = arr[:, :, 2]
hi = np.dstack([hi, hi, hi]).astype(np.uint8) % 6
out = np.choose(hi, [np.dstack((v, t, p)),
np.dstack((q, v, p)),
np.dstack((p, v, t)),
np.dstack((p, q, v)),
np.dstack((t, p, v)),
np.dstack((v, p, q))])
return out
# ---------------------------------------------------------------
# Primaries for the coordinate systems
# ---------------------------------------------------------------
cie_primaries = np.array([700, 546.1, 435.8])
sb_primaries = np.array([1. / 155, 1. / 190, 1. / 225]) * 1e5
# ---------------------------------------------------------------
# Matrices that define conversion between different color spaces
# ---------------------------------------------------------------
# From sRGB specification
xyz_from_rgb = np.array([[0.412453, 0.357580, 0.180423],
[0.212671, 0.715160, 0.072169],
[0.019334, 0.119193, 0.950227]])
rgb_from_xyz = linalg.inv(xyz_from_rgb)
# From https://en.wikipedia.org/wiki/CIE_1931_color_space
# Note: Travis's code did not have the divide by 0.17697
xyz_from_rgbcie = np.array([[0.49, 0.31, 0.20],
[0.17697, 0.81240, 0.01063],
[0.00, 0.01, 0.99]]) / 0.17697
rgbcie_from_xyz = linalg.inv(xyz_from_rgbcie)
# construct matrices to and from rgb:
rgbcie_from_rgb = rgbcie_from_xyz @ xyz_from_rgb
rgb_from_rgbcie = rgb_from_xyz @ xyz_from_rgbcie
gray_from_rgb = np.array([[0.2125, 0.7154, 0.0721],
[0, 0, 0],
[0, 0, 0]])
yuv_from_rgb = np.array([[ 0.299 , 0.587 , 0.114 ],
[-0.14714119, -0.28886916, 0.43601035 ],
[ 0.61497538, -0.51496512, -0.10001026 ]])
rgb_from_yuv = linalg.inv(yuv_from_rgb)
yiq_from_rgb = np.array([[0.299 , 0.587 , 0.114 ],
[0.59590059, -0.27455667, -0.32134392],
[0.21153661, -0.52273617, 0.31119955]])
rgb_from_yiq = linalg.inv(yiq_from_rgb)
ypbpr_from_rgb = np.array([[ 0.299 , 0.587 , 0.114 ],
[-0.168736,-0.331264, 0.5 ],
[ 0.5 ,-0.418688,-0.081312]])
rgb_from_ypbpr = linalg.inv(ypbpr_from_rgb)
ycbcr_from_rgb = np.array([[ 65.481, 128.553, 24.966],
[ -37.797, -74.203, 112.0 ],
[ 112.0 , -93.786, -18.214]])
rgb_from_ycbcr = linalg.inv(ycbcr_from_rgb)
ydbdr_from_rgb = np.array([[ 0.299, 0.587, 0.114],
[ -0.45 , -0.883, 1.333],
[ -1.333, 1.116, 0.217]])
rgb_from_ydbdr = linalg.inv(ydbdr_from_rgb)
# CIE LAB constants for Observer=2A, Illuminant=D65
# NOTE: this is actually the XYZ values for the illuminant above.
lab_ref_white = np.array([0.95047, 1., 1.08883])
# XYZ coordinates of the illuminants, scaled to [0, 1]. For each illuminant I
# we have:
#
# illuminant[I][0] corresponds to the XYZ coordinates for the 2 degree
# field of view.
#
# illuminant[I][1] corresponds to the XYZ coordinates for the 10 degree
# field of view.
#
# The XYZ coordinates are calculated from [1], using the formula:
#
# X = x * ( Y / y )
# Y = Y
# Z = ( 1 - x - y ) * ( Y / y )
#
# where Y = 1. The only exception is the illuminant "D65" with aperture angle
# 2, whose coordinates are copied from 'lab_ref_white' for
# backward-compatibility reasons.
#
# References
# ----------
# .. [1] https://en.wikipedia.org/wiki/Standard_illuminant
illuminants = \
{"A": {'2': (1.098466069456375, 1, 0.3558228003436005),
'10': (1.111420406956693, 1, 0.3519978321919493)},
"D50": {'2': (0.9642119944211994, 1, 0.8251882845188288),
'10': (0.9672062750333777, 1, 0.8142801513128616)},
"D55": {'2': (0.956797052643698, 1, 0.9214805860173273),
'10': (0.9579665682254781, 1, 0.9092525159847462)},
"D65": {'2': (0.95047, 1., 1.08883), # This was: `lab_ref_white`
'10': (0.94809667673716, 1, 1.0730513595166162)},
"D75": {'2': (0.9497220898840717, 1, 1.226393520724154),
'10': (0.9441713925645873, 1, 1.2064272211720228)},
"E": {'2': (1.0, 1.0, 1.0),
'10': (1.0, 1.0, 1.0)}}
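# Worked example of the formula above (illustrative, not part of the original
# file): illuminant E has 2-degree chromaticity (x, y) = (1/3, 1/3), so with
# Y = 1:
#
#     X = x * (Y / y)           = (1/3) / (1/3) = 1.0
#     Z = (1 - x - y) * (Y / y) = (1/3) / (1/3) = 1.0
#
# which matches the ``"E"`` entry in the table above.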
def get_xyz_coords(illuminant, observer):
"""Get the XYZ coordinates of the given illuminant and observer [1]_.
Parameters
----------
illuminant : {"A", "D50", "D55", "D65", "D75", "E"}, optional
The name of the illuminant (the function is NOT case sensitive).
observer : {"2", "10"}, optional
The aperture angle of the observer.
Returns
-------
(x, y, z) : tuple
A tuple with 3 elements containing the XYZ coordinates of the given
illuminant.
Raises
------
ValueError
If either the illuminant or the observer angle are not supported or
unknown.
References
----------
.. [1] https://en.wikipedia.org/wiki/Standard_illuminant
"""
illuminant = illuminant.upper()
try:
return illuminants[illuminant][observer]
except KeyError:
raise ValueError("Unknown illuminant/observer combination\
(\'{0}\', \'{1}\')".format(illuminant, observer))
# Haematoxylin-Eosin-DAB colorspace
# From original Ruifrok's paper: A. C. Ruifrok and D. A. Johnston,
# "Quantification of histochemical staining by color deconvolution.,"
# Analytical and quantitative cytology and histology / the International
# Academy of Cytology [and] American Society of Cytology, vol. 23, no. 4,
# pp. 291-9, Aug. 2001.
rgb_from_hed = np.array([[0.65, 0.70, 0.29],
[0.07, 0.99, 0.11],
[0.27, 0.57, 0.78]])
hed_from_rgb = linalg.inv(rgb_from_hed)
# Following matrices are adapted form the Java code written by G.Landini.
# The original code is available at:
# http://www.dentistry.bham.ac.uk/landinig/software/cdeconv/cdeconv.html
# Hematoxylin + DAB
rgb_from_hdx = np.array([[0.650, 0.704, 0.286],
[0.268, 0.570, 0.776],
[0.0, 0.0, 0.0]])
rgb_from_hdx[2, :] = np.cross(rgb_from_hdx[0, :], rgb_from_hdx[1, :])
hdx_from_rgb = linalg.inv(rgb_from_hdx)
# Feulgen + Light Green
rgb_from_fgx = np.array([[0.46420921, 0.83008335, 0.30827187],
[0.94705542, 0.25373821, 0.19650764],
[0.0, 0.0, 0.0]])
rgb_from_fgx[2, :] = np.cross(rgb_from_fgx[0, :], rgb_from_fgx[1, :])
fgx_from_rgb = linalg.inv(rgb_from_fgx)
# Giemsa: Methyl Blue + Eosin
rgb_from_bex = np.array([[0.834750233, 0.513556283, 0.196330403],
[0.092789, 0.954111, 0.283111],
[0.0, 0.0, 0.0]])
rgb_from_bex[2, :] = np.cross(rgb_from_bex[0, :], rgb_from_bex[1, :])
bex_from_rgb = linalg.inv(rgb_from_bex)
# FastRed + FastBlue + DAB
rgb_from_rbd = np.array([[0.21393921, 0.85112669, 0.47794022],
[0.74890292, 0.60624161, 0.26731082],
[0.268, 0.570, 0.776]])
rbd_from_rgb = linalg.inv(rgb_from_rbd)
# Methyl Green + DAB
rgb_from_gdx = np.array([[0.98003, 0.144316, 0.133146],
[0.268, 0.570, 0.776],
[0.0, 0.0, 0.0]])
rgb_from_gdx[2, :] = np.cross(rgb_from_gdx[0, :], rgb_from_gdx[1, :])
gdx_from_rgb = linalg.inv(rgb_from_gdx)
# Hematoxylin + AEC
rgb_from_hax = np.array([[0.650, 0.704, 0.286],
[0.2743, 0.6796, 0.6803],
[0.0, 0.0, 0.0]])
rgb_from_hax[2, :] = np.cross(rgb_from_hax[0, :], rgb_from_hax[1, :])
hax_from_rgb = linalg.inv(rgb_from_hax)
# Blue matrix Anilline Blue + Red matrix Azocarmine + Orange matrix Orange-G
rgb_from_bro = np.array([[0.853033, 0.508733, 0.112656],
[0.09289875, 0.8662008, 0.49098468],
[0.10732849, 0.36765403, 0.9237484]])
bro_from_rgb = linalg.inv(rgb_from_bro)
# Methyl Blue + Ponceau Fuchsin
rgb_from_bpx = np.array([[0.7995107, 0.5913521, 0.10528667],
[0.09997159, 0.73738605, 0.6680326],
[0.0, 0.0, 0.0]])
rgb_from_bpx[2, :] = np.cross(rgb_from_bpx[0, :], rgb_from_bpx[1, :])
bpx_from_rgb = linalg.inv(rgb_from_bpx)
# Alcian Blue + Hematoxylin
rgb_from_ahx = np.array([[0.874622, 0.457711, 0.158256],
[0.552556, 0.7544, 0.353744],
[0.0, 0.0, 0.0]])
rgb_from_ahx[2, :] = np.cross(rgb_from_ahx[0, :], rgb_from_ahx[1, :])
ahx_from_rgb = linalg.inv(rgb_from_ahx)
# Hematoxylin + PAS
rgb_from_hpx = np.array([[0.644211, 0.716556, 0.266844],
[0.175411, 0.972178, 0.154589],
[0.0, 0.0, 0.0]])
rgb_from_hpx[2, :] = np.cross(rgb_from_hpx[0, :], rgb_from_hpx[1, :])
hpx_from_rgb = linalg.inv(rgb_from_hpx)
# -------------------------------------------------------------
# The conversion functions that make use of the matrices above
# -------------------------------------------------------------
def _convert(matrix, arr):
"""Do the color space conversion.
Parameters
----------
matrix : array_like
The 3x3 matrix to use.
arr : array_like
The input array.
Returns
-------
out : ndarray, dtype=float
The converted array.
"""
arr = _prepare_colorarray(arr)
return arr @ matrix.T.copy()
def xyz2rgb(xyz):
"""XYZ to RGB color space conversion.
Parameters
----------
xyz : array_like
The image in XYZ format, in a 3-D array of shape ``(.., .., 3)``.
Returns
-------
out : ndarray
The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.
Raises
------
ValueError
If `xyz` is not a 3-D array of shape ``(.., .., 3)``.
Notes
-----
The CIE XYZ color space is derived from the CIE RGB color space. Note
however that this function converts to sRGB.
References
----------
.. [1] https://en.wikipedia.org/wiki/CIE_1931_color_space
Examples
--------
>>> from skimage import data
>>> from skimage.color import rgb2xyz, xyz2rgb
>>> img = data.astronaut()
>>> img_xyz = rgb2xyz(img)
>>> img_rgb = xyz2rgb(img_xyz)
"""
# Follow the algorithm from http://www.easyrgb.com/index.php
# except we don't multiply/divide by 100 in the conversion
arr = _convert(rgb_from_xyz, xyz)
mask = arr > 0.0031308
arr[mask] = 1.055 * np.power(arr[mask], 1 / 2.4) - 0.055
arr[~mask] *= 12.92
np.clip(arr, 0, 1, out=arr)
return arr
def rgb2xyz(rgb):
"""RGB to XYZ color space conversion.
Parameters
----------
rgb : array_like
The image in RGB format, in a 3- or 4-D array of shape
``(.., ..,[ ..,] 3)``.
Returns
-------
out : ndarray
The image in XYZ format, in a 3- or 4-D array of shape
``(.., ..,[ ..,] 3)``.
Raises
------
ValueError
If `rgb` is not a 3- or 4-D array of shape ``(.., ..,[ ..,] 3)``.
Notes
-----
The CIE XYZ color space is derived from the CIE RGB color space. Note
however that this function converts from sRGB.
References
----------
.. [1] https://en.wikipedia.org/wiki/CIE_1931_color_space
Examples
--------
>>> from skimage import data
>>> img = data.astronaut()
>>> img_xyz = rgb2xyz(img)
"""
# Follow the algorithm from http://www.easyrgb.com/index.php
# except we don't multiply/divide by 100 in the conversion
arr = _prepare_colorarray(rgb).copy()
mask = arr > 0.04045
arr[mask] = np.power((arr[mask] + 0.055) / 1.055, 2.4)
arr[~mask] /= 12.92
return _convert(xyz_from_rgb, arr)
def rgb2rgbcie(rgb):
"""RGB to RGB CIE color space conversion.
Parameters
----------
rgb : array_like
The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.
Returns
-------
out : ndarray
The image in RGB CIE format, in a 3-D array of shape ``(.., .., 3)``.
Raises
------
ValueError
If `rgb` is not a 3-D array of shape ``(.., .., 3)``.
References
----------
.. [1] https://en.wikipedia.org/wiki/CIE_1931_color_space
Examples
--------
>>> from skimage import data
>>> from skimage.color import rgb2rgbcie
>>> img = data.astronaut()
>>> img_rgbcie = rgb2rgbcie(img)
"""
return _convert(rgbcie_from_rgb, rgb)
def rgbcie2rgb(rgbcie):
"""RGB CIE to RGB color space conversion.
Parameters
----------
rgbcie : array_like
The image in RGB CIE format, in a 3-D array of shape ``(.., .., 3)``.
Returns
-------
out : ndarray
The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.
Raises
------
ValueError
If `rgbcie` is not a 3-D array of shape ``(.., .., 3)``.
References
----------
.. [1] https://en.wikipedia.org/wiki/CIE_1931_color_space
Examples
--------
>>> from skimage import data
>>> from skimage.color import rgb2rgbcie, rgbcie2rgb
>>> img = data.astronaut()
>>> img_rgbcie = rgb2rgbcie(img)
>>> img_rgb = rgbcie2rgb(img_rgbcie)
"""
return _convert(rgb_from_rgbcie, rgbcie)
def rgb2gray(rgb):
"""Compute luminance of an RGB image.
Parameters
----------
rgb : array_like
The image in RGB format, in a 3-D or 4-D array of shape
``(.., ..,[ ..,] 3)``, or in RGBA format with shape
``(.., ..,[ ..,] 4)``.
Returns
-------
out : ndarray
The luminance image - an array which is the same size as the input
array, but with the channel dimension removed.
Raises
------
ValueError
		If `rgb` is not a 3-D or 4-D array of shape
``(.., ..,[ ..,] 3)`` or ``(.., ..,[ ..,] 4)``.
References
----------
.. [1] http://www.poynton.com/PDFs/ColorFAQ.pdf
Notes
-----
The weights used in this conversion are calibrated for contemporary
CRT phosphors::
Y = 0.2125 R + 0.7154 G + 0.0721 B
If there is an alpha channel present, it is ignored.
Examples
--------
>>> from skimage.color import rgb2gray
>>> from skimage import data
>>> img = data.astronaut()
>>> img_gray = rgb2gray(img)
"""
if rgb.ndim == 2:
return np.ascontiguousarray(rgb)
rgb = _prepare_colorarray(rgb[..., :3])
coeffs = np.array([0.2125, 0.7154, 0.0721], dtype=rgb.dtype)
return rgb @ coeffs
rgb2grey = rgb2gray
def gray2rgb(image, alpha=None):
"""Create an RGB representation of a gray-level image.
Parameters
----------
image : array_like
Input image of shape ``(M[, N][, P])``.
alpha : bool, optional
Ensure that the output image has an alpha layer. If None,
alpha layers are passed through but not created.
Returns
-------
rgb : ndarray
RGB image of shape ``(M[, N][, P], 3)``.
Raises
------
ValueError
If the input is not a 1-, 2- or 3-dimensional image.
Notes
-----
If the input is a 1-dimensional image of shape ``(M, )``, the output
will be shape ``(M, 3)``.
"""
is_rgb = False
is_alpha = False
dims = np.squeeze(image).ndim
if dims == 3:
if image.shape[2] == 3:
is_rgb = True
elif image.shape[2] == 4:
is_alpha = True
is_rgb = True
if is_rgb:
if alpha is False:
image = image[..., :3]
elif alpha is True and is_alpha is False:
alpha_layer = (np.ones_like(image[..., 0, np.newaxis]) *
dtype_limits(image, clip_negative=False)[1])
image = np.concatenate((image, alpha_layer), axis=2)
return image
elif dims in (1, 2, 3):
image = image[..., np.newaxis]
if alpha:
alpha_layer = (np.ones_like(image) * dtype_limits(image, clip_negative=False)[1])
return np.concatenate(3 * (image,) + (alpha_layer,), axis=-1)
else:
return np.concatenate(3 * (image,), axis=-1)
else:
raise ValueError("Input image expected to be RGB, RGBA or gray.")
grey2rgb = gray2rgb
def xyz2lab(xyz, illuminant="D65", observer="2"):
"""XYZ to CIE-LAB color space conversion.
Parameters
----------
xyz : array_like
The image in XYZ format, in a 3- or 4-D array of shape
``(.., ..,[ ..,] 3)``.
illuminant : {"A", "D50", "D55", "D65", "D75", "E"}, optional
The name of the illuminant (the function is NOT case sensitive).
observer : {"2", "10"}, optional
The aperture angle of the observer.
Returns
-------
out : ndarray
The image in CIE-LAB format, in a 3- or 4-D array of shape
``(.., ..,[ ..,] 3)``.
Raises
------
ValueError
If `xyz` is not a 3-D array of shape ``(.., ..,[ ..,] 3)``.
ValueError
If either the illuminant or the observer angle is unsupported or
unknown.
Notes
-----
By default Observer= 2A, Illuminant= D65. CIE XYZ tristimulus values
x_ref=95.047, y_ref=100., z_ref=108.883. See function `get_xyz_coords` for
a list of supported illuminants.
References
----------
.. [1] http://www.easyrgb.com/index.php?X=MATH&H=07#text7
.. [2] https://en.wikipedia.org/wiki/Lab_color_space
Examples
--------
>>> from skimage import data
>>> from skimage.color import rgb2xyz, xyz2lab
>>> img = data.astronaut()
>>> img_xyz = rgb2xyz(img)
>>> img_lab = xyz2lab(img_xyz)
"""
arr = _prepare_colorarray(xyz)
xyz_ref_white = get_xyz_coords(illuminant, observer)
# scale by CIE XYZ tristimulus values of the reference white point
arr = arr / xyz_ref_white
# Nonlinear distortion and linear transformation
mask = arr > 0.008856
arr[mask] = np.cbrt(arr[mask])
arr[~mask] = 7.787 * arr[~mask] + 16. / 116.
x, y, z = arr[..., 0], arr[..., 1], arr[..., 2]
# Vector scaling
L = (116. * y) - 16.
a = 500.0 * (x - y)
b = 200.0 * (y - z)
return np.concatenate([x[..., np.newaxis] for x in [L, a, b]], axis=-1)
def lab2xyz(lab, illuminant="D65", observer="2"):
"""CIE-LAB to XYZcolor space conversion.
Parameters
----------
lab : array_like
The image in lab format, in a 3-D array of shape ``(.., .., 3)``.
illuminant : {"A", "D50", "D55", "D65", "D75", "E"}, optional
The name of the illuminant (the function is NOT case sensitive).
observer : {"2", "10"}, optional
The aperture angle of the observer.
Returns
-------
out : ndarray
The image in XYZ format, in a 3-D array of shape ``(.., .., 3)``.
Raises
------
ValueError
If `lab` is not a 3-D array of shape ``(.., .., 3)``.
ValueError
If either the illuminant or the observer angle are not supported or
unknown.
UserWarning
If any of the pixels are invalid (Z < 0).
Notes
-----
By default Observer= 2A, Illuminant= D65. CIE XYZ tristimulus values x_ref
= 95.047, y_ref = 100., z_ref = 108.883. See function 'get_xyz_coords' for
a list of supported illuminants.
References
----------
.. [1] http://www.easyrgb.com/index.php?X=MATH&H=07#text7
.. [2] https://en.wikipedia.org/wiki/Lab_color_space
"""
arr = _prepare_colorarray(lab).copy()
L, a, b = arr[:, :, 0], arr[:, :, 1], arr[:, :, 2]
y = (L + 16.) / 116.
x = (a / 500.) + y
z = y - (b / 200.)
if np.any(z < 0):
invalid = np.nonzero(z < 0)
warn('Color data out of range: Z < 0 in %s pixels' % invalid[0].size)
z[invalid] = 0
out = np.dstack([x, y, z])
mask = out > 0.2068966
out[mask] = np.power(out[mask], 3.)
out[~mask] = (out[~mask] - 16.0 / 116.) / 7.787
# rescale to the reference white (illuminant)
xyz_ref_white = get_xyz_coords(illuminant, observer)
out *= xyz_ref_white
return out
def rgb2lab(rgb, illuminant="D65", observer="2"):
"""RGB to lab color space conversion.
Parameters
----------
rgb : array_like
The image in RGB format, in a 3- or 4-D array of shape
``(.., ..,[ ..,] 3)``.
illuminant : {"A", "D50", "D55", "D65", "D75", "E"}, optional
The name of the illuminant (the function is NOT case sensitive).
observer : {"2", "10"}, optional
The aperture angle of the observer.
Returns
-------
out : ndarray
The image in Lab format, in a 3- or 4-D array of shape
``(.., ..,[ ..,] 3)``.
Raises
------
ValueError
If `rgb` is not a 3- or 4-D array of shape ``(.., ..,[ ..,] 3)``.
References
----------
.. [1] https://en.wikipedia.org/wiki/Standard_illuminant
Notes
-----
This function uses rgb2xyz and xyz2lab.
By default Observer= 2A, Illuminant= D65. CIE XYZ tristimulus values
x_ref=95.047, y_ref=100., z_ref=108.883. See function `get_xyz_coords` for
a list of supported illuminants.
"""
return xyz2lab(rgb2xyz(rgb), illuminant, observer)
def lab2rgb(lab, illuminant="D65", observer="2"):
"""Lab to RGB color space conversion.
Parameters
----------
lab : array_like
The image in Lab format, in a 3-D array of shape ``(.., .., 3)``.
illuminant : {"A", "D50", "D55", "D65", "D75", "E"}, optional
The name of the illuminant (the function is NOT case sensitive).
observer : {"2", "10"}, optional
The aperture angle of the observer.
Returns
-------
out : ndarray
The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.
Raises
------
ValueError
If `lab` is not a 3-D array of shape ``(.., .., 3)``.
References
----------
.. [1] https://en.wikipedia.org/wiki/Standard_illuminant
Notes
-----
This function uses lab2xyz and xyz2rgb.
By default Observer= 2A, Illuminant= D65. CIE XYZ tristimulus values
x_ref=95.047, y_ref=100., z_ref=108.883. See function `get_xyz_coords` for
a list of supported illuminants.
"""
return xyz2rgb(lab2xyz(lab, illuminant, observer))
def xyz2luv(xyz, illuminant="D65", observer="2"):
"""XYZ to CIE-Luv color space conversion.
Parameters
----------
xyz : (M, N, [P,] 3) array_like
The 3 or 4 dimensional image in XYZ format. Final dimension denotes
channels.
illuminant : {"A", "D50", "D55", "D65", "D75", "E"}, optional
The name of the illuminant (the function is NOT case sensitive).
observer : {"2", "10"}, optional
The aperture angle of the observer.
Returns
-------
out : (M, N, [P,] 3) ndarray
The image in CIE-Luv format. Same dimensions as input.
Raises
------
ValueError
If `xyz` is not a 3-D or 4-D array of shape ``(M, N, [P,] 3)``.
ValueError
If either the illuminant or the observer angle are not supported or
unknown.
Notes
-----
By default XYZ conversion weights use observer=2A. Reference whitepoint
for D65 Illuminant, with XYZ tristimulus values of ``(95.047, 100.,
108.883)``. See function 'get_xyz_coords' for a list of supported
illuminants.
References
----------
.. [1] http://www.easyrgb.com/index.php?X=MATH&H=16#text16
.. [2] https://en.wikipedia.org/wiki/CIELUV
Examples
--------
>>> from skimage import data
>>> from skimage.color import rgb2xyz, xyz2luv
>>> img = data.astronaut()
>>> img_xyz = rgb2xyz(img)
>>> img_luv = xyz2luv(img_xyz)
"""
arr = _prepare_colorarray(xyz)
# extract channels
x, y, z = arr[..., 0], arr[..., 1], arr[..., 2]
eps = np.finfo(np.float).eps
# compute y_r and L
xyz_ref_white = np.array(get_xyz_coords(illuminant, observer))
L = y / xyz_ref_white[1]
mask = L > 0.008856
L[mask] = 116. * np.cbrt(L[mask]) - 16.
L[~mask] = 903.3 * L[~mask]
u0 = 4 * xyz_ref_white[0] / ([1, 15, 3] @ xyz_ref_white)
v0 = 9 * xyz_ref_white[1] / ([1, 15, 3] @ xyz_ref_white)
# u' and v' helper functions
def fu(X, Y, Z):
return (4. * X) / (X + 15. * Y + 3. * Z + eps)
def fv(X, Y, Z):
return (9. * Y) / (X + 15. * Y + 3. * Z + eps)
# compute u and v using helper functions
u = 13. * L * (fu(x, y, z) - u0)
v = 13. * L * (fv(x, y, z) - v0)
return np.concatenate([q[..., np.newaxis] for q in [L, u, v]], axis=-1)
def luv2xyz(luv, illuminant="D65", observer="2"):
"""CIE-Luv to XYZ color space conversion.
Parameters
----------
luv : (M, N, [P,] 3) array_like
The 3 or 4 dimensional image in CIE-Luv format. Final dimension denotes
channels.
illuminant : {"A", "D50", "D55", "D65", "D75", "E"}, optional
The name of the illuminant (the function is NOT case sensitive).
observer : {"2", "10"}, optional
The aperture angle of the observer.
Returns
-------
out : (M, N, [P,] 3) ndarray
The image in XYZ format. Same dimensions as input.
Raises
------
ValueError
If `luv` is not a 3-D or 4-D array of shape ``(M, N, [P,] 3)``.
ValueError
If either the illuminant or the observer angle are not supported or
unknown.
Notes
-----
XYZ conversion weights use observer=2A. Reference whitepoint for D65
Illuminant, with XYZ tristimulus values of ``(95.047, 100., 108.883)``. See
function 'get_xyz_coords' for a list of supported illuminants.
References
----------
.. [1] http://www.easyrgb.com/index.php?X=MATH&H=16#text16
.. [2] https://en.wikipedia.org/wiki/CIELUV
"""
arr = _prepare_colorarray(luv).copy()
L, u, v = arr[:, :, 0], arr[:, :, 1], arr[:, :, 2]
eps = np.finfo(np.float).eps
# compute y
y = L.copy()
mask = y > 7.999625
y[mask] = np.power((y[mask] + 16.) / 116., 3.)
y[~mask] = y[~mask] / 903.3
xyz_ref_white = get_xyz_coords(illuminant, observer)
y *= xyz_ref_white[1]
# reference white x,z
uv_weights = np.array([1, 15, 3])
u0 = 4 * xyz_ref_white[0] / (uv_weights @ xyz_ref_white)
v0 = 9 * xyz_ref_white[1] / (uv_weights @ xyz_ref_white)
# compute intermediate values
a = u0 + u / (13. * L + eps)
b = v0 + v / (13. * L + eps)
c = 3 * y * (5 * b - 3)
# compute x and z
z = ((a - 4) * c - 15 * a * b * y) / (12 * b)
x = -(c / b + 3. * z)
return np.concatenate([q[..., np.newaxis] for q in [x, y, z]], axis=-1)
def rgb2luv(rgb):
"""RGB to CIE-Luv color space conversion.
Parameters
----------
rgb : (M, N, [P,] 3) array_like
The 3 or 4 dimensional image in RGB format. Final dimension denotes
channels.
Returns
-------
out : (M, N, [P,] 3) ndarray
The image in CIE Luv format. Same dimensions as input.
Raises
------
ValueError
If `rgb` is not a 3-D or 4-D array of shape ``(M, N, [P,] 3)``.
Notes
-----
This function uses rgb2xyz and xyz2luv.
References
----------
.. [1] http://www.easyrgb.com/index.php?X=MATH&H=16#text16
.. [2] http://www.easyrgb.com/index.php?X=MATH&H=02#text2
.. [3] https://en.wikipedia.org/wiki/CIELUV
"""
return xyz2luv(rgb2xyz(rgb))
def luv2rgb(luv):
"""Luv to RGB color space conversion.
Parameters
----------
luv : (M, N, [P,] 3) array_like
The 3 or 4 dimensional image in CIE Luv format. Final dimension denotes
channels.
Returns
-------
out : (M, N, [P,] 3) ndarray
The image in RGB format. Same dimensions as input.
Raises
------
ValueError
If `luv` is not a 3-D or 4-D array of shape ``(M, N, [P,] 3)``.
Notes
-----
This function uses luv2xyz and xyz2rgb.
"""
return xyz2rgb(luv2xyz(luv))
def rgb2hed(rgb):
"""RGB to Haematoxylin-Eosin-DAB (HED) color space conversion.
Parameters
----------
rgb : array_like
The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.
Returns
-------
out : ndarray
The image in HED format, in a 3-D array of shape ``(.., .., 3)``.
Raises
------
ValueError
If `rgb` is not a 3-D array of shape ``(.., .., 3)``.
References
----------
.. [1] A. C. Ruifrok and D. A. Johnston, "Quantification of histochemical
staining by color deconvolution.," Analytical and quantitative
cytology and histology / the International Academy of Cytology [and]
American Society of Cytology, vol. 23, no. 4, pp. 291-9, Aug. 2001.
Examples
--------
>>> from skimage import data
>>> from skimage.color import rgb2hed
>>> ihc = data.immunohistochemistry()
>>> ihc_hed = rgb2hed(ihc)
"""
return separate_stains(rgb, hed_from_rgb)
def hed2rgb(hed):
"""Haematoxylin-Eosin-DAB (HED) to RGB color space conversion.
Parameters
----------
hed : array_like
The image in the HED color space, in a 3-D array of shape
``(.., .., 3)``.
Returns
-------
out : ndarray
The image in RGB, in a 3-D array of shape ``(.., .., 3)``.
Raises
------
ValueError
If `hed` is not a 3-D array of shape ``(.., .., 3)``.
References
----------
.. [1] A. C. Ruifrok and D. A. Johnston, "Quantification of histochemical
staining by color deconvolution.," Analytical and quantitative
cytology and histology / the International Academy of Cytology [and]
American Society of Cytology, vol. 23, no. 4, pp. 291-9, Aug. 2001.
Examples
--------
>>> from skimage import data
>>> from skimage.color import rgb2hed, hed2rgb
>>> ihc = data.immunohistochemistry()
>>> ihc_hed = rgb2hed(ihc)
>>> ihc_rgb = hed2rgb(ihc_hed)
"""
return combine_stains(hed, rgb_from_hed)
def separate_stains(rgb, conv_matrix):
"""RGB to stain color space conversion.
Parameters
----------
rgb : array_like
The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.
conv_matrix: ndarray
The stain separation matrix as described by G. Landini [1]_.
Returns
-------
out : ndarray
The image in stain color space, in a 3-D array of shape
``(.., .., 3)``.
Raises
------
ValueError
If `rgb` is not a 3-D array of shape ``(.., .., 3)``.
Notes
-----
Stain separation matrices available in the ``color`` module and their
respective colorspace:
* ``hed_from_rgb``: Hematoxylin + Eosin + DAB
* ``hdx_from_rgb``: Hematoxylin + DAB
* ``fgx_from_rgb``: Feulgen + Light Green
* ``bex_from_rgb``: Giemsa stain : Methyl Blue + Eosin
* ``rbd_from_rgb``: FastRed + FastBlue + DAB
* ``gdx_from_rgb``: Methyl Green + DAB
* ``hax_from_rgb``: Hematoxylin + AEC
* ``bro_from_rgb``: Blue matrix Anilline Blue + Red matrix Azocarmine\
+ Orange matrix Orange-G
* ``bpx_from_rgb``: Methyl Blue + Ponceau Fuchsin
* ``ahx_from_rgb``: Alcian Blue + Hematoxylin
* ``hpx_from_rgb``: Hematoxylin + PAS
References
----------
.. [1] http://www.dentistry.bham.ac.uk/landinig/software/cdeconv/cdeconv.html
Examples
--------
>>> from skimage import data
>>> from skimage.color import separate_stains, hdx_from_rgb
>>> ihc = data.immunohistochemistry()
>>> ihc_hdx = separate_stains(ihc, hdx_from_rgb)
"""
rgb = dtype.img_as_float(rgb, force_copy=True)
rgb += 2
stains = np.reshape(-np.log10(rgb), (-1, 3)) @ conv_matrix
return np.reshape(stains, rgb.shape)
def combine_stains(stains, conv_matrix):
"""Stain to RGB color space conversion.
Parameters
----------
stains : array_like
The image in stain color space, in a 3-D array of shape
``(.., .., 3)``.
conv_matrix: ndarray
The stain separation matrix as described by G. Landini [1]_.
Returns
-------
out : ndarray
The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.
Raises
------
ValueError
If `stains` is not a 3-D array of shape ``(.., .., 3)``.
Notes
-----
Stain combination matrices available in the ``color`` module and their
respective colorspace:
* ``rgb_from_hed``: Hematoxylin + Eosin + DAB
* ``rgb_from_hdx``: Hematoxylin + DAB
* ``rgb_from_fgx``: Feulgen + Light Green
* ``rgb_from_bex``: Giemsa stain : Methyl Blue + Eosin
* ``rgb_from_rbd``: FastRed + FastBlue + DAB
* ``rgb_from_gdx``: Methyl Green + DAB
* ``rgb_from_hax``: Hematoxylin + AEC
* ``rgb_from_bro``: Blue matrix Anilline Blue + Red matrix Azocarmine\
+ Orange matrix Orange-G
* ``rgb_from_bpx``: Methyl Blue + Ponceau Fuchsin
* ``rgb_from_ahx``: Alcian Blue + Hematoxylin
* ``rgb_from_hpx``: Hematoxylin + PAS
References
----------
.. [1] http://www.dentistry.bham.ac.uk/landinig/software/cdeconv/cdeconv.html
Examples
--------
>>> from skimage import data
>>> from skimage.color import (separate_stains, combine_stains,
... hdx_from_rgb, rgb_from_hdx)
>>> ihc = data.immunohistochemistry()
>>> ihc_hdx = separate_stains(ihc, hdx_from_rgb)
>>> ihc_rgb = combine_stains(ihc_hdx, rgb_from_hdx)
"""
from ..exposure import rescale_intensity
stains = dtype.img_as_float(stains)
logrgb2 = -np.reshape(stains, (-1, 3)) @ conv_matrix
rgb2 = np.power(10, logrgb2)
return rescale_intensity(np.reshape(rgb2 - 2, stains.shape),
in_range=(-1, 1))
def lab2lch(lab):
"""CIE-LAB to CIE-LCH color space conversion.
LCH is the cylindrical representation of the LAB (Cartesian) colorspace
Parameters
----------
lab : array_like
The N-D image in CIE-LAB format. The last (``N+1``-th) dimension must
have at least 3 elements, corresponding to the ``L``, ``a``, and ``b``
color channels. Subsequent elements are copied.
Returns
-------
out : ndarray
The image in LCH format, in a N-D array with same shape as input `lab`.
Raises
------
ValueError
		If `lab` does not have at least 3 color channels (i.e. l, a, b).
Notes
-----
The Hue is expressed as an angle between ``(0, 2*pi)``
Examples
--------
>>> from skimage import data
>>> from skimage.color import rgb2lab, lab2lch
>>> img = data.astronaut()
>>> img_lab = rgb2lab(img)
>>> img_lch = lab2lch(img_lab)
"""
lch = _prepare_lab_array(lab)
a, b = lch[..., 1], lch[..., 2]
lch[..., 1], lch[..., 2] = _cart2polar_2pi(a, b)
return lch
def _cart2polar_2pi(x, y):
"""convert cartesian coordinates to polar (uses non-standard theta range!)
NON-STANDARD RANGE! Maps to ``(0, 2*pi)`` rather than usual ``(-pi, +pi)``
"""
r, t = np.hypot(x, y), np.arctan2(y, x)
t += np.where(t < 0., 2 * np.pi, 0)
return r, t
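# Small numeric check (illustrative, not part of the original file) of the
# non-standard range noted above: a point on the negative y-axis maps to an
# angle near 3*pi/2 rather than -pi/2.
#
#     >>> r, t = _cart2polar_2pi(np.array(0.), np.array(-1.))
#     >>> bool(t > np.pi)
#     True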
def lch2lab(lch):
"""CIE-LCH to CIE-LAB color space conversion.
LCH is the cylindrical representation of the LAB (Cartesian) colorspace
Parameters
----------
lch : array_like
The N-D image in CIE-LCH format. The last (``N+1``-th) dimension must
have at least 3 elements, corresponding to the ``L``, ``a``, and ``b``
color channels. Subsequent elements are copied.
Returns
-------
out : ndarray
The image in LAB format, with same shape as input `lch`.
Raises
------
ValueError
If `lch` does not have at least 3 color channels (i.e. l, c, h).
Examples
--------
>>> from skimage import data
>>> from skimage.color import rgb2lab, lch2lab
>>> img = data.astronaut()
>>> img_lab = rgb2lab(img)
>>> img_lch = lab2lch(img_lab)
>>> img_lab2 = lch2lab(img_lch)
"""
lch = _prepare_lab_array(lch)
c, h = lch[..., 1], lch[..., 2]
lch[..., 1], lch[..., 2] = c * np.cos(h), c * np.sin(h)
return lch
def _prepare_lab_array(arr):
"""Ensure input for lab2lch, lch2lab are well-posed.
Arrays must be in floating point and have at least 3 elements in
last dimension. Return a new array.
"""
arr = np.asarray(arr)
shape = arr.shape
if shape[-1] < 3:
raise ValueError('Input array has less than 3 color channels')
return dtype.img_as_float(arr, force_copy=True)
def rgb2yuv(rgb):
"""RGB to YUV color space conversion.
Parameters
----------
rgb : array_like
The image in RGB format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Returns
-------
out : ndarray
The image in YUV format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Raises
------
ValueError
If `rgb` is not a 3- or 4-D array of shape ``(M, N, [P,] 3)``.
Notes
-----
	Y is between 0 and 1. Use YCbCr instead of YUV for the color space
	commonly used by video codecs (where Y ranges from 16 to 235).
References
----------
.. [1] https://en.wikipedia.org/wiki/YUV
"""
return _convert(yuv_from_rgb, rgb)
def rgb2yiq(rgb):
"""RGB to YIQ color space conversion.
Parameters
----------
rgb : array_like
The image in RGB format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Returns
-------
out : ndarray
The image in YIQ format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Raises
------
ValueError
If `rgb` is not a 3- or 4-D array of shape ``(M, N, [P,] 3)``.
"""
return _convert(yiq_from_rgb, rgb)
def rgb2ypbpr(rgb):
"""RGB to YPbPr color space conversion.
Parameters
----------
rgb : array_like
The image in RGB format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Returns
-------
out : ndarray
The image in YPbPr format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Raises
------
ValueError
If `rgb` is not a 3- or 4-D array of shape ``(M, N, [P,] 3)``.
References
----------
.. [1] https://en.wikipedia.org/wiki/YPbPr
"""
return _convert(ypbpr_from_rgb, rgb)
def rgb2ycbcr(rgb):
"""RGB to YCbCr color space conversion.
Parameters
----------
rgb : array_like
The image in RGB format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Returns
-------
out : ndarray
The image in YCbCr format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Raises
------
ValueError
If `rgb` is not a 3- or 4-D array of shape ``(M, N, [P,] 3)``.
Notes
-----
	Y is between 16 and 235. This is the color space commonly used by video
	codecs; it is sometimes incorrectly called "YUV".
References
----------
.. [1] https://en.wikipedia.org/wiki/YCbCr
"""
arr = _convert(ycbcr_from_rgb, rgb)
arr[..., 0] += 16
arr[..., 1] += 128
arr[..., 2] += 128
return arr
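# Quick sanity check (illustrative, not part of the original file): with the
# matrix and offsets above, pure white lands on the video-range white point,
# consistent with the 16..235 range mentioned in the Notes.
#
#     >>> np.round(rgb2ycbcr(np.ones((1, 1, 3))))
#     array([[[235., 128., 128.]]])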
def rgb2ydbdr(rgb):
"""RGB to YDbDr color space conversion.
Parameters
----------
rgb : array_like
The image in RGB format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Returns
-------
out : ndarray
The image in YDbDr format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Raises
------
ValueError
If `rgb` is not a 3- or 4-D array of shape ``(M, N, [P,] 3)``.
Notes
-----
	This is the color space commonly used by video codecs; it is also the
	reversible color transform in JPEG2000.
References
----------
.. [1] https://en.wikipedia.org/wiki/YDbDr
"""
arr = _convert(ydbdr_from_rgb, rgb)
return arr
def yuv2rgb(yuv):
"""YUV to RGB color space conversion.
Parameters
----------
yuv : array_like
The image in YUV format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Returns
-------
out : ndarray
The image in RGB format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Raises
------
ValueError
If `yuv` is not a 3- or 4-D array of shape ``(M, N, [P,] 3)``.
References
----------
.. [1] https://en.wikipedia.org/wiki/YUV
"""
return _convert(rgb_from_yuv, yuv)
def yiq2rgb(yiq):
"""YIQ to RGB color space conversion.
Parameters
----------
yiq : array_like
The image in YIQ format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Returns
-------
out : ndarray
The image in RGB format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Raises
------
ValueError
If `yiq` is not a 3- or 4-D array of shape ``(M, N, [P,] 3)``.
"""
return _convert(rgb_from_yiq, yiq)
def ypbpr2rgb(ypbpr):
"""YPbPr to RGB color space conversion.
Parameters
----------
ypbpr : array_like
The image in YPbPr format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Returns
-------
out : ndarray
The image in RGB format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Raises
------
ValueError
If `ypbpr` is not a 3- or 4-D array of shape ``(M, N, [P,] 3)``.
References
----------
.. [1] https://en.wikipedia.org/wiki/YPbPr
"""
return _convert(rgb_from_ypbpr, ypbpr)
def ycbcr2rgb(ycbcr):
"""YCbCr to RGB color space conversion.
Parameters
----------
ycbcr : array_like
The image in YCbCr format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Returns
-------
out : ndarray
The image in RGB format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Raises
------
ValueError
If `ycbcr` is not a 3- or 4-D array of shape ``(M, N, [P,] 3)``.
Notes
-----
	Y is between 16 and 235. This is the color space commonly used by video
	codecs; it is sometimes incorrectly called "YUV".
References
----------
.. [1] https://en.wikipedia.org/wiki/YCbCr
"""
arr = ycbcr.copy()
arr[..., 0] -= 16
arr[..., 1] -= 128
arr[..., 2] -= 128
return _convert(rgb_from_ycbcr, arr)
def ydbdr2rgb(ydbdr):
"""YDbDr to RGB color space conversion.
Parameters
----------
ydbdr : array_like
The image in YDbDr format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Returns
-------
out : ndarray
The image in RGB format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Raises
------
ValueError
If `ydbdr` is not a 3- or 4-D array of shape ``(M, N, [P,] 3)``.
Notes
-----
	This is the color space commonly used by video codecs; it is also the
	reversible color transform in JPEG2000.
References
----------
.. [1] https://en.wikipedia.org/wiki/YDbDr
"""
arr = ydbdr.copy()
return _convert(rgb_from_ydbdr, arr)
|
the-stack_0_9942 |
# Code from Chapter 7 of Machine Learning: An Algorithmic Perspective
# by Stephen Marsland (http://seat.massey.ac.nz/personal/s.r.marsland/MLBook.html)
# You are free to use, change, or redistribute the code in any way you wish for
# non-commercial purposes, but please maintain the name of the original author.
# This code comes with no warranty of any kind.
# Stephen Marsland, 2008
from numpy import *
import dtree
class bagger:
"""The bagging algorithm based on the decision tree of Chapter 6"""
def __init__(self):
""" Constructor """
self.tree = dtree.dtree()
def bag(self,data,targets,features,nSamples):
nPoints = shape(data)[0]
nDim = shape(data)[1]
self.nSamples = nSamples
		# Compute bootstrap samples
samplePoints = random.randint(0,nPoints,(nPoints,nSamples))
classifiers = []
for i in range(nSamples):
sample = []
sampleTarget = []
for j in range(nPoints):
sample.append(data[samplePoints[j,i]])
sampleTarget.append(targets[samplePoints[j,i]])
# Train classifiers
classifiers.append(self.tree.make_tree(sample,sampleTarget,features,1))
return classifiers
def bagclass(self,classifiers,data):
decision = []
# Majority voting
for j in range(len(data)):
outputs = []
#print data[j]
for i in range(self.nSamples):
out = self.tree.classify(classifiers[i],data[j])
if out is not None:
outputs.append(out)
# List the possible outputs
out = []
for each in outputs:
if out.count(each)==0:
out.append(each)
frequency = zeros(len(out))
index = 0
if len(out)>0:
for each in out:
frequency[index] = outputs.count(each)
index += 1
decision.append(out[frequency.argmax()])
else:
decision.append(None)
return decision
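# Usage sketch (illustrative, not part of the original file). The names below
# are placeholders: `data` is a list of feature vectors, `targets` the class
# labels and `features` the feature names, in the form expected by the
# `dtree` module imported at the top of this file.
#
#     bag = bagger()
#     classifiers = bag.bag(data, targets, features, nSamples=10)
#     predictions = bag.bagclass(classifiers, testdata)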
|
the-stack_0_9943 | import typing
import inspect
import functools
from base64 import b64decode
from types import FunctionType
import httpx
from rpcpy.serializers import BaseSerializer, JSONSerializer
from rpcpy.utils.openapi import set_type_model
__all__ = ["Client"]
Function = typing.TypeVar("Function", bound=FunctionType)
class ClientMeta(type):
def __call__(cls, *args: typing.Any, **kwargs: typing.Any) -> typing.Any:
if cls.__name__ == "Client":
if isinstance(args[0], httpx.Client):
return SyncClient(*args, **kwargs)
if isinstance(args[0], httpx.AsyncClient):
return AsyncClient(*args, **kwargs)
raise TypeError(
"The parameter `client` must be an httpx.Client or httpx.AsyncClient object."
)
return super().__call__(*args, **kwargs)
class Client(metaclass=ClientMeta):
def __init__(
self,
client: typing.Union[httpx.Client, httpx.AsyncClient],
*,
base_url: str,
request_serializer: BaseSerializer = JSONSerializer(),
response_serializer: BaseSerializer = JSONSerializer(),
) -> None:
		assert base_url.endswith("/"), "base_url must end with '/'"
self.base_url = base_url
self.client = client
self.request_serializer = request_serializer
self.response_serializer = response_serializer
def remote_call(self, func: Function) -> Function:
set_type_model(func) # try set `__body_model__`
return func
def _get_url(self, func: Function) -> str:
return self.base_url + func.__name__
def _get_content(
self, func: typing.Callable, *args: typing.Any, **kwargs: typing.Any
) -> bytes:
sig = inspect.signature(func)
bound_values = sig.bind(*args, **kwargs)
if hasattr(func, "__body_model__"):
_params = getattr(func, "__body_model__")(**bound_values.arguments).dict()
else:
_params = dict(**bound_values.arguments)
return self.request_serializer.encode(_params)
class AsyncClient(Client):
if typing.TYPE_CHECKING:
client: httpx.AsyncClient
def remote_call(self, func: Function) -> Function:
if not (inspect.iscoroutinefunction(func) or inspect.isasyncgenfunction(func)):
raise TypeError(
"Asynchronous Client can only register asynchronous functions."
)
func = super().remote_call(func)
url = self._get_url(func)
if not inspect.isasyncgenfunction(func):
@functools.wraps(func)
async def wrapper(*args: typing.Any, **kwargs: typing.Any) -> typing.Any:
post_content = self._get_content(func, *args, **kwargs)
resp = await self.client.post(
url,
content=post_content,
headers={
"content-type": self.request_serializer.content_type,
"serializer": self.request_serializer.name,
},
)
resp.raise_for_status()
return self.response_serializer.decode(resp.content)
else:
@functools.wraps(func)
async def wrapper(*args: typing.Any, **kwargs: typing.Any) -> typing.Any:
post_content = self._get_content(func, *args, **kwargs)
async with self.client.stream(
"POST",
url,
content=post_content,
headers={
"content-type": self.request_serializer.content_type,
"serializer": self.request_serializer.name,
},
) as resp:
resp.raise_for_status()
async for line in resp.aiter_lines():
if line.startswith("data:"):
data = line.split(":", maxsplit=1)[1]
yield self.response_serializer.decode(
b64decode(data.encode("ascii"))
)
return typing.cast(Function, wrapper)
class SyncClient(Client):
if typing.TYPE_CHECKING:
client: httpx.Client
def remote_call(self, func: Function) -> Function:
if inspect.iscoroutinefunction(func) or inspect.isasyncgenfunction(func):
raise TypeError(
"Synchronization Client can only register synchronization functions."
)
func = super().remote_call(func)
url = self._get_url(func)
if not inspect.isgeneratorfunction(func):
@functools.wraps(func)
def wrapper(*args: typing.Any, **kwargs: typing.Any) -> typing.Any:
post_content = self._get_content(func, *args, **kwargs)
resp = self.client.post(
url,
content=post_content,
headers={
"content-type": self.request_serializer.content_type,
"serializer": self.request_serializer.name,
},
)
resp.raise_for_status()
return self.response_serializer.decode(resp.content)
else:
@functools.wraps(func)
def wrapper(*args: typing.Any, **kwargs: typing.Any) -> typing.Any:
post_content = self._get_content(func, *args, **kwargs)
with self.client.stream(
"POST",
url,
content=post_content,
headers={
"content-type": self.request_serializer.content_type,
"serializer": self.request_serializer.name,
},
) as resp:
resp.raise_for_status()
for line in resp.iter_lines():
if line.startswith("data:"):
data = line.split(":", maxsplit=1)[1]
yield self.response_serializer.decode(
b64decode(data.encode("ascii"))
)
return typing.cast(Function, wrapper)
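# Usage sketch (assumes an rpcpy server is reachable at `base_url`; the
# function name `sayhi` and the URL are illustrative only):
#
#   import httpx
#
#   client = Client(httpx.Client(), base_url="http://127.0.0.1:8000/")
#
#   @client.remote_call
#   def sayhi(name: str) -> str:
#       ...
#
#   sayhi("rpcpy")  # issues POST http://127.0.0.1:8000/sayhi and decodes the response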
|
the-stack_0_9944 | """
Expressions
-----------
Offer fast expression evaluation through numexpr
"""
import warnings
import numpy as np
from pandas._config import get_option
from pandas._libs.lib import values_from_object
from pandas.core.dtypes.generic import ABCDataFrame
from pandas.core.computation.check import _NUMEXPR_INSTALLED
if _NUMEXPR_INSTALLED:
import numexpr as ne
_TEST_MODE = None
_TEST_RESULT = None
_USE_NUMEXPR = _NUMEXPR_INSTALLED
_evaluate = None
_where = None
# the set of dtypes that we will allow pass to numexpr
_ALLOWED_DTYPES = {
"evaluate": {"int64", "int32", "float64", "float32", "bool"},
"where": {"int64", "float64", "bool"},
}
# the minimum number of elements (prod of shape) at which we will use numexpr
_MIN_ELEMENTS = 10000
def set_use_numexpr(v=True):
# set/unset to use numexpr
global _USE_NUMEXPR
if _NUMEXPR_INSTALLED:
_USE_NUMEXPR = v
# choose what we are going to do
global _evaluate, _where
if not _USE_NUMEXPR:
_evaluate = _evaluate_standard
_where = _where_standard
else:
_evaluate = _evaluate_numexpr
_where = _where_numexpr
def set_numexpr_threads(n=None):
# if we are using numexpr, set the threads to n
# otherwise reset
if _NUMEXPR_INSTALLED and _USE_NUMEXPR:
if n is None:
n = ne.detect_number_of_cores()
ne.set_num_threads(n)
def _evaluate_standard(op, op_str, a, b):
""" standard evaluation """
if _TEST_MODE:
_store_test_result(False)
with np.errstate(all="ignore"):
return op(a, b)
def _can_use_numexpr(op, op_str, a, b, dtype_check):
""" return a boolean if we WILL be using numexpr """
if op_str is not None:
# required min elements (otherwise we are adding overhead)
if np.prod(a.shape) > _MIN_ELEMENTS:
# check for dtype compatibility
dtypes = set()
for o in [a, b]:
# Series implements dtypes, check for dimension count as well
if hasattr(o, "dtypes") and o.ndim > 1:
s = o.dtypes.value_counts()
if len(s) > 1:
return False
dtypes |= set(s.index.astype(str))
# ndarray and Series Case
elif hasattr(o, "dtype"):
dtypes |= {o.dtype.name}
# allowed are a superset
if not len(dtypes) or _ALLOWED_DTYPES[dtype_check] >= dtypes:
return True
return False
def _evaluate_numexpr(op, op_str, a, b):
result = None
if _can_use_numexpr(op, op_str, a, b, "evaluate"):
is_reversed = op.__name__.strip("_").startswith("r")
if is_reversed:
# we were originally called by a reversed op method
a, b = b, a
a_value = getattr(a, "values", a)
b_value = getattr(b, "values", b)
result = ne.evaluate(
"a_value {op} b_value".format(op=op_str),
local_dict={"a_value": a_value, "b_value": b_value},
casting="safe",
)
if _TEST_MODE:
_store_test_result(result is not None)
if result is None:
result = _evaluate_standard(op, op_str, a, b)
return result
def _where_standard(cond, a, b):
return np.where(
values_from_object(cond), values_from_object(a), values_from_object(b)
)
def _where_numexpr(cond, a, b):
result = None
if _can_use_numexpr(None, "where", a, b, "where"):
cond_value = getattr(cond, "values", cond)
a_value = getattr(a, "values", a)
b_value = getattr(b, "values", b)
result = ne.evaluate(
"where(cond_value, a_value, b_value)",
local_dict={
"cond_value": cond_value,
"a_value": a_value,
"b_value": b_value,
},
casting="safe",
)
if result is None:
result = _where_standard(cond, a, b)
return result
# turn myself on
set_use_numexpr(get_option("compute.use_numexpr"))
def _has_bool_dtype(x):
if isinstance(x, ABCDataFrame):
return "bool" in x.dtypes
try:
return x.dtype == bool
except AttributeError:
return isinstance(x, (bool, np.bool_))
def _bool_arith_check(
op_str, a, b, not_allowed=frozenset(("/", "//", "**")), unsupported=None
):
if unsupported is None:
unsupported = {"+": "|", "*": "&", "-": "^"}
if _has_bool_dtype(a) and _has_bool_dtype(b):
if op_str in unsupported:
warnings.warn(
f"evaluating in Python space because the {repr(op_str)} "
f"operator is not supported by numexpr for "
f"the bool dtype, use {repr(unsupported[op_str])} instead"
)
return False
if op_str in not_allowed:
raise NotImplementedError(
f"operator {repr(op_str)} not implemented for bool dtypes"
)
return True
def evaluate(op, op_str, a, b, use_numexpr=True):
"""
Evaluate and return the expression of the op on a and b.
Parameters
----------
op : the actual operand
op_str : str
The string version of the op.
a : left operand
b : right operand
use_numexpr : bool, default True
Whether to try to use numexpr.
"""
use_numexpr = use_numexpr and _bool_arith_check(op_str, a, b)
if use_numexpr:
return _evaluate(op, op_str, a, b)
return _evaluate_standard(op, op_str, a, b)
def where(cond, a, b, use_numexpr=True):
"""
Evaluate the where condition cond on a and b.
Parameters
----------
cond : np.ndarray[bool]
a : return if cond is True
b : return if cond is False
use_numexpr : bool, default True
Whether to try to use numexpr.
"""
if use_numexpr:
return _where(cond, a, b)
return _where_standard(cond, a, b)
def set_test_mode(v=True):
"""
Keeps track of whether numexpr was used. Stores an additional ``True``
for every successful use of evaluate with numexpr since the last
``get_test_result``
"""
global _TEST_MODE, _TEST_RESULT
_TEST_MODE = v
_TEST_RESULT = []
def _store_test_result(used_numexpr):
global _TEST_RESULT
if used_numexpr:
_TEST_RESULT.append(used_numexpr)
def get_test_result():
"""get test result and reset test_results"""
global _TEST_RESULT
res = _TEST_RESULT
_TEST_RESULT = []
return res
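# A small self-check sketch (not part of pandas): it exercises the two public
# entry points above on plain ndarrays. Results are identical with or without
# numexpr installed; only the execution path differs.
if __name__ == "__main__":
    import operator

    a = np.arange(1_000_000, dtype="float64")
    b = np.arange(1_000_000, dtype="float64")
    # large float64 arrays satisfy _MIN_ELEMENTS and _ALLOWED_DTYPES,
    # so the numexpr path is taken when it is available
    summed = evaluate(operator.add, "+", a, b)
    picked = where(a > 500_000, a, b)
    print(summed[:3], picked[:3])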
|
the-stack_0_9945 | from __future__ import with_statement
from time import time
from fabric.api import cd, run, env, roles
from fabric.decorators import task
from fabric.contrib.files import exists
env.use_ssh_config = True
releases_dir = "/home/deploy/issadmin/releases"
git_branch = "master"
git_repo = "https://github.com/wgerez/iss-dashboard.git"
repo_dir = "/home/deploy/issadmin/repo"
persist_dir = "/home/deploy/issadmin/persist"
next_release = "%(time).0f" % {'time': time()}
current_release = "/home/deploy/issadmin/current"
env.roledefs = {
'test': ['issqa'],
'production': ['iss']
}
@task
def deploy(migrate='no'):
init()
update_git()
create_release()
build_site()
if migrate=='yes':
migrate_from = "%s/%s" % (releases_dir, next_release)
migrate_forward(migrate_from)
swap_symlinks()
@task
def migrate():
migrate_forward()
@task
def migrate_back():
migrate_backward()
def migrate_forward(release_dir=None, env='production'):
if not release_dir:
release_dir=current_release
with cd(release_dir):
run('php artisan migrate --env=%s' % env)
def migrate_backward(release_dir=None, env='production'):
if not release_dir:
release_dir = current_release
with cd(release_dir):
run('php artisan migrate:rollback --env=%s' % env)
def init():
if not exists(releases_dir):
run("mkdir -p %s" % releases_dir)
if not exists(repo_dir):
run("git clone -b %s %s %s" % (git_branch, git_repo, repo_dir) )
if not exists("%s/storage" % persist_dir):
run("mkdir -p %s/storage/cache" % persist_dir)
run("mkdir -p %s/storage/fonts" % persist_dir)
run("mkdir -p %s/storage/logs" % persist_dir)
run("mkdir -p %s/storage/meta" % persist_dir)
run("mkdir -p %s/storage/sessions" % persist_dir)
run("mkdir -p %s/storage/views" % persist_dir)
def update_git():
with cd(repo_dir):
run("git checkout %s" % git_branch)
run("git pull origin %s" % git_branch)
def create_release():
release_into = "%s/%s" % (releases_dir, next_release)
run("mkdir -p %s" % release_into)
with cd(repo_dir):
run("git archive --worktree-attributes %s | tar -x -C %s" % (git_branch, release_into))
def build_site():
with cd("%s/%s" % (releases_dir, next_release)):
run("rm composer.lock")
run("composer install")
def swap_symlinks():
release_into = "%s/%s" % (releases_dir, next_release)
run("ln -nfs %s/database.php %s/app/config/database.php" % (persist_dir, release_into))
run("rm -rf %s/app/storage" % release_into)
run("rm -rf %s/public/alumnos" % release_into)
run("rm -rf %s/public/docentes" % release_into)
run("ln -nfs %s/storage %s/app/storage" % (persist_dir, release_into))
run("ln -nfs %s/alumnos %s/public/alumnos" % (persist_dir, release_into))
run("ln -nfs %s/docentes %s/public/docentes" % (persist_dir, release_into))
run("ln -nfs %s %s" % (release_into, current_release))
run("sudo service php7.0-fpm reload")
|
the-stack_0_9947 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle.fluid as fluid
import parl
from parl import layers
from parl.utils import machine_info
class AtariAgent(parl.Agent):
def __init__(self, algorithm, obs_shape, act_dim,
learn_data_provider=None):
assert isinstance(obs_shape, (list, tuple))
assert isinstance(act_dim, int)
self.obs_shape = obs_shape
self.act_dim = act_dim
super(AtariAgent, self).__init__(algorithm)
if learn_data_provider:
self.learn_reader.decorate_tensor_provider(learn_data_provider)
self.learn_reader.start()
def build_program(self):
self.sample_program = fluid.Program()
self.predict_program = fluid.Program()
self.learn_program = fluid.Program()
with fluid.program_guard(self.sample_program):
obs = layers.data(
name='obs', shape=self.obs_shape, dtype='float32')
self.sample_actions, self.behaviour_logits = self.alg.sample(obs)
with fluid.program_guard(self.predict_program):
obs = layers.data(
name='obs', shape=self.obs_shape, dtype='float32')
self.predict_actions = self.alg.predict(obs)
with fluid.program_guard(self.learn_program):
obs = layers.data(
name='obs', shape=self.obs_shape, dtype='float32')
actions = layers.data(name='actions', shape=[], dtype='int64')
behaviour_logits = layers.data(
name='behaviour_logits', shape=[self.act_dim], dtype='float32')
rewards = layers.data(name='rewards', shape=[], dtype='float32')
dones = layers.data(name='dones', shape=[], dtype='float32')
lr = layers.data(
name='lr', shape=[1], dtype='float32', append_batch_size=False)
entropy_coeff = layers.data(
name='entropy_coeff', shape=[], dtype='float32')
self.learn_reader = fluid.layers.create_py_reader_by_data(
capacity=32,
feed_list=[
obs, actions, behaviour_logits, rewards, dones, lr,
entropy_coeff
])
obs, actions, behaviour_logits, rewards, dones, lr, entropy_coeff = fluid.layers.read_file(
self.learn_reader)
vtrace_loss, kl = self.alg.learn(obs, actions, behaviour_logits,
rewards, dones, lr, entropy_coeff)
self.learn_outputs = [
vtrace_loss.total_loss, vtrace_loss.pi_loss,
vtrace_loss.vf_loss, vtrace_loss.entropy, kl
]
        self.learn_program = parl.compile(self.learn_program, vtrace_loss.total_loss)
def sample(self, obs_np):
"""
Args:
obs_np: a numpy float32 array of shape ([B] + observation_space).
Format of image input should be NCHW format.
Returns:
sample_ids: a numpy int64 array of shape [B]
"""
obs_np = obs_np.astype('float32')
sample_actions, behaviour_logits = self.fluid_executor.run(
self.sample_program,
feed={'obs': obs_np},
fetch_list=[self.sample_actions, self.behaviour_logits])
return sample_actions, behaviour_logits
def predict(self, obs_np):
"""
Args:
obs_np: a numpy float32 array of shape ([B] + observation_space)
Format of image input should be NCHW format.
Returns:
sample_ids: a numpy int64 array of shape [B]
"""
obs_np = obs_np.astype('float32')
predict_actions = self.fluid_executor.run(
self.predict_program,
feed={'obs': obs_np},
fetch_list=[self.predict_actions])[0]
return predict_actions
def learn(self):
total_loss, pi_loss, vf_loss, entropy, kl = self.fluid_executor.run(
self.learn_program, fetch_list=self.learn_outputs)
return total_loss, pi_loss, vf_loss, entropy, kl
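# Usage sketch (shapes and action count are illustrative; `algorithm` is
# assumed to be a compatible PARL algorithm instance wired to a model):
#
#   agent = AtariAgent(algorithm, obs_shape=[4, 84, 84], act_dim=6)
#   obs_batch = np.zeros([8, 4, 84, 84], dtype='float32')  # NCHW, as documented above
#   actions, behaviour_logits = agent.sample(obs_batch)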
|
the-stack_0_9948 | """Utilities for calculating and reporting statistics about types."""
import cgi
import os.path
import re
from typing import Any, Dict, List, cast, Tuple
from mypy.traverser import TraverserVisitor
from mypy.types import (
Type, AnyType, Instance, FunctionLike, TupleType, Void, TypeVarType,
TypeQuery, ANY_TYPE_STRATEGY, CallableType
)
from mypy import nodes
from mypy.nodes import (
Node, FuncDef, TypeApplication, AssignmentStmt, NameExpr, CallExpr,
MemberExpr, OpExpr, ComparisonExpr, IndexExpr, UnaryExpr, YieldFromExpr
)
TYPE_EMPTY = 0
TYPE_PRECISE = 1
TYPE_IMPRECISE = 2
TYPE_ANY = 3
precision_names = [
'empty',
'precise',
'imprecise',
'any',
]
class StatisticsVisitor(TraverserVisitor):
def __init__(self, inferred: bool, typemap: Dict[Node, Type] = None,
all_nodes: bool = False) -> None:
self.inferred = inferred
self.typemap = typemap
self.all_nodes = all_nodes
self.num_precise = 0
self.num_imprecise = 0
self.num_any = 0
self.num_simple = 0
self.num_generic = 0
self.num_tuple = 0
self.num_function = 0
self.num_typevar = 0
self.num_complex = 0
self.line = -1
self.line_map = {} # type: Dict[int, int]
self.output = [] # type: List[str]
TraverserVisitor.__init__(self)
def visit_func_def(self, o: FuncDef) -> None:
self.line = o.line
if len(o.expanded) > 1:
if o in o.expanded:
print('ERROR: cycle in function expansion; skipping')
return
for defn in o.expanded:
self.visit_func_def(cast(FuncDef, defn))
else:
if o.type:
sig = cast(CallableType, o.type)
arg_types = sig.arg_types
if (sig.arg_names and sig.arg_names[0] == 'self' and
not self.inferred):
arg_types = arg_types[1:]
for arg in arg_types:
self.type(arg)
self.type(sig.ret_type)
elif self.all_nodes:
self.record_line(self.line, TYPE_ANY)
super().visit_func_def(o)
def visit_type_application(self, o: TypeApplication) -> None:
self.line = o.line
for t in o.types:
self.type(t)
super().visit_type_application(o)
def visit_assignment_stmt(self, o: AssignmentStmt) -> None:
self.line = o.line
if (isinstance(o.rvalue, nodes.CallExpr) and
isinstance(cast(nodes.CallExpr, o.rvalue).analyzed,
nodes.TypeVarExpr)):
# Type variable definition -- not a real assignment.
return
if o.type:
self.type(o.type)
elif self.inferred:
for lvalue in o.lvalues:
if isinstance(lvalue, nodes.TupleExpr):
items = lvalue.items
elif isinstance(lvalue, nodes.ListExpr):
items = lvalue.items
else:
items = [lvalue]
for item in items:
if hasattr(item, 'is_def') and cast(Any, item).is_def:
t = self.typemap.get(item)
if t:
self.type(t)
else:
self.log(' !! No inferred type on line %d' %
self.line)
self.record_line(self.line, TYPE_ANY)
super().visit_assignment_stmt(o)
def visit_name_expr(self, o: NameExpr) -> None:
self.process_node(o)
super().visit_name_expr(o)
def visit_yield_from_expr(self, o: YieldFromExpr) -> None:
if o.expr:
o.expr.accept(self)
def visit_call_expr(self, o: CallExpr) -> None:
self.process_node(o)
if o.analyzed:
o.analyzed.accept(self)
else:
o.callee.accept(self)
for a in o.args:
a.accept(self)
def visit_member_expr(self, o: MemberExpr) -> None:
self.process_node(o)
super().visit_member_expr(o)
def visit_op_expr(self, o: OpExpr) -> None:
self.process_node(o)
super().visit_op_expr(o)
def visit_comparison_expr(self, o: ComparisonExpr) -> None:
self.process_node(o)
super().visit_comparison_expr(o)
def visit_index_expr(self, o: IndexExpr) -> None:
self.process_node(o)
super().visit_index_expr(o)
def visit_unary_expr(self, o: UnaryExpr) -> None:
self.process_node(o)
super().visit_unary_expr(o)
def process_node(self, node: Node) -> None:
if self.all_nodes:
typ = self.typemap.get(node)
if typ:
self.line = node.line
self.type(typ)
def type(self, t: Type) -> None:
if isinstance(t, AnyType):
self.log(' !! Any type around line %d' % self.line)
self.num_any += 1
self.record_line(self.line, TYPE_ANY)
elif ((not self.all_nodes and is_imprecise(t)) or
(self.all_nodes and is_imprecise2(t))):
self.log(' !! Imprecise type around line %d' % self.line)
self.num_imprecise += 1
self.record_line(self.line, TYPE_IMPRECISE)
else:
self.num_precise += 1
self.record_line(self.line, TYPE_PRECISE)
if isinstance(t, Instance):
if t.args:
if any(is_complex(arg) for arg in t.args):
self.num_complex += 1
else:
self.num_generic += 1
else:
self.num_simple += 1
elif isinstance(t, Void):
self.num_simple += 1
elif isinstance(t, FunctionLike):
self.num_function += 1
elif isinstance(t, TupleType):
if any(is_complex(item) for item in t.items):
self.num_complex += 1
else:
self.num_tuple += 1
elif isinstance(t, TypeVarType):
self.num_typevar += 1
def log(self, string: str) -> None:
self.output.append(string)
def record_line(self, line: int, precision: int) -> None:
self.line_map[line] = max(precision,
self.line_map.get(line, TYPE_PRECISE))
def dump_type_stats(tree: Node, path: str, inferred: bool = False,
typemap: Dict[Node, Type] = None) -> None:
if is_special_module(path):
return
print(path)
visitor = StatisticsVisitor(inferred, typemap)
tree.accept(visitor)
for line in visitor.output:
print(line)
print(' ** precision **')
print(' precise ', visitor.num_precise)
print(' imprecise', visitor.num_imprecise)
print(' any ', visitor.num_any)
print(' ** kinds **')
print(' simple ', visitor.num_simple)
print(' generic ', visitor.num_generic)
print(' function ', visitor.num_function)
print(' tuple ', visitor.num_tuple)
print(' TypeVar ', visitor.num_typevar)
print(' complex ', visitor.num_complex)
print(' any ', visitor.num_any)
def is_special_module(path: str) -> bool:
return os.path.basename(path) in ('abc.py', 'typing.py', 'builtins.py')
def is_imprecise(t: Type) -> bool:
return t.accept(HasAnyQuery())
class HasAnyQuery(TypeQuery):
def __init__(self) -> None:
super().__init__(False, ANY_TYPE_STRATEGY)
def visit_any(self, t: AnyType) -> bool:
return True
def visit_instance(self, t: Instance) -> bool:
if t.type.fullname() == 'builtins.tuple':
return True
else:
return super().visit_instance(t)
def is_imprecise2(t: Type) -> bool:
return t.accept(HasAnyQuery2())
class HasAnyQuery2(HasAnyQuery):
def visit_callable_type(self, t: CallableType) -> bool:
# We don't want to flag references to functions with some Any
# argument types (etc.) since they generally don't mean trouble.
return False
def is_generic(t: Type) -> bool:
return isinstance(t, Instance) and bool(cast(Instance, t).args)
def is_complex(t: Type) -> bool:
return is_generic(t) or isinstance(t, (FunctionLike, TupleType,
TypeVarType))
html_files = [] # type: List[Tuple[str, str, int, int]]
def generate_html_report(tree: Node, path: str, type_map: Dict[Node, Type],
output_dir: str) -> None:
if is_special_module(path):
return
# There may be more than one right answer for "what should we do here?"
# but this is a reasonable one.
path = os.path.relpath(path)
if path.startswith('..'):
return
visitor = StatisticsVisitor(inferred=True, typemap=type_map, all_nodes=True)
tree.accept(visitor)
assert not os.path.isabs(path) and not path.startswith('..')
# This line is *wrong* if the preceding assert fails.
target_path = os.path.join(output_dir, 'html', path)
# replace .py or .pyi with .html
target_path = os.path.splitext(target_path)[0] + '.html'
assert target_path.endswith('.html')
ensure_dir_exists(os.path.dirname(target_path))
output = [] # type: List[str]
append = output.append
append('''\
<html>
<head>
<style>
.red { background-color: #faa; }
.yellow { background-color: #ffa; }
.white { }
.lineno { color: #999; }
</style>
</head>
<body>
<pre>''')
num_imprecise_lines = 0
num_lines = 0
with open(path) as input_file:
for i, line in enumerate(input_file):
lineno = i + 1
status = visitor.line_map.get(lineno, TYPE_PRECISE)
style_map = {TYPE_PRECISE: 'white',
TYPE_IMPRECISE: 'yellow',
TYPE_ANY: 'red'}
style = style_map[status]
append('<span class="lineno">%4d</span> ' % lineno +
'<span class="%s">%s</span>' % (style,
cgi.escape(line)))
if status != TYPE_PRECISE:
num_imprecise_lines += 1
if line.strip():
num_lines += 1
append('</pre>')
append('</body></html>')
with open(target_path, 'w') as output_file:
output_file.writelines(output)
target_path = target_path[len(output_dir) + 1:]
html_files.append((path, target_path, num_lines, num_imprecise_lines))
def generate_html_index(output_dir: str) -> None:
path = os.path.join(output_dir, 'index.html')
output = [] # type: List[str]
append = output.append
append('''\
<html>
<head>
<style>
body { font-family: courier; }
table { border-collapse: collapse; }
table tr td { border: 1px solid black; }
td { padding: 0.4em; }
.red { background-color: #faa; }
.yellow { background-color: #ffa; }
</style>
</head>
<body>''')
append('<h1>Mypy Type Check Coverage Report</h1>\n')
append('<table>\n')
for source_path, target_path, num_lines, num_imprecise in sorted(html_files):
if num_lines == 0:
continue
source_path = os.path.normpath(source_path)
# TODO: Windows paths.
if (source_path.startswith('stubs/') or
'/stubs/' in source_path):
continue
percent = 100.0 * num_imprecise / num_lines
style = ''
if percent >= 20:
style = 'class="red"'
elif percent >= 5:
style = 'class="yellow"'
append('<tr %s><td><a href="%s">%s</a><td>%.1f%% imprecise<td>%d LOC\n' % (
style, target_path, source_path, percent, num_lines))
append('</table>\n')
append('</body></html>')
with open(path, 'w') as file:
file.writelines(output)
print('Generated HTML report (old): %s' % os.path.abspath(path))
def ensure_dir_exists(dir: str) -> None:
if not os.path.exists(dir):
os.makedirs(dir)
|
the-stack_0_9949 | #025: Write a program that reads a person's name and says whether it contains "Silva".
nome = str(input('Enter your name: '))
nome = nome.title()
nome = nome.strip()
nomeA = nome.split()  # list of name parts (not used by the substring check below)
if ('Silva' in nome):
    print('Your name contains Silva!')
else:
    print('Your name does not contain Silva!')
|
the-stack_0_9950 | """String functions in R"""
import re
import numpy as np
from pipda import register_func
from ..core.backends import pandas as pd
from ..core.backends.pandas import Series
from ..core.backends.pandas.core.base import PandasObject
from ..core.backends.pandas.core.groupby import SeriesGroupBy
from ..core.backends.pandas.api.types import is_string_dtype, is_scalar
from ..core.tibble import Tibble, TibbleGrouped, TibbleRowwise
from ..core.contexts import Context
from ..core.factory import func_factory, dispatching
from ..core.utils import (
arg_match,
logger,
regcall,
)
from .casting import _as_type
from .testing import _register_type_testing
from .logical import as_logical
@register_func(None, context=Context.EVAL)
def as_character(
x,
str_dtype=str,
_na=np.nan,
):
"""Convert an object or elements of an iterable into string
Aliases `as_str` and `as_string`
Args:
x: The object
str_dtype: The string dtype to convert to
_na: How NAs should be casted. Specify np.nan will keep them unchanged.
But the dtype will be object then.
Returns:
When x is an array or a series, return x.astype(str).
When x is iterable, convert elements of it into strings
Otherwise, convert x to string.
"""
return _as_type(x, str_dtype, na=_na)
as_str = as_string = as_character
is_character = _register_type_testing(
"is_character",
scalar_types=(str, np.str_),
dtype_checker=is_string_dtype,
doc="""Test if a value is characters/string
Alias `is_str` and `is_string`
Args:
x: The value to be checked
Returns:
True if the value is string; with a string dtype;
or all elements are strings
""",
)
is_str = is_string = is_character
# Grep family -----------------------------------
@dispatching(kind="transform", qualname="datar.base.grep")
def _grep(
x, pattern, ignore_case=False, value=False, fixed=False, invert=False
):
matched = _grepl.dispatch(Series)(
x,
pattern,
ignore_case=ignore_case,
fixed=fixed,
invert=invert,
)
if value:
return x[matched]
return np.flatnonzero(matched)
@register_func(None, context=Context.EVAL)
def grep(
pattern,
x,
ignore_case=False,
value=False,
fixed=False,
invert=False,
):
"""R's grep, get the element in x matching the pattern
Args:
pattern: The pattern
x: A string or an iterable of strings; or those can be coerced to
ignore_case: Do case-insensitive matching?
value: Return values instead of indices?
fixed: Fixed matching (instead of regex matching)?
        invert: Return elements that don't match instead?
Returns:
The matched (or unmatched (`invert=True`)) indices
(or values (`value=True`)).
"""
return _grep(
x if isinstance(x, (Series, SeriesGroupBy)) else Series(x),
pattern,
ignore_case=ignore_case,
value=value,
fixed=fixed,
invert=invert,
)
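# Usage sketch (illustrative values; the concrete return container depends on
# the input type, e.g. a pandas Series when a Series is passed in):
#
#   grep("an", ["banana", "mango", "apple"])              # indices 0 and 1
#   grep("an", ["banana", "mango", "apple"], value=True)  # the matching strings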
@dispatching(kind="transform", qualname="datar.base.grepl")
def _grepl(x, pattern, ignore_case, fixed, invert):
pattern = _warn_more_pat_or_rep(pattern, "grepl")
return _match(
x,
pattern,
ignore_case=ignore_case,
invert=invert,
fixed=fixed,
)
@register_func(None, context=Context.EVAL)
def grepl(
pattern,
x,
ignore_case=False,
fixed=False,
invert=False,
):
"""R's grepl, check whether elements in x matching the pattern
Args:
pattern: The pattern
x: A string or an iterable of strings; or those can be coerced to
ignore_case: Do case-insensitive matching?
fixed: Fixed matching (instead of regex matching)?
        invert: Return elements that don't match instead?
Returns:
A bool array indicating whether the elements in x match the pattern
"""
return _grepl(
x if isinstance(x, (Series, SeriesGroupBy)) else Series(x),
pattern,
ignore_case=ignore_case,
fixed=fixed,
invert=invert,
)
@dispatching(kind="transform", qualname="datar.base.sub")
def _sub(x, pattern, replacement, ignore_case, fixed):
return _sub_(
pattern=pattern,
replacement=replacement,
x=x,
ignore_case=ignore_case,
fixed=fixed,
)
@register_func(None, context=Context.EVAL)
def sub(
pattern,
replacement,
x,
ignore_case=False,
fixed=False,
):
"""R's sub, replace a pattern with replacement for elements in x,
each only once
Args:
pattern: The pattern
replacement: The replacement
x: A string or an iterable of strings; or those can be coerced to
ignore_case: Do case-insensitive matching?
fixed: Fixed matching (instead of regex matching)?
Returns:
An array of strings with matched parts replaced.
"""
return _sub(
x if isinstance(x, (Series, SeriesGroupBy)) else Series(x),
pattern,
replacement,
ignore_case=ignore_case,
fixed=fixed,
)
@dispatching(kind="transform", qualname="datar.base.gsub")
def _gsub(x, pattern, replacement, ignore_case, fixed):
return _sub_(
pattern=pattern,
replacement=replacement,
x=x,
ignore_case=ignore_case,
fixed=fixed,
count=0,
fun="gsub",
)
@register_func(None, context=Context.EVAL)
def gsub(
pattern,
replacement,
x,
ignore_case=False,
fixed=False,
):
"""R's gsub, replace a pattern with replacement for elements in x,
each for all matched parts
See Also:
[sub()](datar.base.string.sub)
"""
return _gsub(
x if isinstance(x, (Series, SeriesGroupBy)) else Series(x),
pattern,
replacement,
ignore_case=ignore_case,
fixed=fixed,
)
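# Usage sketch (illustrative input): sub() replaces only the first match,
# gsub() replaces all of them.
#
#   sub("a", "o", ["banana"])   # -> "bonana"
#   gsub("a", "o", ["banana"])  # -> "bonono"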
# Grep family helpers --------------------------------
def _warn_more_pat_or_rep(pattern, fun, arg="pattern"):
"""Warn when there are more than one pattern or replacement provided"""
if is_scalar(pattern):
return pattern
if len(pattern) == 1:
return pattern[0]
logger.warning(
"In %s(...), argument `%s` has length > 1 and only the "
"first element will be used",
fun,
arg,
)
return pattern[0]
def _match(text, pattern, ignore_case, invert, fixed):
"""Do the regex match"""
if pd.isnull(text):
return False
flags = re.IGNORECASE if ignore_case else 0
if fixed:
pattern = re.escape(pattern)
pattern = re.compile(pattern, flags)
matched = pattern.search(text)
if invert:
matched = not bool(matched)
return bool(matched)
_match = np.vectorize(_match, excluded={"pattern"})
def _sub_(
pattern,
replacement,
x,
ignore_case=False,
fixed=False,
count=1,
fun="sub",
):
"""Replace a pattern with replacement for elements in x,
with argument count available
"""
pattern = _warn_more_pat_or_rep(pattern, fun)
replacement = _warn_more_pat_or_rep(replacement, fun, "replacement")
if fixed:
pattern = re.escape(pattern)
flags = re.IGNORECASE if ignore_case else 0
pattern = re.compile(pattern, flags)
return pattern.sub(repl=replacement, count=count, string=x)
_sub_ = np.vectorize(_sub_, excluded={"pattern", "replacement"})
@func_factory("transform", "x")
def nchar(
x,
type="chars",
allow_na=True, # i.e.: '\ud861'
keep_na=None,
_na_len=2,
):
"""Get the size of the elements in x"""
x, keep_na = _prepare_nchar(x, type, keep_na)
return _nchar_scalar(
x, retn=type, allow_na=allow_na, keep_na=keep_na, na_len=_na_len
)
@func_factory("transform", "x")
def nzchar(x, keep_na=False):
"""Find out if elements of a character vector are non-empty strings.
Args:
x: Strings to test
keep_na: What to return when for NA's
Returns:
A bool array to tell whether elements in x are non-empty strings
"""
x = regcall(as_character, x, _na=np.nan if keep_na else "")
if not keep_na:
return x.fillna(False).astype(bool)
return as_logical(x, na=np.nan)
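# Usage sketch (illustrative values):
#
#   nchar(["a", "ab", "abc"])   # -> element sizes 1, 2, 3
#   nzchar(["", "x", None])     # -> False, True, False (NA counts as empty here)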
# nchar helpers --------------------------------
def _prepare_nchar(x, type, keep_na):
"""Prepare arguments for n(z)char"""
arg_match(type, "type", ["chars", "bytes", "width"])
if keep_na is None:
keep_na = type != "width"
return regcall(as_character, x), keep_na
@np.vectorize
def _nchar_scalar(x, retn, allow_na, keep_na, na_len):
"""Get the size of a scalar string"""
if pd.isnull(x):
return np.nan if keep_na else na_len
if retn == "width":
try:
from wcwidth import wcswidth
except ImportError as imperr: # pragma: no cover
raise ValueError(
"`nchar(x, type='width')` requires `wcwidth` package.\n"
"Try: pip install -U datar[wcwidth]"
) from imperr
return wcswidth(x)
if retn == "chars":
return len(x)
try:
x = x.encode("utf-8")
except UnicodeEncodeError:
if allow_na:
return np.nan
raise
return len(x)
# paste and paste0 --------------------
_is_empty = lambda x: (
(is_scalar(x) and not x) or (not is_scalar(x) and len(x) == 0)
)
@register_func(None, context=Context.EVAL)
def paste(*args, sep=" ", collapse=None):
"""Concatenate vectors after converting to character.
Args:
*args: strings to be concatenated
sep: The separator
collapse: The separator to collapse the final string arrays
Returns:
A single string if collapse is given, otherwise an array of strings.
"""
if len(args) == 1 and isinstance(args[0], TibbleRowwise):
out = args[0].apply(
lambda row: row.astype(str).str.cat(sep=sep), axis=1
)
return collapse.join(out) if collapse else out
from ..tibble import tibble
if all(_is_empty(arg) for arg in args):
df = tibble(*args, _name_repair="minimal")
else:
df = tibble(
*("" if _is_empty(arg) else arg for arg in args),
_name_repair="minimal",
)
if not isinstance(df, TibbleGrouped):
out = df.apply(lambda col: col.astype(str).str.cat(sep=sep), axis=1)
if collapse:
return collapse.join(out)
if any(isinstance(x, PandasObject) for x in args):
return out
return np.array(out, dtype=object)
out = df.apply(
lambda row: row.astype(str).str.cat(sep=sep), axis=1
).groupby(df._datar["grouped"].grouper)
if collapse:
out = out.agg(lambda x: x.str.cat(sep=collapse))
return out
@register_func(None, context=Context.EVAL)
def paste0(*args, sep="", collapse=None):
"""Paste with empty string as sep"""
return regcall(paste, *args, sep="", collapse=collapse)
# sprintf ----------------------------------------------------------------
@register_func(None, context=Context.EVAL)
def sprintf(fmt, *args):
"""C-style String Formatting
Args:
fmt: The formats
*args: The values
Returns:
A scalar string if all fmt, *args are scalar strings, otherwise
an array of formatted strings
"""
if is_scalar(fmt) and all(is_scalar(x) for x in args):
if pd.isnull(fmt):
return np.nan
return fmt % args
from ..tibble import tibble
df = tibble(fmt, *args, _name_repair="minimal")
aggfunc = lambda row: (
np.nan
if pd.isnull(row.values[0])
else row.values[0] % tuple(row.values[1:])
)
if isinstance(df, TibbleGrouped):
return Tibble(df, copy=False).agg(aggfunc, axis=1).groupby(
df._datar["grouped"].grouper
)
return df.agg(aggfunc, axis=1)
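# Usage sketch: scalar inputs give a plain string, vector inputs are formatted
# element-wise (values are illustrative):
#
#   sprintf("%.2f", 3.14159)              # -> "3.14"
#   sprintf("%s-%d", ["a", "b"], [1, 2])  # -> "a-1", "b-2"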
# substr, substring ----------------------------------
@func_factory("transform", "x")
def substr(x, start, stop):
"""Extract substrings in strings.
Args:
x: The strings
start: The start positions to extract
stop: The stop positions to extract
Returns:
The substrings from `x`
"""
x = regcall(as_character, x)
return x.str[start:stop]
@func_factory("transform", "x")
def substring(x, first, last=1000000):
"""Extract substrings in strings.
Args:
x: The strings
        first: The start positions to extract
        last: The stop positions to extract
Returns:
The substrings from `x`
"""
x = regcall(as_character, x)
return x.str[first:last]
# strsplit --------------------------------
@func_factory("transform", {"x", "split"})
def strsplit(x, split, fixed=False):
"""Split strings by separator
Args:
x: The strings. Have to be strings, no casting will be done.
split: The separators to split
fixed: fixed matching (instead of regex matching)?
Returns:
List of split strings of x if both x and split are scalars. Otherwise,
an array of split strings
"""
def split_str(string, sep):
if fixed:
return string.split(sep)
sep = re.compile(sep)
return sep.split(string)
return np.vectorize(split_str, [object])(x, split)
# startsWith, endsWith
@func_factory("transform", "x")
def startswith(x, prefix):
"""Determines if entries of x start with prefix
Args:
x: A vector of strings or a string
prefix: The prefix to test against
Returns:
A bool vector for each element in x if element startswith the prefix
"""
x = regcall(as_character, x)
return x.str.startswith(prefix)
@func_factory("transform", "x")
def endswith(x, suffix):
"""Determines if entries of x end with suffix
Args:
x: A vector of strings or a string
suffix: The suffix to test against
Returns:
A bool vector for each element in x if element endswith the suffix
"""
x = regcall(as_character, x)
return x.str.endswith(suffix)
@func_factory("transform", "x")
def strtoi(x, base=0):
"""Convert strings to integers according to the given base
Args:
x: A string or vector of strings
base: an integer which is between 2 and 36 inclusive, or zero.
With zero, a suitable base will be chosen following the C rules.
Returns:
Converted integers
"""
return x.transform(int, base=base)
@func_factory("transform", "x")
def chartr(old, new, x):
"""Replace strings char by char
Args:
x: A string or vector of strings
old: A set of characters to replace
new: A set of characters to replace with
Returns:
The strings in x being replaced
"""
old = _warn_more_pat_or_rep(old, "chartr", "old")
new = _warn_more_pat_or_rep(new, "chartr", "new")
if len(old) > len(new):
raise ValueError("'old' is longer than 'new'")
new = new[: len(old)]
for oldc, newc in zip(old, new):
x = x.str.replace(oldc, newc)
return x
@func_factory("transform", "x")
def tolower(x):
"""Convert strings to lower case
Args:
x: A string or vector of strings
Returns:
Converted strings
"""
x = regcall(as_character, x)
return x.str.lower()
@func_factory("transform", "x")
def toupper(x):
"""Convert strings to upper case
Args:
x: A string or vector of strings
Returns:
Converted strings
"""
x = regcall(as_character, x)
return x.str.upper()
@func_factory("transform", "x")
def trimws(x, which="both", whitespace=r"[ \t\r\n]"):
"""Remove leading and/or trailing whitespace from character strings.
Args:
x: A string or vector of strings
which: A character string specifying whether to remove
both leading and trailing whitespace (default),
or only leading ("left") or trailing ("right").
whitespace: a string specifying a regular expression to
match (one character of) “white space”
Returns:
The strings with whitespaces removed
"""
which = arg_match(which, "which", ["both", "left", "right"])
x = regcall(as_character, x)
if which == "both":
expr = f"^{whitespace}|{whitespace}$"
elif which == "left":
expr = f"^{whitespace}"
else:
expr = f"{whitespace}$"
return np.vectorize(re.sub, excluded={"pattern", "repl"})(expr, "", x)
|
the-stack_0_9952 | #!/usr/bin/env python
"""Basic pipeline building blocks.
This module provides the basic building blocks in a JIP pipeline and a way
to search and find them at run-time. The basic building blocks are instances
of :py:class:`Tool`. The JIP library comes with two sub-classes that can be
used to create tool implementations:
:py:class:`ScriptTool`
This sub-class of `Tool` integrates file or script based tool
implementations which can be served from stand-alone script files
:py:class:`PythonTool`
In contrast to the script tool, this `Tool` extension allows to create
`Tool` instances from other, possibly non-related, python classes. The
easiest way to used this is with the :py:class:`jip.tools.tool` decorator,
which allows you to take arbitrary python classes and *make* them jip
tools.
In addition to the `Tool` implementations, this module provides the
:py:class:`Scanner` class, which is used to find tool implementations either
from disk or from an arbitrary python module. This class is supposed to be
used as a *singleton* and a configured instance is available in the main
`jip` module, exposed as `jip.scanner`. The scanner class itself is
configured either through the :py:mod:`jip.configuration`, or through
environment variables. The :py:class:`Scanner` documentation covers both
the environment variables that can be used as well as the configuration
properties.
"""
import copy
import inspect
from textwrap import dedent
from os import remove, getcwd, getenv, listdir
from os.path import exists, basename, dirname, abspath
import os
import sys
import types
import shutil
import base64
import jip.templates
from jip.options import Options, TYPE_OUTPUT, TYPE_INPUT, Option
from jip.templates import render_template, set_global_context
from jip.utils import list_dir
from jip.logger import getLogger
from jip.six import iteritems, string_types, PY3, PY2
from jip.six.moves import cPickle
import jip.profiles
from io import IOBase
log = getLogger('jip.tools')
# the pickle template to store a pyton tool
_pickel_template = """
python -c '
import sys
try:
import pickle
except ImportError:
import cPickle as pickle
import jip
import jip.tools
import types
import base64
jip._disable_module_search = True
source=base64.b64decode("".join([l for l in sys.stdin]))
data = pickle.loads(source)
deco = jip.tools.tool()
tool = jip.tools.PythonTool(
data["instance"],
deco
)
tool._options = data["options"]
if isinstance(tool, types.FunctionType):
tool()
else:
tool.run()
'<< __EOF__
%s__EOF__
"""
#########################################################
# Exceptions
#########################################################
class ValidationError(Exception):
"""Exception raised in validation steps. The exception
carries the source tool and a message.
"""
def __init__(self, source, message):
self.source = source
self.message = message
def __repr__(self):
import jip.cli
if self.source:
return "%s: %s" % (
jip.cli.colorize(self.source, jip.cli.RED),
jip.cli.colorize(self.message, jip.cli.BLUE)
)
else:
return "%s" % (
jip.cli.colorize(self.message, jip.cli.RED)
)
def __str__(self):
return self.__repr__()
class ToolNotFoundException(Exception):
"""Raised in case a tool is not found by the scanner"""
pass
#########################################################
# decorators
#########################################################
class tool(object):
"""Decorate functions and classes and convert them to tools.
The @jip.tool decorator turns classes and functions into valid JIP
tools. The simplest way to use this decorator is to annotate a python
function that returns a string. This string is then interpreted as a
JIP script template. The functions docstring is used, similar to
JIP scripts, to parse command line options and tool input and
output parameters. For example::
@tool()
def mytool():
'''
Send a greeting
usage:
mytool <name>
'''
            return 'echo "hello ${name}"'
    This creates a single *bash* interpreted script and exposes a tool,
    `mytool`, into the JIP environment. You can use the decorator's
    arguments to further customize the tool specification, e.g. specify
a different name. If you want to use a different interpreter, you can
return a tuple where the first element is the interpreter name and the
second is the script template.
:param name: specify a tool name. If no name is specified, the name
of the decorated function or class is used as the tool
name
:param inputs: specify a list of option names that are treated
as input options
:param outputs: specify a list of option names that are treated as output
options
:param argparse: specify the name of the function or a function reference
that take an ``ArgumentParser`` instance and populates
it. This takes precedence over the doc string if the
function exists.
:param get_command: name of the function or a function reference that
implements the tools ``get_command`` function
:param validate: name of the function or a function reference that
implements the tools ``validate`` function
:param setup: name of the function or a function reference that
implements the tools ``setup`` function
:param init: name of the function or a function reference that
implements the tools ``init`` function
:param run: name of the function or a function reference that
implements the tools ``run`` function
:param pipeline: name of the function or a function reference that
implements the tools ``pipeline`` function
:param is_done: name of the function or a function reference that
implements the tools ``is_done`` function
:param cleanup: name of the function or a function reference that
implements the tools ``cleanup`` function
:param help: name of the function or a function reference that
implements the tools ``help`` function
:param add_outputs: takes a list of values to add hidden output
options
:param check_files: takes a list of option names that will be passed
through file checks on validation
"""
def __init__(self, name=None, inputs=None, outputs=None,
argparse='register', get_command='get_command',
validate='validate',
setup='setup',
init='init',
run='run',
pipeline='pipeline',
is_done='is_done',
cleanup='cleanup',
help='help',
add_outputs=None,
check_files=None,
ensure=None,
pytool=False,
force_pipeline=False):
self.name = name
self.inputs = inputs
self.outputs = outputs
self.argparse = argparse
self.add_outputs = add_outputs
self._check_files = check_files
self._ensure = ensure
self._pytool = pytool
self._force_pipeline = force_pipeline
################################################################
# tool delegates
################################################################
self._validate = validate if validate else "validate"
self._setup = setup if setup else "setup"
self._init = init if init else "init"
self._is_done = is_done if is_done else "is_done"
self._pipeline = pipeline if pipeline else "pipeline"
self._get_command = get_command if get_command else "get_command"
self._cleanup = cleanup if cleanup else "cleanup"
self._help = help if help else "help"
self._run = run if run else "run"
def __call__(self, *args):
cls = args[0]
log.debug("Decorated tool or pipeline: %s", cls)
# check the name
if self.name is None:
if isinstance(cls, types.FunctionType) and PY2:
self.name = cls.func_name
else:
self.name = cls.__name__
# overwrite the string representation
is_class = False
if not isinstance(cls, types.FunctionType):
cls.__repr__ = lambda x: self.name
is_class = True
if is_class:
old = None
if hasattr(cls, '__setattr__'):
old = cls.__setattr__
def setatr(slf, name, value):
ov = slf.__dict__.get(name, None)
if ov is not None and isinstance(ov, Option):
ov.set(value)
else:
if old:
old(slf, name, value)
else:
if name in slf.__dict__:
slf.__dict__[name] = value
else:
raise AttributeError()
cls.__setattr__ = setatr
tool_instance = PythonTool(cls, self, self.add_outputs)
Scanner.registry[self.name] = tool_instance
log.debug("Registered tool from module: %s", self.name)
return cls
################################################################
# tool delegates
################################################################
def _update_delegate(self, wrapper, instance):
# helper function to expose a name function directly
def set_name(name):
# set the job name
wrapper.job.name = name
# inject helper functions
helper_function = {
"name": set_name,
"job": wrapper.job,
"profile": wrapper.job,
"add_output": wrapper.options.add_output,
"add_input": wrapper.options.add_input,
"add_option": wrapper.options.add_option,
'r': render_template,
'render_template': render_template,
'options': wrapper.options,
'opts': wrapper.options,
'args': wrapper.args,
'ensure': wrapper.ensure,
'check_file': wrapper.check_file,
'validation_error': wrapper.validation_error
}
for k, v in iteritems(helper_function):
if not hasattr(instance, k):
instance.__dict__[k] = v
# inject options if they don't exists
for o in wrapper.options:
if not hasattr(instance, o.name):
instance.__dict__[o.name] = o
def __call_delegate(self, fun, wrapper, instance):
if not callable(fun):
name = fun
try:
fun = getattr(instance, name)
except:
# don't double validate, the python tool will call the
# Tool validate already
if name == 'validate':
return
# try to get the function frow main Tool implementation
fun = getattr(Tool, name)
if fun:
# make sure the instance is aware of the options
if (hasattr(fun, "__self__") and fun.__self__ is not None) or \
(hasattr(fun, "im_self") and fun.im_self is not None):
self._update_delegate(wrapper, instance)
# force options and args
instance.options = wrapper.options
instance.opts = wrapper.options
instance.args = wrapper.args
return fun()
else:
# function based implementation
self._update_delegate(wrapper, wrapper)
return fun(wrapper)
def validate(self, wrapper, instance):
try:
r = self.__call_delegate(self._validate, wrapper, instance)
if self._check_files:
for check in self._check_files:
wrapper.check_file(check)
if self._ensure:
for e in self._ensure:
wrapper.ensure(e[0], e[1], None if len(e) < 3 else e[2])
return r
except Exception as err:
if not isinstance(err, ValidationError):
log.debug("Validation error: %s", str(err).strip())
err = ValidationError(wrapper, str(err))
raise err
def setup(self, wrapper, instance):
return self.__call_delegate(self._setup, wrapper, instance)
def init(self, wrapper, instance):
return self.__call_delegate(self._init, wrapper, instance)
def is_done(self, wrapper, instance):
return self.__call_delegate(self._is_done, wrapper, instance)
def pipeline(self, wrapper, instance):
return self.__call_delegate(self._pipeline, wrapper, instance)
def get_command(self, wrapper, instance):
interp = "bash"
cmd = None
if not self._pytool and not isinstance(instance, types.FunctionType):
cmds = self.__call_delegate(self._get_command, wrapper,
instance)
else:
if self._pytool:
# this is a python tool that wrapps a class or function.
# In order to get a single command, we pickle the
# wrapped instance and the options and then push it
# through the pickel template
data = {
"instance": instance,
"options": wrapper.options
}
r = ('bash', _pickel_template %
(base64.b64encode(cPickle.dumps(data))))
return r
else:
# this is not a python tool function but a function
# that will return a template
argspec = inspect.getargspec(instance)
if len(argspec[0]) > 0:
cmds = instance(wrapper)
else:
cmds = instance()
if isinstance(cmds, (list, tuple)):
interp = cmds[0]
cmd = cmds[1]
else:
cmd = cmds
if interp and cmd:
block = Block(content=cmd, interpreter=interp)
return interp, block.render(wrapper)
return None, None
def cleanup(self, wrapper, instance):
return self.__call_delegate(self._cleanup, wrapper, instance)
def run(self, wrapper, instance):
return self.__call_delegate(self._run, wrapper, instance)
def help(self, wrapper, instance):
return self.__call_delegate(self._help, wrapper, instance)
class pytool(tool):
"""This is a decorator that can be used to mark single python functions
as tools. The function will be wrapped in a PythonTool instance and
the function must accept a single paramter self to access to tools
options.
"""
def __init__(self, *args, **kwargs):
kwargs['pytool'] = True
tool.__init__(self, *args, **kwargs)
class pipeline(tool):
"""This is a decorator that can be used to mark single python functions
as pipelines.
"""
def __init__(self, *args, **kwargs):
kwargs['force_pipeline'] = True
tool.__init__(self, *args, **kwargs)
class Scanner():
"""
This class holds a script/tool cache
The cache is organized in to dicts, the script_cache, which
store name->instance pairs pointing form the name of the tool
to its cahced instance. The find implementations will return
clones of the instances in the cache.
"""
registry = {}
def __init__(self, jip_path=None, jip_modules=None):
self.initialized = False
self.instances = {}
self.jip_path = jip_path if jip_path else ""
self.jip_modules = jip_modules if jip_modules else []
self.jip_file_paths = set([])
self.__scanned = False
self.__scanned_files = None
def find(self, name, path=None, is_pipeline=False):
"""Finds a tool by its name or file name.
If the given name points to an existing file, the file is loaded
as a script tools and returned. Otherwise, a default search is
triggered, optionally including the specified path.
:returns: a new instance of the tool
:rtype: :class:`Tool`
:raises ToolNotFoundException: if the tool could not be found
"""
if name is None:
return None
s = name.split(" ", 1)
args = None
if len(s) > 1:
import shlex
name = s[0]
args = shlex.split(s[1])
if exists(name) and os.path.isfile(name):
## the passed argument is a file. Try to load it at a
## script and add the files directory to the search path
tool = ScriptTool.from_file(name, is_pipeline=is_pipeline)
self._register_tool(name, tool)
self.jip_file_paths.add(dirname(name))
clone = tool.clone()
clone.init()
if args:
log.debug("Scanner | Parsing arguments passed "
"through tool name")
clone.parse_args(args)
return clone
if not self.initialized:
self.scan()
self.initialized = True
self.instances.update(Scanner.registry)
tool = self.instances.get(name, None)
if tool is None:
tool = self.instances.get(name + ".jip", None)
if tool is None:
raise ToolNotFoundException("No tool named '%s' found!" % name)
if isinstance(tool, string_types):
## the tool is not loaded, load the script,
## and add it to the cache
tool = ScriptTool.from_file(tool, is_pipeline=is_pipeline)
self._register_tool(name, tool)
log.debug("Scanner | Cloning tool %s [%s]", tool, tool.__hash__())
clone = tool.clone()
clone.init()
if args:
log.debug("Scanner | Parsing arguments passed through tool name")
clone.parse_args(args)
return clone
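    # Usage sketch (uses the configured singleton exposed as ``jip.scanner``;
    # "mytool" stands for any registered tool name or .jip script path, and
    # the inline arguments are hypothetical options of that tool):
    #
    #   import jip
    #   my_tool = jip.scanner.find("mytool")
    #   # arguments can also be passed inline and are split via shlex:
    #   my_tool = jip.scanner.find("mytool -i input.txt")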
def scan(self, path=None):
"""Searches for scripts and python modules in the configured
locations and returns a dictionary of the detected instances
:param path: optional path value to define a folder to scan
:returns: dict of tools
"""
log.debug("Searching for JIP tools")
if self.instances is None:
self.instances = {}
self.scan_files(parent=path)
self.scan_modules()
for n, m in iteritems(Scanner.registry):
self._register_tool(n, m)
return self.instances
def _register_tool(self, name, tool):
self.instances[name] = tool
# check and load profile for the given tool
if tool.path:
spec_file = tool.path
# replace extension with .spec
try:
i = spec_file.rindex(".")
if i >= 0:
spec_file = spec_file[:i] + ".spec"
log.debug("Checking for spec file at: %s", spec_file)
if os.path.exists(spec_file):
log.info("Loading spec for %s from %s",
name, spec_file)
profile = jip.profiles.Profile.from_file(spec_file)
tool._job = profile
except Exception as err:
log.error("Error while loading spec for %s: %s", name, err,
exc_info=True)
def scan_files(self, parent=None):
"""Scan files for jip tools. This functions detects files with
the ``.jip`` extension in the default search locations.
:param parent: optional parent folder
:returns: list of found files
"""
if parent is None and self.__scanned_files is not None:
return self.__scanned_files
import re
        pattern = re.compile(r'^.*(\.jip)$')
files = {}
if parent:
for path in self.__search(parent, pattern):
self.instances[basename(path)] = path
files[basename(path)] = path
#check cwd
for path in self.__search(getcwd(), pattern, False):
self.instances[basename(path)] = path
files[basename(path)] = path
jip_path = "%s:%s" % (self.jip_path, getenv("JIP_PATH", ""))
for folder in jip_path.split(":") + list(self.jip_file_paths):
for path in self.__search(folder, pattern):
self.instances[basename(path)] = path
files[basename(path)] = path
if parent is None:
self.__scanned_files = files
return files
def __search(self, folder, pattern, recursive=True):
log.debug("Searching folder: %s", folder)
for path in list_dir(folder, recursive=recursive):
if pattern.match(path) and os.path.isfile(path):
log.debug("Found tool: %s", path)
yield path
def add_module(self, path):
"""Add a module or a python file to the list of module that are
scanned for tools.
:param: path to the module that will be added to the search path
"""
self.jip_modules.append(path)
self.__scanned = False
def add_folder(self, path):
"""Add a folder to the list of folders that are
scanned for tools.
:param: path to the folder that will be added to the search path
"""
self.jip_file_paths.add(path)
self.__scanned = False
self.__scanned_files = None
self.initialized = False
def scan_modules(self):
"""Loads the python modules specified in the JIP configuration.
This will register any functions and classes decorated with
one of the JIP decorators.
"""
if self.__scanned:
return
path = getenv("JIP_MODULES", "")
log.debug("Scanning modules")
for module in path.split(":") + self.jip_modules + ['jip.scripts']:
try:
if module:
log.debug("Importing module: %s", module)
__import__(module)
except ImportError as e:
log.debug("Error while importing module: %s. "
"Trying file import", str(e))
if exists(module):
self._load_from_file(module)
self.__scanned = True
def _load_from_file(self, path):
"""Try to load a module from the given file. No module is loaded
        if the file does not exist. Otherwise, a full module name is guessed
        by checking for __init__.py files upwards and the module is then
        imported via __import__.
:param path: the path to the module file
"""
if not exists(path):
return
name, parent_dir = self._guess_module_name(path)
log.debug("Importing module from file: %s %s %s", name, path,
parent_dir)
sys.path.insert(0, parent_dir)
mod = __import__(name)
log.debug("Imported module from file %s : %s", path, mod)
#imp.load_source(name, path)
def _guess_module_name(self, path):
"""Guess the absolute module name for the given file by checking for
__init__.py files in the current folder structure and upwards"""
path = abspath(path)
base = basename(path)
if base.endswith('.py'):
base = base[:-3]
name = [base]
def _load_package_name(current, module_name):
if '__init__.py' in listdir(current):
module_name.append(basename(current))
return _load_package_name(dirname(current), module_name)
return module_name, current
# check if this is in a package
name, parent_dir = _load_package_name(dirname(path), name)
name.reverse()
return ".".join(name), parent_dir
class Block(object):
"""Base class for executable blocks that can render themselves to scripts
and provide information about the interpreter that should be used to
run the script.
"""
def __init__(self, content=None, interpreter=None, interpreter_args=None,
lineno=0):
self._lineno = lineno
self.interpreter = interpreter
self._process = None
self.content = content
if self.content is None:
self.content = []
self.interpreter_args = interpreter_args
def run(self, tool, stdin=None, stdout=None):
"""Execute this block
"""
import subprocess
import jip
# write template to named temp file and run with interpreter
script_file = jip.create_temp_file()
try:
script_file.write(self.render(tool))
script_file.close()
cmd = [self.interpreter if self.interpreter else "bash"]
if self.interpreter_args:
cmd += self.interpreter_args
self.process = subprocess.Popen(
cmd + [script_file.name],
stdin=stdin,
stdout=stdout
)
return self.process
except OSError as err:
# catch the errno 2 No such file or directory, which indicates the
# interpreter is not available
if err.errno == 2:
raise Exception("Interpreter %s not found!" % self.interpreter)
raise err
def render(self, tool):
"""Renders this blocks content within the context of the given tool
:param tool: the tool
:returns: rendered block content
:rtype: string
"""
content = self.content
if isinstance(content, (list, tuple)):
content = "\n".join(content)
ctx = dict(tool.options.to_dict(raw=True))
ctx['tool'] = tool
ctx['__file__'] = tool.path
ctx['args'] = tool.options.to_dict()
ctx['options'] = tool.options.to_cmd
return render_template(content, **ctx)
def terminate(self):
"""
Terminate currently running blocks
"""
if self._process is not None:
if self._process._popen is not None:
self._process.terminate()
import time
# sleep and check job states a few times before we do a hard
# kill
for t in [0.01, 0.05, 0.10, 2, 3]:
time.sleep(t)
if not self.process.is_alive():
break
if self.process.is_alive():
# kill it
import os
import signal
os.kill(self.process._popen.pid, signal.SIGKILL)
def __str__(self):
return "Block['%s']" % self.interpreter
class PythonBlockUtils(object):
"""Utility functions that are exposed in template blocks and template
functions
The block utilities store a reference to the *local* and *global*
environment, to the current *tool* and to the current *pipeline*.
"""
def __init__(self, tool, local_env):
self.tool = tool
self._pipeline = None
self._local_env = local_env
self._global_env = None
if hasattr(tool, "_pipeline"):
self._pipeline = tool._pipeline
@property
def pipeline(self):
from jip import Pipeline
if self._pipeline is None:
self._pipeline = Pipeline()
self._pipeline._utils = self
return self._pipeline
def check_file(self, name):
"""Checks for the existence of a file referenced by an options.
Please note that this doe **not** take a file name, but the name
of an option. This function is preferred over a simple check
using ``os.path.exists()`` because it also checks for job dependencies.
This is important because a mandatory file might not *yet* exist
within the context of a pipeline, but it will be created at runtime
in a previous step.
:param name: the options name
:returns: True if the file exists or the file is created by another
job that will run before this options job is executed.
:rtype: boolean
"""
opt = self.tool.options[name]
if not opt.is_dependency():
self.tool.options[name].validate()
def validation_error(self, message, *args):
"""Quickly raise a validation error with a custom message.
This function simply raises a ValidationError. You can use it
in a custom validation implementation to quickly fail the validation
:param message: the message
:param args: arguments interpolated into the message
:raises ValidationError: always
"""
raise ValidationError(self.tool, message % args)
def set(self, name, value):
"""Set an options value.
:param name: the options name
:type name: string
:param value: the new value
"""
self.tool.options[name].value = value
def run(self, _name, **kwargs):
"""Searches for a tool with the specified name and adds it as a
new :py:class:`~jip.pipelines.Node` to the current pipeline.
All specified keyword arguments are passed as option values to
the tool.
Delegates to the pipeline's :py:meth:`jip.pipelines.Pipeline.run`
method.
:param _name: the name of the tool
:type _name: string
:param kwargs: additional argument passed to the tool as options
:returns: a new node that executes the specified tool and is added
to the current pipeline
:rtype: :py:class:`jip.pipelines.Node`
"""
return self.pipeline.run(_name, **kwargs)
def job(self, *args, **kwargs):
"""Create and returns a new :class:`~jip.pipelines.Job`.
The job instance can be used to customize the execution environment
for *the next* job. For example::
job("Test", threads=2).run('mytool', ...)
This is a typical usage in a pipeline context, where a new job
environment is created and then applied to a new 'mytool' pipeline
node.
:param args: job arguments
:param kwargs: job keyword arguments
:returns: a new job instance
:rtype: :class:`jip.pipelines.Job`
"""
return self.pipeline.job(*args, **kwargs)
def name(self, name):
"""Set the runtime name of a pipeline.
The runtime name of the pipeline is stored in the database and is
used as a general identifier for a pipeline run.
**Note** that this sets the name of the *pipeline* if used in a pipeline
context, otherwise it sets the name of the tool/job.
Within a pipeline context, the name can be changed using a :py:func:`job`::
job("my job").run(...)
or after the node was created:
myrun = run(...)
myrun.job.name = "my job"
:param name: the name of the pipeline
:type name: string
"""
self.tool._job.name = name
def bash(self, command, **kwargs):
"""Create a *bash* job that executes a bash command.
This is a fast way to build pipelines that execute shell commands. The
functions wraps the given command string in the *bash tool* that
is defined with ``input``, ``output``, and ``outfile``. Input and
output default to stdin and stdout. Note that you can access your
local context within the command string. Take for example the following
pipeline script::
name = "Joe"
bash("echo 'Hello ${name}'")
This will work as expected. The command template can access local
variables. Please keep in mind that the tools context takes precedence
over the script context. That means that::
input="myfile.txt"
bash("wc -l ${input}")
In this example, the command ``wc -l`` will be rendered and will wait for
input on stdin. The bash command has an ``input`` option and that takes
precedence over the globally defined ``input`` variable. This is true
for ``input``, ``output``, and ``outfile``, even if they are not
explicitly set.
You can however access variables defined in the global context using
the `_ctx`::
input="myfile.txt"
bash("wc -l ${_ctx.input}")
will indeed render and execute ``wc -l myfile.txt``.
:param command: the bash command to execute
:type command: string
:param kwargs: arguments passed into the context used to render the
bash command. ``input``, ``output``, and ``outfile`` are
passed as options to the *bash* tool that is used to
run the command
:returns: a new pipeline node that represents the bash job
:rtype: :class:`jip.pipelines.Node`
"""
bash_node = self.pipeline.run('bash', cmd=command, **kwargs)
return bash_node
def _update_global_env(self, env):
if not self._global_env:
self._global_env = {}
self._global_env.update(env)
def _update_context(self, ctx, kwargs=None, base_node=None):
if self._global_env:
for k, v in iteritems(self._global_env):
if k not in ctx:
ctx[k] = v
if kwargs:
ctx.update(kwargs)
## update all Nodes with their default output options
if base_node is not None:
from jip.pipelines import Node
class OptionWrapper(object):
def __init__(self, node, option):
self.node = node
self.option = option
def __str__(self):
if base_node != self.node:
base_node.depends_on(self.node)
if self.option.option_type != jip.options.TYPE_OPTION:
log.debug("Adding additional input option "
"for node %s : %s",
base_node, self.option.name)
self.node._tool.options.make_absolute(
self.node._job.working_dir
)
base_node._additional_input_options.add(
self.option
)
return str(self.option)
def __getattr__(self, name):
# check that the option exists (Issue #43)
opt = self.node._tool.options[name]
if opt is None:
log.info("Option '%s' not found in %s",
name, self.node, exc_info=True)
raise ValidationError(
self.node,
"Option '%s' not found in node '%s'" % (
name, self.node
)
)
return OptionWrapper(
self.node, self.node._tool.options[name]
)
for k in ctx.keys():
v = ctx[k]
if isinstance(v, Node):
try:
ctx[k] = OptionWrapper(
v,
v._tool.options.get_default_output()
)
except LookupError:
# no default output option
pass
return ctx
class PythonBlock(Block):
"""Extends block and runs the content as embedded python
"""
def __init__(self, content=None, lineno=0):
Block.__init__(self, content=content, lineno=lineno)
self.interpreter = "__embedded__"
def run(self, tool, stdin=None, stdout=None):
"""Execute this block as an embedded python script
"""
log.debug("Block: run python block for: %s", tool)
#tmpl = self.render(tool)
content = self.content
if isinstance(content, (list, tuple)):
content = "\n".join(content)
local_env = locals()
utils = PythonBlockUtils(tool, local_env)
profile = jip.profiles.Profile()
if hasattr(tool, '_job'):
profile = tool._job
env = {
"tool": tool,
"args": tool.options.to_dict(),
"opts": tool.options,
"options": tool.options,
"check_file": utils.check_file,
"ensure": tool.ensure,
"run": utils.run,
"validation_error": utils.validation_error,
"bash": utils.bash,
"job": utils.job,
"name": utils.name,
"add_output": tool.options.add_output,
"add_input": tool.options.add_input,
"add_option": tool.options.add_option,
"set": utils.set,
'r': render_template,
'render_template': render_template,
'utils': utils,
'profile': profile,
'basename': basename,
'dirname': dirname,
'abspath': abspath,
'pwd': getcwd(),
'exists': exists,
'__file__': tool.path if tool.path else None
}
# link known tools into the context
from jip import scanner
from functools import partial
scanner.scan_modules()
for name, cls in iteritems(scanner.registry):
if not name in env:
env[name] = partial(utils.run, name)
for name, path in iteritems(scanner.scan_files()):
k = name
if k.endswith(".jip"):
k = k[:-4]
if not k in env:
env[k] = partial(utils.run, name)
# link options to context
for o in tool.options:
if not o.name in env:
n = o.name.replace("-", "_").replace(" ", "_")
env[n] = o
utils._global_env = env
old_global_context = jip.templates.global_context
set_global_context(env)
try:
env.update(local_env)
exec(content, env)
except Exception as e:
if hasattr(e, 'lineno'):
e.lineno += self._lineno
raise
# auto naming for tools
from jip.pipelines import Node
for k, v in iteritems(env):
if isinstance(v, Node):
if v._job.name is None:
v._job.name = k
# reset index
log.debug("Block: block for: %s executed", tool)
return env
def terminate(self):
"""The terminate function on a python block does nothing. A
Python block can not be terminated directly"""
pass
def __str__(self):
return "PythonBlock"
class Tool(object):
"""The base class for all implementation of executable units.
This class provides all the building block to integrated new tool
implementations that can be executed, submitted and integrated in pipelines
to construct more complex setups.
A `Tool` in a JIP setup is considered to be a container for the execution's
meta-data, i.e. options and files that are needed for the actual run. The
main function of the `Tool` class is its :py:meth:`get_command`
function, which returns a tuple `(interpreter, command)`, where the
`interpreter` is a string like "bash" or "perl" or even a *path* to some
interpreter executable that will be used to execute the `command`. The
command itself is the string representation of the content of a script that
will be passed to the `interpreter` at execution time. Please note that
the :py:meth:`get_command` function's command part is supposed to be
fully *rendered*, it will not be modified any further. The JIP default
tool classes that are used, for example, to provide scripts to the system,
are already integrated with the :py:mod:`jip.templates` system, but you can
easily use the rendering function directly to create more dynamic commands
that can adapt easily to changes in the configuration of a tool.
The class exposes a name and a path to a source file as properties. Both
are optional and can be omitted in order to implement anonymous tools. In
addition to these *meta* data, the tools :py:meth:`__init__` function
allows you to provide a *options_source*. This object is used to create the
:py:class:`jip.options.Options` that cover the runtime configuration of a
tool. The options are initialized lazily on first access using the
`options_source` provided at initialization time. This object can be either
a string or an instance of an `argparse.ArgumentParser`. Both styles of
providing tool options are described in the :py:mod:`jip.options` module.
"""
def __init__(self, options_source=None, name=None):
"""Initialize a tool instance. If no options_source is given
the class docstring is used as the options source.
:param options_source: either a string or an argparser instance
defaults to the class docstring
:param name: the name of this tool
"""
#: the tools name
self._name = name
#: path to the tools source file
self.path = None
self._options = None
self._options_source = options_source
self._job = None
self._is_pipeline = False
def setup(self):
"""Setup method that can be implemented to manipulate tool options
before rendering and validation. Note that options here might still
contain template string. You are also allowed to set option values
to template strings.
:raises Exception: in case of a critical error
"""
pass
def init(self):
"""Initialization method that can be implemented to initialize the tool
instance and, for example, add options. ``init`` is called once for
the tool instance and the logic within the ``init`` is not allowed to
rely on any values set or applied to the tool.
:raises Exception: in case of a critical error
"""
pass
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def job(self):
if self._job is None:
self._job = jip.profiles.Profile()
return self._job
def profile(self):
return self.job
@property
def options(self):
"""Access this tools :py:class:`jip.options.Options` instance.
The tools options are the main way to interact with and configure a
tool instance either from outside or from within a pipeline.
"""
if self._options is None:
if self._options_source is not None:
self._options = self._parse_options(self._options_source)
return self._options
@property
def args(self):
"""Returns a dictionary from the option names to the option values
"""
return self.options.to_dict()
def parse_args(self, args):
"""Parses the given argument. An excetion is raised if
an error ocurres during argument parsing
:param args: the argument list
:type args: list of strings
"""
self.options.parse(args)
def _parse_options(self, options_source, inputs=None, outputs=None):
"""Initialize the options from the docstring or an argparser.
In addition to the options, the function tries to deduce a tool
name if none was specified at construction time.
Optional inputs and outputs lists can be specified. Both must
be lists of strings containing option names. If the option is found
the option type is set accordingly to input or output. This is
useful if the options are not organized in groups and the
parser can not automatically identify the options type.
:param options_source: either a docstring or an argparser instance
:type options_source: string or argparse.ArgumentParser
:param inputs: list of option names that will be marked as inputs
:type inputs: list of strings
:param outputs: list of option names that will be marked as outputs
:type outputs: list of strings
"""
if options_source is None:
raise Exception("No docstring or argument parser provided!")
opts = None
if not isinstance(options_source, string_types):
opts = Options.from_argparse(options_source, source=self,
inputs=inputs, outputs=outputs)
else:
opts = Options.from_docopt(options_source, source=self,
inputs=inputs, outputs=outputs)
if self.name is None:
import re
match = re.match(r'usage:\s*\n*(\w+).*', opts.usage(),
re.IGNORECASE | re.MULTILINE)
if match:
self.name = match.groups()[0]
return opts
def validate(self):
"""The default implementation validates all options that belong to
this tool and checks that all options that are of `TYPE_INPUT`
reference existing files.
The method raises a :py:class:`ValidationError` in case an option could
not be validated or an input file does not exist.
"""
log.debug("Default options validation for %s", self)
try:
self.options.validate()
except Exception as e:
log.debug("Validation error: %s", str(e).strip())
raise ValidationError(self, str(e))
for opt in self.options.get_by_type(TYPE_INPUT):
if opt.source is not None and opt.source != self:
continue
if opt.is_dependency():
continue
for value in opt._value:
if isinstance(value, string_types):
if not exists(value):
raise ValidationError(self,
"Input file not found: %s" %
value)
def validation_error(self, message, *args):
"""Quickly raise a validation error with a custom message.
This function simply raises a ValidationError. You can use it
in a custom validation implementation to quickly fail the validation
:param message: the message
:param args: arguments interpolated into the message
:raises ValidationError: always
"""
raise ValidationError(self, message % args)
def ensure(self, option_name, check, message=None):
"""Check a given option value using the check pattern or function and
raise a ValidationError in case the pattern does not match or the
function does return False.
In case of list values, please note that in case check is a pattern,
all values are checked independently. If check is a function, the
list is passed on as is if the option takes list values, otherwise,
the check function is called for each value independently.
Note also that you should not use this function to check for file
existence. Use the `check_file()` function on the option or on the
tool instead. `check_file` checks for incoming dependencies in
pipelines, in which case the file does not exist _yet_ but it
will be created by a parent job.
:param option_name: the name of the option to check
:param check: either a string that is interpreted as a regexp pattern
or a function that takes the option's value as a single
parameter and returns True if the value is valid
:param message: optional custom message used when the check fails
"""
o = self.options[option_name]
if isinstance(check, string_types):
# regexp pattern
import re
for v in o.value:
if not re.match(check, str(v)):
self.validation_error(
message if message else "check failed for %s" % str(v)
)
return
elif callable(check):
if o.nargs == 0 or o.nargs == 1:
for v in o.value:
if not check(v):
self.validation_error(
message if message
else "check failed for %s" % str(v)
)
else:
if not check(o.value):
self.validation_error(
message if message else "check failed for %s" % o.name
)
return
raise Exception("Ensure check paramter has to be a "
"function or a pattern")
def check_file(self, option_name):
"""Delegates to the options check name function
:param option_name: the name of the option
"""
try:
self.options[option_name].check_file()
except ValueError as e:
self.validation_error(str(e))
def is_done(self):
"""The default implementation return true if the tools has output
files and all output files exist.
"""
outfiles = set(self.get_output_files())
if len(outfiles) == 0:
return False
for outfile in outfiles:
if not exists(outfile):
return False
return True
def pipeline(self):
"""Create and return the pipeline that will run this tool"""
return None
def get_command(self):
"""Return a tuple of (template, interpreter) where the template is
a string that will be rendered and the interpreter is a name of
an interpreter that will be used to run the filled template.
"""
return "bash", _pickel_template % \
(cPickle.dumps(self).encode("base64"))
def cleanup(self):
"""The celanup method removes all output files for this tool"""
outfiles = list(self.get_output_files(sticky=False))
log.debug("Tool cleanup check files: %s", outfiles)
for outfile in outfiles:
if exists(outfile):
log.warning("Tool cleanup! Removing: %s", outfile)
if os.path.isfile(outfile):
remove(outfile)
elif os.path.isdir(outfile):
shutil.rmtree(outfile)
def get_output_files(self, sticky=True):
"""Yields a list of all output files for the options
of this tool. Only TYPE_OUTPUT options are considered
whose values are strings. If a source for the option
is not None, it has to be equal to this tool.
If `sticky` is set to False, all options marked with the
sticky flag are ignored
:param sticky: by default all output option values are returned,
if this is set to False, only non-sticky output
options are yield
:type sticky: boolean
:returns: list of file names
"""
for opt in self.options.get_by_type(TYPE_OUTPUT):
if (opt.source and opt.source != self) or \
(not sticky and opt.sticky):
continue
values = opt.value
if not isinstance(values, (list, tuple)):
values = [values]
for value in values:
if isinstance(value, string_types):
import glob
globbed = glob.glob(value)
if globbed:
for v in globbed:
yield v
else:
yield value
def get_input_files(self):
"""Yields a list of all input files for the options
of this tool. Only TYPE_INPUT options are considered
whose values are strings. If a source for the option
is not None, it has to be equal to this tool.
:returns: list of file names
"""
for opt in self.options.get_by_type(TYPE_INPUT):
if opt.source and opt.source != self:
continue
values = opt.raw()
if not isinstance(values, (list, tuple)):
values = [values]
for value in values:
if isinstance(value, string_types):
yield value
def help(self):
"""Return help for this tool. By default this delegates
to the options help.
"""
return dedent(self.options.help())
def __repr__(self):
return self.name if self.name else "<Unknown>"
def __str__(self):
return self.__repr__()
def clone(self, counter=None):
"""Clones this instance of the tool and returns the clone. If the
optional counter is provided, the name of the cloned tool will be
updated using the counter value as a suffix.
"""
cloned_tool = copy.copy(self)
cloned_tool._options = self.options.copy()
if cloned_tool.name and counter is not None:
cloned_tool.name = "%s.%d" % (cloned_tool.name, str(counter))
cloned_tool._options._help = self.options._help
cloned_tool._options._usage = self.options._usage
# update the options source
cloned_tool._options.source = cloned_tool
for o in cloned_tool._options:
o.source = cloned_tool
log.debug("Tool | cloned instance %s [%s->%s]",
self, self.__hash__(), cloned_tool.__hash__())
return cloned_tool
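# Minimal subclassing sketch (hypothetical tool, shown only to illustrate how
# the docstring acts as the options source and get_command() supplies the
# rendered command):
#
# class LineCount(Tool):
#     def __init__(self):
#         Tool.__init__(self, "usage: linecount -i <input>", name="linecount")
#     def get_command(self):
#         return "bash", "wc -l ${input}"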
class PythonTool(Tool):
"""An extension of the tool class that is initialized
with a decorated class to simplify the process of implementing
Tools in python.
"""
def __init__(self, cls, decorator, add_outputs=None):
"""Initialize a new python tool
:param cls: the wrapped class
:type cls: class
:param decorator: an instance of the :class:`jip.tool` decorator
:type decorator: jip.tool
:param add_outputs: list of additional names that will be added
to the list of output options
"""
Tool.__init__(self)
self.decorator = decorator
self.cls = cls
self.name = decorator.name
try:
if not isinstance(cls, types.FunctionType):
self.instance = cls()
else:
self.instance = cls
except:
self.instance = cls
try:
self.path = inspect.getsourcefile(cls)
except:
log.debug("Unable to find source file for %s", self.name)
################################################################
# Load options either through a argparser function that was
# specified by name in the decorator or load them from the
# docstring of the instance
################################################################
self._options_source = None
self._add_outputs = add_outputs
self._is_pipeline = decorator._force_pipeline
def clone(self, counter=None):
cloned_tool = Tool.clone(self, counter=counter)
try:
if not isinstance(self.cls, types.FunctionType):
cloned_tool.instance = self.cls()
else:
cloned_tool.instance = self.cls
except:
cloned_tool.instance = self.cls
return cloned_tool
@property
def options(self):
if self._options is not None:
return self._options
if self.decorator.argparse and hasattr(self.instance,
self.decorator.argparse):
#initialize the options from argparse
import argparse
class PrintDefaultsFormatter(argparse.HelpFormatter):
def _get_help_string(self, action):
help = action.help
if '%(default)' not in action.help and \
'(default: ' not in action.help:
if action.default is not argparse.SUPPRESS:
defaulting_nargs = [argparse.OPTIONAL,
argparse.ZERO_OR_MORE]
if action.option_strings or \
action.nargs in defaulting_nargs:
if isinstance(action.default, IOBase):
if action.default == sys.stdout:
help += ' (default: stdout)'
elif action.default == sys.stdin:
help += ' (default: stdin)'
elif action.default == sys.stderr:
help += ' (default: stderr)'
else:
help += ' (default: <stream>)'
else:
help += ' (default: %(default)s)'
return help
self._options_source = argparse.ArgumentParser(
prog=self.name,
formatter_class=PrintDefaultsFormatter
)
init_parser = getattr(self.instance, self.decorator.argparse)
init_parser(self._options_source)
else:
# initialize options from doc string
import textwrap
if self.instance.__doc__ is not None:
self._options_source = textwrap.dedent(self.instance.__doc__)
else:
self._options_source = ""
# create the options
self._options = self._parse_options(self._options_source,
inputs=self.decorator.inputs,
outputs=self.decorator.outputs)
## add additional output arguments
if self._add_outputs is not None:
for arg in self._add_outputs:
if isinstance(arg, (list, tuple)):
# get default value
arg = arg[0]
self._options.add(Option(
arg,
option_type=TYPE_OUTPUT,
nargs=1,
hidden=True
))
return self._options
def run(self):
self.instance.options = self.options
self.instance.tool_instance = self
if isinstance(self.instance, types.FunctionType):
# check if the function takes a parameter
argspec = inspect.getargspec(self.instance)
if len(argspec[0]) > 0:
self.instance(self)
else:
self.instance()
else:
self.decorator.run(self, self.instance)
def validate(self):
r = self.decorator.validate(self, self.instance)
Tool.validate(self)
return r
def setup(self):
return self.decorator.setup(self, self.instance)
def init(self):
if self._add_outputs is not None:
for arg in self._add_outputs:
if isinstance(arg, (list, tuple)):
value = arg[1]
arg = arg[0]
if callable(value):
try:
value = value(self)
except Exception as err:
log.debug("Error evaluating output value: %s",
str(err), exc_info=True)
self.options[arg].set(value)
return self.decorator.init(self, self.instance)
def is_done(self):
return self.decorator.is_done(self, self.instance)
def pipeline(self):
if self.decorator._force_pipeline and isinstance(self.instance,
types.FunctionType):
# force pipeline generation. Call the instance function
# and check if the returned value is a pipeline or a string
# strings go into a pipeline block for evaluation, pipelines
# are returned unmodified
# check if the function takes a parameter
argspec = inspect.getargspec(self.instance)
r = None
if len(argspec[0]) > 0:
r = self.instance(self)
else:
r = self.instance()
if isinstance(r, string_types):
# create a pipeline block and evaluate it
block = PythonBlock(r)
e = block.run(self)
return e['utils']._pipeline
else:
return r
return self.decorator.pipeline(self, self.instance)
def help(self):
return self.decorator.help(self, self.instance)
def cleanup(self):
return self.decorator.cleanup(self, self.instance)
def get_command(self):
return self.decorator.get_command(self, self.instance)
class ScriptTool(Tool):
"""An extension of the tool class that is initialized
with a docstring and operates on Blocks that can be loaded
from a script file or from a string.
If specified as initializer parameters, both the validation and the
pipeline block will be handled with special care.
Pipeline blocks currently can only be embedded python blocks. Therefore
the interpreter has to be 'python'. Validation blocks where the
interpreter is 'python' will be converted to embedded python blocks. This
allows the validation process to modify the tool and its arguments during
validation.
"""
def __init__(self, docstring, command_block=None, setup_block=None,
init_block=None, validation_block=None, pipeline_block=None):
Tool.__init__(self, docstring)
self.command_block = command_block
self.validation_block = validation_block
self.pipeline_block = pipeline_block
self.setup_block = setup_block
self.init_block = init_block
if self.pipeline_block:
if self.pipeline_block.interpreter is not None and \
self.pipeline_block.interpreter != 'python':
raise Exception("Pipeline blocks have to be implemented in "
"python! Sorry about that, but its really a "
"nice language :)")
self.pipeline_block = PythonBlock(
lineno=self.pipeline_block._lineno,
content=self.pipeline_block.content
)
if self.validation_block and \
(self.validation_block.interpreter is None or
self.validation_block.interpreter == 'python'):
self.validation_block = PythonBlock(
lineno=self.validation_block._lineno,
content=self.validation_block.content
)
if self.setup_block:
self.setup_block = PythonBlock(
lineno=self.setup_block._lineno,
content=self.setup_block.content
)
if self.init_block:
self.init_block = PythonBlock(
lineno=self.init_block._lineno,
content=self.init_block.content
)
if not self.command_block and not self.pipeline_block:
raise Exception("No executable or pipeline block found!")
self._is_pipeline = self.pipeline_block is not None
def pipeline(self):
if self.pipeline_block:
r = self.pipeline_block.run(self)
return r['utils'].pipeline
return Tool.pipeline(self)
def run(self):
if self.command_block:
self.command_block.run(self)
def validate(self):
if self.validation_block:
self.validation_block.run(self)
Tool.validate(self)
def init(self):
if self.init_block:
self.init_block.run(self)
Tool.init(self)
def setup(self):
if self.setup_block:
self.setup_block.run(self)
Tool.setup(self)
def get_command(self):
if self.command_block:
return self.command_block.interpreter, \
self.command_block.render(self)
return None, None
@classmethod
def from_string(cls, content):
from jip.parser import load
return load(content, script_class=cls)
@classmethod
def from_file(cls, path, is_pipeline=False):
log.debug("Load script from file: %s", path)
from jip.parser import loads
s = loads(path, script_class=cls, is_pipeline=is_pipeline)
return s
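# Usage sketch: JIP scripts are usually loaded from .jip files; both calls
# below only rely on methods defined in this class (the file name is
# hypothetical):
#
# tool = ScriptTool.from_file("greet.jip")
# interpreter, command = tool.get_command()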
|
the-stack_0_9954 | """Gitlab service support.
API docs: https://docs.gitlab.com/ee/api/
"""
from dateutil.parser import parse as parsetime
from snakeoil.klass import aliased, alias
from urllib.parse import urlparse, urlunparse, quote_plus
from ._jsonrest import JsonREST
from ..exceptions import RequestError, BiteError
from ..objects import Item, Attachment, Comment, TimeInterval
from ._reqs import LinkHeaderPagedRequest, PagedRequest, ParseRequest, req_cmd
from ._rest import RESTRequest
class GitlabError(RequestError):
def __init__(self, msg, code=None, text=None):
msg = 'Gitlab error: ' + msg
super().__init__(msg, code, text)
class GitlabIssue(Item):
attributes = {
'created': 'Created',
'updated': 'Modified',
}
attribute_aliases = {
'title': 'summary',
'creator': 'author',
'owner': 'assignee',
}
_print_fields = (
('summary', 'Title'),
('assignee', 'Assignee'),
('id', 'ID'),
)
type = 'issue'
def __init__(self, repo=None, comments=None, attachments=None, **kw):
for k, v in kw.items():
# Prefix project ID to issue iid depending on the connection type.
# The 'id' field unique across all issues is essentially useless
# for us since most API calls only use project IDs and iids.
# https://docs.gitlab.com/ee/api/README.html#id-vs-iid
if k == 'id':
continue
elif k == 'iid':
k = 'id'
if repo is None:
v = f"{kw['project_id']}-{v}"
elif k in ('created_at', 'updated_at', 'closed_at') and v:
v = parsetime(v)
elif k in ('author', 'assignee') and v:
v = v['username']
setattr(self, k, v)
self.attachments = attachments if attachments is not None else ()
self.comments = comments if comments is not None else ()
class GitlabComment(Comment):
pass
class GitlabAttachment(Attachment):
pass
class GitlabProject(object):
def __init__(self, **kw):
self.id = kw['id']
self.desc = kw['description']
self.owner, self.name = kw['path_with_namespace'].split('/', 1)
self.created = parsetime(kw['created_at'])
self.updated = parsetime(kw['last_activity_at'])
self.git_repo = kw['http_url_to_repo']
self.webbase = kw['web_url']
self.tags = tuple(kw['tag_list'])
self.stars = kw['star_count']
self.forks = kw['forks_count']
class Gitlab(JsonREST):
"""Service supporting the Gitlab issue tracker."""
_service = 'gitlab'
_service_error_cls = GitlabError
item = GitlabIssue
item_endpoint = '/issues'
attachment = GitlabAttachment
#attachment_endpoint = '/file'
def __init__(self, base, max_results=None, **kw):
# extract gitlab domain
url = urlparse(base)
# TODO: generalize and allow versioned API support
api_base = urlunparse((
url.scheme,
url.netloc,
'/api/v4',
None, None, None))
paths = url.path.strip('/').split('/')
try:
group, project = paths
self.repo = f'{group}/{project}'
except ValueError:
group = paths[0] if paths[0] else None
self.repo = None
self.group = group
# gitlab maxes out at 100 results per page
if max_results is None:
max_results = 100
# use endpoint for namespaced API calls:
# https://docs.gitlab.com/ee/api/README.html#namespaced-path-encoding
endpoint = f"/projects/{quote_plus(self.repo)}" if self.repo is not None else ''
super().__init__(endpoint=endpoint, base=api_base, max_results=max_results, **kw)
self.webbase = base
def parse_response(self, response):
data = super().parse_response(response)
if 'error' not in data:
return data
else:
self.handle_error(code=response.status_code, msg=data['error'])
class GitlabPagedRequest(PagedRequest, LinkHeaderPagedRequest, RESTRequest):
"""Requests supporting gitlab's pagination method.
Docs: https://docs.gitlab.com/ee/api/README.html#pagination
"""
# Gitlab supports link headers as the canonical method for pagination, but
# it also provides parameters to request a given page so use those instead
# in order to easily generate async calls for future pages. Note that the
# total size of the query is still extracted from the headers though since
# that information isn't provided in the data response.
_page_key = 'page'
_size_key = 'per_page'
_total_key = 'NONE'
_total_header = 'X-Total'
# gitlab defaults to starting at page 1
_start_page = 1
# TODO: Add more specific Elasticsearch functionality to another search req
# class, especially since gitlab.com doesn't support elasticsearch queries yet
# but newer self-hosted instances should.
@req_cmd(Gitlab, cmd='search')
class _SearchRequest(ParseRequest, GitlabPagedRequest):
"""Construct a search request.
Gitlab uses Elasticsearch on the backend so advanced queries use its syntax.
Docs: https://docs.gitlab.com/ee/user/search/advanced_search_syntax.html
"""
# map from standardized kwargs name to expected service parameter name
_params_map = {
'status': 'state',
}
def __init__(self, **kw):
if kw['service'].group is not None and kw['service'].repo is None:
self.endpoint = f"/groups/{kw['service'].group}/issues"
else:
self.endpoint = '/issues'
self._repo = kw['service'].repo
super().__init__(endpoint=self.endpoint, **kw)
def parse(self, data):
issues = super().parse(data)
for issue in issues:
yield self.service.item(repo=self._repo, **issue)
@aliased
class ParamParser(ParseRequest.ParamParser):
# map of allowed status input values to service parameters, aliases are
# capitalized
_status_map = {
'open': 'opened',
'closed': 'closed',
'ALL': 'ALL',
}
def _finalize(self, **kw):
if not self.params:
raise BiteError('no supported search terms or options specified')
# default to returning only open issues
self.params.setdefault('status', 'opened')
# status must be unset to search across all values
if self.params['status'] == 'ALL':
del self.params['status']
# don't restrict scope by default
self.params.setdefault('scope', 'all')
# show issues in ascending order by default
self.params.setdefault('sort', 'asc')
def terms(self, k, v):
self.params['search'] = v
self.options.append(f"Summary: {', '.join(v)}")
def id(self, k, v):
self.params['iids[]'] = v
self.options.append(f"IDs: {', '.join(map(str, v))}")
def labels(self, k, v):
self.params[k] = ','.join(v)
self.options.append(f"{k.capitalize()}: {', '.join(v)}")
def milestone(self, k, v):
self.params[k] = v
self.options.append(f"{k.capitalize()}: {v}")
def status(self, k, v):
value = self._status_map.get(v)
if value is None:
raise BiteError(
f"invalid status value: {v} "
f"(available: {', '.join(sorted(self._status_map))})")
self.params[k] = value
self.options.append(f"{k.capitalize()}: {v}")
def group(self, k, v):
self.request.kwargs['endpoint'] = f'/groups/{v}/issues'
self.options.append(f"{k.capitalize()}: {v}")
def repo(self, k, v):
if self.service.group is None:
if '/' not in v:
raise BiteError(f'repo missing group: {v!r}')
repo = v
else:
repo = f'{self.service.group}/{v}'
self.request.kwargs['endpoint'] = f"/projects/{quote_plus(repo)}/issues"
self.request._repo = repo
self.options.append(f"{k.capitalize()}: {v}")
def project(self, k, v):
if self.service.group is None:
raise BiteError(f'missing group')
repo = f'{self.service.group}/{v}'
self.request.kwargs['endpoint'] = f"/projects/{quote_plus(repo)}/issues"
self.request._repo = repo
self.options.append(f"{k.capitalize()}: {v}")
@alias('modified')
def created(self, k, v):
field = 'updated' if k == 'modified' else k
if not isinstance(v, TimeInterval):
v = TimeInterval(v)
start, end = v
if start:
self.params[f'{field}_after'] = start.isoformat()
if end:
self.params[f'{field}_before'] = end.isoformat()
self.options.append(f'{k.capitalize()}: {v}')
# TODO: move to using search API
@req_cmd(Gitlab, cmd='project_search')
class _ProjectSearchRequest(ParseRequest, GitlabPagedRequest):
"""Construct a project search request."""
def __init__(self, **kw):
if kw['service'].group is not None and kw['service'].repo is None:
self.endpoint = f"/groups/{kw['service'].group}/projects"
else:
self.endpoint = '/projects'
super().__init__(endpoint=self.endpoint, **kw)
def parse(self, data):
projects = list(super().parse(data))
for project in projects:
yield GitlabProject(**project)
@aliased
class ParamParser(ParseRequest.ParamParser):
def _finalize(self, **kw):
if not self.params:
raise BiteError('no supported search terms or options specified')
# show issues in ascending order by default
self.params.setdefault('sort', 'asc')
def terms(self, k, v):
self.params['search'] = v
self.options.append(f"Summary: {', '.join(v)}")
|
the-stack_0_9955 | # pylint: disable=wildcard-import, unused-wildcard-import
"""Model store which handles pretrained models from both
mxnet.gluon.model_zoo.vision and gluoncv.models
"""
from mxnet import gluon
from .ssd import *
from .faster_rcnn import *
from .fcn import *
from .pspnet import *
from .cifarresnet import *
from .cifarresnext import *
from .cifarwideresnet import *
from .resnetv1b import *
from .resnext import *
from .senet import *
from .se_resnet import *
from .yolo import *
from .nasnet import *
__all__ = ['get_model', 'get_model_list']
_models = {
'ssd_300_vgg16_atrous_voc': ssd_300_vgg16_atrous_voc,
'ssd_300_vgg16_atrous_coco': ssd_300_vgg16_atrous_coco,
'ssd_300_vgg16_atrous_custom' : ssd_300_vgg16_atrous_custom,
'ssd_512_vgg16_atrous_voc': ssd_512_vgg16_atrous_voc,
'ssd_512_vgg16_atrous_coco': ssd_512_vgg16_atrous_coco,
'ssd_512_vgg16_atrous_custom': ssd_512_vgg16_atrous_custom,
'ssd_512_resnet18_v1_voc': ssd_512_resnet18_v1_voc,
'ssd_512_resnet18_v1_coco': ssd_512_resnet18_v1_coco,
'ssd_512_resnet50_v1_voc': ssd_512_resnet50_v1_voc,
'ssd_512_resnet50_v1_coco': ssd_512_resnet50_v1_coco,
'ssd_512_resnet50_v1_custom': ssd_512_resnet50_v1_custom,
'ssd_512_resnet101_v2_voc': ssd_512_resnet101_v2_voc,
'ssd_512_resnet152_v2_voc': ssd_512_resnet152_v2_voc,
'ssd_512_mobilenet1_0_voc': ssd_512_mobilenet1_0_voc,
'ssd_512_mobilenet1_0_coco': ssd_512_mobilenet1_0_coco,
'ssd_512_mobilenet1_0_custom': ssd_512_mobilenet1_0_custom,
'faster_rcnn_resnet50_v1b_voc': faster_rcnn_resnet50_v1b_voc,
'faster_rcnn_resnet50_v1b_coco': faster_rcnn_resnet50_v1b_coco,
'cifar_resnet20_v1': cifar_resnet20_v1,
'cifar_resnet56_v1': cifar_resnet56_v1,
'cifar_resnet110_v1': cifar_resnet110_v1,
'cifar_resnet20_v2': cifar_resnet20_v2,
'cifar_resnet56_v2': cifar_resnet56_v2,
'cifar_resnet110_v2': cifar_resnet110_v2,
'cifar_wideresnet16_10': cifar_wideresnet16_10,
'cifar_wideresnet28_10': cifar_wideresnet28_10,
'cifar_wideresnet40_8': cifar_wideresnet40_8,
'cifar_resnext29_32x4d': cifar_resnext29_32x4d,
'cifar_resnext29_16x64d': cifar_resnext29_16x64d,
'fcn_resnet50_voc': get_fcn_voc_resnet50,
'fcn_resnet101_voc': get_fcn_voc_resnet101,
'fcn_resnet50_ade': get_fcn_ade_resnet50,
'psp_resnet50_ade': get_psp_ade_resnet50,
'resnet18_v1b': resnet18_v1b,
'resnet34_v1b': resnet34_v1b,
'resnet50_v1b': resnet50_v1b,
'resnet101_v1b': resnet101_v1b,
'resnet152_v1b': resnet152_v1b,
'resnet50_v1c': resnet50_v1c,
'resnet101_v1c': resnet101_v1c,
'resnet152_v1c': resnet152_v1c,
'resnet50_v1d': resnet50_v1d,
'resnet101_v1d': resnet101_v1d,
'resnet152_v1d': resnet152_v1d,
'resnext50_32x4d': resnext50_32x4d,
'resnext101_32x4d': resnext101_32x4d,
'resnext101_64x4d': resnext101_64x4d,
'se_resnext50_32x4d': se_resnext50_32x4d,
'se_resnext101_32x4d': se_resnext101_32x4d,
'se_resnext101_64x4d': se_resnext101_64x4d,
'senet_52': senet_52,
'senet_103': senet_103,
'senet_154': senet_154,
'se_resnet18_v1': se_resnet18_v1,
'se_resnet34_v1': se_resnet34_v1,
'se_resnet50_v1': se_resnet50_v1,
'se_resnet101_v1': se_resnet101_v1,
'se_resnet152_v1': se_resnet152_v1,
'se_resnet18_v2': se_resnet18_v2,
'se_resnet34_v2': se_resnet34_v2,
'se_resnet50_v2': se_resnet50_v2,
'se_resnet101_v2': se_resnet101_v2,
'se_resnet152_v2': se_resnet152_v2,
'darknet53': darknet53,
'yolo3_darknet53_coco': yolo3_darknet53_coco,
'yolo3_darknet53_voc': yolo3_darknet53_voc,
'yolo3_darknet53_custom': yolo3_darknet53_custom,
'nasnet_4_1056': nasnet_4_1056,
'nasnet_5_1538': nasnet_5_1538,
'nasnet_7_1920': nasnet_7_1920,
'nasnet_6_4032': nasnet_6_4032,
}
def get_model(name, **kwargs):
"""Returns a pre-defined model by name
Parameters
----------
name : str
Name of the model.
pretrained : bool
Whether to load the pretrained weights for model.
classes : int
Number of classes for the output layer.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
Returns
-------
HybridBlock
The model.
"""
try:
net = gluon.model_zoo.vision.get_model(name, **kwargs)
return net
except ValueError as e:
upstream_supported = str(e)
# avoid raising inside which cause a bit messy error message
name = name.lower()
if name not in _models:
raise ValueError('%s\n\t%s' % (upstream_supported, '\n\t'.join(sorted(_models.keys()))))
net = _models[name](**kwargs)
return net
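# Usage sketch (the name must be a key of `_models` or a model known to
# mxnet.gluon.model_zoo.vision):
#
# net = get_model('cifar_resnet20_v1', classes=10, pretrained=False)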
def get_model_list():
"""Get the entire list of model names in model_zoo.
Returns
-------
list of str
Entire list of model names in model_zoo.
"""
return _models.keys()
|
the-stack_0_9957 | from functools import wraps
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.losses import (
BinaryCrossentropy,
CategoricalCrossentropy,
MeanAbsoluteError,
MeanSquaredError,
)
def distributed_sum_over_batch_size(batch_size: int):
def _distributed_sum_over_batch_size(function):
@wraps(function)
def wrapper(*args, **kwargs):
output_tensor = function(*args, **kwargs)
return tf.nn.compute_average_loss(
output_tensor, global_batch_size=batch_size
)
return wrapper
return _distributed_sum_over_batch_size
def distributed_mean(function):
@wraps(function)
def wrapper(*args, **kwargs):
output_tensor = function(*args, **kwargs)
return tf.math.reduce_mean(output_tensor) / 2
return wrapper
@tf.function
def apply_softmax(logits):
return keras.activations.softmax(logits)
@distributed_mean
@tf.function
def compute_l1_loss(fake_outputs, ground_truth):
return MeanAbsoluteError(reduction=keras.losses.Reduction.NONE)(
ground_truth, fake_outputs
)
@distributed_mean
@tf.function
def compute_binary_crossentropy(y_true, y_predicted) -> float:
"""Compute Binary Categorical Crossentropy.
### Parameters:
y_true:
y_predicted:
### Returns:
the computed loss.
"""
return BinaryCrossentropy(
from_logits=False,
reduction=keras.losses.Reduction.NONE,
)(y_true, y_predicted)
@tf.function
def compute_categorical_crossentropy(logits, labels) -> float:
"""Compute Sparse Categorical Crossentropy.
### Parameters:
logits: the logits
labels: the labels
### Returns:
Computed loss.
"""
# return CategoricalCrossentropy()( # reduction=keras.losses.Reduction.NONE)(
# logits, labels
# )
return CategoricalCrossentropy(reduction=keras.losses.Reduction.NONE)(
labels, logits
)
@distributed_mean
@tf.function
def compute_euclidean_distance(fake_outputs, ground_truth) -> float:
return MeanSquaredError(reduction=keras.losses.Reduction.NONE)(
ground_truth, fake_outputs
)
@tf.function
def normalize(logits, axis: int = None, name: str = None):
normalized = tf.linalg.normalize(logits, ord="euclidean", axis=axis, name=name)[0]
return tf.squeeze(normalized)
|
the-stack_0_9958 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Download and build the data if it does not exist.
import parlai.core.build_data as build_data
import gzip
import os
import re
from parlai.core.build_data import DownloadableFile
from parlai.utils.io import PathManager
RESOURCES = [
DownloadableFile(
'http://opus.lingfil.uu.se/download.php?f=OpenSubtitles/en.tar.gz',
'OpenSubtitles.tar.gz',
'aef6d57db36c221b8cff1cf2356309874c27ef6a36bb8ca714509b37d0db29bc',
)
]
def _regularize(sent):
sent = sent.replace('i>', '').replace('<', '').replace('>', '')
sent = re.sub(r'x[0-9|a-f][0-9|a-f]', ' ', sent)
sent = sent.replace('\\', '').replace('-', '')
sent = ' '.join(re.findall(r"[\w']+|[.,!?:;]", sent))
sent = sent.replace('. .', '...')
sent = ' '.join(sent.split())
return sent
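# Example: _regularize("<i>Hello, world!</i>") yields roughly
# "Hello , world !" -- markup characters are stripped and punctuation is
# split into separate, space-separated tokens.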
def create_fb_format(inpath, outpath):
print('[building fbformat]')
with PathManager.open(
os.path.join(outpath, 'train.txt'), 'w'
) as ftrain, PathManager.open(
os.path.join(outpath, 'valid.txt'), 'w'
) as fvalid, PathManager.open(
os.path.join(outpath, 'test.txt'), 'w'
) as ftest:
conv_id = 0
# find all the files.
for root, _subfolder, files in os.walk(inpath):
for f in files:
if f.endswith('.gz'):
dialog = []
conv_id = conv_id + 1
with gzip.open(os.path.join(root, f), 'r') as f1:
words = []
line_id = 1
turn_id = 0
for line in f1:
line = str(line)
if line.find('<s id="') != -1:
# new sentence
if len(words) > 0:
curr_words = _regularize(''.join(words))
if len(curr_words) > 0:
if (turn_id % 2) == 0:
dialog.append(str(line_id))
dialog.append(' ')
dialog.append(curr_words)
else:
dialog.append('\t')
dialog.append(curr_words)
dialog.append('\n')
line_id += 1
turn_id += +1
words.clear()
else:
i1 = line.find('<w id="')
if i1 >= 0:
line = line[i1:]
word = line[line.find('>') + 1 : line.find('</w')]
words.append(' ')
words.append(word.replace('\t', ' '))
handle = ftrain
if (conv_id % 10) == 0:
handle = ftest
if (conv_id % 10) == 1:
handle = fvalid
dialog.append('\n')
handle.write(''.join(dialog))
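# The generated "fbformat" pairs consecutive subtitle turns as prompt and
# response, one numbered exchange per line, roughly:
#
# 1 are you coming ?\tyes , in a minute .
# 2 good .\tsee you later .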
def build(datapath):
dpath = os.path.join(datapath, 'OpenSubtitles')
version = '2'
if not build_data.built(dpath, version_string=version):
print('[building data: ' + dpath + ']')
if build_data.built(dpath):
# An older version exists, so remove these outdated files.
build_data.remove_dir(dpath)
build_data.make_dir(dpath)
# Download the data.
for downloadable_file in RESOURCES:
downloadable_file.download_file(dpath)
create_fb_format(os.path.join(dpath, 'OpenSubtitles', 'en'), dpath)
# Mark the data as built.
build_data.mark_done(dpath, version_string=version)
return dpath
|
the-stack_0_9959 | import itertools as it, operator as op, functools as ft
from xml.sax.saxutils import escape as xml_escape
import html.parser, html.entities
import os, re, collections as cs, urllib.request as ulr
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('Gdk', '3.0')
from gi.repository import Gtk, Gdk, GdkPixbuf, GLib, Pango
from . import core
import logging
log = logging.getLogger(__name__)
class MarkupToText(html.parser.HTMLParser):
def handle_starttag(self, tag, attrs): pass
def handle_endtag(self, tag): pass
def handle_entityref(self, ref): self.d.append(f'&{ref};')
def handle_charref(self, ref): self.d.append(f'&#{ref};')
def handle_data(self, data): self.d.append(data)
def __call__(self, s):
self.d = list()
self.feed(s)
return ''.join(self.d).strip()
strip_markup = MarkupToText()
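# Example: strip_markup('<b>2 new</b> messages in <i>inbox</i>') returns
# '2 new messages in inbox'; entity and character references (e.g. &amp;)
# are kept intact so they survive a later Pango re-parse.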
class NotificationDisplay:
'''Interface to display notification stack.
Should have "display(note, cb_dismiss=None) -> nid(UInt32, >0)", "close(nid)"
methods and NoWindowError(nid) exception, raised on erroneous nid's in close().
Current implementation based on notipy: git://github.com/the-isz/notipy.git'''
window = cs.namedtuple('Window', 'gobj event_boxes')
base_css = b'''
#notification { background: transparent; }
#notification #frame { background-color: #d4ded8; padding: 3px; }
#notification #hs { background-color: black; }
#notification #critical { background-color: #ffaeae; }
#notification #normal { background-color: #f0ffec; }
#notification #low { background-color: #bee3c6; }
#notification #summary {
color: black;
padding-left: 5px;
font-size: 1.2em;
text-shadow: 1px 1px 0px gray;
}
#notification #body { color: black; font-size: 1em; }
#notification #body * { color: black; background-color: #d4ded8; }
'''
base_css_min = b'#notification * { font-size: 8; }' # simplest fallback
def __init__( self, layout_margin,
layout_anchor, layout_direction, icon_scale=dict(),
markup_default=False, markup_warn=False, markup_strip=False ):
self.margins = dict(it.chain.from_iterable(map(
lambda ax: ( (2**ax, layout_margin),
(-2**ax, layout_margin) ), range(2) )))
self.layout_anchor = layout_anchor
self.layout_direction = layout_direction
self.icon_scale = icon_scale
self.markup_default = markup_default
self.markup_warn, self.markup_strip = markup_warn, markup_strip
self._windows = dict()
self._default_style = self._get_default_css()
screen = Gdk.Screen.get_default()
if not screen: raise core.StartupFailure('No X screen detected')
Gtk.StyleContext.add_provider_for_screen(
screen, self._default_style,
Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION )
def _pango_markup_parse(self, text, _err_mark='[TN82u8] '):
try:
success, _, text, _ = Pango.parse_markup(text, -1, '\0')
if not success: raise GLib.GError('pango_parse_markup failure')
except GLib.GError as err:
success = False # should be rendered as text
if self.markup_warn:
msg_start = f'{_err_mark}Pango formatting failed'
if msg_start not in text: # detect and avoid possible feedback loops
log.warn('%s (%s) for text, stripping markup: %r', msg_start, err, text)
if self.markup_strip: # strip + re-parse to convert xml entities and such
text = strip_markup(text)
try: _, _, text, _ = Pango.parse_markup(text, -1, '\0')
except GLib.GError: pass
return success, text
def _get_default_css(self):
css, base_css = Gtk.CssProvider(), self.base_css
for attempt in range(6):
try: css.load_from_data(base_css)
except GLib.GError as err:
log.warn('Failed to load default CSS style (try %s): %s', attempt+1, err)
# print(base_css)
else: break
# Try to work around https://bugzilla.gnome.org/show_bug.cgi?id=678876 and similar issues
if attempt == 0:
base_css = re.sub(br'\b(background-color:)\s*rgba\([^;]+;', br'\1 white;', base_css)
elif attempt == 1:
base_css = re.sub(br'\b(font-size:)\s*(\d+)px\s*;', br'\1 \2;', base_css)
elif attempt == 2:
base_css = re.sub(br'\b(text-shadow:)[^;]+;', br'\1 1 1 0 gray;', base_css)
elif attempt == 3: base_css = re.sub(br'\btext-shadow:[^;]+;', b'', base_css)
elif attempt == 4: base_css = self.base_css_min # last resort before no-css-at-all
else: break # don't load any css
return css
def _update_layout(self):
# Get the coordinates of the "anchor" corner (screen corner +/- margins)
base = tuple(map(
lambda ax, gdk_dim=('width', 'height'):\
(getattr(Gdk.Screen, gdk_dim[ax])() - self.margins[2**ax])\
if 2**ax & self.layout_anchor else self.margins[-2**ax], range(2) ))
# Iterate over windows in order, placing each one starting from a "base" corner
for win in map(op.attrgetter('gobj'), self._windows.values()):
win.move(*map(lambda ax: base[ax] - ( win.get_size()[ax]
if 2**ax & self.layout_anchor else 0 ), range(2)))
margin = self.margins[(2 * ( (2**self.layout_direction)
& self.layout_anchor ) / 2**self.layout_direction - 1) * 2**self.layout_direction]
base = tuple(map(
lambda ax: base[ax] if self.layout_direction != ax else\
base[ax] + (margin + win.get_size()[ax])\
* (2 * (2**ax ^ (2**ax & self.layout_anchor)) / 2**ax - 1), range(2) ))
def _get_icon(self, icon, remote=False):
widget_icon = None
if icon is not None:
if isinstance(icon, str):
icon_path = os.path.expanduser(ulr.url2pathname(icon))
if icon_path.startswith('file://'): icon_path = icon_path[7:]
if os.path.isfile(icon_path):
widget_icon = GdkPixbuf.Pixbuf.new_from_file(icon_path)
else:
# Available names: Gtk.IconTheme.get_default().list_icons(None)
theme = Gtk.IconTheme.get_default()
icon_size = any(self.icon_scale.get('fixed', list())) or 32
widget_icon = theme.lookup_icon(
icon, icon_size, Gtk.IconLookupFlags.USE_BUILTIN )
if widget_icon: widget_icon = widget_icon.load_icon()
else:
# Msgs from remote hosts naturally can have non-local icon paths in them
(log.warn if not remote else log.debug)(
'Provided icon info seem to be neither valid icon file nor'
' a name in a freedesktop.org-compliant icon theme (or current theme'
' does not have that one), ignoring it: %r', core.format_trunc(icon) )
else:
w, h, rowstride, has_alpha, bits_per_sample, channels, data = icon
data = bytes(bytearray(data))
widget_icon = GdkPixbuf.Pixbuf.new_from_data(
data, GdkPixbuf.Colorspace.RGB, bool(has_alpha),
int(bits_per_sample), int(w), int(h), int(rowstride) )
widget_icon._data = data # must be preserved from gc
if widget_icon:
if any(it.chain.from_iterable(self.icon_scale.values())): # scale icon
w, h = widget_icon.get_width(), widget_icon.get_height()
for k in 'fixed', 'min', 'max':
box_w, box_h = self.icon_scale.get(k, (0, 0))
if not any([box_w, box_h]): continue
if k == 'min' and not ((box_w and w < box_w) or (box_h and h < box_h)): continue
if k == 'max' and not ((box_w and w > box_w) or (box_h and h > box_h)): continue
scale_down = (box_w and w > box_w) or (box_h and h > box_h)
if scale_down: scale = min # factor<1, unspec=1, must fit on both dimensions
elif box_w and box_h: scale = min # factor>1, but still pick min to fit on both
else: scale = max # ignore unspec=1 and scale to max possible factor
scale = scale(float(box_w or w) / w, float(box_h or h) / h)
box_w, box_h = w * scale, h * scale
log.debug( 'Scaling image (%s, criteria: %s) by a factor of'
' %.3f: %dx%d -> %dx%d', ['up', 'down'][scale_down], k, scale, w, h, box_w, box_h )
widget_icon = widget_icon.scale_simple(box_w, box_h, GdkPixbuf.InterpType.BILINEAR)
if k == 'fixed': break # no need to apply min/max after that
widget_icon, pixbuf = Gtk.Image(), widget_icon
widget_icon.set_from_pixbuf(pixbuf)
return widget_icon
def _set_visual(self, win, ev=None):
visual = win.get_screen().get_rgba_visual()
if visual: win.set_visual(visual)
def _create_win( self, summary, body,
icon=None, urgency_label=None, markup=False, remote=None ):
log.debug( 'Creating window with parameters: %s',
core.repr_trunc_rec(dict( summary=summary, body=body,
icon=icon, urgency=urgency_label, markup=markup )) )
win = Gtk.Window(name='notification', type=Gtk.WindowType.POPUP)
win.set_default_size(400, 20)
win.connect('screen-changed', self._set_visual)
self._set_visual(win)
ev_boxes = [win]
frame = Gtk.Box(name='frame')
win.add(frame)
try: widget_icon = self._get_icon(icon, remote=remote)
except Exception: # Gdk may raise errors for some images/formats
log.exception('Failed to set notification icon')
widget_icon = None
box_margin = 3
v_box = Gtk.VBox(spacing=box_margin, expand=False)
if widget_icon is not None:
h_box = Gtk.HBox(spacing=box_margin * 2)
frame.pack_start(h_box, True, True, 0)
h_box.pack_start(widget_icon, False, False, 0)
h_box.pack_start(v_box, True, True, 0)
ev_boxes.append(h_box)
else: frame.pack_start(v_box, True, True, 0)
widget_summary = Gtk.Label(name='summary')
# Sanitize tags through pango first, so set_markup won't produce empty label
markup_summary = markup
if markup_summary:
markup_summary, text = self._pango_markup_parse(summary)
if markup_summary: widget_summary.set_markup(summary)
else: summary = text
if not markup_summary: widget_summary.set_text(summary)
widget_summary.set_alignment(0, 0)
if urgency_label:
summary_box = Gtk.EventBox(name=urgency_label)
summary_box.add(widget_summary)
else: summary_box = widget_summary
v_box.pack_start(summary_box, False, False, 0)
ev_boxes.append(summary_box)
v_box.pack_start(Gtk.HSeparator(name='hs'), False, False, 0)
widget_body = Gtk.TextView( name='body',
wrap_mode=Gtk.WrapMode.WORD_CHAR,
cursor_visible=False, editable=False )
widget_body_buffer = widget_body.get_buffer()
# Same as with summary - sanitize tags through pango first
markup_body = markup
if markup_body:
markup_body, text = self._pango_markup_parse(body)
if markup_body:
cursor = widget_body_buffer.get_end_iter()
widget_body_buffer.insert_markup(cursor, body, -1)
else: body = text
if not markup_body: widget_body_buffer.set_text(body)
v_box.pack_start(widget_body, True, True, 0)
ev_boxes.append(widget_body)
# Make sure the window is initially drawn off-screen, because it can't be
        # placed properly until its size is known, and its size is unknown until it's
        # actually handled by the window manager and then drawn by X
# Proper placement is done on update_layout() call
win.move(-2000, -2000)
win.show_all()
return self.window(win, ev_boxes)
def get_note_markup(self, note):
return note.hints.get('x-nt-markup', self.markup_default)
def get_note_text(self, note):
'Returns note text, stripped of all markup, if any (and if enabled).'
markup, summary, body = self.get_note_markup(note), note.summary, note.body
if markup:
_, summary = self._pango_markup_parse(summary)
_, body = self._pango_markup_parse(body)
return summary, body
def display(self, note, cb_dismiss=None, cb_hover=None, cb_leave=None):
try:
# Priorities for icon sources:
# image{-,_}data: hint. raw image data structure of signature (iiibiiay)
# image{-,_}path: hint. either an URI (file://...) or a name in a f.o-compliant icon theme
# app_icon: parameter. same as image-path
# icon_data: hint. same as image-data
            # image_* are deprecated hints from the 1.1 spec, 1.2 is preferred
            #  (they don't even seem to be mentioned in the 1.2 spec's icon priorities section)
hints = note.hints.copy()
k = '__app_icon' # to avoid clobbering anything
hints[k] = note.icon
for k in 'image-data', 'image_data',\
'image-path', 'image_path', k, 'icon_data':
image = hints.get(k)
if image:
log.debug('Got icon image from hint: %s', k)
break
urgency = note.hints.get('urgency')
if urgency is not None: urgency = core.urgency_levels.by_id(int(urgency))
markup = self.get_note_markup(note)
win = self._create_win( note.summary, note.body,
image, urgency, markup=markup, remote=note.hints.get('x-nt-from-remote') )
for eb in win.event_boxes:
eb.add_events(
Gdk.EventMask.BUTTON_PRESS_MASK
| Gdk.EventMask.POINTER_MOTION_MASK
| Gdk.EventMask.LEAVE_NOTIFY_MASK )
for ev,cb in [
('button-press-event', cb_dismiss),
('motion-notify-event', cb_hover),
('leave-notify-event', cb_leave) ]:
if cb: eb.connect(ev, lambda w,ev,cb,nid: cb(nid), cb, note.id)
if cb_dismiss and win.event_boxes:
# Connect only to window object (or first eventbox in the list)
win.event_boxes[0].connect( 'destroy',
lambda w,cb,nid: cb(nid), cb_dismiss, note.id )
# update_layout() *must* be delayed until window "configure-event", because
# actual window size is unknown until it's resized by window manager and drawn by X
# See the list of caveats here:
# http://developer.gnome.org/gtk3/unstable/GtkWindow.html#gtk-window-get-size
win.gobj.connect('configure-event', lambda w,void: self._update_layout())
self._windows[note.id] = win
except: log.exception('Failed to create notification window')
class NoWindowError(Exception): pass
def _close(self, nid):
try: win = self._windows.pop(nid).gobj
except KeyError: raise self.NoWindowError(nid)
win.hide(), win.destroy()
def close(self, nid):
self._close(nid)
self._update_layout()
|
the-stack_0_9960 | # =============================================================================
# PROJECT CHRONO - http://projectchrono.org
#
# Copyright (c) 2021 projectchrono.org
# All rights reserved.
#
# Use of this source code is governed by a BSD-style license that can be found
# in the LICENSE file at the top level of the distribution and at
# http://projectchrono.org/license-chrono.txt.
#
# =============================================================================
# Authors: Radu Serban
# =============================================================================
#
# Main driver function for the FED-alpha full model.
#
# The vehicle reference frame has Z up, X towards the front of the vehicle, and
# Y pointing to the left.
#
# =============================================================================
import pychrono as chrono
import pychrono.vehicle as veh
import pychrono.irrlicht as irr
import math as m
# =============================================================================
def main():
#print("Copyright (c) 2017 projectchrono.org\nChrono version: ", CHRONO_VERSION , "\n\n")
# Create systems
# Create the FEDA vehicle, set parameters, and initialize
my_feda = veh.FEDA()
my_feda.SetContactMethod(contact_method)
my_feda.SetChassisCollisionType(chassis_collision_type)
my_feda.SetChassisFixed(False)
my_feda.SetInitPosition(chrono.ChCoordsysD(initLoc, initRot))
my_feda.SetPowertrainType(powertrain_model)
my_feda.SetTireType(tire_model)
my_feda.SetTireStepSize(tire_step_size)
my_feda.Initialize()
my_feda.SetChassisVisualizationType(chassis_vis_type)
my_feda.SetSuspensionVisualizationType(suspension_vis_type)
my_feda.SetSteeringVisualizationType(steering_vis_type)
my_feda.SetWheelVisualizationType(wheel_vis_type)
my_feda.SetTireVisualizationType(tire_vis_type)
# Create the terrain
terrain = veh.RigidTerrain(my_feda.GetSystem())
if (contact_method == chrono.ChContactMethod_NSC):
patch_mat = chrono.ChMaterialSurfaceNSC()
patch_mat.SetFriction(0.9)
patch_mat.SetRestitution(0.01)
elif (contact_method == chrono.ChContactMethod_SMC):
patch_mat = chrono.ChMaterialSurfaceSMC()
patch_mat.SetFriction(0.9)
patch_mat.SetRestitution(0.01)
patch_mat.SetYoungModulus(2e7)
patch = terrain.AddPatch(patch_mat,
chrono.ChVectorD(0, 0, 0), chrono.ChVectorD(0, 0, 1),
terrainLength, terrainWidth)
patch.SetTexture(veh.GetDataFile("terrain/textures/tile4.jpg"), 200, 200)
patch.SetColor(chrono.ChColor(0.8, 0.8, 0.5))
terrain.Initialize()
# Create the vehicle Irrlicht interface
vis = veh.ChWheeledVehicleVisualSystemIrrlicht()
my_feda.GetVehicle().SetVisualSystem(vis)
vis.SetWindowTitle('FED-Alpha')
vis.SetWindowSize(1280, 1024)
vis.SetChaseCamera(trackPoint, 6.0, 0.5)
vis.Initialize()
vis.AddLogo(chrono.GetChronoDataFile('logo_pychrono_alpha.png'))
vis.AddTypicalLights()
vis.AddSkyBox()
# Create the interactive driver system
driver = veh.ChIrrGuiDriver(vis)
# Set the time response for steering and throttle keyboard inputs.
steering_time = 1.0 # time to go from 0 to +1 (or from 0 to -1)
throttle_time = 1.0 # time to go from 0 to +1
braking_time = 0.3 # time to go from 0 to +1
driver.SetSteeringDelta(10 * step_size / steering_time)
driver.SetThrottleDelta(10 * step_size / throttle_time)
driver.SetBrakingDelta(10 * step_size / braking_time)
driver.Initialize()
# Simulation loop
realtime_timer = chrono.ChRealtimeStepTimer()
while vis.Run() :
time = my_feda.GetSystem().GetChTime()
vis.BeginScene()
vis.DrawAll()
vis.EndScene()
# Get driver inputs
driver_inputs = driver.GetInputs()
# Update modules (process inputs from other modules)
driver.Synchronize(time)
terrain.Synchronize(time)
my_feda.Synchronize(time, driver_inputs, terrain)
vis.Synchronize(driver.GetInputModeAsString(), driver_inputs)
# Advance simulation for one timestep for all modules
driver.Advance(step_size)
terrain.Advance(step_size)
my_feda.Advance(step_size)
vis.Advance(step_size)
# Spin in place for real time to catch up
realtime_timer.Spin(step_size)
return 0
# The path to the Chrono data directory containing various assets (meshes, textures, data files)
# is automatically set, relative to the default location of this demo.
# If running from a different directory, you must change the path to the data directory with:
#chrono.SetChronoDataPath('path/to/data')
veh.SetDataPath(chrono.GetChronoDataPath() + 'vehicle/')
# Initial vehicle location and orientation
initLoc = chrono.ChVectorD(0, 0, 0.5)
initRot = chrono.ChQuaternionD(1, 0, 0, 0)
# Visualization type for vehicle parts (PRIMITIVES, MESH, or NONE)
chassis_vis_type = veh.VisualizationType_MESH
suspension_vis_type = veh.VisualizationType_PRIMITIVES
steering_vis_type = veh.VisualizationType_PRIMITIVES
wheel_vis_type = veh.VisualizationType_MESH
tire_vis_type = veh.VisualizationType_MESH
# Collision type for chassis (PRIMITIVES, MESH, or NONE)
chassis_collision_type = veh.CollisionType_NONE
# Type of powertrain model (SHAFTS, SIMPLE_MAP)
powertrain_model = veh.PowertrainModelType_SIMPLE_MAP
# Type of tire model (RIGID, PAC02)
tire_model = veh.TireModelType_PAC02
# Rigid terrain
terrainHeight = 0      # terrain height (FLAT terrain only)
terrainLength = 100.0  # size in X direction
terrainWidth = 100.0   # size in Y direction
# Point on chassis tracked by the camera
trackPoint = chrono.ChVectorD(0.0, 0.0, 1.75)
# Contact method
contact_method = chrono.ChContactMethod_SMC
# Simulation step sizes
step_size = 1e-3
tire_step_size = 1e-3
main()
|
the-stack_0_9962 | from django import forms
from .models import Comment, Review
class ReviewForm(forms.ModelForm):
class Meta:
model = Review
fields = ('rating', 'experience', 'description')
def clean_rating(self):
data = self.cleaned_data['rating']
        if not 1 <= data <= 5:
            raise forms.ValidationError("Please choose a rating")
return data
class CommentForm(forms.ModelForm):
class Meta:
model = Comment
fields = ('body',)
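# Minimal usage sketch (the surrounding view wiring is an assumption, not part of this module):
#   form = ReviewForm(data={'rating': 4, 'experience': '...', 'description': '...'})
#   form.is_valid()   # runs clean_rating(), rejecting ratings outside the 1-5 range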
|
the-stack_0_9964 | # Read file, create an array of its values after removing the \n from each line
with open('input') as file:
values = file.readlines()
values = [int(value.strip()) for value in values]
# PART 1:
# Create a hashmap of every value's complements to reach 2020.
# When you encounter that complement in the future, multiply the two together and print them.
complements = dict()
for i, value in enumerate(values):
try:
print(f'{values[complements[value]]} * {value} = {value * values[complements[value]]}')
except KeyError:
complements[2020 - value] = i
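# Worked example: with values [1721, 979, 366, 299], seeing 1721 stores complements[299] = 0;
# when 299 is reached later the lookup succeeds and 1721 * 299 = 514579 is printed.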
# In PART 2 I'm going to use the filter() and combinations() functions
from itertools import combinations
import math
def check(val):
return sum(val) == 2020
triples = list(filter(check, combinations(values, 3)))
for triple in triples:
    print(f'{triple[0]} * {triple[1]} * {triple[2]} = {math.prod(triple)}')
|
the-stack_0_9965 | #!/usr/bin/env python
from __future__ import division
from __future__ import print_function
# from keras.layers.merge import Concatenate, Add, Dot, Multiply
import glob
import os
import zipfile
import keras
import numpy as np
import tensorflow as tf
from PIL import Image
from keras import backend as K
from sklearn.metrics import classification_report, confusion_matrix
from tensorflow.python.lib.io import file_io
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants, signature_constants
from tensorflow.python.saved_model.signature_def_utils_impl import predict_signature_def
LABEL_SET = ['left', 'right', 'up', 'down', 'center', 'double_blink']
def normalize_image(img):
# return (img - 127.5) / 127.5
return (img.astype(np.float32) - 127.5) / 127.5
def denormalize_image(img):
result = img * 127.5 + 127.5
return result.astype(np.uint8)
NORMALIZE = False
def to_savedmodel(model, export_path):
"""Convert the Keras HDF5 model into TensorFlow SavedModel."""
builder = saved_model_builder.SavedModelBuilder(export_path)
signature = predict_signature_def(inputs={'input': model.inputs[0]},
outputs={'income': model.outputs[0]})
with K.get_session() as sess:
builder.add_meta_graph_and_variables(
sess=sess,
tags=[tag_constants.SERVING],
signature_def_map={
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature}
)
builder.save()
def session_to_savedmodel(session, inputs, outputs, export_path):
"""Convert the Keras HDF5 model into TensorFlow SavedModel."""
builder = saved_model_builder.SavedModelBuilder(export_path)
signature = predict_signature_def(inputs={'inputs': inputs},
outputs={'outputs': outputs})
builder.add_meta_graph_and_variables(
sess=session,
tags=[tag_constants.SERVING],
signature_def_map={
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature}
)
builder.save()
def session_from_savedmodel(session, export_dir):
tf.saved_model.loader.load(session, [tag_constants.SERVING], export_dir)
def compare_url(a, b):
ia = int(a.split('/')[-1].replace('img_', '').split('.')[0])
prefix_a = '/'.join(a.split('/')[:-1])
ib = int(b.split('/')[-1].replace('img_', '').split('.')[0])
prefix_b = '/'.join(b.split('/')[:-1])
if prefix_a == prefix_b:
return ia - ib
elif prefix_a > prefix_b:
return 1
    else:
        return -1
# key compare urls
OFFSET_BATCH = 1000000
def key_compare_url(a):
ia = int(a.split('/')[-1].replace('img_', '').split('.')[0])
batch_num = int(a.split('/')[-2].replace('batch_', ''))
# prefix_a = '/'.join(a.split('/')[:-1])
return batch_num * OFFSET_BATCH + ia
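# Worked example of the sort key above (path is hypothetical):
#   '.../batch_2/img_15.jpg' -> 2 * 1000000 + 15 = 2000015, so frames sort by batch, then by frame index.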
def load_npz(url):
files = np.load(url)
return files['X'], files['y']
# big problem with sorting data!!!!!!
def load_dataset(base_urls, label_set, sequence_length=15, get_zip=True):
globs = {}
print(base_urls)
zips = {}
zip_dirs = {}
if os.path.isdir(base_urls[0]):
get_zip = False
if not get_zip:
for l in label_set:
globs[l] = []
for d in base_urls:
# print ('Open folder label {} in {}'.format(l, d))
path = os.path.join(d, l)
# print (path)
globs[l] += glob.glob('{dir}/*/*.jpg'.format(dir=path))
globs[l].sort(compare_url)
else:
for d in base_urls:
zips[d] = zipfile.ZipFile(retrieve_file(d), 'r')
# zips[d] = GzipFile(d, 'r+b')
zip_dirs[d] = {}
z_namelist = [n for n in zips[d].namelist() if n.split(".")[-1].lower() == 'jpg']
for l in label_set:
zip_dirs[d][l] = [n for n in z_namelist if l in n]
# zip_dirs[d][l].sort(compare_url)
zip_dirs[d][l].sort(key=key_compare_url)
# for u in zip_dirs[d][l]:
# print(u)
# datasets
X = []
y = []
y_raws = []
eye = np.eye(len(label_set))
for i, l in enumerate(label_set):
print('Label: {}'.format(l))
if get_zip:
for d in base_urls:
data = []
print('---Read Zip file: {}'.format(d))
for j, img_url in enumerate(zip_dirs[d][l]):
with Image.open(zips[d].open(img_url, 'r')) as img:
# img = Image.open(zips[d].open(img_url, 'r'))
if NORMALIZE:
img_array = normalize_image(np.array(img))
else:
img_array = np.array(img)
if j % sequence_length == 0 and j != 0:
# package into sequence
X.append(np.array(data))
y.append(np.array(eye[i]))
y_raws.append(l)
data = []
# else:
data.append(img_array)
else:
data = []
for j, img_url in enumerate(globs[l]):
# if j >= 61:
# break
with Image.open(img_url) as img:
# img = Image.open(img_url)
if NORMALIZE:
img_array = normalize_image(np.array(img))
else:
img_array = np.array(img)
# img_array = normalize_image(np.array(img))
if j % sequence_length == 0 and j != 0:
# package into sequence
X.append(np.array(data))
y.append(np.array(eye[i]))
y_raws.append(l)
data = []
# else:
data.append(img_array)
if get_zip:
for d in base_urls:
zips[d].close()
X = np.array(X)
y = np.array(y)
print(X.shape)
print(y.shape)
return X, y, y_raws, label_set
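# Hypothetical call (paths are placeholders); yields X of roughly shape (N, sequence_length, H, W, C):
#   X, y, y_raws, labels = load_dataset(['gs://bucket/frames_batch_0.zip'], LABEL_SET, sequence_length=15)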
# h5py workaround: copy local models over to GCS if the job_dir is GCS.
def copy_file_to_gcs(job_dir, file_path):
with file_io.FileIO(file_path, mode='r') as input_f:
with file_io.FileIO(os.path.join(job_dir, file_path), mode='w+') as output_f:
output_f.write(input_f.read())
def write_file(job_dir, file_path):
if "gs://" in file_path:
print ('Write file to: {}/{}'.format(job_dir, file_path))
# with as f:
return copy_file_to_gcs(job_dir, file_path)
else:
return open(file_path, 'r')
# read file handle opening of gsc
def retrieve_file(file_path):
if "gs://" in file_path:
print ('readata from gcs: {}'.format(file_path))
# with as f:
return file_io.FileIO(file_path, 'r')
else:
return open(file_path, 'r+b')
def after_train(model, model_name, job_dir, print_fn=print):
# def after_train(model, model_file, model_dir, train_config, label_set, model_name='cnn_', print_fn=print):
# Unhappy hack to work around h5py not being able to write to GCS.
# Force snapshots and saves to local filesystem, then copy them over to GCS.
if job_dir.startswith("gs://"):
model.save(model_name)
copy_file_to_gcs(job_dir, model_name)
else:
model.save(os.path.join(job_dir, model_name))
# Convert the Keras model to TensorFlow SavedModel
print_fn('Save model to {}'.format(job_dir))
to_savedmodel(model, os.path.join(job_dir, 'export'))
def report(true_val, pred_val, label_set, epoch=0, print_fn=print, digits=4, **kwargs):
report = classification_report(true_val, pred_val, target_names=label_set, digits=digits)
matrix = confusion_matrix(true_val, pred_val)
print_fn("----- Epoch:{} -----".format(epoch))
if 'loss' in kwargs:
print_fn('--Loss: {}'.format(kwargs['loss']))
print_fn(report)
print_fn(matrix)
class EvalCheckPoint(keras.callbacks.Callback):
def __init__(self, ml_model,
job_dir,
X, y,
label_set,
sequence_lenth,
eval_freq=4,
print_func=print,
epochs=10,
batch_norm=False
):
self.job_dir = job_dir
self.label_set = label_set
self.sequence_length = sequence_lenth
self.X_test = X
self.y_test = y
self.batch_norm = batch_norm
self.epochs = epochs
self.eval_freq = eval_freq
self.model = None
self.print_func = print_func
self.set_model(ml_model)
self.true_val = None
self.pred_val = None
self.true_val = np.array(np.argmax(self.y_test, axis=1))
def on_epoch_begin(self, epoch, logs={}):
if epoch > 0 and (epoch % self.eval_freq == 0 or epoch == self.epochs):
if self.model is not None:
# if self.batch_norm:
K.set_learning_phase(0)
pred_val = np.argmax(self.model.predict(self.X_test), axis=1)
K.set_learning_phase(1)
report(self.true_val, pred_val, self.label_set, print_fn=self.print_func)
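# Wiring sketch (model and validation arrays are assumptions): report every eval_freq epochs.
#   model.fit(X_train, y_train, epochs=10, callbacks=[
#       EvalCheckPoint(model, job_dir, X_val, y_val, LABEL_SET, 15, eval_freq=4, epochs=10)])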
|
the-stack_0_9967 | from statement import Statement
def main():
feature = "project_creation"
with open(f"./features/{feature}.feature", "r") as file, open(f"{feature}.steps.ts", "w") as outfile:
outfile.write('import { Given, When, Then, TableDefinition } from "cucumber";\n\n\n')
antecessors = []
previous_patterns = {
"Given": [],
"When": [],
"Then": [],
}
for line in file.readlines():
statement = Statement.for_line(line, outfile)
if statement:
statement.write_statement_block(antecessors, previous_patterns)
if __name__ == '__main__':
main()
|
the-stack_0_9968 | from utils import detector_utils as detector_utils
import cv2
import tensorflow as tf
import multiprocessing
from multiprocessing import Queue, Pool
import time
from utils.detector_utils import WebcamVideoStream
import datetime
import argparse
frame_processed = 0
score_thresh = 0.2
# Create a worker thread that loads graph and
# does detection on images in an input queue and puts it on an output queue
def worker(input_q, output_q, cap_params, frame_processed):
print(">> loading frozen model for worker")
detection_graph, sess = detector_utils.load_inference_graph()
sess = tf.Session(graph=detection_graph)
while True:
#print("> ===== in worker loop, frame ", frame_processed)
frame = input_q.get()
if (frame is not None):
# actual detection
boxes, scores = detector_utils.detect_objects(
frame, detection_graph, sess)
# draw bounding boxes
detector_utils.draw_box_on_image(
cap_params['num_hands_detect'], cap_params["score_thresh"], scores, boxes, cap_params['im_width'], cap_params['im_height'], frame)
# add frame annotated with bounding box to queue
output_q.put(frame)
frame_processed += 1
else:
output_q.put(frame)
sess.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-src', '--source', dest='video_source', type=int,
default=0, help='Device index of the camera.')
parser.add_argument('-nhands', '--num_hands', dest='num_hands', type=int,
default=2, help='Max number of hands to detect.')
parser.add_argument('-fps', '--fps', dest='fps', type=int,
default=1, help='Show FPS on detection/display visualization')
parser.add_argument('-wd', '--width', dest='width', type=int,
default=300, help='Width of the frames in the video stream.')
parser.add_argument('-ht', '--height', dest='height', type=int,
default=200, help='Height of the frames in the video stream.')
parser.add_argument('-ds', '--display', dest='display', type=int,
default=1, help='Display the detected images using OpenCV. This reduces FPS')
parser.add_argument('-num-w', '--num-workers', dest='num_workers', type=int,
default=4, help='Number of workers.')
parser.add_argument('-q-size', '--queue-size', dest='queue_size', type=int,
default=5, help='Size of the queue.')
args = parser.parse_args()
input_q = Queue(maxsize=args.queue_size)
output_q = Queue(maxsize=args.queue_size)
video_capture = WebcamVideoStream(src=args.video_source,
width=args.width,
height=args.height).start()
cap_params = {}
frame_processed = 0
cap_params['im_width'], cap_params['im_height'] = video_capture.size()
cap_params['score_thresh'] = score_thresh
# max number of hands we want to detect/track
cap_params['num_hands_detect'] = args.num_hands
print(cap_params, args)
# spin up workers to paralleize detection.
pool = Pool(args.num_workers, worker,
(input_q, output_q, cap_params, frame_processed))
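    # Frames flow: main thread -> input_q -> worker (each with its own TF session) -> output_q -> display loop below.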
start_time = datetime.datetime.now()
num_frames = 0
fps = 0
index = 0
while True:
frame = video_capture.read()
frame = cv2.flip(frame, 1)
index += 1
input_q.put(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
output_frame = output_q.get()
output_frame = cv2.cvtColor(output_frame, cv2.COLOR_RGB2BGR)
elapsed_time = (datetime.datetime.now() -
start_time).total_seconds()
num_frames += 1
fps = num_frames / elapsed_time
# print("frame ", index, num_frames, elapsed_time, fps)
if (output_frame is not None):
if (args.display > 0):
if (args.fps > 0):
detector_utils.draw_fps_on_image(
"FPS : " + str(int(fps)), output_frame)
                cv2.imshow('Multi-threaded Detection', output_frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
if (num_frames == 400):
num_frames = 0
start_time = datetime.datetime.now()
else:
print("frames processed: ", index,
"elapsed time: ", elapsed_time, "fps: ", str(int(fps)))
else:
# print("video end")
break
elapsed_time = (datetime.datetime.now() -
start_time).total_seconds()
fps = num_frames / elapsed_time
print("fps", fps)
pool.terminate()
video_capture.stop()
cv2.destroyAllWindows()
|
the-stack_0_9971 | from sqlalchemy import *
from migrate import *
from migrate.changeset import schema
pre_meta = MetaData()
post_meta = MetaData()
collections = Table('collections', pre_meta,
Column('id', INTEGER, primary_key=True, nullable=False),
Column('user_id', INTEGER),
Column('collection_id', INTEGER),
)
users = Table('users', post_meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('user_id', Integer),
Column('collection_id', Integer),
)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine; bind
# migrate_engine to your metadata
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
pre_meta.tables['collections'].drop()
post_meta.tables['users'].create()
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
pre_meta.tables['collections'].create()
post_meta.tables['users'].drop()
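# These hooks are run by sqlalchemy-migrate's versioning tool (typically through a generated
# manage/upgrade script, e.g. `python manage.py upgrade` / `downgrade`); the exact entry point
# depends on how this project wraps migrate, so treat the command above as an assumption.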
|
the-stack_0_9972 | ###############################
#
# Created by Patrik Valkovic
# 3/9/2021
#
###############################
import unittest
import ffeat
class EachArgTest(unittest.TestCase):
def test_oneparam(self):
p = ffeat.flow.EachArg(lambda x: x + 1)
result, kargs = p(8)
self.assertSequenceEqual(result, [9])
def test_moreparams(self):
p = ffeat.flow.EachArg(lambda x: x + 1)
result, kargs = p(8, 12, 19, 20)
self.assertSequenceEqual(result, [9, 13, 20, 21])
if __name__ == '__main__':
unittest.main()
|
the-stack_0_9975 | import tensorflow as tf
import os
from niftynet.application.base_application import BaseApplication
from niftynet.engine.application_factory import \
ApplicationNetFactory, InitializerFactory, OptimiserFactory
from niftynet.engine.application_variables import \
CONSOLE, NETWORK_OUTPUT, TF_SUMMARIES
from niftynet.engine.sampler_grid import GridSampler
from niftynet.engine.sampler_resize import ResizeSampler
from niftynet.engine.sampler_uniform import UniformSampler
from niftynet.engine.sampler_weighted import WeightedSampler
from niftynet.engine.sampler_balanced import BalancedSampler
from niftynet.engine.windows_aggregator_grid import GridSamplesAggregator
from niftynet.engine.windows_aggregator_resize import ResizeSamplesAggregator
from niftynet.io.image_reader import ImageReader
from niftynet.layer.binary_masking import BinaryMaskingLayer
from niftynet.layer.discrete_label_normalisation import \
DiscreteLabelNormalisationLayer
from niftynet.layer.histogram_normalisation import \
HistogramNormalisationLayer
from niftynet.layer.loss_segmentation import LossFunction
from niftynet.layer.mean_variance_normalisation import \
MeanVarNormalisationLayer
from niftynet.layer.pad import PadLayer
from niftynet.layer.post_processing import PostProcessingLayer
from niftynet.layer.rand_flip import RandomFlipLayer
from niftynet.layer.rand_rotation import RandomRotationLayer
from niftynet.layer.rand_artefact import RandomArtefactLayer
from niftynet.layer.rand_spatial_scaling import RandomSpatialScalingLayer
from niftynet.evaluation.segmentation_evaluator import SegmentationEvaluator
SUPPORTED_INPUT = set(['image', 'label', 'weight', 'sampler', 'inferred'])
class SegmentationApplication(BaseApplication):
REQUIRED_CONFIG_SECTION = "SEGMENTATION"
def __init__(self, net_param, action_param, action):
super(SegmentationApplication, self).__init__()
tf.logging.info('starting segmentation application')
self.action = action
self.net_param = net_param
self.action_param = action_param
self.data_param = None
self.segmentation_param = None
self.SUPPORTED_SAMPLING = {
'uniform': (self.initialise_uniform_sampler,
self.initialise_grid_sampler,
self.initialise_grid_aggregator),
'weighted': (self.initialise_weighted_sampler,
self.initialise_grid_sampler,
self.initialise_grid_aggregator),
'resize': (self.initialise_resize_sampler,
self.initialise_resize_sampler,
self.initialise_resize_aggregator),
'balanced': (self.initialise_balanced_sampler,
self.initialise_grid_sampler,
self.initialise_grid_aggregator),
}
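        # Each entry maps a window-sampling strategy to a triple of initialisers:
        # (training sampler, inference sampler, output aggregator), indexed as [0]/[1]/[2]
        # by initialise_sampler() and initialise_aggregator() below.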
def initialise_dataset_loader(
self, data_param=None, task_param=None, data_partitioner=None):
self.data_param = data_param
self.segmentation_param = task_param
file_lists = self.get_file_lists(data_partitioner)
# read each line of csv files into an instance of Subject
if self.is_training:
self.readers = []
for file_list in file_lists:
reader = ImageReader({'image', 'label', 'weight', 'sampler'})
reader.initialise(data_param, task_param, file_list)
self.readers.append(reader)
elif self.is_inference:
# in the inference process use image input only
inference_reader = ImageReader({'image'})
file_list = data_partitioner.inference_files
inference_reader.initialise(data_param, task_param, file_list)
self.readers = [inference_reader]
elif self.is_evaluation:
file_list = data_partitioner.inference_files
reader = ImageReader({'image', 'label', 'inferred'})
reader.initialise(data_param, task_param, file_list)
self.readers = [reader]
else:
raise ValueError('Action `{}` not supported. Expected one of {}'
.format(self.action, self.SUPPORTED_ACTIONS))
foreground_masking_layer = None
if self.net_param.normalise_foreground_only:
foreground_masking_layer = BinaryMaskingLayer(
type_str=self.net_param.foreground_type,
multimod_fusion=self.net_param.multimod_foreground_type,
threshold=0.0)
mean_var_normaliser = MeanVarNormalisationLayer(
image_name='image', binary_masking_func=foreground_masking_layer)
histogram_normaliser = None
if self.net_param.histogram_ref_file:
histogram_normaliser = HistogramNormalisationLayer(
image_name='image',
modalities=vars(task_param).get('image'),
model_filename=self.net_param.histogram_ref_file,
binary_masking_func=foreground_masking_layer,
norm_type=self.net_param.norm_type,
cutoff=self.net_param.cutoff,
name='hist_norm_layer')
label_normalisers = None
if self.net_param.histogram_ref_file and \
task_param.label_normalisation:
label_normalisers = [DiscreteLabelNormalisationLayer(
image_name='label',
modalities=vars(task_param).get('label'),
model_filename=self.net_param.histogram_ref_file)]
if self.is_evaluation:
label_normalisers.append(
DiscreteLabelNormalisationLayer(
image_name='inferred',
modalities=vars(task_param).get('inferred'),
model_filename=self.net_param.histogram_ref_file))
label_normalisers[-1].key = label_normalisers[0].key
normalisation_layers = []
if self.net_param.normalisation:
normalisation_layers.append(histogram_normaliser)
if self.net_param.whitening:
normalisation_layers.append(mean_var_normaliser)
if task_param.label_normalisation and \
(self.is_training or not task_param.output_prob):
normalisation_layers.extend(label_normalisers)
augmentation_layers = []
if self.is_training:
if self.action_param.random_artefact != -1:
augmentation_layers.append(RandomArtefactLayer())
if self.action_param.random_flipping_axes != -1:
augmentation_layers.append(RandomFlipLayer(flip_axes=self.action_param.random_flipping_axes))
if self.action_param.scaling_percentage:
augmentation_layers.append(RandomSpatialScalingLayer(
min_percentage=self.action_param.scaling_percentage[0],
max_percentage=self.action_param.scaling_percentage[1]))
if self.action_param.rotation_angle or \
self.action_param.rotation_angle_x or \
self.action_param.rotation_angle_y or \
self.action_param.rotation_angle_z:
rotation_layer = RandomRotationLayer()
if self.action_param.rotation_angle:
rotation_layer.init_uniform_angle(
self.action_param.rotation_angle)
else:
rotation_layer.init_non_uniform_angle(
self.action_param.rotation_angle_x,
self.action_param.rotation_angle_y,
self.action_param.rotation_angle_z)
augmentation_layers.append(rotation_layer)
volume_padding_layer = []
if self.net_param.volume_padding_size:
volume_padding_layer.append(PadLayer(
image_name=SUPPORTED_INPUT,
border=self.net_param.volume_padding_size))
for reader in self.readers:
reader.add_preprocessing_layers(
volume_padding_layer +
normalisation_layers +
augmentation_layers)
def initialise_uniform_sampler(self):
self.sampler = [[UniformSampler(
reader=reader,
data_param=self.data_param,
batch_size=self.net_param.batch_size,
windows_per_image=self.action_param.sample_per_volume,
queue_length=self.net_param.queue_length) for reader in
self.readers]]
def initialise_weighted_sampler(self):
self.sampler = [[WeightedSampler(
reader=reader,
data_param=self.data_param,
batch_size=self.net_param.batch_size,
windows_per_image=self.action_param.sample_per_volume,
queue_length=self.net_param.queue_length) for reader in
self.readers]]
def initialise_resize_sampler(self):
self.sampler = [[ResizeSampler(
reader=reader,
data_param=self.data_param,
batch_size=self.net_param.batch_size,
shuffle_buffer=self.is_training,
queue_length=self.net_param.queue_length) for reader in
self.readers]]
def initialise_grid_sampler(self):
self.sampler = [[GridSampler(
reader=reader,
data_param=self.data_param,
batch_size=self.net_param.batch_size,
spatial_window_size=self.action_param.spatial_window_size,
window_border=self.action_param.border,
queue_length=self.net_param.queue_length) for reader in
self.readers]]
def initialise_balanced_sampler(self):
self.sampler = [[BalancedSampler(
reader=reader,
data_param=self.data_param,
batch_size=self.net_param.batch_size,
windows_per_image=self.action_param.sample_per_volume,
queue_length=self.net_param.queue_length) for reader in
self.readers]]
def initialise_grid_aggregator(self):
self.output_decoder = GridSamplesAggregator(
image_reader=self.readers[0],
output_path=self.action_param.save_seg_dir,
window_border=self.action_param.border,
interp_order=self.action_param.output_interp_order)
def initialise_resize_aggregator(self):
self.output_decoder = ResizeSamplesAggregator(
image_reader=self.readers[0],
output_path=self.action_param.save_seg_dir,
window_border=self.action_param.border,
interp_order=self.action_param.output_interp_order)
def initialise_sampler(self):
if self.is_training:
self.SUPPORTED_SAMPLING[self.net_param.window_sampling][0]()
elif self.is_inference:
self.SUPPORTED_SAMPLING[self.net_param.window_sampling][1]()
def initialise_aggregator(self):
self.SUPPORTED_SAMPLING[self.net_param.window_sampling][2]()
def initialise_network(self):
w_regularizer = None
b_regularizer = None
reg_type = self.net_param.reg_type.lower()
decay = self.net_param.decay
if reg_type == 'l2' and decay > 0:
from tensorflow.contrib.layers.python.layers import regularizers
w_regularizer = regularizers.l2_regularizer(decay)
b_regularizer = regularizers.l2_regularizer(decay)
elif reg_type == 'l1' and decay > 0:
from tensorflow.contrib.layers.python.layers import regularizers
w_regularizer = regularizers.l1_regularizer(decay)
b_regularizer = regularizers.l1_regularizer(decay)
self.net = ApplicationNetFactory.create(self.net_param.name)(
num_classes=self.segmentation_param.num_classes,
w_initializer=InitializerFactory.get_initializer(
name=self.net_param.weight_initializer),
b_initializer=InitializerFactory.get_initializer(
name=self.net_param.bias_initializer),
w_regularizer=w_regularizer,
b_regularizer=b_regularizer,
acti_func=self.net_param.activation_function)
def connect_data_and_network(self,
outputs_collector=None,
gradients_collector=None):
# def data_net(for_training):
# with tf.name_scope('train' if for_training else 'validation'):
# sampler = self.get_sampler()[0][0 if for_training else -1]
# data_dict = sampler.pop_batch_op()
# image = tf.cast(data_dict['image'], tf.float32)
# return data_dict, self.net(image, is_training=for_training)
def switch_sampler(for_training):
with tf.name_scope('train' if for_training else 'validation'):
sampler = self.get_sampler()[0][0 if for_training else -1]
return sampler.pop_batch_op()
if self.is_training:
# if self.action_param.validation_every_n > 0:
# data_dict, net_out = tf.cond(tf.logical_not(self.is_validation),
# lambda: data_net(True),
# lambda: data_net(False))
# else:
# data_dict, net_out = data_net(True)
if self.action_param.validation_every_n > 0:
data_dict = tf.cond(tf.logical_not(self.is_validation),
lambda: switch_sampler(for_training=True),
lambda: switch_sampler(for_training=False))
else:
data_dict = switch_sampler(for_training=True)
image = tf.cast(data_dict['image'], tf.float32)
net_out = self.net(image, is_training=self.is_training)
with tf.name_scope('Optimiser'):
optimiser_class = OptimiserFactory.create(
name=self.action_param.optimiser)
self.optimiser = optimiser_class.get_instance(
learning_rate=self.action_param.lr)
loss_func = LossFunction(
n_class=self.segmentation_param.num_classes,
loss_type=self.action_param.loss_type,
softmax=self.segmentation_param.softmax)
data_loss = loss_func(
prediction=net_out,
ground_truth=data_dict.get('label', None),
weight_map=data_dict.get('weight', None))
reg_losses = tf.get_collection(
tf.GraphKeys.REGULARIZATION_LOSSES)
if self.net_param.decay > 0.0 and reg_losses:
reg_loss = tf.reduce_mean(
[tf.reduce_mean(reg_loss) for reg_loss in reg_losses])
loss = data_loss + reg_loss
else:
loss = data_loss
grads = self.optimiser.compute_gradients(loss)
# collecting gradients variables
gradients_collector.add_to_collection([grads])
# collecting output variables
outputs_collector.add_to_collection(
var=data_loss, name='loss',
average_over_devices=False, collection=CONSOLE)
outputs_collector.add_to_collection(
var=data_loss, name='loss',
average_over_devices=True, summary_type='scalar',
collection=TF_SUMMARIES)
# outputs_collector.add_to_collection(
# var=image*180.0, name='image',
# average_over_devices=False, summary_type='image3_sagittal',
# collection=TF_SUMMARIES)
# outputs_collector.add_to_collection(
# var=image, name='image',
# average_over_devices=False,
# collection=NETWORK_OUTPUT)
# outputs_collector.add_to_collection(
# var=tf.reduce_mean(image), name='mean_image',
# average_over_devices=False, summary_type='scalar',
# collection=CONSOLE)
elif self.is_inference:
# converting logits into final output for
# classification probabilities or argmax classification labels
data_dict = switch_sampler(for_training=False)
image = tf.cast(data_dict['image'], tf.float32)
net_out = self.net(image, is_training=True)
output_prob = self.segmentation_param.output_prob
num_classes = self.segmentation_param.num_classes
if output_prob and num_classes > 1:
post_process_layer = PostProcessingLayer(
'SOFTMAX', num_classes=num_classes)
elif not output_prob and num_classes > 1:
post_process_layer = PostProcessingLayer(
'ARGMAX', num_classes=num_classes)
else:
post_process_layer = PostProcessingLayer(
'IDENTITY', num_classes=num_classes)
net_out = post_process_layer(net_out)
outputs_collector.add_to_collection(
var=net_out, name='window',
average_over_devices=False, collection=NETWORK_OUTPUT)
outputs_collector.add_to_collection(
var=data_dict['image_location'], name='location',
average_over_devices=False, collection=NETWORK_OUTPUT)
self.initialise_aggregator()
def interpret_output(self, batch_output):
if self.is_inference:
return self.output_decoder.decode_batch(
batch_output['window'], batch_output['location'])
return True
def initialise_evaluator(self, eval_param):
self.eval_param = eval_param
self.evaluator = SegmentationEvaluator(self.readers[0],
self.segmentation_param,
eval_param)
def add_inferred_output(self, data_param, task_param):
return self.add_inferred_output_like(data_param, task_param, 'label')
|
the-stack_0_9978 | from __future__ import annotations
from typing import Optional, TYPE_CHECKING
from components.base_component import BaseComponent
from equipment_types import EquipmentType
if TYPE_CHECKING:
from entity import Actor, Item
class Equipment(BaseComponent):
parent: Actor
def __init__(self, weapon: Optional[Item] = None, armor: Optional[Item] = None):
self.weapon = weapon
self.armor = armor
@property
def defense_bonus(self) -> int:
bonus = 0
if self.weapon is not None and self.weapon.equipable is not None:
bonus += self.weapon.equipable.defense_bonus
if self.armor is not None and self.armor.equipable is not None:
bonus += self.armor.equipable.defense_bonus
return bonus
@property
def power_bonus(self) -> int:
bonus = 0
if self.weapon is not None and self.weapon.equipable is not None:
bonus += self.weapon.equipable.power_bonus
if self.armor is not None and self.armor.equipable is not None:
bonus += self.armor.equipable.power_bonus
return bonus
def item_is_equipped(self, item: Item) -> bool:
return self.weapon == item or self.armor == item
def unequip_message(self, item_name: str) -> None:
self.parent.gamemap.engine.message_log.add_message(
f"You remove the {item_name}."
)
def equip_message(self, item_name: str) -> None:
self.parent.gamemap.engine.message_log.add_message(
f"You gear up and equip the {item_name}."
)
def equip_to_slot(self, slot: str, item: Item, add_message: bool) -> None:
current_item = getattr(self, slot)
if current_item is not None:
self.unequip_from_slot(slot, add_message)
setattr(self, slot, item)
if add_message:
self.equip_message(item.name)
def unequip_from_slot(self, slot: str, add_message: bool) -> None:
current_item = getattr(self, slot)
if add_message:
self.unequip_message(current_item.name)
setattr(self, slot, None)
def toggle_equip(self, equipable_item: Item, add_message: bool = True) -> None:
if (
equipable_item.equipable
and equipable_item.equipable.equipment_type == EquipmentType.WEAPON
):
slot = "weapon"
else:
slot = "armor"
if getattr(self, slot) == equipable_item:
self.unequip_from_slot(slot, add_message)
else:
self.equip_to_slot(slot, equipable_item, add_message)
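# Usage sketch (actor/item instances come from elsewhere in the project; names are assumptions):
#   player.equipment.toggle_equip(dagger)   # equips into the "weapon" slot and logs a message
#   player.equipment.toggle_equip(dagger)   # the same call again unequips that item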
|
the-stack_0_9979 | #!/usr/bin/env python3
import ecc_ed25519
import sys, getopt
from cryptography.hazmat.primitives.asymmetric import ed25519
msg = ""
public_key_hex = ""
encoded_signature = ""
try:
opts, args = getopt.getopt(sys.argv[1:],"hm:k:s:",["message=","publickeyhex=", "signature="])
except getopt.GetoptError:
print('verify.py -m YOURMESSAGE -k PUBLIC-KEY-HEX -s SIGNATURE')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('verify.py -m YOURMESSAGE -k PUBLIC-KEY-HEX -s SIGNATURE')
sys.exit()
elif opt in ("-m", "--message"):
msg = arg
msg_as_bytes = str.encode(msg)
elif opt in ("-k", "--publickeyhex"):
public_key_hex = arg
# Get rid of the prefix 01
public_bytes_from_hex = bytes.fromhex(public_key_hex[2:])
loaded_public_key = ed25519.Ed25519PublicKey.from_public_bytes(public_bytes_from_hex)
elif opt in ("-s", "--signature"):
encoded_signature = arg
signature = bytes.fromhex(encoded_signature)
if msg == "" or encoded_signature == "":
print("Message and signature are required!")
print('./verify.py -m YOURMESSAGE -k PUBLIC-KEY-HEX -s SIGNATURE')
sys.exit()
# Read the public_key_hex from default location if not given as param
if public_key_hex == "":
public_key_hex_location = "/etc/casper/validator_keys/public_key_hex"
try:
with open(public_key_hex_location, 'r') as fstream:
public_key_hex = fstream.readlines()[0]
# Get rid of the prefix
public_bytes_from_hex = bytes.fromhex(public_key_hex[2:])
loaded_public_key = ed25519.Ed25519PublicKey.from_public_bytes(public_bytes_from_hex)
except:
print("ERROR: Couldn't access your public key hex at this location: ", public_key_hex_location)
print("Please make sure your public_key_hex file is at the given location and is accessible by the current user.")
print("You can also directly provide your public key as an input parameter.")
print("USAGE: verify.py -m YOURMESSAGE -k PUBLIC-KEY-HEX -s SIGNATURE")
sys.exit()
print("Public Key:\n", public_key_hex)
print("Message:\n", msg)
print("Signature:\n", encoded_signature)
# Verify
try:
loaded_public_key.verify(signature, msg_as_bytes)
print("Verified!")
except:
print("Verification failed!") |
the-stack_0_9981 | from django.shortcuts import redirect
from django.http import HttpResponse
from .models import Link
def index(request):
return HttpResponse("Hello, world. You're at the polls index.")
def openLink(request, temp):
redirectLink = Link.objects.get(name=temp)
link = redirectLink.redirect
print(link)
return redirect(link)
|
the-stack_0_9983 | import gym
from gym import spaces
import numpy as np
# from os import path
import snakeoil3_gym as snakeoil3
import numpy as np
import copy
import collections as col
import os
import time
import sys
class TorcsEnv:
    terminal_judge_start = 50  # 1000  # episode terminates if there is still no progress after this many timesteps
termination_limit_progress = 5 # [km/h], episode terminates if car is running slower than this limit
default_speed = 50
initial_reset = True
def __init__(self, vision=False, throttle=False, gear_change=False):
self.vision = vision
self.throttle = throttle
self.gear_change = gear_change
self.initial_run = True
##print("launch torcs")
os.system('pkill torcs')
time.sleep(0.5)
if self.vision is True:
os.system('torcs -nofuel -nodamage -nolaptime -vision &')
else:
os.system('torcs -nofuel -nolaptime &')
time.sleep(0.5)
os.system('sh autostart.sh')
time.sleep(0.5)
"""
# Modify here if you use multiple tracks in the environment
self.client = snakeoil3.Client(p=3101, vision=self.vision) # Open new UDP in vtorcs
self.client.MAX_STEPS = np.inf
client = self.client
client.get_servers_input() # Get the initial input from torcs
obs = client.S.d # Get the current full-observation from torcs
"""
if throttle is False:
self.action_space = spaces.Box(low=-1.0, high=1.0, shape=(1,))
else:
self.action_space = spaces.Box(low=-1.0, high=1.0, shape=(2,))
if vision is False:
high = np.array([1., np.inf, np.inf, np.inf, 1., np.inf, 1., np.inf])
low = np.array([0., -np.inf, -np.inf, -np.inf, 0., -np.inf, 0., -np.inf])
self.observation_space = spaces.Box(low=low, high=high)
else:
high = np.array([1., np.inf, np.inf, np.inf, 1., np.inf, 1., np.inf, 255])
low = np.array([0., -np.inf, -np.inf, -np.inf, 0., -np.inf, 0., -np.inf, 0])
self.observation_space = spaces.Box(low=low, high=high)
def step(self, u):
#print("Step")
# convert thisAction to the actual torcs actionstr
client = self.client
this_action = self.agent_to_torcs(u)
# Apply Action
action_torcs = client.R.d
# Steering
action_torcs['steer'] = this_action['steer'] # in [-1, 1]
            # Simple Automatic Throttle Control by Snakeoil
if self.throttle is False:
print("KAUSHIK: SHOULD NOT BE HERE! ")
sys.exit()
target_speed = self.default_speed
if client.S.d['speedX'] < target_speed - (client.R.d['steer']*50):
client.R.d['accel'] += .01
else:
client.R.d['accel'] -= .01
if client.R.d['accel'] > 0.2:
client.R.d['accel'] = 0.2
if client.S.d['speedX'] < 10:
client.R.d['accel'] += 1/(client.S.d['speedX']+.1)
# Traction Control System
if ((client.S.d['wheelSpinVel'][2]+client.S.d['wheelSpinVel'][3]) -
(client.S.d['wheelSpinVel'][0]+client.S.d['wheelSpinVel'][1]) > 5):
action_torcs['accel'] -= .2
else:
action_torcs['accel'] = this_action['accel']
action_torcs['brake'] = this_action['brake']
# Automatic Gear Change by Snakeoil
if self.gear_change is True:
action_torcs['gear'] = this_action['gear']
else:
# Automatic Gear Change by Snakeoil is possible
action_torcs['gear'] = 1
if self.throttle:
if client.S.d['speedX'] > 50:
action_torcs['gear'] = 2
if client.S.d['speedX'] > 80:
action_torcs['gear'] = 3
if client.S.d['speedX'] > 110:
action_torcs['gear'] = 4
if client.S.d['speedX'] > 140:
action_torcs['gear'] = 5
if client.S.d['speedX'] > 170:
action_torcs['gear'] = 6
# Save the privious full-obs from torcs for the reward calculation
obs_pre = copy.deepcopy(client.S.d)
# One-Step Dynamics Update #################################
# Apply the Agent's action into torcs
client.respond_to_server()
# Get the response of TORCS
client.get_servers_input()
# Get the current full-observation from torcs
obs = client.S.d
        # Make an observation from a raw observation vector from TORCS
self.observation = self.make_observaton(obs)
# Reward setting Here #######################################
# direction-dependent positive reward
track = np.array(obs['track'])
trackPos = np.array(obs['trackPos'])
sp = np.array(obs['speedX'])
damage = np.array(obs['damage'])
rpm = np.array(obs['rpm'])
progress = sp*np.cos(obs['angle']) - np.abs(sp*np.sin(obs['angle'])) - sp * np.abs(obs['trackPos'])
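        # i.e. reward forward speed along the track axis, penalise the lateral speed component
        # and being off-centre (|trackPos| grows towards the track edges).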
reward = progress
# collision detection
if obs['damage'] - obs_pre['damage'] > 0:
reward = -1
# Termination judgement #########################
episode_terminate = False
#---------------------------------------------------
if (abs(track.any()) > 1 or abs(trackPos) > 1): # Episode is terminated if the car is out of track
print("Out of track ")
reward = -100 #-200
episode_terminate = True
client.R.d['meta'] = True
if self.terminal_judge_start < self.time_step: # Episode terminates if the progress of agent is small
if progress < self.termination_limit_progress:
print("No progress", progress)
reward = -100 # KAUSHIK ADDED THIS
episode_terminate = True
client.R.d['meta'] = True
#---------------------------------------------------
if np.cos(obs['angle']) < 0: # Episode is terminated if the agent runs backward
episode_terminate = True
client.R.d['meta'] = True
if client.R.d['meta'] is True: # Send a reset signal
self.initial_run = False
client.respond_to_server()
self.time_step += 1
return self.get_obs(), reward, client.R.d['meta'], {}
def reset(self, relaunch=False):
#print("Reset")
self.time_step = 0
if self.initial_reset is not True:
self.client.R.d['meta'] = True
self.client.respond_to_server()
## TENTATIVE. Restarting TORCS every episode suffers the memory leak bug!
if relaunch is True:
self.reset_torcs()
print("### TORCS is RELAUNCHED ###")
# Modify here if you use multiple tracks in the environment
self.client = snakeoil3.Client(p=3101, vision=self.vision) # Open new UDP in vtorcs
self.client.MAX_STEPS = np.inf
client = self.client
client.get_servers_input() # Get the initial input from torcs
obs = client.S.d # Get the current full-observation from torcs
self.observation = self.make_observaton(obs)
self.last_u = None
self.initial_reset = False
return self.get_obs()
def end(self):
os.system('pkill torcs')
def get_obs(self):
return self.observation
def reset_torcs(self):
#print("relaunch torcs")
os.system('pkill torcs')
time.sleep(0.5)
if self.vision is True:
os.system('torcs -nofuel -nodamage -nolaptime -vision &')
else:
os.system('torcs -nofuel -nolaptime &')
time.sleep(0.5)
os.system('sh autostart.sh')
time.sleep(0.5)
def agent_to_torcs(self, u):
torcs_action = {'steer': u[0]}
if self.throttle is True: # throttle action is enabled
torcs_action.update({'accel': u[1]})
torcs_action.update({'brake': u[2]})
if self.gear_change is True: # gear change action is enabled
torcs_action.update({'gear': int(u[3])})
return torcs_action
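        # e.g. with throttle and gear change enabled, u = [steer, accel, brake, gear]
        # maps to {'steer': u[0], 'accel': u[1], 'brake': u[2], 'gear': int(u[3])}.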
def obs_vision_to_image_rgb(self, obs_image_vec):
image_vec = obs_image_vec
r = image_vec[0:len(image_vec):3]
g = image_vec[1:len(image_vec):3]
b = image_vec[2:len(image_vec):3]
sz = (64, 64)
r = np.array(r).reshape(sz)
g = np.array(g).reshape(sz)
b = np.array(b).reshape(sz)
return np.array([r, g, b], dtype=np.uint8)
def make_observaton(self, raw_obs):
if self.vision is False:
names = ['focus',
'speedX', 'speedY', 'speedZ', 'angle', 'damage',
'opponents',
'rpm',
'track',
'trackPos',
'wheelSpinVel']
            Observation = col.namedtuple('Observation', names)
return Observation(focus=np.array(raw_obs['focus'], dtype=np.float32)/200.,
speedX=np.array(raw_obs['speedX'], dtype=np.float32)/300.0,
speedY=np.array(raw_obs['speedY'], dtype=np.float32)/300.0,
speedZ=np.array(raw_obs['speedZ'], dtype=np.float32)/300.0,
angle=np.array(raw_obs['angle'], dtype=np.float32)/3.1416,
damage=np.array(raw_obs['damage'], dtype=np.float32),
opponents=np.array(raw_obs['opponents'], dtype=np.float32)/200.,
rpm=np.array(raw_obs['rpm'], dtype=np.float32)/10000,
track=np.array(raw_obs['track'], dtype=np.float32)/200.,
trackPos=np.array(raw_obs['trackPos'], dtype=np.float32)/1.,
wheelSpinVel=np.array(raw_obs['wheelSpinVel'], dtype=np.float32))
else:
names = ['focus',
'speedX', 'speedY', 'speedZ', 'angle',
'opponents',
'rpm',
'track',
'trackPos',
'wheelSpinVel',
'img']
            Observation = col.namedtuple('Observation', names)
# Get RGB from observation
#image_rgb = self.obs_vision_to_image_rgb(raw_obs['img']) # KAUSHIK ADDED THIS
image_rgb = self.obs_vision_to_image_rgb(raw_obs[names[8]])
return Observation(focus=np.array(raw_obs['focus'], dtype=np.float32)/200.,
speedX=np.array(raw_obs['speedX'], dtype=np.float32)/self.default_speed,
speedY=np.array(raw_obs['speedY'], dtype=np.float32)/self.default_speed,
speedZ=np.array(raw_obs['speedZ'], dtype=np.float32)/self.default_speed,
opponents=np.array(raw_obs['opponents'], dtype=np.float32)/200.,
rpm=np.array(raw_obs['rpm'], dtype=np.float32),
track=np.array(raw_obs['track'], dtype=np.float32)/200.,
trackPos=np.array(raw_obs['trackPos'], dtype=np.float32)/1.,
wheelSpinVel=np.array(raw_obs['wheelSpinVel'], dtype=np.float32),
img=image_rgb)
|
the-stack_0_9985 | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Pedro Algarvio ([email protected])`
:copyright: © 2012-2013 by the SaltStack Team, see AUTHORS for more details
:license: Apache 2.0, see LICENSE for more details.
tests.integration.shell.call
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
# Import python libs
import os
import sys
import shutil
import yaml
from datetime import datetime
# Import Salt Testing libs
from salttesting import skipIf
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import salt libs
import integration
class CallTest(integration.ShellCase, integration.ShellCaseCommonTestsMixIn):
_call_binary_ = 'salt-call'
def test_default_output(self):
out = self.run_call('-l quiet test.fib 3')
expect = ['local:',
' |_',
' - 0',
' - 1',
' - 1',
' - 2']
self.assertEqual(expect, out[:-1])
def test_text_output(self):
out = self.run_call('-l quiet --out txt test.fib 3')
expect = [
'local: ([0, 1, 1, 2]'
]
self.assertEqual(''.join(expect), ''.join(out).rsplit(",", 1)[0])
@skipIf(sys.platform.startswith('win'), 'This test does not apply on Win')
def test_user_delete_kw_output(self):
ret = self.run_call('-l quiet -d user.delete')
self.assertIn(
'salt \'*\' user.delete name remove=True force=True',
''.join(ret)
)
def test_issue_6973_state_highstate_exit_code(self):
'''
If there is no tops/master_tops or state file matches
for this minion, salt-call should exit non-zero if invoked with
option --retcode-passthrough
'''
src = os.path.join(integration.FILES, 'file/base/top.sls')
dst = os.path.join(integration.FILES, 'file/base/top.sls.bak')
shutil.move(src, dst)
expected_comment = 'No Top file or external nodes data matches found'
try:
stdout, retcode = self.run_call(
'-l quiet --retcode-passthrough state.highstate',
with_retcode=True
)
finally:
shutil.move(dst, src)
self.assertIn(expected_comment, ''.join(stdout))
self.assertNotEqual(0, retcode)
@skipIf(sys.platform.startswith('win'), 'This test does not apply on Win')
def test_issue_2731_masterless(self):
config_dir = '/tmp/salttest'
minion_config_file = os.path.join(config_dir, 'minion')
this_minion_key = os.path.join(
config_dir, 'pki', 'minions', 'minion_test_issue_2731'
)
minion_config = {
'id': 'minion_test_issue_2731',
'master': 'localhost',
'master_port': 64506,
'root_dir': '/tmp/salttest',
'pki_dir': 'pki',
'cachedir': 'cachedir',
'sock_dir': 'minion_sock',
'open_mode': True,
'log_file': '/tmp/salttest/minion_test_issue_2731',
'log_level': 'quiet',
'log_level_logfile': 'info'
}
# Remove existing logfile
if os.path.isfile('/tmp/salttest/minion_test_issue_2731'):
os.unlink('/tmp/salttest/minion_test_issue_2731')
start = datetime.now()
# Let's first test with a master running
open(minion_config_file, 'w').write(
yaml.dump(minion_config, default_flow_style=False)
)
ret = self.run_script(
'salt-call',
'--config-dir {0} cmd.run "echo foo"'.format(
config_dir
)
)
try:
self.assertIn('local:', ret)
except AssertionError:
if os.path.isfile(minion_config_file):
os.unlink(minion_config_file)
# Let's remove our key from the master
if os.path.isfile(this_minion_key):
os.unlink(this_minion_key)
raise
# Calculate the required timeout, since next will fail.
# I needed this because after many attempts, I was unable to catch:
# WARNING: Master hostname: salt not found. Retrying in 30 seconds
        elapsed = datetime.now() - start
        timeout = elapsed.seconds + 3
# Now let's remove the master configuration
minion_config.pop('master')
minion_config.pop('master_port')
open(minion_config_file, 'w').write(
yaml.dump(minion_config, default_flow_style=False)
)
out = self.run_script(
'salt-call',
'--config-dir {0} cmd.run "echo foo"'.format(
config_dir
),
timeout=timeout,
)
try:
self.assertIn(
'Process took more than {0} seconds to complete. '
'Process Killed!'.format(timeout),
out
)
except AssertionError:
if os.path.isfile(minion_config_file):
os.unlink(minion_config_file)
# Let's remove our key from the master
if os.path.isfile(this_minion_key):
os.unlink(this_minion_key)
raise
# Should work with --local
ret = self.run_script(
'salt-call',
'--config-dir {0} --local cmd.run "echo foo"'.format(
config_dir
),
timeout=15
)
try:
self.assertIn('local:', ret)
except AssertionError:
if os.path.isfile(minion_config_file):
os.unlink(minion_config_file)
# Let's remove our key from the master
if os.path.isfile(this_minion_key):
os.unlink(this_minion_key)
raise
# Should work with local file client
minion_config['file_client'] = 'local'
open(minion_config_file, 'w').write(
yaml.dump(minion_config, default_flow_style=False)
)
ret = self.run_script(
'salt-call',
'--config-dir {0} cmd.run "echo foo"'.format(
config_dir
),
timeout=15
)
try:
self.assertIn('local:', ret)
finally:
if os.path.isfile(minion_config_file):
os.unlink(minion_config_file)
# Let's remove our key from the master
if os.path.isfile(this_minion_key):
os.unlink(this_minion_key)
if __name__ == '__main__':
from integration import run_tests
run_tests(CallTest)
|
the-stack_0_9986 | # Copyright (c) 2019-2021 CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pytest
import pyecvl._core.ecvl as ecvl_core
import pyecvl.ecvl as ecvl_py
pytest.importorskip("pyecvl.augmentations")
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugmentationParam(ecvl):
ap = ecvl.AugmentationParam()
min_, max_ = 0.1, 1.2
ap = ecvl.AugmentationParam(min_, max_)
assert ap.min_ == pytest.approx(min_)
assert ap.max_ == pytest.approx(max_)
ecvl.AugmentationParam.SetSeed(12345)
ap.GenerateValue()
assert min_ <= ap.value_ <= max_
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugRotate(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugRotate([30, 50])
a.Apply(img)
a = ecvl.AugRotate([30, 50], [2, 3])
a.Apply(img)
a = ecvl.AugRotate([30, 50], [2, 3], 1.1)
a.Apply(img)
a = ecvl.AugRotate([30, 50], [2, 3], 1.1, ecvl.InterpolationType.nearest)
a.Apply(img)
a = ecvl.AugRotate([30, 50], [2, 3], 1.1, ecvl.InterpolationType.nearest,
ecvl.InterpolationType.linear)
a.Apply(img)
# fromtext
f = ecvl.AugRotate if ecvl is ecvl_core else ecvl.AugRotate.fromtext
a = f('angle=[30, 50] center=(2, 3) scale=1.1 interp="nearest" '
'gt_interp="linear"')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugResizeDim(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugResizeDim([4, 3])
a.Apply(img)
a = ecvl.AugResizeDim([4, 3], ecvl.InterpolationType.nearest,
ecvl.InterpolationType.linear)
a.Apply(img)
# fromtext
f = ecvl.AugResizeDim if ecvl is ecvl_core else ecvl.AugResizeDim.fromtext
a = f('dims=(4, 3) interp="linear" gt_interp="nearest"')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugResizeScale(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugResizeScale([0.5, 0.5])
a.Apply(img)
a = ecvl.AugResizeScale([0.5, 0.5], ecvl.InterpolationType.nearest,
ecvl.InterpolationType.linear)
a.Apply(img)
# fromtext
f = ecvl.AugResizeScale if ecvl is ecvl_core else \
ecvl.AugResizeScale.fromtext
a = f('scale=(0.5, 0.5) interp="linear" gt_interp="nearest"')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugFlip(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugFlip(0.5)
a.Apply(img)
# fromtext
f = ecvl.AugFlip if ecvl is ecvl_core else ecvl.AugFlip.fromtext
a = f('p=0.5')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugMirror(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugMirror(0.5)
a.Apply(img)
# fromtext
f = ecvl.AugMirror if ecvl is ecvl_core else ecvl.AugMirror.fromtext
a = f('p=0.5')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugGaussianBlur(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugGaussianBlur([0.2, 0.4])
a.Apply(img)
# fromtext
f = ecvl.AugGaussianBlur if ecvl is ecvl_core else \
ecvl.AugGaussianBlur.fromtext
a = f('sigma=[0.2, 0.4]')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugAdditiveLaplaceNoise(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugAdditiveLaplaceNoise([255 * 0.05, 255 * 0.09])
a.Apply(img)
# fromtext
f = ecvl.AugAdditiveLaplaceNoise if ecvl is ecvl_core else \
ecvl.AugAdditiveLaplaceNoise.fromtext
a = f('std_dev=[12.5, 23.1]')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugAdditivePoissonNoise(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugAdditivePoissonNoise([2.0, 3.0])
a.Apply(img)
# fromtext
f = ecvl.AugAdditivePoissonNoise if ecvl is ecvl_core else \
ecvl.AugAdditivePoissonNoise.fromtext
a = f('lambda=[2.0, 3.0]')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugGammaContrast(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugGammaContrast([3, 4])
a.Apply(img)
# fromtext
f = ecvl.AugGammaContrast if ecvl is ecvl_core else \
ecvl.AugGammaContrast.fromtext
a = f('gamma=[3, 4]')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugCoarseDropout(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugCoarseDropout([0.5, 0.7], [0.1, 0.2], 0.4)
a.Apply(img)
# fromtext
f = ecvl.AugCoarseDropout if ecvl is ecvl_core else \
ecvl.AugCoarseDropout.fromtext
a = f('p=[0.5, 0.7] drop_size=[0.1, 0.2] per_channel=0.4')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugTranspose(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugTranspose(0.5)
a.Apply(img)
# fromtext
f = ecvl.AugTranspose if ecvl is ecvl_core else ecvl.AugTranspose.fromtext
a = f('p=0.5')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugBrightness(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugBrightness([30, 60])
a.Apply(img)
# fromtext
f = ecvl.AugBrightness if ecvl is ecvl_core else \
ecvl.AugBrightness.fromtext
a = f('beta=[30, 60]')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugGridDistortion(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugGridDistortion([5, 10], [-0.2, 0.2])
a.Apply(img)
a = ecvl.AugGridDistortion(
[5, 10], [-0.2, 0.2], ecvl.InterpolationType.nearest
)
a.Apply(img)
a = ecvl.AugGridDistortion(
[5, 10], [-0.2, 0.2], ecvl.InterpolationType.nearest,
ecvl.BorderType.BORDER_REFLECT_101
)
a.Apply(img)
a = ecvl.AugGridDistortion(
[5, 10], [-0.2, 0.2], ecvl.InterpolationType.nearest,
ecvl.BorderType.BORDER_REFLECT_101, 0
)
a.Apply(img)
# fromtext
f = ecvl.AugGridDistortion if ecvl is ecvl_core else \
ecvl.AugGridDistortion.fromtext
a = f('num_steps=[5,10] distort_limit=[-0.2,0.2] interp=\"linear\" '
'border_type=\"reflect_101\" border_value=0')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugOpticalDistortion(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugOpticalDistortion([-0.2, 0.2], [-0.4, 0.4])
a.Apply(img)
a = ecvl.AugOpticalDistortion(
[-0.2, 0.2], [-0.4, 0.4], ecvl.InterpolationType.nearest
)
a.Apply(img)
a = ecvl.AugOpticalDistortion(
[-0.2, 0.2], [-0.4, 0.4], ecvl.InterpolationType.nearest,
ecvl.BorderType.BORDER_REFLECT_101
)
a.Apply(img)
a = ecvl.AugOpticalDistortion(
[-0.2, 0.2], [-0.4, 0.4], ecvl.InterpolationType.nearest,
ecvl.BorderType.BORDER_REFLECT_101, 0
)
a.Apply(img)
# fromtext
f = ecvl.AugOpticalDistortion if ecvl is ecvl_core else \
ecvl.AugOpticalDistortion.fromtext
a = f('distort_limit=[-0.2,0.2] shift_limit=[-0.4,0.4] interp=\"linear\" '
'border_type=\"reflect_101\" border_value=0')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugSalt(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugSalt([0.1, 0.3], 0.5)
a.Apply(img)
# fromtext
f = ecvl.AugSalt if ecvl is ecvl_core else ecvl.AugSalt.fromtext
a = f('p=[0.1,0.3] per_channel=0.5')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugPepper(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugPepper([0.1, 0.3], 0.5)
a.Apply(img)
# fromtext
f = ecvl.AugPepper if ecvl is ecvl_core else ecvl.AugPepper.fromtext
a = f('p=[0.1,0.3] per_channel=0.5')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugSaltAndPepper(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugSaltAndPepper([0.1, 0.3], 0.5)
a.Apply(img)
# fromtext
f = ecvl.AugSaltAndPepper if ecvl is ecvl_core else \
ecvl.AugSaltAndPepper.fromtext
a = f('p=[0.1,0.3] per_channel=0.5')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugElasticTransform(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugElasticTransform([34, 60], [4, 6])
a.Apply(img)
a = ecvl.AugElasticTransform(
[34, 60], [4, 6], ecvl.InterpolationType.nearest
)
a.Apply(img)
a = ecvl.AugElasticTransform(
[34, 60], [4, 6], ecvl.InterpolationType.nearest,
ecvl.BorderType.BORDER_REFLECT_101
)
a.Apply(img)
a = ecvl.AugElasticTransform(
[34, 60], [4, 6], ecvl.InterpolationType.nearest,
ecvl.BorderType.BORDER_REFLECT_101, 0
)
a.Apply(img)
# fromtext
f = ecvl.AugElasticTransform if ecvl is ecvl_core else \
ecvl.AugElasticTransform.fromtext
a = f('alpha=[34,60] sigma=[4,6] interp=\"linear\" '
'border_type=\"reflect_101\" border_value=0')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugNormalize(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugNormalize(20, 5.5)
a.Apply(img)
# fromtext
f = ecvl.AugNormalize if ecvl is ecvl_core else \
ecvl.AugNormalize.fromtext
a = f('mean=20 std=5.5')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugNormalize_separate(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugNormalize([20, 19, 21], [5, 5.5, 6])
a.Apply(img)
# fromtext
f = ecvl.AugNormalize if ecvl is ecvl_core else \
ecvl.AugNormalize.fromtext
a = f('mean=(20,19,21) std=(5,5.5,6)')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugCenterCrop(ecvl):
img = ecvl.Image([8, 6, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugCenterCrop([4, 3])
a.Apply(img)
# fromtext
f = ecvl.AugCenterCrop if ecvl is ecvl_core else \
ecvl.AugCenterCrop.fromtext
a = f('size=(4,3)')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugToFloat32(ecvl):
img = ecvl.Image([8, 6, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugToFloat32()
a.Apply(img)
assert img.elemtype_ == ecvl.DataType.float32
img = ecvl.Image([8, 6, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugToFloat32(2.)
a.Apply(img)
assert img.elemtype_ == ecvl.DataType.float32
img = ecvl.Image([8, 6, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugToFloat32(2., 3.)
a.Apply(img)
assert img.elemtype_ == ecvl.DataType.float32
# fromtext
f = ecvl.AugToFloat32 if ecvl is ecvl_core else \
ecvl.AugToFloat32.fromtext
img = ecvl.Image([8, 6, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = f('divisor=2. divisor_gt=3.')
a.Apply(img)
assert img.elemtype_ == ecvl.DataType.float32
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugDivBy255(ecvl):
img = ecvl.Image([8, 6, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugDivBy255()
a.Apply(img)
# fromtext
f = ecvl.AugDivBy255 if ecvl is ecvl_core else \
ecvl.AugDivBy255.fromtext
a = f('')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugScaleTo(ecvl):
img = ecvl.Image([8, 6, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugScaleTo(1, 254)
a.Apply(img)
# fromtext
f = ecvl.AugScaleTo if ecvl is ecvl_core else \
ecvl.AugScaleTo.fromtext
a = f('new_min=1 new_max=255')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugmentationFactory(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
# one arg
a = ecvl.AugmentationFactory.create('AugFlip p=0.5')
a.Apply(img)
# two args
a = ecvl.AugmentationFactory.create('AugFlip', 'p=0.5')
a.Apply(img)
# container
txt = ('SequentialAugmentationContainer\n'
'AugRotate angle=[-5,5] center=(0,0) scale=0.5 interp="linear"\n'
'AugAdditiveLaplaceNoise std_dev=[0,0.51]\n'
'AugCoarseDropout p=[0,0.55] drop_size=[0.02,0.1] per_channel=0\n'
'AugAdditivePoissonNoise lambda=[0,40]\n'
'AugResizeDim dims=(30,30) interp="linear"\n'
'end')
c = ecvl.AugmentationFactory.create(txt)
c.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_SequentialAugmentationContainer(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
# from list
c = ecvl.SequentialAugmentationContainer([
ecvl.AugRotate([-5, 5]),
ecvl.AugMirror(.5),
])
c.Apply(img)
# fromtext
txt = ('AugFlip p=0.2\n'
'AugMirror p=0.2\n'
'end')
f = ecvl.SequentialAugmentationContainer if ecvl is ecvl_core else \
ecvl.SequentialAugmentationContainer.fromtext
c = f(txt)
c.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_OneOfAugmentationContainer(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
# from list
c = ecvl.OneOfAugmentationContainer(0.7, [
ecvl.AugRotate([-5, 5]),
ecvl.AugMirror(.5),
])
c.Apply(img)
# fromtext
txt = ('p=0.7\n'
'AugFlip p=0.2\n'
'AugMirror p=0.2\n'
'end')
f = ecvl.OneOfAugmentationContainer if ecvl is ecvl_core else \
ecvl.OneOfAugmentationContainer.fromtext
c = f(txt)
c.Apply(img)
|
the-stack_0_9987 | import os
from torchnlp.download import download_file_maybe_extract
def wmt_dataset(directory='data/wmt16_en_de',
train=False,
dev=False,
test=False,
train_filename='train.tok.clean.bpe.32000',
dev_filename='newstest2013.tok.bpe.32000',
test_filename='newstest2014.tok.bpe.32000',
check_files=['train.tok.clean.bpe.32000.en'],
url='https://drive.google.com/uc?export=download&id=0B_bZck-ksdkpM25jRUN2X2UxMm8'):
"""
The Workshop on Machine Translation (WMT) 2014 English-German dataset.
Initially this dataset was preprocessed by Google Brain. Though this download contains test sets
from 2015 and 2016, the train set differs slightly from WMT 2015 and 2016 and significantly from
WMT 2017.
The provided data is mainly taken from version 7 of the Europarl corpus, which is freely
    available. Note that this is the same data as last year, since Europarl is no longer translated
across all 23 official European languages. Additional training data is taken from the new News
Commentary corpus. There are about 50 million words of training data per language from the
Europarl corpus and 3 million words from the News Commentary corpus.
A new data resource from 2013 is the Common Crawl corpus which was collected from web sources.
Each parallel corpus comes with a annotation file that gives the source of each sentence pair.
References:
* https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/data_generators/translate_ende.py # noqa: E501
* http://www.statmt.org/wmt14/translation-task.html
Args:
directory (str, optional): Directory to cache the dataset.
train (bool, optional): If to load the training split of the dataset.
dev (bool, optional): If to load the dev split of the dataset.
test (bool, optional): If to load the test split of the dataset.
train_filename (str, optional): The filename of the training split.
dev_filename (str, optional): The filename of the dev split.
test_filename (str, optional): The filename of the test split.
check_files (str, optional): Check if these files exist, then this download was successful.
url (str, optional): URL of the dataset `tar.gz` file.
Returns:
:class:`tuple` of :class:`iterable` or :class:`iterable`:
Returns between one and all dataset splits (train, dev and test) depending on if their
respective boolean argument is ``True``.
Example:
>>> from torchnlp.datasets import wmt_dataset # doctest: +SKIP
>>> train = wmt_dataset(train=True) # doctest: +SKIP
>>> train[:2] # doctest: +SKIP
[{
'en': 'Res@@ um@@ ption of the session',
'de': 'Wiederaufnahme der Sitzungsperiode'
}, {
'en': 'I declare resumed the session of the European Parliament ad@@ jour@@ ned on...'
'de': 'Ich erklär@@ e die am Freitag , dem 17. Dezember unterbro@@ ch@@ ene...'
}]
"""
download_file_maybe_extract(
url=url, directory=directory, check_files=check_files, filename='wmt16_en_de.tar.gz')
ret = []
splits = [(train, train_filename), (dev, dev_filename), (test, test_filename)]
splits = [f for (requested, f) in splits if requested]
for filename in splits:
examples = []
en_path = os.path.join(directory, filename + '.en')
de_path = os.path.join(directory, filename + '.de')
en_file = [l.strip() for l in open(en_path, 'r', encoding='utf-8')]
de_file = [l.strip() for l in open(de_path, 'r', encoding='utf-8')]
assert len(en_file) == len(de_file)
for i in range(len(en_file)):
if en_file[i] != '' and de_file[i] != '':
examples.append({'en': en_file[i], 'de': de_file[i]})
ret.append(examples)
if len(ret) == 1:
return ret[0]
else:
return tuple(ret)
|
the-stack_0_9991 | import sys
import gym.spaces
import itertools
import numpy as np
import random
import tensorflow as tf
import tensorflow.contrib.layers as layers
from collections import namedtuple
from dqn_utils import *
OptimizerSpec = namedtuple("OptimizerSpec", ["constructor", "kwargs", "lr_schedule"])
def learn(env,
q_func,
optimizer_spec,
session,
exploration=LinearSchedule(1000000, 0.1),
stopping_criterion=None,
replay_buffer_size=1000000,
batch_size=32,
gamma=0.99,
learning_starts=50000,
learning_freq=4,
frame_history_len=4,
target_update_freq=10000,
grad_norm_clipping=10):
"""Run Deep Q-learning algorithm.
You can specify your own convnet using q_func.
All schedules are w.r.t. total number of steps taken in the environment.
Parameters
----------
env: gym.Env
gym environment to train on.
q_func: function
Model to use for computing the q function. It should accept the
following named arguments:
img_in: tf.Tensor
tensorflow tensor representing the input image
num_actions: int
number of actions
scope: str
scope in which all the model related variables
should be created
reuse: bool
whether previously created variables should be reused.
optimizer_spec: OptimizerSpec
Specifying the constructor and kwargs, as well as learning rate schedule
for the optimizer
session: tf.Session
tensorflow session to use.
exploration: rl_algs.deepq.utils.schedules.Schedule
schedule for probability of chosing random action.
stopping_criterion: (env, t) -> bool
should return true when it's ok for the RL algorithm to stop.
takes in env and the number of steps executed so far.
replay_buffer_size: int
How many memories to store in the replay buffer.
batch_size: int
How many transitions to sample each time experience is replayed.
gamma: float
Discount Factor
learning_starts: int
After how many environment steps to start replaying experiences
learning_freq: int
How many steps of environment to take between every experience replay
frame_history_len: int
How many past frames to include as input to the model.
target_update_freq: int
How many experience replay rounds (not steps!) to perform between
each update to the target Q network
grad_norm_clipping: float or None
If not None gradients' norms are clipped to this value.
"""
assert type(env.observation_space) == gym.spaces.Box
assert type(env.action_space) == gym.spaces.Discrete
###############
# BUILD MODEL #
###############
if len(env.observation_space.shape) == 1:
# This means we are running on low-dimensional observations (e.g. RAM)
input_shape = env.observation_space.shape
else:
img_h, img_w, img_c = env.observation_space.shape
input_shape = (img_h, img_w, frame_history_len * img_c)
num_actions = env.action_space.n
# set up placeholders
# placeholder for current observation (or state)
obs_t_ph = tf.placeholder(tf.uint8, [None] + list(input_shape))
# placeholder for current action
act_t_ph = tf.placeholder(tf.int32, [None])
# placeholder for current reward
rew_t_ph = tf.placeholder(tf.float32, [None])
# placeholder for next observation (or state)
obs_tp1_ph = tf.placeholder(tf.uint8, [None] + list(input_shape))
# placeholder for end of episode mask
# this value is 1 if the next state corresponds to the end of an episode,
# in which case there is no Q-value at the next state; at the end of an
# episode, only the current state reward contributes to the target, not the
# next state Q-value (i.e. target is just rew_t_ph, not rew_t_ph + gamma * q_tp1)
done_mask_ph = tf.placeholder(tf.float32, [None])
# casting to float on GPU ensures lower data transfer times.
obs_t_float = tf.cast(obs_t_ph, tf.float32) / 255.0
obs_tp1_float = tf.cast(obs_tp1_ph, tf.float32) / 255.0
# Here, you should fill in your own code to compute the Bellman error. This requires
# evaluating the current and next Q-values and constructing the corresponding error.
# TensorFlow will differentiate this error for you, you just need to pass it to the
# optimizer. See assignment text for details.
# Your code should produce one scalar-valued tensor: total_error
# This will be passed to the optimizer in the provided code below.
# Your code should also produce two collections of variables:
# q_func_vars
# target_q_func_vars
# These should hold all of the variables of the Q-function network and target network,
# respectively. A convenient way to get these is to make use of TF's "scope" feature.
# For example, you can create your Q-function network with the scope "q_func" like this:
# <something> = q_func(obs_t_float, num_actions, scope="q_func", reuse=False)
# And then you can obtain the variables like this:
# q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='q_func')
# Older versions of TensorFlow may require using "VARIABLES" instead of "GLOBAL_VARIABLES"
######
# YOUR CODE HERE
######
    # Q(s, a) from the online network and Q_target(s', a') from the target network
    q_net = q_func(obs_t_float, num_actions, scope='q_func', reuse=False)
    target_q_net = q_func(obs_tp1_float, num_actions, scope='target_q_func', reuse=False)
    q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='q_func')
    target_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='target_q_func')
    # Q-value of the action actually taken at each transition
    one_hot_act = tf.one_hot(act_t_ph, depth=num_actions, dtype=tf.float32, name="action_one_hot")
    q_taken = tf.reduce_sum(one_hot_act * q_net, axis=1)
    # Bellman target r + gamma * max_a' Q_target(s', a'); do not bootstrap past episode ends
    q_target = rew_t_ph + (1.0 - done_mask_ph) * gamma * tf.reduce_max(target_q_net, axis=1)
    total_error = tf.reduce_mean(tf.square(q_target - q_taken))
# construct optimization op (with gradient clipping)
learning_rate = tf.placeholder(tf.float32, (), name="learning_rate")
optimizer = optimizer_spec.constructor(learning_rate=learning_rate, **optimizer_spec.kwargs)
train_fn = minimize_and_clip(optimizer, total_error,
var_list=q_func_vars, clip_val=grad_norm_clipping)
# update_target_fn will be called periodically to copy Q network to target Q network
update_target_fn = []
for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name),
sorted(target_q_func_vars, key=lambda v: v.name)):
update_target_fn.append(var_target.assign(var))
update_target_fn = tf.group(*update_target_fn)
# construct the replay buffer
replay_buffer = ReplayBuffer(replay_buffer_size, frame_history_len)
###############
# RUN ENV #
###############
model_initialized = False
num_param_updates = 0
mean_episode_reward = -float('nan')
best_mean_episode_reward = -float('inf')
last_obs = env.reset()
LOG_EVERY_N_STEPS = 10000
for t in itertools.count():
### 1. Check stopping criterion
if stopping_criterion is not None and stopping_criterion(env, t):
break
### 2. Step the env and store the transition
# At this point, "last_obs" contains the latest observation that was
# recorded from the simulator. Here, your code needs to store this
# observation and its outcome (reward, next observation, etc.) into
# the replay buffer while stepping the simulator forward one step.
# At the end of this block of code, the simulator should have been
# advanced one step, and the replay buffer should contain one more
# transition.
# Specifically, last_obs must point to the new latest observation.
# Useful functions you'll need to call:
# obs, reward, done, info = env.step(action)
# this steps the environment forward one step
# obs = env.reset()
# this resets the environment if you reached an episode boundary.
# Don't forget to call env.reset() to get a new observation if done
# is true!!
# Note that you cannot use "last_obs" directly as input
# into your network, since it needs to be processed to include context
# from previous frames. You should check out the replay buffer
# implementation in dqn_utils.py to see what functionality the replay
# buffer exposes. The replay buffer has a function called
# encode_recent_observation that will take the latest observation
# that you pushed into the buffer and compute the corresponding
# input that should be given to a Q network by appending some
# previous frames.
# Don't forget to include epsilon greedy exploration!
# And remember that the first time you enter this loop, the model
# may not yet have been initialized (but of course, the first step
# might as well be random, since you haven't trained your net...)
#####
# YOUR CODE HERE
#####
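        # Store the raw frame, then act epsilon-greedily: with probability
        # (1 - eps) take the argmax action of the current Q-network on the
        # frame-stacked observation, otherwise sample a random action.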
idx = replay_buffer.store_frame(last_obs)
eps = exploration.value(t)
if model_initialized and random.random() > eps:
q_input = replay_buffer.encode_recent_observation()
q_input = np.expand_dims(q_input,axis=0)
action = np.argmax(session.run(q_net,feed_dict = {obs_t_float:q_input}))
else:
action = env.action_space.sample() # exploration
last_obs, reward, done, info = env.step(action)
replay_buffer.store_effect(idx, action, reward, done)
if done:
last_obs = env.reset()
# at this point, the environment should have been advanced one step (and
# reset if done was true), and last_obs should point to the new latest
# observation
### 3. Perform experience replay and train the network.
# note that this is only done if the replay buffer contains enough samples
# for us to learn something useful -- until then, the model will not be
# initialized and random actions should be taken
if (t > learning_starts and
t % learning_freq == 0 and
replay_buffer.can_sample(batch_size)):
# Here, you should perform training. Training consists of four steps:
# 3.a: use the replay buffer to sample a batch of transitions (see the
# replay buffer code for function definition, each batch that you sample
# should consist of current observations, current actions, rewards,
# next observations, and done indicator).
# 3.b: initialize the model if it has not been initialized yet; to do
# that, call
# initialize_interdependent_variables(session, tf.global_variables(), {
# obs_t_ph: obs_t_batch,
# obs_tp1_ph: obs_tp1_batch,
# })
# where obs_t_batch and obs_tp1_batch are the batches of observations at
# the current and next time step. The boolean variable model_initialized
# indicates whether or not the model has been initialized.
# Remember that you have to update the target network too (see 3.d)!
# 3.c: train the model. To do this, you'll need to use the train_fn and
# total_error ops that were created earlier: total_error is what you
# created to compute the total Bellman error in a batch, and train_fn
# will actually perform a gradient step and update the network parameters
# to reduce total_error. When calling session.run on these you'll need to
# populate the following placeholders:
# obs_t_ph
# act_t_ph
# rew_t_ph
# obs_tp1_ph
# done_mask_ph
# (this is needed for computing total_error)
# learning_rate -- you can get this from optimizer_spec.lr_schedule.value(t)
# (this is needed by the optimizer to choose the learning rate)
# 3.d: periodically update the target network by calling
# session.run(update_target_fn)
# you should update every target_update_freq steps, and you may find the
# variable num_param_updates useful for this (it was initialized to 0)
#####
# YOUR CODE HERE
#####
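            # Sample a minibatch, lazily initialize the variables on the first
            # update, take one gradient step on the Bellman error, and
            # periodically copy the online weights into the target network.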
obs_t_batch, act_batch, rew_batch, obs_tp1_batch, done_mask = replay_buffer.sample(batch_size)
if not model_initialized:
initialize_interdependent_variables(session, tf.global_variables(), {
obs_t_ph: obs_t_batch,
obs_tp1_ph: obs_tp1_batch,
})
model_initialized = True
session.run(train_fn, feed_dict={
obs_t_ph:obs_t_batch,
act_t_ph:act_batch,
rew_t_ph:rew_batch,
obs_tp1_ph:obs_tp1_batch,
done_mask_ph:done_mask,
learning_rate:optimizer_spec.lr_schedule.value(t)})
            num_param_updates += 1
            if num_param_updates % target_update_freq == 0:
                session.run(update_target_fn)
### 4. Log progress
episode_rewards = get_wrapper_by_name(env, "Monitor").get_episode_rewards()
if len(episode_rewards) > 0:
mean_episode_reward = np.mean(episode_rewards[-100:])
if len(episode_rewards) > 100:
best_mean_episode_reward = max(best_mean_episode_reward, mean_episode_reward)
if t % LOG_EVERY_N_STEPS == 0 and model_initialized:
print("Timestep %d" % (t,))
print("mean reward (100 episodes) %f" % mean_episode_reward)
print("best mean reward %f" % best_mean_episode_reward)
print("episodes %d" % len(episode_rewards))
print("exploration %f" % exploration.value(t))
print("learning_rate %f" % optimizer_spec.lr_schedule.value(t))
sys.stdout.flush()
|
the-stack_0_9992 | # -*- coding: utf-8 -*-
"""
Module with logic for the Environment sub-process
"""
__author__ = 'Samir Adrik'
__email__ = '[email protected]'
from source.util import Assertor, Tracking, Debugger
from .finn_environment_process import FinnEnvironmentProcess
from .engine import SubModel
class FinnEnvironmentSubModel(SubModel):
"""
Implementation of Handler for Environmental statistics
"""
@Tracking
def __init__(self, environmental_data: dict):
"""
Constructor / Instantiate the class.
Parameters
----------
environmental_data : dict
dict with family statistics
"""
Assertor.assert_data_types([environmental_data], [dict])
self.name = FinnEnvironmentSubModel.__name__
super().__init__(name=self.name, desc="Processing Finn Environmental Statistics")
self.environmental_data = environmental_data
@Debugger
def run(self):
"""
method for running the environmental data sub model
"""
environment_process = FinnEnvironmentProcess(self.environmental_data)
return environment_process.environment_statistics
|
the-stack_0_9993 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
http://developer.openstack.org/api-ref-identity-v3.html#groups-v3
"""
from oslo_serialization import jsonutils as json
from tempest.lib.common import rest_client
class GroupsClient(rest_client.RestClient):
api_version = "v3"
def create_group(self, **kwargs):
"""Creates a group.
Available params: see http://developer.openstack.org/
api-ref-identity-v3.html#createGroup
"""
post_body = json.dumps({'group': kwargs})
resp, body = self.post('groups', post_body)
self.expected_success(201, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def show_group(self, group_id):
"""Get group details."""
resp, body = self.get('groups/%s' % group_id)
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def list_groups(self):
"""Lists the groups."""
resp, body = self.get('groups')
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def update_group(self, group_id, **kwargs):
"""Updates a group.
Available params: see http://developer.openstack.org/
api-ref-identity-v3.html#updateGroup
"""
post_body = json.dumps({'group': kwargs})
resp, body = self.patch('groups/%s' % group_id, post_body)
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def delete_group(self, group_id):
"""Delete a group."""
resp, body = self.delete('groups/%s' % str(group_id))
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp, body)
def add_group_user(self, group_id, user_id):
"""Add user into group."""
resp, body = self.put('groups/%s/users/%s' % (group_id, user_id),
None)
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp, body)
def list_group_users(self, group_id):
"""List users in group."""
resp, body = self.get('groups/%s/users' % group_id)
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def delete_group_user(self, group_id, user_id):
"""Delete user in group."""
resp, body = self.delete('groups/%s/users/%s' % (group_id, user_id))
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp, body)
def check_group_user_existence(self, group_id, user_id):
"""Check user in group."""
resp, body = self.head('groups/%s/users/%s' % (group_id, user_id))
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp)
|
the-stack_0_9994 |
import sys
import inspect
from scenic import scenarioFromString
from scenic.core.simulators import DummySimulator, RejectSimulationException
import scenic.syntax.veneer as veneer
## Scene generation utilities
# Compilation
def compileScenic(code, removeIndentation=True, scenario=None):
if removeIndentation:
# to allow indenting code to line up with test function
code = inspect.cleandoc(code)
checkVeneerIsInactive()
scenario = scenarioFromString(code, scenario=scenario)
checkVeneerIsInactive()
return scenario
# Static scenes
def sampleScene(scenario, maxIterations=1):
return generateChecked(scenario, maxIterations)[0]
def sampleSceneFrom(code, maxIterations=1, scenario=None):
scenario = compileScenic(code, scenario=scenario)
return sampleScene(scenario, maxIterations=maxIterations)
def sampleEgo(scenario, maxIterations=1):
scene, iterations = generateChecked(scenario, maxIterations)
return scene.egoObject
def sampleEgoFrom(code, maxIterations=1):
scenario = compileScenic(code)
return sampleEgo(scenario, maxIterations=maxIterations)
def sampleParamP(scenario, maxIterations=1):
scene, iterations = generateChecked(scenario, maxIterations)
return scene.params['p']
def sampleParamPFrom(code, maxIterations=1):
scenario = compileScenic(code)
return sampleParamP(scenario, maxIterations=maxIterations)
# Dynamic simulations
def sampleEgoActions(scenario, maxIterations=1, maxSteps=1, maxScenes=1,
singleAction=True, timestep=1):
allActions = sampleActions(scenario, maxIterations, maxSteps, maxScenes,
singleAction, asMapping=False, timestep=timestep)
return [actions[0] for actions in allActions]
def sampleEgoActionsFromScene(scene, maxIterations=1, maxSteps=1, singleAction=True, timestep=1):
allActions = sampleActionsFromScene(scene, maxIterations=maxIterations, maxSteps=maxSteps,
singleAction=singleAction, asMapping=False,
timestep=timestep)
if allActions is None:
return None
return [actions[0] for actions in allActions]
def sampleActions(scenario, maxIterations=1, maxSteps=1, maxScenes=1,
singleAction=True, asMapping=False, timestep=1):
for i in range(maxScenes):
scene, iterations = generateChecked(scenario, maxIterations)
actions = sampleActionsFromScene(scene, maxIterations=maxIterations, maxSteps=maxSteps,
singleAction=singleAction, asMapping=asMapping,
timestep=timestep)
if actions is not None:
return actions
raise RejectSimulationException(
f'unable to find successful simulation over {maxScenes} scenes')
def sampleActionsFromScene(scene, maxIterations=1, maxSteps=1,
singleAction=True, asMapping=False, timestep=1):
sim = DummySimulator(timestep=timestep)
simulation = sim.simulate(scene, maxSteps=maxSteps, maxIterations=maxIterations)
if not simulation:
return None
actionSequence = simulation.result.actions
if singleAction:
for i, allActions in enumerate(actionSequence):
for agent, actions in allActions.items():
assert len(actions) <= 1
allActions[agent] = actions[0] if actions else None
if asMapping:
return actionSequence
else:
return [tuple(actions.values()) for actions in actionSequence]
def sampleTrajectory(scenario, maxIterations=1, maxSteps=1, maxScenes=1,
raiseGuardViolations=False):
for i in range(maxScenes):
scene, iterations = generateChecked(scenario, maxIterations)
trajectory = sampleTrajectoryFromScene(scene, maxIterations=maxIterations,
maxSteps=maxSteps,
raiseGuardViolations=raiseGuardViolations)
if trajectory is not None:
return trajectory
raise RejectSimulationException(
f'unable to find successful simulation over {maxScenes} scenes')
def sampleResult(scenario, maxIterations=1, maxSteps=1, maxScenes=1):
for i in range(maxScenes):
scene, iterations = generateChecked(scenario, maxIterations)
result = sampleResultFromScene(scene, maxIterations=maxIterations,
maxSteps=maxSteps)
if result is not None:
return result
raise RejectSimulationException(
f'unable to find successful simulation over {maxScenes} scenes')
def sampleResultFromScene(scene, maxIterations=1, maxSteps=1, raiseGuardViolations=False):
sim = DummySimulator(timestep=1)
simulation = sim.simulate(scene, maxSteps=maxSteps, maxIterations=maxIterations,
raiseGuardViolations=raiseGuardViolations)
if not simulation:
return None
return simulation.result
def sampleTrajectoryFromScene(scene, maxIterations=1, maxSteps=1, raiseGuardViolations=False):
result = sampleResultFromScene(scene, maxIterations=maxIterations, maxSteps=maxSteps,
raiseGuardViolations=raiseGuardViolations)
if not result:
return None
return result.trajectory
# Helpers
def generateChecked(scenario, maxIterations):
checkVeneerIsInactive()
scene, iterations = scenario.generate(maxIterations=maxIterations)
checkVeneerIsInactive()
return scene, iterations
def checkVeneerIsInactive():
assert veneer.activity == 0
assert not veneer.scenarioStack
assert not veneer.currentScenario
assert not veneer.evaluatingRequirement
assert not veneer.evaluatingGuard
assert not veneer.scenarios
assert not veneer._globalParameters
assert not veneer.lockedParameters
assert not veneer.lockedModel
assert not veneer.currentSimulation
assert not veneer.currentBehavior
## Error checking utilities
def checkErrorLineNumber(line, exc_info=None):
if exc_info is None:
tb = sys.exc_info()[2]
else:
tb = exc_info.tb
while tb.tb_next is not None:
tb = tb.tb_next
assert tb.tb_lineno == line
|
the-stack_0_9995 | # Created by Ilia
# https://www.tensorflow.org/tutorials/keras/regression#the_auto_mpg_dataset
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental import preprocessing
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data'
column_names = ['MPG', 'Cylinders', 'Displacement', 'Horsepower', 'Weight',
'Acceleration', 'Model Year', 'Origin']
raw_dataset = pd.read_csv(url, names=column_names,
na_values='?', comment='\t',
sep=' ', skipinitialspace=True)
dataset = raw_dataset.copy()
print(dataset.tail())
dataset = dataset.dropna()
dataset['Origin'] = dataset['Origin'].map({1: 'USA', 2: 'Europe', 3: 'Japan'})
dataset = pd.get_dummies(dataset, prefix='', prefix_sep='')
print(dataset.tail())
train_dataset = dataset.sample(frac=0.8, random_state=0)
test_dataset = dataset.drop(train_dataset.index)
train_features = train_dataset.copy()
test_features = test_dataset.copy()
train_labels = train_features.pop('MPG')
test_labels = test_features.pop('MPG')
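# Adapt a Normalization layer to the training features so the model
# standardizes its inputs with the training-set mean and variance.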
normalizer = preprocessing.Normalization()
normalizer.adapt(np.array(train_features))
def build_and_compile_model(norm):
model = keras.Sequential([
norm,
layers.Dense(64, activation='relu'),
layers.Dense(64, activation='relu'),
layers.Dense(1)
])
model.compile(loss='mean_absolute_error',
optimizer=tf.keras.optimizers.Adam(0.001))
return model
dnn_model = build_and_compile_model(normalizer)
dnn_model.summary()
history = dnn_model.fit(
train_features, train_labels,
validation_split=0.2,
verbose=0, epochs=100)
def plot_loss(history):
plt.plot(history.history['loss'], label='loss')
plt.plot(history.history['val_loss'], label='val_loss')
plt.ylim([0, 10])
plt.xlabel('Epoch')
plt.ylabel('Error [MPG]')
plt.legend()
plt.grid(True)
plot_loss(history)
plt.show()
dnn_model.evaluate(test_features, test_labels, verbose=0)
# Make Predictions
test_predictions = dnn_model.predict(test_features).flatten()
a = plt.axes(aspect='equal')
plt.scatter(test_labels, test_predictions)
plt.xlabel('True Values [MPG]')
plt.ylabel('Predictions [MPG]')
lims = [0, 50]
plt.xlim(lims)
plt.ylim(lims)
_ = plt.plot(lims, lims)
plt.show()
# Error distribution
error = test_predictions - test_labels
plt.hist(error, bins=25)
plt.xlabel('Prediction Error [MPG]')
_ = plt.ylabel('Count')
plt.show()
# Save model
# dnn_model.save('dnn_model')
# Load model
# reloaded = tf.keras.models.load_model('dnn_model')
|
the-stack_0_10001 | import base64
import io
import logging
import os
import numpy as np
import torch
from PIL import Image
from torch.autograd import Variable
from torchvision import transforms
logger = logging.getLogger(__name__)
class DIYSegmentation:
"""
DIYSegmentation handler class.
"""
def __init__(self):
self.model = None
self.mapping = None
self.device = None
self.initialized = False
def initialize(self, ctx):
"""
load eager mode state_dict based model
"""
properties = ctx.system_properties
self.device = torch.device(
"cuda:" + str(properties.get("gpu_id"))
if torch.cuda.is_available()
else "cpu"
)
logger.info(f"Device on initialization is: {self.device}")
model_dir = properties.get("model_dir")
manifest = ctx.manifest
logger.error(manifest)
serialized_file = manifest["model"]["serializedFile"]
model_pt_path = os.path.join(model_dir, serialized_file)
if not os.path.isfile(model_pt_path):
raise RuntimeError("Missing the model definition file")
logger.debug(model_pt_path)
from model import DynamicUnetDIY
state_dict = torch.load(model_pt_path, map_location=self.device)
self.model = DynamicUnetDIY()
self.model.load_state_dict(state_dict)
self.model.to(self.device)
self.model.eval()
logger.debug("Model file {0} loaded successfully".format(model_pt_path))
self.initialized = True
def preprocess(self, data):
"""
Scales and normalizes a PIL image for an U-net model
"""
image = data[0].get("data")
if image is None:
image = data[0].get("body")
image_transform = transforms.Compose(
[
# must be consistent with model training
transforms.Resize((96, 128)),
transforms.ToTensor(),
# default statistics from imagenet
transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
),
]
)
image = Image.open(io.BytesIO(image)).convert(
"RGB"
) # in case of an alpha channel
image = image_transform(image).unsqueeze_(0)
return image
def inference(self, img):
"""
Predict the chip stack mask of an image using a trained deep learning model.
"""
logger.info(f"Device on inference is: {self.device}")
self.model.eval()
inputs = Variable(img).to(self.device)
outputs = self.model.forward(inputs)
logging.debug(outputs.shape)
return outputs
def postprocess(self, inference_output):
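        # Take the per-pixel argmax over classes and return the predicted mask
        # as a base64-encoded uint8 byte string.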
if torch.cuda.is_available():
inference_output = inference_output[0].argmax(dim=0).cpu()
else:
inference_output = inference_output[0].argmax(dim=0)
return [
{
"base64_prediction": base64.b64encode(
inference_output.numpy().astype(np.uint8)
).decode("utf-8")
}
]
_service = DIYSegmentation()
def handle(data, context):
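    # Entry point invoked by the serving framework: lazily initialize the
    # handler, then run preprocess -> inference -> postprocess on the request.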
if not _service.initialized:
_service.initialize(context)
if data is None:
return None
data = _service.preprocess(data)
data = _service.inference(data)
data = _service.postprocess(data)
return data
|
the-stack_0_10002 |
import os
from zipfile import ZipFile
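# Merge the span predictions of two systems by taking, for each line, the union
# of their predicted character offsets and writing the sorted result to a
# combined predictions file, which is then added to spans-pred.zip.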
bag_of_words = open('spans-pred_charbert.txt', 'r')
charBert = open('40_0.4_spans-pred.txt', 'r').readlines()
zipObj = ZipFile('spans-pred.zip', 'w')
def charList_to_intList(line):
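    # Parse a prediction line of the form "<id>\t[o1, o2, ...]" into a list of ints.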
line = line.split('\t')
line = line[1][1:-1].split(' ')
span = []
for elem in line:
if len(elem) >= 2:
span.append(int(elem[:-1]))
return span
with open('spans-pred.txt', 'w') as combined_preds:
for i, line in enumerate(bag_of_words):
spans = []
span1 = charList_to_intList(line)
span2 = charList_to_intList(charBert[i])
for elem in span1:
spans.append(elem)
for elem in span2:
spans.append(elem)
spans = sorted(set(spans))
combined_preds.write(str(i) + '\t' + str(spans) + '\n')
zipObj.write('spans-pred.txt')
# os.remove('spans-pred.txt')
|
the-stack_0_10003 | # To test a single translator use the -k parameter followed by either
# timescale or crate.
# See https://docs.pytest.org/en/stable/example/parametrize.html
from datetime import datetime
from conftest import crate_translator, timescale_translator
from utils.common import TIME_INDEX_NAME
from utils.tests.common import create_random_entities
import pytest
translators = [
pytest.lazy_fixture('crate_translator'),
pytest.lazy_fixture('timescale_translator')
]
@pytest.mark.parametrize("translator", translators, ids=["crate", "timescale"])
def test_delete_entity_defaults(translator):
num_types = 2
num_ids_per_type = 2
num_updates = 5
entities = create_random_entities(num_types, num_ids_per_type, num_updates)
translator.insert(entities)
deleted_type = entities[0]['type']
deleted_id = entities[0]['id']
total = translator.query()
assert len(total) == num_types * num_ids_per_type
selected = translator.query(entity_type=deleted_type, entity_id=deleted_id)
assert len(selected[0]['index']) == num_updates
n_deleted = translator.delete_entity(deleted_id, entity_type=deleted_type)
assert n_deleted == num_updates
remaining = translator.query()
assert len(remaining) == (len(total) - 1)
survivors = translator.query(
entity_type=deleted_type, entity_id=deleted_id)
assert len(survivors) == 0
translator.clean()
@pytest.mark.parametrize("translator", translators, ids=["crate", "timescale"])
def test_delete_entity_customs(translator):
entities = create_random_entities(num_types=1,
num_ids_per_type=2,
num_updates=10)
for i, e in enumerate(entities):
t = datetime(2018, 1, 1 + i).isoformat(timespec='milliseconds')
e[TIME_INDEX_NAME] = t
translator.insert(entities)
deleted_type = entities[-1]['type']
deleted_id = entities[-1]['id']
res = translator.delete_entity(entity_id=deleted_id,
entity_type=deleted_type,
from_date=datetime(2018, 1, 8).isoformat(),
to_date=datetime(2018, 1, 16).isoformat())
assert res == 5
affected = translator.query(entity_id=deleted_id, entity_type=deleted_type)
assert len(affected) == 1
affected = affected[0]
assert affected['id'] == deleted_id
assert affected['type'] == deleted_type
assert len(affected['index']) == 10 - 5
res = translator.query(entity_type=deleted_type)
assert len(res) == 2
unaffected = res[0] if res[0]['id'] != deleted_id else res[1]
assert unaffected['id'] != deleted_id
assert unaffected['type'] == deleted_type
assert len(unaffected['index']) == 10
translator.clean()
@pytest.mark.parametrize("translator", translators, ids=["crate", "timescale"])
def test_delete_entity_with_tenancy(translator):
entities = create_random_entities(num_types=2,
num_ids_per_type=2,
num_updates=5)
fs = 'fs'
fsp = 'fsp'
translator.insert(entities, fiware_service=fs, fiware_servicepath=fsp)
to_delete = entities[0]
deleted_type = to_delete['type']
deleted_id = to_delete['id']
# No fs nor fsp -> no deletion
res = translator.delete_entity(deleted_id, entity_type=deleted_type)
assert res == 0
# No fsp -> no deletion
res = translator.delete_entity(deleted_id,
entity_type=deleted_type,
fiware_service=fs)
assert res == 0
# Matching fs & fsp -> deletion
res = translator.delete_entity(deleted_id,
entity_type=deleted_type,
fiware_service=fs,
fiware_servicepath=fsp)
assert res == 5
translator.clean(fs)
@pytest.mark.parametrize("translator", translators, ids=["crate", "timescale"])
def test_delete_entities_defaults(translator):
entities = create_random_entities(num_types=3,
num_ids_per_type=2,
num_updates=20)
translator.insert(entities)
type_to_delete = entities[0]['type']
res = translator.delete_entities(type_to_delete)
assert res == 20 * 2
remaining = translator.query()
assert len(remaining) == (3 - 1) * 2
assert all([r['type'] != type_to_delete for r in remaining])
translator.clean()
@pytest.mark.parametrize("translator", translators, ids=["crate", "timescale"])
def test_delete_entities_customs(translator):
entities = create_random_entities(num_types=4,
num_ids_per_type=1,
num_updates=4)
for i, e in enumerate(entities):
time_index = datetime(2018, 1, 1 + i).isoformat()[:-3]
e[TIME_INDEX_NAME] = time_index
translator.insert(entities)
type_to_delete = entities[-1]['type']
res = translator.delete_entities(type_to_delete,
from_date=datetime(
2018, 1, 4).isoformat(),
to_date=datetime(2018, 1, 12).isoformat())
assert res == 3
remaining = translator.query()
assert sum([len(r['index']) for r in remaining]) == ((4 * 4) - 3)
translator.clean()
@pytest.mark.parametrize("translator", translators, ids=["crate", "timescale"])
def test_delete_entities_with_tenancy(translator):
fs = 'fs'
fsp = 'fsp'
entities = create_random_entities(num_types=3,
num_ids_per_type=1,
num_updates=10)
translator.insert(entities, fiware_service=fs, fiware_servicepath=fsp)
type_to_delete = entities[0]['type']
res = translator.delete_entities(type_to_delete)
assert res == 0
res = translator.delete_entities(type_to_delete,
fiware_service=fs,
fiware_servicepath='another/path')
assert res == 0
res = translator.delete_entities(type_to_delete,
fiware_service=fs,
fiware_servicepath=fsp)
assert res == 10
translator.clean(fs)
|
the-stack_0_10004 | import random
print("BEM VINDO AO JOGO DO PARA OU IMPAR")
print("--"*15)
vit = 0
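# Even-or-odd game: the player types a number and chooses Par (even) or Impar
# (odd); the computer draws PAR or IMPAR at random (the typed number itself does
# not affect the result). Consecutive wins ('vit') are counted until a loss.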
while True:
palpite = int(input('Diga um valor entre zero e 9: '))
jogador = ''
while jogador not in ['P', 'I']:
jogador = str(input('Quer Par ou Impar? ')).strip().upper()[0]
jogada = random.choice(['PAR','IMPAR'])
if (jogador == 'P' and jogada == 'PAR') or (jogador == 'I' and jogada == 'IMPAR'):
print("Saiu {}. Você Venceu!! Vamos novamente.".format(jogada))
vit += 1
else:
print("Saiu {}. Você Perdeu!!".format(jogada))
break
print("Você teve {} vitórias consecutivas.".format(vit) if vit > 0 else "Infelizmente você não teve vitórias desta vez.")
|
the-stack_0_10005 | """The dhcp integration."""
from abc import abstractmethod
from datetime import timedelta
import fnmatch
from ipaddress import ip_address as make_ip_address
import logging
import os
import threading
from aiodiscover import DiscoverHosts
from aiodiscover.discovery import (
HOSTNAME as DISCOVERY_HOSTNAME,
IP_ADDRESS as DISCOVERY_IP_ADDRESS,
MAC_ADDRESS as DISCOVERY_MAC_ADDRESS,
)
from scapy.arch.common import compile_filter
from scapy.config import conf
from scapy.error import Scapy_Exception
from scapy.layers.dhcp import DHCP
from scapy.layers.inet import IP
from scapy.layers.l2 import Ether
from scapy.sendrecv import AsyncSniffer
from homeassistant.components.device_tracker.const import (
ATTR_HOST_NAME,
ATTR_IP,
ATTR_MAC,
ATTR_SOURCE_TYPE,
DOMAIN as DEVICE_TRACKER_DOMAIN,
SOURCE_TYPE_ROUTER,
)
from homeassistant.const import (
EVENT_HOMEASSISTANT_STARTED,
EVENT_HOMEASSISTANT_STOP,
STATE_HOME,
)
from homeassistant.core import Event, HomeAssistant, State, callback
from homeassistant.helpers.device_registry import format_mac
from homeassistant.helpers.event import (
async_track_state_added_domain,
async_track_time_interval,
)
from homeassistant.helpers.typing import ConfigType
from homeassistant.loader import async_get_dhcp
from homeassistant.util.network import is_invalid, is_link_local, is_loopback
from .const import DOMAIN
FILTER = "udp and (port 67 or 68)"
REQUESTED_ADDR = "requested_addr"
MESSAGE_TYPE = "message-type"
HOSTNAME = "hostname"
MAC_ADDRESS = "macaddress"
IP_ADDRESS = "ip"
DHCP_REQUEST = 3
SCAN_INTERVAL = timedelta(minutes=60)
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the dhcp component."""
async def _initialize(_):
address_data = {}
integration_matchers = await async_get_dhcp(hass)
watchers = []
for cls in (DHCPWatcher, DeviceTrackerWatcher, NetworkWatcher):
watcher = cls(hass, address_data, integration_matchers)
await watcher.async_start()
watchers.append(watcher)
async def _async_stop(*_):
for watcher in watchers:
await watcher.async_stop()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _async_stop)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STARTED, _initialize)
return True
class WatcherBase:
"""Base class for dhcp and device tracker watching."""
def __init__(self, hass, address_data, integration_matchers):
"""Initialize class."""
super().__init__()
self.hass = hass
self._integration_matchers = integration_matchers
self._address_data = address_data
def process_client(self, ip_address, hostname, mac_address):
"""Process a client."""
made_ip_address = make_ip_address(ip_address)
if (
is_link_local(made_ip_address)
or is_loopback(made_ip_address)
or is_invalid(made_ip_address)
):
# Ignore self assigned addresses, loopback, invalid
return
data = self._address_data.get(ip_address)
if (
data
and data[MAC_ADDRESS] == mac_address
and data[HOSTNAME].startswith(hostname)
):
# If the address data is the same no need
# to process it
return
self._address_data[ip_address] = {MAC_ADDRESS: mac_address, HOSTNAME: hostname}
self.process_updated_address_data(ip_address, self._address_data[ip_address])
def process_updated_address_data(self, ip_address, data):
"""Process the address data update."""
lowercase_hostname = data[HOSTNAME].lower()
uppercase_mac = data[MAC_ADDRESS].upper()
_LOGGER.debug(
"Processing updated address data for %s: mac=%s hostname=%s",
ip_address,
uppercase_mac,
lowercase_hostname,
)
for entry in self._integration_matchers:
if MAC_ADDRESS in entry and not fnmatch.fnmatch(
uppercase_mac, entry[MAC_ADDRESS]
):
continue
if HOSTNAME in entry and not fnmatch.fnmatch(
lowercase_hostname, entry[HOSTNAME]
):
continue
_LOGGER.debug("Matched %s against %s", data, entry)
self.create_task(
self.hass.config_entries.flow.async_init(
entry["domain"],
context={"source": DOMAIN},
data={
IP_ADDRESS: ip_address,
HOSTNAME: lowercase_hostname,
MAC_ADDRESS: data[MAC_ADDRESS],
},
)
)
@abstractmethod
def create_task(self, task):
"""Pass a task to async_add_task based on which context we are in."""
class NetworkWatcher(WatcherBase):
"""Class to query ptr records routers."""
def __init__(self, hass, address_data, integration_matchers):
"""Initialize class."""
super().__init__(hass, address_data, integration_matchers)
self._unsub = None
self._discover_hosts = None
self._discover_task = None
async def async_stop(self):
"""Stop scanning for new devices on the network."""
if self._unsub:
self._unsub()
self._unsub = None
if self._discover_task:
self._discover_task.cancel()
self._discover_task = None
async def async_start(self):
"""Start scanning for new devices on the network."""
self._discover_hosts = DiscoverHosts()
self._unsub = async_track_time_interval(
self.hass, self.async_start_discover, SCAN_INTERVAL
)
self.async_start_discover()
@callback
def async_start_discover(self, *_):
"""Start a new discovery task if one is not running."""
if self._discover_task and not self._discover_task.done():
return
self._discover_task = self.create_task(self.async_discover())
async def async_discover(self):
"""Process discovery."""
for host in await self._discover_hosts.async_discover():
self.process_client(
host[DISCOVERY_IP_ADDRESS],
host[DISCOVERY_HOSTNAME],
_format_mac(host[DISCOVERY_MAC_ADDRESS]),
)
def create_task(self, task):
"""Pass a task to async_create_task since we are in async context."""
return self.hass.async_create_task(task)
class DeviceTrackerWatcher(WatcherBase):
"""Class to watch dhcp data from routers."""
def __init__(self, hass, address_data, integration_matchers):
"""Initialize class."""
super().__init__(hass, address_data, integration_matchers)
self._unsub = None
async def async_stop(self):
"""Stop watching for new device trackers."""
if self._unsub:
self._unsub()
self._unsub = None
async def async_start(self):
"""Stop watching for new device trackers."""
self._unsub = async_track_state_added_domain(
self.hass, [DEVICE_TRACKER_DOMAIN], self._async_process_device_event
)
for state in self.hass.states.async_all(DEVICE_TRACKER_DOMAIN):
self._async_process_device_state(state)
@callback
def _async_process_device_event(self, event: Event):
"""Process a device tracker state change event."""
self._async_process_device_state(event.data.get("new_state"))
@callback
def _async_process_device_state(self, state: State):
"""Process a device tracker state."""
if state.state != STATE_HOME:
return
attributes = state.attributes
if attributes.get(ATTR_SOURCE_TYPE) != SOURCE_TYPE_ROUTER:
return
ip_address = attributes.get(ATTR_IP)
hostname = attributes.get(ATTR_HOST_NAME, "")
mac_address = attributes.get(ATTR_MAC)
if ip_address is None or mac_address is None:
return
self.process_client(ip_address, hostname, _format_mac(mac_address))
def create_task(self, task):
"""Pass a task to async_create_task since we are in async context."""
return self.hass.async_create_task(task)
class DHCPWatcher(WatcherBase):
"""Class to watch dhcp requests."""
def __init__(self, hass, address_data, integration_matchers):
"""Initialize class."""
super().__init__(hass, address_data, integration_matchers)
self._sniffer = None
self._started = threading.Event()
async def async_stop(self):
"""Stop watching for new device trackers."""
await self.hass.async_add_executor_job(self._stop)
def _stop(self):
"""Stop the thread."""
if self._started.is_set():
self._sniffer.stop()
async def async_start(self):
"""Start watching for dhcp packets."""
# disable scapy promiscuous mode as we do not need it
conf.sniff_promisc = 0
try:
await self.hass.async_add_executor_job(_verify_l2socket_setup, FILTER)
except (Scapy_Exception, OSError) as ex:
if os.geteuid() == 0:
_LOGGER.error("Cannot watch for dhcp packets: %s", ex)
else:
_LOGGER.debug(
"Cannot watch for dhcp packets without root or CAP_NET_RAW: %s", ex
)
return
try:
await self.hass.async_add_executor_job(_verify_working_pcap, FILTER)
except (Scapy_Exception, ImportError) as ex:
_LOGGER.error(
"Cannot watch for dhcp packets without a functional packet filter: %s",
ex,
)
return
self._sniffer = AsyncSniffer(
filter=FILTER,
started_callback=self._started.set,
prn=self.handle_dhcp_packet,
store=0,
)
self._sniffer.start()
if self._sniffer.thread:
self._sniffer.thread.name = self.__class__.__name__
def handle_dhcp_packet(self, packet):
"""Process a dhcp packet."""
if DHCP not in packet:
return
options = packet[DHCP].options
request_type = _decode_dhcp_option(options, MESSAGE_TYPE)
if request_type != DHCP_REQUEST:
            # Not a DHCP request; nothing to process
return
ip_address = _decode_dhcp_option(options, REQUESTED_ADDR) or packet[IP].src
hostname = _decode_dhcp_option(options, HOSTNAME) or ""
mac_address = _format_mac(packet[Ether].src)
if ip_address is None or mac_address is None:
return
self.process_client(ip_address, hostname, mac_address)
def create_task(self, task):
"""Pass a task to hass.add_job since we are in a thread."""
return self.hass.add_job(task)
def _decode_dhcp_option(dhcp_options, key):
"""Extract and decode data from a packet option."""
for option in dhcp_options:
if len(option) < 2 or option[0] != key:
continue
value = option[1]
if value is None or key != HOSTNAME:
return value
# hostname is unicode
try:
return value.decode()
except (AttributeError, UnicodeDecodeError):
return None
def _format_mac(mac_address):
"""Format a mac address for matching."""
return format_mac(mac_address).replace(":", "")
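# Illustrative sketch (the option names and values below are assumptions, not
# taken from the original source): scapy exposes DHCP options as a list of
# (name, value) tuples terminated by marker strings, and format_mac is assumed
# to normalise MACs to lowercase colon-separated form before the colons are
# stripped above.
#
#   options = [("message-type", 3), ("requested_addr", "192.168.1.50"),
#              ("hostname", b"mylaptop"), "end"]
#   _decode_dhcp_option(options, "requested_addr")   # -> "192.168.1.50"
#   _decode_dhcp_option(options, "hostname")         # -> "mylaptop" (bytes decoded)
#   _format_mac("B8:27:EB:00:11:22")                 # -> "b827eb001122"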
def _verify_l2socket_setup(cap_filter):
"""Create a socket using the scapy configured l2socket.
    Try to create the socket to see if we have permissions, since
    AsyncSniffer will do it in another thread so we would not be able to
    capture any permission or bind errors.
"""
conf.L2socket(filter=cap_filter)
def _verify_working_pcap(cap_filter):
"""Verify we can create a packet filter.
    If we cannot create a filter we will be listening for
    all traffic, which is too intensive.
"""
compile_filter(cap_filter)
|
the-stack_0_10006 | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 27 15:06:40 2017
@author: Diogo Leite
"""
# Here the FK values were selected in the last positions according to the Species_new object class
from DAL import *
from configuration.configuration_data import *
class _Species_sql_new(object):
"""
This class manipulate the SPECIES table in the database
The FK are manipulated in the lasts positions of the parameters
"""
def __init__(self):
self.db_name = self.get_database_name()
def get_database_name(self):
"""
This method is used to get the database name used in factory
:return: database name
:rtype string
"""
conf_data_obj = Configuration_data('INPHINITY')
db_name = conf_data_obj.get_database_name()
return db_name
def select_all_species_all_attributes(self):
"""
return all the Species in the database
:return: cursor with all species
:rtype Cursor list
"""
sql_string = "SELECT id_specie_SP, designation_SP, FK_id_genus_GE_SP FROM SPECIES"
dalObj = DAL(self.db_name, sql_string)
results = dalObj.executeSelect()
return results
def select_specie_by_bacterium_id(self, id_bacterium):
"""
        Return a specie given a bacterium id.
        If none exists, -1 is returned.
:param id_bacterium: id of the bacterium - -1 if unknown
:type id_bacterium: int - not required
:return: cursor with all species
:rtype Cursor list
"""
sql_string = "select id_specie_SP, designation_SP, FK_id_genus_GE_SP from SPECIES, STRAINS, ORGANISMS WHERE FK_id_specie_SP_ST = id_specie_SP and FK_id_strain_ST_OR = id_strain_ST and id_organism_OR = " + str(id_bacterium)
dalObj = DAL(self.db_name, sql_string)
results = dalObj.executeSelect()
if len(results) == 0:
return -1
else:
return results[0]
def insert_specie_if_not_exist_in_Genus(self, specieName, genus_id):
"""
        Insert a Specie if it does not yet exist (based on the designation).
:param specieName: name of the specie
:param genus_id: FK of the specie's genus - -1 if unknown
:type genusName: string - required
:type genus_id: int - required
:return: id of the specie inserted
:rtype int
        :note:: it does not verify the complete taxonomy, only whether the specie already exists in a given genus.
"""
id_specie = self.get_specie_id_by_designation_and_genus_id(specieName, genus_id)
if id_specie == -1:
sql_string = "INSERT INTO SPECIES (designation_SP, FK_id_genus_GE_SP) VALUES (%s, %s)"
params = [specieName, genus_id]
dalObj = DAL(self.db_name, sql_string)
dalObj.sqlcommand = sql_string
dalObj.parameters = params
results = dalObj.executeInsert()
return results.lastrowid
else:
print("The specie: %s already exists in the genus id: %d" %(str(specieName), genus_id))
return id_specie
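    # Minimal usage sketch (hypothetical values, not from the original source):
    # the helper first looks the designation up inside the given genus and only
    # inserts when no matching row exists, so repeated calls return the same id.
    #
    #   species_dal = _Species_sql_new()
    #   new_id = species_dal.insert_specie_if_not_exist_in_Genus("coli", 12)
    #   same_id = species_dal.insert_specie_if_not_exist_in_Genus("coli", 12)
    #   assert new_id == same_id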
def get_specie_id_by_designation_and_genus_id(self, designation, genus_id):
"""
get the id of a Specie based on its designation and genus_id
:param designation: designation of the specie
:param genus_id: FK id_genus
:type designation: string - required
:type genus_id: int - required
        :return: id of the specie or -1 if nonexistent
:rtype int
"""
sql_string = "SELECT id_specie_SP FROM SPECIES WHERE designation_SP = '" + str(designation) + "' AND FK_id_genus_GE_SP = " + str(genus_id)
dalObj = DAL(self.db_name, sql_string)
results = dalObj.executeSelect()
        if len(results) == 0:
return -1
else:
return results[0][0]
def get_specie_by_id(self, id_specie):
"""
Get a specie by its id
:return: Specie elements info
:rtype List(infos species)
"""
sql_string = "SELECT id_specie_SP, designation_SP, FK_id_genus_GE_SP FROM SPECIES WHERE id_specie_SP = " + str(id_specie)
dalobj = DAL(self.db_name, sql_string)
results = dalobj.executeSelect()
return results[0]
def get_specie_by_organism_id(self, id_organism):
"""
        Get a specie by an organism id
        :return: Specie elements info
        :rtype List(infos specie)
"""
sql_string = "SELECT id_specie_SP, designation_SP, FK_id_genus_GE_SP FROM STRAINS, SPECIES, ORGANISMS WHERE FK_id_specie_SP_ST = id_specie_SP and id_strain_ST = FK_id_strain_ST_OR and id_organism_OR = " + str(id_organism)
dalobj = DAL(self.db_name, sql_string)
results = dalobj.executeSelect()
return results[0]
def select_all_species_of_genus_id(self, id_genus):
"""
return all the Species in the database based on a genus id
:param id_genus: id of the genus - -1 if unknown
:type id_genus: int - not required
:return: cursor with all species
:rtype Cursor list
"""
sql_string = "SELECT id_specie_SP, designation_SP, FK_id_genus_GE_SP FROM SPECIES WHERE FK_id_genus_GE_SP = " + str(id_genus)
dalObj = DAL(self.db_name, sql_string)
results = dalObj.executeSelect()
return results
def select_all_species_frequency_couples_by_phage_id_positive(self, phage_id):
"""
        Return the list of species frequencies for positive interactions involving a given phage id.
:return: cursor with all species frequencies
:rtype Cursor list
"""
sql_string = "select id_specie_SP, designation_SP, FK_id_genus_GE_SP, count(id_specie_SP) as 'Quantity' FROM SPECIES, STRAINS, ORGANISMS, COUPLES WHERE FK_id_organism_phage_OR_CP = " + str(phage_id) + " and FK_id_organism_bact_OR_CP = id_organism_OR and FK_id_strain_ST_OR = id_strain_ST and FK_id_specie_SP_ST = id_specie_SP and interaction_CP = 1 group by id_specie_SP;"
dalObj = DAL(self.db_name, sql_string)
results = dalObj.executeSelect()
return results
|
the-stack_0_10008 | # Copyright 2015 Lukas Lalinsky
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the JSON-over-HTTP RPC protocol used by Avatica."""
import re
import socket
import pprint
import math
import logging
import time
from phoenixdb import errors
from phoenixdb.avatica.proto import requests_pb2, common_pb2, responses_pb2
import requests
#from requests_gssapi import HTTPSPNEGOAuth, OPTIONAL
from requests_kerberos import HTTPKerberosAuth, OPTIONAL
import kerberos
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
try:
from HTMLParser import HTMLParser
except ImportError:
from html.parser import HTMLParser
__all__ = ['AvaticaClient']
logger = logging.getLogger(__name__)
class JettyErrorPageParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.path = []
self.title = []
self.message = []
def handle_starttag(self, tag, attrs):
self.path.append(tag)
def handle_endtag(self, tag):
self.path.pop()
def handle_data(self, data):
if len(self.path) > 2 and self.path[0] == 'html' and self.path[1] == 'body':
if len(self.path) == 3 and self.path[2] == 'h2':
self.title.append(data.strip())
elif len(self.path) == 4 and self.path[2] == 'p' and self.path[3] == 'pre':
self.message.append(data.strip())
def parse_url(url):
url = urlparse.urlparse(url)
if not url.scheme and not url.netloc and url.path:
netloc = url.path
if ':' not in netloc:
netloc = '{}:8765'.format(netloc)
return urlparse.ParseResult('http', netloc, '/', '', '', '')
return url
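# Usage sketch (derived from the function above, hosts are illustrative): a bare
# hostname is promoted to a full Avatica URL with the default port 8765, while a
# complete URL is parsed and returned unchanged.
#
#   parse_url("localhost").geturl()          # -> "http://localhost:8765/"
#   parse_url("https://example.com:8765/")   # parsed and returned as-is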
# Defined in phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
SQLSTATE_ERROR_CLASSES = [
('08', errors.OperationalError), # Connection Exception
    ('22018', errors.IntegrityError), # Constraint violation.
('22', errors.DataError), # Data Exception
('23', errors.IntegrityError), # Constraint Violation
('24', errors.InternalError), # Invalid Cursor State
('25', errors.InternalError), # Invalid Transaction State
('42', errors.ProgrammingError), # Syntax Error or Access Rule Violation
('XLC', errors.OperationalError), # Execution exceptions
('INT', errors.InternalError), # Phoenix internal error
]
# Relevant properties as defined by https://calcite.apache.org/avatica/docs/client_reference.html
OPEN_CONNECTION_PROPERTIES = (
'user', # User for the database connection
'password', # Password for the user
)
def raise_sql_error(code, sqlstate, message):
for prefix, error_class in SQLSTATE_ERROR_CLASSES:
if sqlstate.startswith(prefix):
raise error_class(message, code, sqlstate)
def parse_and_raise_sql_error(message):
match = re.findall(r'(?:([^ ]+): )?ERROR (\d+) \(([0-9A-Z]{5})\): (.*?) ->', message)
if match is not None and len(match):
exception, code, sqlstate, message = match[0]
raise_sql_error(int(code), sqlstate, message)
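# Illustrative sketch (the message below is hypothetical, shaped only by the
# regular expression above): Phoenix error text of the form
# "<ExceptionClass>: ERROR <code> (<SQLSTATE>): <message> ->" is mapped onto the
# SQLSTATE_ERROR_CLASSES table.
#
#   msg = "org.example.SomeException: ERROR 1012 (42M03): Table undefined. -> rest"
#   parse_and_raise_sql_error(msg)   # raises errors.ProgrammingError because
#                                    # SQLSTATE "42M03" starts with "42"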
def parse_error_page(html):
parser = JettyErrorPageParser()
parser.feed(html)
if parser.title == ['HTTP ERROR: 500']:
message = ' '.join(parser.message).strip()
parse_and_raise_sql_error(message)
raise errors.InternalError(message)
def parse_error_protobuf(text):
message = common_pb2.WireMessage()
message.ParseFromString(text)
err = responses_pb2.ErrorResponse()
err.ParseFromString(message.wrapped_message)
parse_and_raise_sql_error(err.error_message)
raise_sql_error(err.error_code, err.sql_state, err.error_message)
raise errors.InternalError(err.error_message)
class AvaticaClient(object):
"""Client for Avatica's RPC server.
This exposes all low-level functionality that the Avatica
server provides, using the native terminology. You most likely
    do not want to use this class directly, but rather connect
to a server using :func:`phoenixdb.connect`.
"""
def __init__(self, url, max_retries=None, auth=None):
"""Constructs a new client object.
:param url:
URL of an Avatica RPC server.
"""
self.url = parse_url(url)
self.max_retries = max_retries if max_retries is not None else 3
self.auth = auth
self.connection = None
def connect(self):
"""This method used to open a persistent TCP connection
requests does not require this"""
pass
def close(self):
"""Also does nothing per requests"""
pass
def _post_request(self, body, headers):
retry_count = self.max_retries
while True:
logger.debug("POST %s %r %r", self.url.geturl(), body, headers)
try:
if self.auth == "SPNEGO":
#response = requests.request('post', self.url.geturl(), data=body, stream=True, headers=headers, auth=HTTPSPNEGOAuth(mutual_authentication=OPTIONAL))
response = requests.request('post', self.url.geturl(), data=body, stream=True, headers=headers, auth=HTTPKerberosAuth(mutual_authentication=OPTIONAL, mech_oid=kerberos.GSS_MECH_OID_SPNEGO), timeout=7200)
else:
response = requests.request('post', self.url.geturl(), data=body, stream=True, headers=headers, timeout=7200)
except requests.HTTPError as e:
if retry_count > 0:
delay = math.exp(-retry_count)
logger.debug("HTTP protocol error, will retry in %s seconds...", delay, exc_info=True)
time.sleep(delay)
retry_count -= 1
continue
raise errors.InterfaceError('RPC request failed', cause=e)
else:
if response.status_code == requests.codes.service_unavailable:
if retry_count > 0:
delay = math.exp(-retry_count)
logger.debug("Service unavailable, will retry in %s seconds...", delay, exc_info=True)
time.sleep(delay)
retry_count -= 1
continue
return response
def _apply(self, request_data, expected_response_type=None):
logger.debug("Sending request\n%s", pprint.pformat(request_data))
request_name = request_data.__class__.__name__
message = common_pb2.WireMessage()
message.name = 'org.apache.calcite.avatica.proto.Requests${}'.format(request_name)
message.wrapped_message = request_data.SerializeToString()
body = message.SerializeToString()
headers = {'content-type': 'application/x-google-protobuf'}
response = self._post_request(body, headers)
response_body = response.raw.read()
if response.status_code != requests.codes.ok:
logger.debug("Received response\n%s", response_body)
if b'<html>' in response_body:
parse_error_page(response_body)
else:
# assume the response is in protobuf format
parse_error_protobuf(response_body)
raise errors.InterfaceError('RPC request returned invalid status code', response.status_code)
message = common_pb2.WireMessage()
message.ParseFromString(response_body)
logger.debug("Received response\n%s", message)
if expected_response_type is None:
expected_response_type = request_name.replace('Request', 'Response')
expected_response_type = 'org.apache.calcite.avatica.proto.Responses$' + expected_response_type
if message.name != expected_response_type:
raise errors.InterfaceError('unexpected response type "{}" expected "{}"'.format(message.name, expected_response_type))
return message.wrapped_message
def get_catalogs(self, connection_id):
request = requests_pb2.CatalogsRequest()
request.connection_id = connection_id
return self._apply(request)
def get_schemas(self, connection_id, catalog=None, schemaPattern=None):
request = requests_pb2.SchemasRequest()
request.connection_id = connection_id
if catalog is not None:
request.catalog = catalog
if schemaPattern is not None:
request.schema_pattern = schemaPattern
return self._apply(request)
def get_tables(self, connection_id, catalog=None, schemaPattern=None, tableNamePattern=None, typeList=None):
request = requests_pb2.TablesRequest()
request.connection_id = connection_id
if catalog is not None:
request.catalog = catalog
if schemaPattern is not None:
request.schema_pattern = schemaPattern
if tableNamePattern is not None:
request.table_name_pattern = tableNamePattern
        if typeList is not None:
            request.type_list.extend(typeList)
        request.has_type_list = typeList is not None
return self._apply(request)
def get_columns(self, connection_id, catalog=None, schemaPattern=None, tableNamePattern=None, columnNamePattern=None):
request = requests_pb2.ColumnsRequest()
request.connection_id = connection_id
if catalog is not None:
request.catalog = catalog
if schemaPattern is not None:
request.schema_pattern = schemaPattern
if tableNamePattern is not None:
request.table_name_pattern = tableNamePattern
if columnNamePattern is not None:
request.column_name_pattern = columnNamePattern
return self._apply(request)
def get_table_types(self, connection_id):
request = requests_pb2.TableTypesRequest()
request.connection_id = connection_id
return self._apply(request)
def get_type_info(self, connection_id):
request = requests_pb2.TypeInfoRequest()
request.connection_id = connection_id
return self._apply(request)
def connection_sync(self, connection_id, connProps=None):
"""Synchronizes connection properties with the server.
:param connection_id:
ID of the current connection.
:param connProps:
Dictionary with the properties that should be changed.
:returns:
A ``common_pb2.ConnectionProperties`` object.
"""
if connProps is None:
connProps = {}
request = requests_pb2.ConnectionSyncRequest()
request.connection_id = connection_id
request.conn_props.auto_commit = connProps.get('autoCommit', False)
request.conn_props.has_auto_commit = True
request.conn_props.read_only = connProps.get('readOnly', False)
request.conn_props.has_read_only = True
request.conn_props.transaction_isolation = connProps.get('transactionIsolation', 0)
request.conn_props.catalog = connProps.get('catalog', '')
request.conn_props.schema = connProps.get('schema', '')
response_data = self._apply(request)
response = responses_pb2.ConnectionSyncResponse()
response.ParseFromString(response_data)
return response.conn_props
def open_connection(self, connection_id, info=None):
"""Opens a new connection.
:param connection_id:
ID of the connection to open.
"""
request = requests_pb2.OpenConnectionRequest()
request.connection_id = connection_id
if info is not None:
# Info is a list of repeated pairs, setting a dict directly fails
for k, v in info.items():
request.info[k] = v
response_data = self._apply(request)
response = responses_pb2.OpenConnectionResponse()
response.ParseFromString(response_data)
def close_connection(self, connection_id):
"""Closes a connection.
:param connection_id:
ID of the connection to close.
"""
request = requests_pb2.CloseConnectionRequest()
request.connection_id = connection_id
self._apply(request)
def create_statement(self, connection_id):
"""Creates a new statement.
:param connection_id:
ID of the current connection.
:returns:
New statement ID.
"""
request = requests_pb2.CreateStatementRequest()
request.connection_id = connection_id
response_data = self._apply(request)
response = responses_pb2.CreateStatementResponse()
response.ParseFromString(response_data)
return response.statement_id
def close_statement(self, connection_id, statement_id):
"""Closes a statement.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to close.
"""
request = requests_pb2.CloseStatementRequest()
request.connection_id = connection_id
request.statement_id = statement_id
self._apply(request)
def prepare_and_execute(self, connection_id, statement_id, sql, max_rows_total=None, first_frame_max_size=None):
"""Prepares and immediately executes a statement.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to prepare.
:param sql:
SQL query.
:param max_rows_total:
The maximum number of rows that will be allowed for this query.
:param first_frame_max_size:
The maximum number of rows that will be returned in the first Frame returned for this query.
:returns:
Result set with the signature of the prepared statement and the first frame data.
"""
request = requests_pb2.PrepareAndExecuteRequest()
request.connection_id = connection_id
request.statement_id = statement_id
request.sql = sql
if max_rows_total is not None:
request.max_rows_total = max_rows_total
if first_frame_max_size is not None:
request.first_frame_max_size = first_frame_max_size
response_data = self._apply(request, 'ExecuteResponse')
response = responses_pb2.ExecuteResponse()
response.ParseFromString(response_data)
return response.results
def prepare(self, connection_id, sql, max_rows_total=None):
"""Prepares a statement.
:param connection_id:
ID of the current connection.
:param sql:
SQL query.
:param max_rows_total:
The maximum number of rows that will be allowed for this query.
:returns:
Signature of the prepared statement.
"""
request = requests_pb2.PrepareRequest()
request.connection_id = connection_id
request.sql = sql
if max_rows_total is not None:
request.max_rows_total = max_rows_total
response_data = self._apply(request)
response = responses_pb2.PrepareResponse()
response.ParseFromString(response_data)
return response.statement
def execute(self, connection_id, statement_id, signature, parameter_values=None, first_frame_max_size=None):
"""Returns a frame of rows.
The frame describes whether there may be another frame. If there is not
another frame, the current iteration is done when we have finished the
        rows in this frame.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to fetch rows from.
:param signature:
common_pb2.Signature object
:param parameter_values:
A list of parameter values, if statement is to be executed; otherwise ``None``.
:param first_frame_max_size:
The maximum number of rows that will be returned in the first Frame returned for this query.
:returns:
Frame data, or ``None`` if there are no more.
"""
request = requests_pb2.ExecuteRequest()
request.statementHandle.id = statement_id
request.statementHandle.connection_id = connection_id
request.statementHandle.signature.CopyFrom(signature)
if parameter_values is not None:
request.parameter_values.extend(parameter_values)
request.has_parameter_values = True
if first_frame_max_size is not None:
request.deprecated_first_frame_max_size = first_frame_max_size
request.first_frame_max_size = first_frame_max_size
response_data = self._apply(request)
response = responses_pb2.ExecuteResponse()
response.ParseFromString(response_data)
return response.results
def fetch(self, connection_id, statement_id, offset=0, frame_max_size=None):
"""Returns a frame of rows.
The frame describes whether there may be another frame. If there is not
another frame, the current iteration is done when we have finished the
        rows in this frame.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to fetch rows from.
:param offset:
Zero-based offset of first row in the requested frame.
:param frame_max_size:
Maximum number of rows to return; negative means no limit.
:returns:
Frame data, or ``None`` if there are no more.
"""
request = requests_pb2.FetchRequest()
request.connection_id = connection_id
request.statement_id = statement_id
request.offset = offset
if frame_max_size is not None:
request.frame_max_size = frame_max_size
response_data = self._apply(request)
response = responses_pb2.FetchResponse()
response.ParseFromString(response_data)
return response.frame
|
the-stack_0_10009 | import numpy as np
import scipy as sp
from ._model import Model
from ..utils import safe_isinstance, record_import_error
from ..utils.transformers import parse_prefix_suffix_for_tokenizer
from .. import models
from .._serializable import Serializer, Deserializer
try:
import torch
except ImportError as e:
record_import_error("torch", "Torch could not be imported!", e)
try:
import tensorflow as tf
except ImportError as e:
record_import_error("tensorflow", "TensorFlow could not be imported!", e)
class TeacherForcing(Model):
""" Generates scores (log odds) for output text explanation algorithms using Teacher Forcing technique.
This class supports generation of log odds for transformer models as well as functions. In model agnostic
    cases (model is a function) it expects a similarity_model and similarity_tokenizer to approximate log odds scores
    for the target sentence generated by the model.
"""
def __init__(self, model, tokenizer=None, similarity_model=None, similarity_tokenizer=None, batch_size=128, device=None):
""" Build a teacher forcing model from the given text generation model.
Parameters
----------
model: object or function
            An object of any pretrained transformer model, or a function which is to be explained.
tokenizer: object
A tokenizer object(PreTrainedTokenizer/PreTrainedTokenizerFast) which is used to tokenize source and target sentence.
similarity_model: object
A pretrained transformer model object which is used in model agnostic scenario to approximate log odds.
similarity_tokenizer: object
A tokenizer object(PreTrainedTokenizer/PreTrainedTokenizerFast) which is used to tokenize sentence in model agnostic scenario.
batch_size: int
Batch size for model inferencing and computing logodds (default=128).
device: str
            By default, it infers if the system has a gpu and sets the device accordingly. Should be 'cpu' or 'cuda' for pytorch models.
Returns
-------
numpy.ndarray
The scores (log odds) of generating target sentence ids using the model.
"""
super().__init__(model)
self.tokenizer = tokenizer
# set pad token if not defined
if self.tokenizer is not None and self.tokenizer.pad_token is None:
self.tokenizer.pad_token = self.tokenizer.eos_token
self.device = device
self.batch_size = batch_size
# assign text generation function
if safe_isinstance(model, "transformers.PreTrainedModel") or safe_isinstance(model, "transformers.TFPreTrainedModel"):
self.text_generate = models.TextGeneration(self.inner_model, tokenizer=self.tokenizer, device=self.device)
self.similarity_model = model
self.similarity_tokenizer = tokenizer
self.model_agnostic = False
else:
self.text_generate = models.TextGeneration(self.inner_model, device=self.device)
self.similarity_model = similarity_model
self.similarity_tokenizer = similarity_tokenizer
# set pad token for a similarity tokenizer(in a model agnostic scenario) if not defined
if self.similarity_tokenizer is not None and self.similarity_tokenizer.pad_token is None:
self.similarity_tokenizer.pad_token = self.similarity_tokenizer.eos_token
self.model_agnostic = True
# initializing target which is the target sentence/ids for every new row of explanation
self.output = None
self.output_names = None
self.similarity_model_type = None
if safe_isinstance(self.similarity_model, "transformers.PreTrainedModel"):
self.similarity_model_type = "pt"
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if self.device is None else self.device
self.similarity_model = self.similarity_model.to(self.device)
elif safe_isinstance(self.similarity_model, "transformers.TFPreTrainedModel"):
self.similarity_model_type = "tf"
def __call__(self, X, Y):
""" Computes log odds scores of generating output(text) for a given batch of input(text/image) .
Parameters
----------
X: numpy.ndarray
An array containing a list of masked inputs.
Y: numpy.ndarray
An array containing a list of target sentence/ids.
Returns
-------
numpy.ndarray
A numpy array of log odds scores for every input pair (masked_X, X)
"""
output_batch = None
# caching updates output names and target sentence ids
self.update_output_names(Y[:1])
start_batch_idx, end_batch_idx = 0, len(X)
while start_batch_idx < end_batch_idx:
X_batch = X[start_batch_idx:start_batch_idx+self.batch_size]
Y_batch = Y[start_batch_idx:start_batch_idx+self.batch_size]
logits = self.get_teacher_forced_logits(X_batch, Y_batch)
logodds = self.get_logodds(logits)
if output_batch is None:
output_batch = logodds
else:
output_batch = np.concatenate((output_batch, logodds))
start_batch_idx += self.batch_size
return output_batch
def update_output_names(self, output):
""" The function updates output tokens.
It mimics the caching mechanism to update the output tokens for every
new row of explanation that are to be explained.
Parameters
----------
output: numpy.ndarray
Output(sentence/sentence ids) for an explanation row.
"""
# check if the target sentence has been updated (occurs when explaining a new row)
if (self.output is None) or (not np.array_equal(self.output, output)):
self.output = output
self.output_names = self.get_output_names(output)
def get_output_names(self, output):
""" Gets the output tokens by computing the output sentence ids and output names using the similarity_tokenizer.
Parameters
----------
output: numpy.ndarray
Output(sentence/sentence ids) for an explanation row.
Returns
-------
list
A list of output tokens.
"""
output_ids = self.get_outputs(output)
output_names = [self.similarity_tokenizer.decode([x]).strip() for x in output_ids[0, :]]
return output_names
def get_outputs(self, X):
""" The function tokenizes output sentences and returns ids.
Parameters
----------
X: numpy.ndarray
Output(sentence/sentence ids) for an explanation row.
Returns
-------
numpy.ndarray
An array of output(target sentence) ids.
"""
# check if output is a sentence or already parsed target ids
if X.dtype.type is np.str_:
parsed_tokenizer_dict = parse_prefix_suffix_for_tokenizer(self.similarity_tokenizer)
keep_prefix, keep_suffix = parsed_tokenizer_dict['keep_prefix'], parsed_tokenizer_dict['keep_suffix']
if keep_suffix > 0:
output_ids = np.array(self.similarity_tokenizer(X.tolist(), padding=True)["input_ids"])[:, keep_prefix:-keep_suffix]
else:
output_ids = np.array(self.similarity_tokenizer(X.tolist(), padding=True)["input_ids"])[:, keep_prefix:]
else:
output_ids = X
return output_ids
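    # Illustrative sketch (token ids are hypothetical): for a tokenizer that
    # wraps sentences in special tokens, parse_prefix_suffix_for_tokenizer is
    # expected to report keep_prefix=1 and keep_suffix=1, so the slicing above
    # drops the wrapper ids and keeps only the ids of the target words, e.g.
    #
    #   [101, 7592, 2088, 102]  ->  [7592, 2088]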
def get_inputs(self, X, padding_side='right'):
""" The function tokenizes source sentences.
In model agnostic case, the function calls model(X) which is expected to
return a batch of output sentences which is tokenized to compute inputs.
Parameters
----------
X: numpy.ndarray
X could be a batch of text or images(model agnostic case).
Returns
-------
dict
Dictionary of padded source sentence ids and attention mask as tensors("pt" or "tf" based on similarity_model_type).
"""
if self.model_agnostic:
# In model agnostic case, we first pass the input through the model and then tokenize output sentence
input_sentences = np.array(self.inner_model(X))
else:
input_sentences = np.array(X)
# set tokenizer padding to prepare inputs for batch inferencing
# padding_side="left" for only decoder models text generation eg. GPT2
self.similarity_tokenizer.padding_side = padding_side
inputs = self.similarity_tokenizer(input_sentences.tolist(), return_tensors=self.similarity_model_type, padding=True)
# set tokenizer padding to default
self.similarity_tokenizer.padding_side = 'right'
return inputs
def get_logodds(self, logits):
""" Calculates log odds from logits.
This function passes the logits through softmax and then computes log odds for the output(target sentence) ids.
Parameters
----------
logits: numpy.ndarray
An array of logits generated from the model.
Returns
-------
numpy.ndarray
Computes log odds for corresponding output ids.
"""
# set output ids for which scores are to be extracted
if self.output.dtype.type is np.str_:
output_ids = self.get_outputs(self.output)[0]
else:
output_ids = self.output[0]
def calc_logodds(arr):
probs = np.exp(arr) / np.exp(arr).sum(-1)
logodds = sp.special.logit(probs)
return logodds
# pass logits through softmax, get the token corresponding score and convert back to log odds (as one vs all)
logodds = np.apply_along_axis(calc_logodds, -1, logits)
logodds_for_output_ids = logodds[:, np.array(range(logodds.shape[1])), output_ids]
return logodds_for_output_ids
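    # Worked sketch of the transformation above (toy numbers, not model output):
    # softmax turns raw logits into probabilities, and scipy's logit maps each
    # probability p to log(p / (1 - p)), i.e. a one-vs-all log odds score.
    #
    #   arr = np.array([2.0, 1.0, 0.1])
    #   probs = np.exp(arr) / np.exp(arr).sum(-1)   # ~[0.659, 0.242, 0.099]
    #   sp.special.logit(probs[0])                  # ~log(0.659 / 0.341) ~ 0.66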
def model_inference(self, inputs, output_ids):
""" This function performs model inference for tensorflow and pytorch models.
Parameters
----------
inputs: dict
Dictionary of padded source sentence ids and attention mask as tensors.
output_ids: numpy.ndarray
An array of decoder output ids.
Returns
-------
numpy.ndarray
Returns output logits from the model.
"""
if self.similarity_model_type == "pt":
# create torch tensors and move to device
inputs = inputs.to(self.device)
output_ids = torch.tensor(output_ids, dtype=torch.int64, device=self.device)
self.similarity_model.eval()
with torch.no_grad():
if self.similarity_model.config.is_encoder_decoder:
# model inference
outputs = self.similarity_model(**inputs, decoder_input_ids=output_ids, labels=output_ids, return_dict=True)
else:
# combine source and target sentence ids to pass into decoder eg: in case of distillgpt2
inputs["input_ids"] = torch.cat((inputs["input_ids"], output_ids), dim=-1)
attention_mask_for_output_ids = torch.ones(output_ids.shape, dtype=output_ids.dtype, device=self.device)
inputs["attention_mask"] = torch.cat((inputs["attention_mask"], attention_mask_for_output_ids), dim=-1)
# create position ids due to left padding for decoder models
inputs["position_ids"] = (inputs["attention_mask"].long().cumsum(-1) - 1)
inputs["position_ids"].masked_fill_(inputs["attention_mask"] == 0, 0)
# model inference
outputs = self.similarity_model(**inputs, return_dict=True)
logits = outputs.logits.detach().cpu().numpy().astype('float64')
elif self.similarity_model_type == "tf":
output_ids = tf.convert_to_tensor(output_ids, dtype=tf.int32)
if self.similarity_model.config.is_encoder_decoder:
if self.device is None:
outputs = self.similarity_model(inputs, decoder_input_ids=output_ids, labels=output_ids, return_dict=True)
else:
try:
with tf.device(self.device):
outputs = self.similarity_model(inputs, decoder_input_ids=output_ids, labels=output_ids, return_dict=True)
except RuntimeError as e:
print(e)
else:
# combine source and target sentence ids to pass into decoder eg: in case of distillgpt2
inputs["input_ids"] = tf.concat((inputs["input_ids"], output_ids), axis=-1)
attention_mask_for_output_ids = tf.ones(output_ids.shape, dtype=output_ids.dtype)
inputs["attention_mask"] = tf.concat((inputs["attention_mask"], attention_mask_for_output_ids), axis=-1)
inputs["position_ids"] = tf.math.cumsum(inputs["attention_mask"], axis=-1) - 1
inputs["position_ids"] = tf.where(inputs["attention_mask"] == 0, 0, inputs["position_ids"])
if self.device is None:
outputs = self.similarity_model(inputs, return_dict=True)
else:
try:
with tf.device(self.device):
outputs = self.similarity_model(inputs, return_dict=True)
except RuntimeError as e:
print(e)
logits = outputs.logits.numpy().astype('float64')
return logits
def get_teacher_forced_logits(self, X, Y):
""" The function generates logits for transformer models.
It generates logits for encoder-decoder models as well as decoder only models by using the teacher forcing technique.
Parameters
----------
X: numpy.ndarray
An array containing a list of masked inputs.
Y: numpy.ndarray
An array containing a list of target sentence/ids.
Returns
-------
numpy.ndarray
Decoder output logits for output(target sentence) ids.
"""
# check if type of model architecture assigned in model config
if (hasattr(self.similarity_model.config, "is_encoder_decoder") and not self.similarity_model.config.is_encoder_decoder) \
and (hasattr(self.similarity_model.config, "is_decoder") and not self.similarity_model.config.is_decoder):
raise ValueError(
"Please assign either of is_encoder_decoder or is_decoder to True in model config for extracting target sentence ids"
)
# get output ids for teacher forcing
output_ids = self.get_outputs(Y)
if self.similarity_model.config.is_encoder_decoder:
# encode batched inputs by padding on the right side
inputs = self.get_inputs(X, padding_side='right')
# assigning decoder start token id as it is needed for encoder decoder model generation
decoder_start_token_id = None
if hasattr(self.similarity_model.config, "decoder_start_token_id") and \
self.similarity_model.config.decoder_start_token_id is not None:
decoder_start_token_id = self.similarity_model.config.decoder_start_token_id
elif hasattr(self.similarity_model.config, "bos_token_id") and self.similarity_model.config.bos_token_id is not None:
decoder_start_token_id = self.similarity_model.config.bos_token_id
elif (hasattr(self.similarity_model.config, "decoder") and hasattr(self.similarity_model.config.decoder, "bos_token_id") and \
self.similarity_model.config.decoder.bos_token_id is not None):
decoder_start_token_id = self.similarity_model.config.decoder.bos_token_id
else:
raise ValueError(
"No decoder_start_token_id or bos_token_id defined in config for encoder-decoder generation"
)
# concat decoder start token id to target sentence ids
output_start_id = np.ones((output_ids.shape[0], 1)) * decoder_start_token_id
output_ids = np.concatenate((output_start_id, output_ids), axis=-1)
# generate outputs and logits
logits = self.model_inference(inputs, output_ids)
logits = logits[:, :-1, :]
else:
# encode batched inputs by padding on the left side
inputs = self.get_inputs(X, padding_side='left')
# generate outputs and logits
logits = self.model_inference(inputs, output_ids)
# extract only logits corresponding to target sentence ids
logits = logits[:, -output_ids.shape[1]-1:-1, :]
return logits
def save(self, out_file):
super().save(out_file)
        # Increment the version number when the encoding changes!
with Serializer(out_file, "shap.models.TeacherForcing", version=0) as s:
s.save("tokenizer", self.tokenizer)
s.save("similarity_model", self.similarity_model)
s.save("similarity_tokenizer", self.similarity_tokenizer)
s.save("batch_size", self.batch_size)
s.save("device", self.device)
@classmethod
def load(cls, in_file, instantiate=True):
if instantiate:
return cls._instantiated_load(in_file)
kwargs = super().load(in_file, instantiate=False)
with Deserializer(in_file, "shap.models.TeacherForcing", min_version=0, max_version=0) as s:
kwargs["tokenizer"] = s.load("tokenizer")
kwargs["similarity_model"] = s.load("similarity_model")
kwargs["similarity_tokenizer"] = s.load("similarity_tokenizer")
kwargs["batch_size"] = s.load("batch_size")
kwargs["device"] = s.load("device")
return kwargs
|
the-stack_0_10010 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import os
import re
import llnl.util.tty as tty
from llnl.util.filesystem import mkdirp
import spack.repo
import spack.stage
import spack.util.web
from spack.spec import Spec
from spack.url import (
UndetectableNameError,
UndetectableVersionError,
parse_name,
parse_version,
)
from spack.util.editor import editor
from spack.util.executable import ProcessError, which
from spack.util.naming import (
mod_to_class,
simplify_name,
valid_fully_qualified_module_name,
)
description = "create a new package file"
section = "packaging"
level = "short"
package_template = '''\
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
# ----------------------------------------------------------------------------
# If you submit this package back to Spack as a pull request,
# please first remove this boilerplate and all FIXME comments.
#
# This is a template package file for Spack. We've put "FIXME"
# next to all the things you'll want to change. Once you've handled
# them, you can save this file and test your package like this:
#
# spack install {name}
#
# You can edit this file again by typing:
#
# spack edit {name}
#
# See the Spack documentation for more information on packaging.
# ----------------------------------------------------------------------------
from spack import *
class {class_name}({base_class_name}):
"""FIXME: Put a proper description of your package here."""
# FIXME: Add a proper url for your package's homepage here.
homepage = "https://www.example.com"
{url_def}
# FIXME: Add a list of GitHub accounts to
# notify when the package is updated.
# maintainers = ['github_user1', 'github_user2']
{versions}
{dependencies}
{body_def}
'''
class BundlePackageTemplate(object):
"""
Provides the default values to be used for a bundle package file template.
"""
base_class_name = 'BundlePackage'
dependencies = """\
# FIXME: Add dependencies if required.
# depends_on('foo')"""
url_def = " # There is no URL since there is no code to download."
body_def = " # There is no need for install() since there is no code."
def __init__(self, name, versions):
self.name = name
self.class_name = mod_to_class(name)
self.versions = versions
def write(self, pkg_path):
"""Writes the new package file."""
# Write out a template for the file
with open(pkg_path, "w") as pkg_file:
pkg_file.write(package_template.format(
name=self.name,
class_name=self.class_name,
base_class_name=self.base_class_name,
url_def=self.url_def,
versions=self.versions,
dependencies=self.dependencies,
body_def=self.body_def))
class PackageTemplate(BundlePackageTemplate):
"""Provides the default values to be used for the package file template"""
base_class_name = 'Package'
body_def = """\
def install(self, spec, prefix):
# FIXME: Unknown build system
make()
make('install')"""
url_line = ' url = "{url}"'
def __init__(self, name, url, versions):
super(PackageTemplate, self).__init__(name, versions)
self.url_def = self.url_line.format(url=url)
class AutotoolsPackageTemplate(PackageTemplate):
"""Provides appropriate overrides for Autotools-based packages
that *do* come with a ``configure`` script"""
base_class_name = 'AutotoolsPackage'
body_def = """\
def configure_args(self):
# FIXME: Add arguments other than --prefix
# FIXME: If not needed delete this function
args = []
return args"""
class AutoreconfPackageTemplate(PackageTemplate):
"""Provides appropriate overrides for Autotools-based packages
that *do not* come with a ``configure`` script"""
base_class_name = 'AutotoolsPackage'
dependencies = """\
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
depends_on('m4', type='build')
# FIXME: Add additional dependencies if required.
# depends_on('foo')"""
body_def = """\
def autoreconf(self, spec, prefix):
# FIXME: Modify the autoreconf method as necessary
autoreconf('--install', '--verbose', '--force')
def configure_args(self):
# FIXME: Add arguments other than --prefix
# FIXME: If not needed delete this function
args = []
return args"""
class CMakePackageTemplate(PackageTemplate):
"""Provides appropriate overrides for CMake-based packages"""
base_class_name = 'CMakePackage'
body_def = """\
def cmake_args(self):
# FIXME: Add arguments other than
# FIXME: CMAKE_INSTALL_PREFIX and CMAKE_BUILD_TYPE
# FIXME: If not needed delete this function
args = []
return args"""
class MesonPackageTemplate(PackageTemplate):
"""Provides appropriate overrides for meson-based packages"""
base_class_name = 'MesonPackage'
body_def = """\
def meson_args(self):
# FIXME: If not needed delete this function
args = []
return args"""
class QMakePackageTemplate(PackageTemplate):
"""Provides appropriate overrides for QMake-based packages"""
base_class_name = 'QMakePackage'
body_def = """\
def qmake_args(self):
# FIXME: If not needed delete this function
args = []
return args"""
class MavenPackageTemplate(PackageTemplate):
"""Provides appropriate overrides for Maven-based packages"""
base_class_name = 'MavenPackage'
body_def = """\
def build(self, spec, prefix):
# FIXME: If not needed delete this function
pass"""
class SconsPackageTemplate(PackageTemplate):
"""Provides appropriate overrides for SCons-based packages"""
base_class_name = 'SConsPackage'
body_def = """\
def build_args(self, spec, prefix):
# FIXME: Add arguments to pass to build.
# FIXME: If not needed delete this function
args = []
return args"""
class WafPackageTemplate(PackageTemplate):
"""Provides appropriate override for Waf-based packages"""
base_class_name = 'WafPackage'
body_def = """\
# FIXME: Override configure_args(), build_args(),
# or install_args() if necessary."""
class BazelPackageTemplate(PackageTemplate):
"""Provides appropriate overrides for Bazel-based packages"""
dependencies = """\
# FIXME: Add additional dependencies if required.
depends_on('bazel', type='build')"""
body_def = """\
def install(self, spec, prefix):
# FIXME: Add logic to build and install here.
bazel()"""
class PythonPackageTemplate(PackageTemplate):
"""Provides appropriate overrides for python extensions"""
base_class_name = 'PythonPackage'
dependencies = """\
# FIXME: Only add the python/pip/wheel dependencies if you need specific versions
# or need to change the dependency type. Generic python/pip/wheel dependencies are
    # added implicitly by the PythonPackage base class.
# depends_on('[email protected]:2.Y,3.Z:', type=('build', 'run'))
# depends_on('[email protected]:', type='build')
# depends_on('[email protected]:', type='build')
# FIXME: Add a build backend, usually defined in pyproject.toml. If no such file
# exists, use setuptools.
# depends_on('py-setuptools', type='build')
# depends_on('py-flit-core', type='build')
# depends_on('py-poetry-core', type='build')
# FIXME: Add additional dependencies if required.
# depends_on('py-foo', type=('build', 'run'))"""
body_def = """\
def global_options(self, spec, prefix):
# FIXME: Add options to pass to setup.py
# FIXME: If not needed, delete this function
options = []
return options
def install_options(self, spec, prefix):
# FIXME: Add options to pass to setup.py install
# FIXME: If not needed, delete this function
options = []
return options"""
def __init__(self, name, url, *args, **kwargs):
# If the user provided `--name py-numpy`, don't rename it py-py-numpy
if not name.startswith('py-'):
# Make it more obvious that we are renaming the package
tty.msg("Changing package name from {0} to py-{0}".format(name))
name = 'py-{0}'.format(name)
# Simple PyPI URLs:
# https://<hostname>/packages/<type>/<first character of project>/<project>/<download file>
# e.g. https://pypi.io/packages/source/n/numpy/numpy-1.19.4.zip
# e.g. https://www.pypi.io/packages/source/n/numpy/numpy-1.19.4.zip
# e.g. https://pypi.org/packages/source/n/numpy/numpy-1.19.4.zip
# e.g. https://pypi.python.org/packages/source/n/numpy/numpy-1.19.4.zip
# e.g. https://files.pythonhosted.org/packages/source/n/numpy/numpy-1.19.4.zip
# PyPI URLs containing hash:
# https://<hostname>/packages/<two character hash>/<two character hash>/<longer hash>/<download file>
# e.g. https://pypi.io/packages/c5/63/a48648ebc57711348420670bb074998f79828291f68aebfff1642be212ec/numpy-1.19.4.zip
# e.g. https://files.pythonhosted.org/packages/c5/63/a48648ebc57711348420670bb074998f79828291f68aebfff1642be212ec/numpy-1.19.4.zip
# e.g. https://files.pythonhosted.org/packages/c5/63/a48648ebc57711348420670bb074998f79828291f68aebfff1642be212ec/numpy-1.19.4.zip#sha256=141ec3a3300ab89c7f2b0775289954d193cc8edb621ea05f99db9cb181530512
# PyPI URLs for wheels:
# https://pypi.io/packages/py3/a/azureml_core/azureml_core-1.11.0-py3-none-any.whl
# https://pypi.io/packages/py3/d/dotnetcore2/dotnetcore2-2.1.14-py3-none-macosx_10_9_x86_64.whl
# https://pypi.io/packages/py3/d/dotnetcore2/dotnetcore2-2.1.14-py3-none-manylinux1_x86_64.whl
# https://files.pythonhosted.org/packages/cp35.cp36.cp37.cp38.cp39/s/shiboken2/shiboken2-5.15.2-5.15.2-cp35.cp36.cp37.cp38.cp39-abi3-manylinux1_x86_64.whl
# https://files.pythonhosted.org/packages/f4/99/ad2ef1aeeb395ee2319bb981ea08dbbae878d30dd28ebf27e401430ae77a/azureml_core-1.36.0.post2-py3-none-any.whl#sha256=60bcad10b4380d78a8280deb7365de2c2cd66527aacdcb4a173f613876cbe739
match = re.search(
r'(?:pypi|pythonhosted)[^/]+/packages' + '/([^/#]+)' * 4,
url
)
if match:
# PyPI URLs for wheels are too complicated, ignore them for now
# https://www.python.org/dev/peps/pep-0427/#file-name-convention
if not match.group(4).endswith('.whl'):
if len(match.group(2)) == 1:
# Simple PyPI URL
url = '/'.join(match.group(3, 4))
else:
# PyPI URL containing hash
# Project name doesn't necessarily match download name, but it
# usually does, so this is the best we can do
project = parse_name(url)
url = '/'.join([project, match.group(4)])
self.url_line = ' pypi = "{url}"'
else:
# Add a reminder about spack preferring PyPI URLs
self.url_line = '''
# FIXME: ensure the package is not available through PyPI. If it is,
# re-run `spack create --force` with the PyPI URL.
''' + self.url_line
super(PythonPackageTemplate, self).__init__(name, url, *args, **kwargs)
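    # Illustrative example of the URL rewriting above (project and version are
    # only examples): a full PyPI download URL is reduced to the short form used
    # by the generated `pypi =` attribute.
    #
    #   https://pypi.io/packages/source/n/numpy/numpy-1.19.4.zip
    #       ->  pypi = "numpy/numpy-1.19.4.zip"
    #   https://pypi.io/packages/c5/63/<hash>/numpy-1.19.4.zip
    #       ->  pypi = "numpy/numpy-1.19.4.zip"   (project name via parse_name)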
class RPackageTemplate(PackageTemplate):
"""Provides appropriate overrides for R extensions"""
base_class_name = 'RPackage'
dependencies = """\
# FIXME: Add dependencies if required.
# depends_on('r-foo', type=('build', 'run'))"""
body_def = """\
def configure_args(self):
# FIXME: Add arguments to pass to install via --configure-args
# FIXME: If not needed delete this function
args = []
return args"""
def __init__(self, name, url, *args, **kwargs):
# If the user provided `--name r-rcpp`, don't rename it r-r-rcpp
if not name.startswith('r-'):
# Make it more obvious that we are renaming the package
tty.msg("Changing package name from {0} to r-{0}".format(name))
name = 'r-{0}'.format(name)
r_name = parse_name(url)
cran = re.search(
r'(?:r-project|rstudio)[^/]+/src' + '/([^/]+)' * 2,
url
)
if cran:
url = r_name
self.url_line = ' cran = "{url}"'
bioc = re.search(
r'(?:bioconductor)[^/]+/packages' + '/([^/]+)' * 5,
url
)
if bioc:
self.url_line = ' url = "{0}"\n'\
' bioc = "{1}"'.format(url, r_name)
super(RPackageTemplate, self).__init__(name, url, *args, **kwargs)
class PerlmakePackageTemplate(PackageTemplate):
"""Provides appropriate overrides for Perl extensions
that come with a Makefile.PL"""
base_class_name = 'PerlPackage'
dependencies = """\
# FIXME: Add dependencies if required:
# depends_on('perl-foo', type=('build', 'run'))"""
body_def = """\
def configure_args(self):
# FIXME: Add non-standard arguments
# FIXME: If not needed delete this function
args = []
return args"""
def __init__(self, name, *args, **kwargs):
# If the user provided `--name perl-cpp`, don't rename it perl-perl-cpp
if not name.startswith('perl-'):
# Make it more obvious that we are renaming the package
tty.msg("Changing package name from {0} to perl-{0}".format(name))
name = 'perl-{0}'.format(name)
super(PerlmakePackageTemplate, self).__init__(name, *args, **kwargs)
class PerlbuildPackageTemplate(PerlmakePackageTemplate):
"""Provides appropriate overrides for Perl extensions
that come with a Build.PL instead of a Makefile.PL"""
dependencies = """\
depends_on('perl-module-build', type='build')
# FIXME: Add additional dependencies if required:
# depends_on('perl-foo', type=('build', 'run'))"""
class OctavePackageTemplate(PackageTemplate):
"""Provides appropriate overrides for octave packages"""
base_class_name = 'OctavePackage'
dependencies = """\
extends('octave')
# FIXME: Add additional dependencies if required.
# depends_on('octave-foo', type=('build', 'run'))"""
def __init__(self, name, *args, **kwargs):
# If the user provided `--name octave-splines`, don't rename it
# octave-octave-splines
if not name.startswith('octave-'):
# Make it more obvious that we are renaming the package
tty.msg("Changing package name from {0} to octave-{0}".format(name)) # noqa
name = 'octave-{0}'.format(name)
super(OctavePackageTemplate, self).__init__(name, *args, **kwargs)
class RubyPackageTemplate(PackageTemplate):
"""Provides appropriate overrides for Ruby packages"""
base_class_name = 'RubyPackage'
dependencies = """\
# FIXME: Add dependencies if required. Only add the ruby dependency
# if you need specific versions. A generic ruby dependency is
    # added implicitly by the RubyPackage class.
# depends_on('[email protected]:', type=('build', 'run'))
# depends_on('ruby-foo', type=('build', 'run'))"""
body_def = """\
def build(self, spec, prefix):
# FIXME: If not needed delete this function
pass"""
def __init__(self, name, *args, **kwargs):
# If the user provided `--name ruby-numpy`, don't rename it
# ruby-ruby-numpy
if not name.startswith('ruby-'):
# Make it more obvious that we are renaming the package
tty.msg("Changing package name from {0} to ruby-{0}".format(name))
name = 'ruby-{0}'.format(name)
super(RubyPackageTemplate, self).__init__(name, *args, **kwargs)
class MakefilePackageTemplate(PackageTemplate):
"""Provides appropriate overrides for Makefile packages"""
base_class_name = 'MakefilePackage'
body_def = """\
def edit(self, spec, prefix):
# FIXME: Edit the Makefile if necessary
# FIXME: If not needed delete this function
# makefile = FileFilter('Makefile')
# makefile.filter('CC = .*', 'CC = cc')"""
class IntelPackageTemplate(PackageTemplate):
"""Provides appropriate overrides for licensed Intel software"""
base_class_name = 'IntelPackage'
body_def = """\
# FIXME: Override `setup_environment` if necessary."""
class SIPPackageTemplate(PackageTemplate):
"""Provides appropriate overrides for SIP packages."""
base_class_name = 'SIPPackage'
body_def = """\
def configure_args(self, spec, prefix):
# FIXME: Add arguments other than --bindir and --destdir
# FIXME: If not needed delete this function
args = []
return args"""
def __init__(self, name, *args, **kwargs):
# If the user provided `--name py-pyqt4`, don't rename it py-py-pyqt4
if not name.startswith('py-'):
# Make it more obvious that we are renaming the package
tty.msg("Changing package name from {0} to py-{0}".format(name))
name = 'py-{0}'.format(name)
super(SIPPackageTemplate, self).__init__(name, *args, **kwargs)
templates = {
'autotools': AutotoolsPackageTemplate,
'autoreconf': AutoreconfPackageTemplate,
'cmake': CMakePackageTemplate,
'bundle': BundlePackageTemplate,
'qmake': QMakePackageTemplate,
'maven': MavenPackageTemplate,
'scons': SconsPackageTemplate,
'waf': WafPackageTemplate,
'bazel': BazelPackageTemplate,
'python': PythonPackageTemplate,
'r': RPackageTemplate,
'perlmake': PerlmakePackageTemplate,
'perlbuild': PerlbuildPackageTemplate,
'octave': OctavePackageTemplate,
'ruby': RubyPackageTemplate,
'makefile': MakefilePackageTemplate,
'intel': IntelPackageTemplate,
'meson': MesonPackageTemplate,
'sip': SIPPackageTemplate,
'generic': PackageTemplate,
}
def setup_parser(subparser):
subparser.add_argument(
'url', nargs='?',
help="url of package archive")
subparser.add_argument(
'--keep-stage', action='store_true',
help="don't clean up staging area when command completes")
subparser.add_argument(
'-n', '--name',
help="name of the package to create")
subparser.add_argument(
'-t', '--template', metavar='TEMPLATE',
choices=sorted(templates.keys()),
help="build system template to use. options: %(choices)s")
subparser.add_argument(
'-r', '--repo',
help="path to a repository where the package should be created")
subparser.add_argument(
'-N', '--namespace',
help="specify a namespace for the package. must be the namespace of "
"a repository registered with Spack")
subparser.add_argument(
'-f', '--force', action='store_true',
help="overwrite any existing package file with the same name")
subparser.add_argument(
'--skip-editor', action='store_true',
help="skip the edit session for the package (e.g., automation)")
subparser.add_argument(
'-b', '--batch', action='store_true',
help="don't ask which versions to checksum")
class BuildSystemGuesser:
"""An instance of BuildSystemGuesser provides a callable object to be used
during ``spack create``. By passing this object to ``spack checksum``, we
    can take a peek at the fetched tarball and discern the build system it uses.
"""
def __init__(self):
"""Sets the default build system."""
self.build_system = 'generic'
def __call__(self, stage, url):
"""Try to guess the type of build system used by a project based on
the contents of its archive or the URL it was downloaded from."""
if url is not None:
# Most octave extensions are hosted on Octave-Forge:
# https://octave.sourceforge.net/index.html
# They all have the same base URL.
if 'downloads.sourceforge.net/octave/' in url:
self.build_system = 'octave'
return
if url.endswith('.gem'):
self.build_system = 'ruby'
return
if url.endswith('.whl') or '.whl#' in url:
self.build_system = 'python'
return
# A list of clues that give us an idea of the build system a package
# uses. If the regular expression matches a file contained in the
# archive, the corresponding build system is assumed.
# NOTE: Order is important here. If a package supports multiple
# build systems, we choose the first match in this list.
clues = [
(r'/CMakeLists\.txt$', 'cmake'),
(r'/NAMESPACE$', 'r'),
(r'/configure$', 'autotools'),
(r'/configure\.(in|ac)$', 'autoreconf'),
(r'/Makefile\.am$', 'autoreconf'),
(r'/pom\.xml$', 'maven'),
(r'/SConstruct$', 'scons'),
(r'/waf$', 'waf'),
            (r'/pyproject\.toml$', 'python'),
(r'/setup\.(py|cfg)$', 'python'),
(r'/WORKSPACE$', 'bazel'),
(r'/Build\.PL$', 'perlbuild'),
(r'/Makefile\.PL$', 'perlmake'),
(r'/.*\.gemspec$', 'ruby'),
(r'/Rakefile$', 'ruby'),
(r'/setup\.rb$', 'ruby'),
(r'/.*\.pro$', 'qmake'),
(r'/(GNU)?[Mm]akefile$', 'makefile'),
(r'/DESCRIPTION$', 'octave'),
(r'/meson\.build$', 'meson'),
(r'/configure\.py$', 'sip'),
]
# Peek inside the compressed file.
if (stage.archive_file.endswith('.zip') or
'.zip#' in stage.archive_file):
try:
unzip = which('unzip')
output = unzip('-lq', stage.archive_file, output=str)
except ProcessError:
output = ''
else:
try:
tar = which('tar')
output = tar('--exclude=*/*/*', '-tf',
stage.archive_file, output=str)
except ProcessError:
output = ''
lines = output.split('\n')
# Determine the build system based on the files contained
# in the archive.
for pattern, bs in clues:
if any(re.search(pattern, line) for line in lines):
self.build_system = bs
break
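# Added note (not in the original file): the guesser is intended to be handed to
# ``spack checksum`` as its ``first_stage_function`` so that staging each archive
# also records the detected build system. A minimal sketch, mirroring how
# get_versions() below wires it up:
#
#     guesser = BuildSystemGuesser()
#     versions = spack.stage.get_checksums_for_versions(
#         url_dict, name, first_stage_function=guesser, batch=True)
#     template = templates[guesser.build_system]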
def get_name(args):
"""Get the name of the package based on the supplied arguments.
If a name was provided, always use that. Otherwise, if a URL was
provided, extract the name from that. Otherwise, use a default.
Args:
args (argparse.Namespace): The arguments given to
``spack create``
Returns:
str: The name of the package
"""
# Default package name
name = 'example'
if args.name is not None:
# Use a user-supplied name if one is present
name = args.name
if len(args.name.strip()) > 0:
tty.msg("Using specified package name: '{0}'".format(name))
else:
tty.die("A package name must be provided when using the option.")
elif args.url is not None:
# Try to guess the package name based on the URL
try:
name = parse_name(args.url)
if name != args.url:
desc = 'URL'
else:
desc = 'package name'
tty.msg("This looks like a {0} for {1}".format(desc, name))
except UndetectableNameError:
tty.die("Couldn't guess a name for this package.",
" Please report this bug. In the meantime, try running:",
" `spack create --name <name> <url>`")
name = simplify_name(name)
if not valid_fully_qualified_module_name(name):
tty.die("Package name can only contain a-z, 0-9, and '-'")
return name
def get_url(args):
"""Get the URL to use.
Use a default URL if none is provided.
Args:
args (argparse.Namespace): The arguments given to ``spack create``
Returns:
str: The URL of the package
"""
# Default URL
url = 'https://www.example.com/example-1.2.3.tar.gz'
if args.url:
# Use a user-supplied URL if one is present
url = args.url
return url
def get_versions(args, name):
"""Returns a list of versions and hashes for a package.
Also returns a BuildSystemGuesser object.
Returns default values if no URL is provided.
Args:
args (argparse.Namespace): The arguments given to ``spack create``
name (str): The name of the package
Returns:
tuple: versions and hashes, and a BuildSystemGuesser object
"""
# Default version with hash
hashed_versions = """\
# FIXME: Add proper versions and checksums here.
# version('1.2.3', '0123456789abcdef0123456789abcdef')"""
# Default version without hash
unhashed_versions = """\
# FIXME: Add proper versions here.
# version('1.2.4')"""
# Default guesser
guesser = BuildSystemGuesser()
if args.url is not None and args.template != 'bundle':
# Find available versions
try:
url_dict = spack.util.web.find_versions_of_archive(args.url)
except UndetectableVersionError:
# Use fake versions
tty.warn("Couldn't detect version in: {0}".format(args.url))
return hashed_versions, guesser
if not url_dict:
# If no versions were found, revert to what the user provided
version = parse_version(args.url)
url_dict = {version: args.url}
versions = spack.stage.get_checksums_for_versions(
url_dict, name, first_stage_function=guesser,
keep_stage=args.keep_stage,
batch=(args.batch or len(url_dict) == 1))
else:
versions = unhashed_versions
return versions, guesser
def get_build_system(args, guesser):
"""Determine the build system template.
If a template is specified, always use that. Otherwise, if a URL
is provided, download the tarball and peek inside to guess what
build system it uses. Otherwise, use a generic template by default.
Args:
args (argparse.Namespace): The arguments given to ``spack create``
guesser (BuildSystemGuesser): The first_stage_function given to
``spack checksum`` which records the build system it detects
Returns:
str: The name of the build system template to use
"""
# Default template
template = 'generic'
if args.template is not None:
# Use a user-supplied template if one is present
template = args.template
tty.msg("Using specified package template: '{0}'".format(template))
elif args.url is not None:
# Use whatever build system the guesser detected
template = guesser.build_system
if template == 'generic':
tty.warn("Unable to detect a build system. "
"Using a generic package template.")
else:
msg = "This package looks like it uses the {0} build system"
tty.msg(msg.format(template))
return template
def get_repository(args, name):
"""Returns a Repo object that will allow us to determine the path where
the new package file should be created.
Args:
args (argparse.Namespace): The arguments given to ``spack create``
name (str): The name of the package to create
Returns:
spack.repo.Repo: A Repo object capable of determining the path to the
package file
"""
spec = Spec(name)
# Figure out namespace for spec
if spec.namespace and args.namespace and spec.namespace != args.namespace:
tty.die("Namespaces '{0}' and '{1}' do not match.".format(
spec.namespace, args.namespace))
if not spec.namespace and args.namespace:
spec.namespace = args.namespace
# Figure out where the new package should live
repo_path = args.repo
if repo_path is not None:
repo = spack.repo.Repo(repo_path)
if spec.namespace and spec.namespace != repo.namespace:
tty.die("Can't create package with namespace {0} in repo with "
"namespace {1}".format(spec.namespace, repo.namespace))
else:
if spec.namespace:
repo = spack.repo.path.get_repo(spec.namespace, None)
if not repo:
tty.die("Unknown namespace: '{0}'".format(spec.namespace))
else:
repo = spack.repo.path.first_repo()
# Set the namespace on the spec if it's not there already
if not spec.namespace:
spec.namespace = repo.namespace
return repo
def create(parser, args):
# Gather information about the package to be created
name = get_name(args)
url = get_url(args)
versions, guesser = get_versions(args, name)
build_system = get_build_system(args, guesser)
# Create the package template object
constr_args = {'name': name, 'versions': versions}
package_class = templates[build_system]
if package_class != BundlePackageTemplate:
constr_args['url'] = url
package = package_class(**constr_args)
tty.msg("Created template for {0} package".format(package.name))
# Create a directory for the new package
repo = get_repository(args, name)
pkg_path = repo.filename_for_package_name(package.name)
if os.path.exists(pkg_path) and not args.force:
tty.die('{0} already exists.'.format(pkg_path),
' Try running `spack create --force` to overwrite it.')
else:
mkdirp(os.path.dirname(pkg_path))
# Write the new package file
package.write(pkg_path)
tty.msg("Created package file: {0}".format(pkg_path))
# Optionally open up the new package file in your $EDITOR
if not args.skip_editor:
editor(pkg_path)
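# Illustrative invocations (hypothetical names and URLs, shown only as a usage
# sketch of the arguments defined in setup_parser() above):
#
#     spack create https://www.example.com/example-1.2.3.tar.gz
#     spack create --name py-foo --template python --force <url>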
|
the-stack_0_10012 | #! /usr/bin/env python
#
# Example program using irc.client.
#
# Copyright (C) 1999-2002 Joel Rosdahl
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Joel Rosdahl <[email protected]>
#
# servermap connects to an IRC server and finds out what other IRC
# servers there are in the net and prints a tree-like map of their
# interconnections.
#
# Example:
#
# % ./servermap irc.dal.net somenickname
# Connecting to server...
# Getting links...
#
# 26 servers (18 leaves and 8 hubs)
#
# splitrock.tx.us.dal.net
# `-vader.ny.us.dal.net
# |-twisted.ma.us.dal.net
# |-sodre.nj.us.dal.net
# |-glass.oh.us.dal.net
# |-distant.ny.us.dal.net
# | |-algo.se.eu.dal.net
# | | |-borg.se.eu.dal.net
# | | | `-ced.se.eu.dal.net
# | | |-viking.no.eu.dal.net
# | | |-inco.fr.eu.dal.net
# | | |-paranoia.se.eu.dal.net
# | | |-gaston.se.eu.dal.net
# | | | `-powertech.no.eu.dal.net
# | | `-algo-u.se.eu.dal.net
# | |-philly.pa.us.dal.net
# | |-liberty.nj.us.dal.net
# | `-jade.va.us.dal.net
# `-journey.ca.us.dal.net
# |-ion.va.us.dal.net
# |-dragons.ca.us.dal.net
# |-toronto.on.ca.dal.net
# | `-netropolis-r.uk.eu.dal.net
# | |-traced.de.eu.dal.net
# | `-lineone.uk.eu.dal.net
# `-omega.ca.us.dal.net
import irc.client
import sys
def on_connect(connection, event):
sys.stdout.write("\nGetting links...")
sys.stdout.flush()
connection.links()
def on_passwdmismatch(connection, event):
print("Password required.")
sys.exit(1)
def on_links(connection, event):
global links
links.append((event.arguments[0],
event.arguments[1],
event.arguments[2]))
def on_endoflinks(connection, event):
global links
print("\n")
m = {}
for (to_node, from_node, desc) in links:
if from_node != to_node:
m[from_node] = m.get(from_node, []) + [to_node]
if connection.get_server_name() in m:
if len(m[connection.get_server_name()]) == 1:
hubs = len(m) - 1
else:
hubs = len(m)
else:
hubs = 0
print("%d servers (%d leaves and %d hubs)\n" % (len(links), len(links)-hubs, hubs))
print_tree(0, [], connection.get_server_name(), m)
connection.quit("Using irc.client.py")
def on_disconnect(connection, event):
sys.exit(0)
def indent_string(level, active_levels, last):
if level == 0:
return ""
s = ""
for i in range(level-1):
if i in active_levels:
s = s + "| "
else:
s = s + " "
if last:
s = s + "`-"
else:
s = s + "|-"
return s
def print_tree(level, active_levels, root, map, last=0):
sys.stdout.write(indent_string(level, active_levels, last)
+ root + "\n")
if root in map:
list = map[root]
for r in list[:-1]:
print_tree(level+1, active_levels[:]+[level], r, map)
print_tree(level+1, active_levels[:], list[-1], map, 1)
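# Added example (hand-constructed, not from a real IRC network): with
# map = {'hub': ['leaf1', 'leaf2']}, calling print_tree(0, [], 'hub', map)
# prints:
#
#     hub
#     |-leaf1
#     `-leaf2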
def main():
global links
if len(sys.argv) != 3:
print("Usage: servermap <server[:port]> <nickname>")
sys.exit(1)
links = []
s = sys.argv[1].split(":", 1)
server = s[0]
if len(s) == 2:
try:
port = int(s[1])
except ValueError:
print("Error: Erroneous port.")
sys.exit(1)
else:
port = 6667
nickname = sys.argv[2]
client = irc.client.IRC()
sys.stdout.write("Connecting to server...")
sys.stdout.flush()
try:
c = client.server().connect(server, port, nickname)
except irc.client.ServerConnectionError as x:
print(x)
sys.exit(1)
c.add_global_handler("welcome", on_connect)
c.add_global_handler("passwdmismatch", on_passwdmismatch)
c.add_global_handler("links", on_links)
c.add_global_handler("endoflinks", on_endoflinks)
c.add_global_handler("disconnect", on_disconnect)
client.process_forever()
if __name__ == '__main__':
main()
|
the-stack_0_10013 | # 5550
# <!*[^<>]*>
# POLYNOMIAL
# nums:4
# POLYNOMIAL AttackString:"<"+"!"*10000+"! _1_POA(i)"
import re
from time import perf_counter
regex = """<!*[^<>]*>"""
REGEX = re.compile(regex)
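# Added note: with no closing '>', both '!*' and '[^<>]*' can match the run of
# '!' characters, so the engine retries every split point between them before
# failing. The work therefore grows roughly quadratically with the number of
# '!' characters, which is what the timing loop below measures.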
for i in range(0, 150000):
ATTACK = "<" + "!" * i * 10000 + "! _1_POA(i)"
LEN = len(ATTACK)
BEGIN = perf_counter()
m = REGEX.search(ATTACK)
# m = REGEX.match(ATTACK)
DURATION = perf_counter() - BEGIN
print(f"{i *10000}: took {DURATION} seconds!") |
the-stack_0_10014 | #!/usr/bin/env python3
import os
import re
import sys
import urllib.request
from time import strptime
# Regular Expressions
CVE_RE = re.compile(r'CVE-[0-9]{4}-[0-9]{4,}')
HTML_RE = re.compile(r'<[^<]+?>')
BOUNTY_RE = re.compile(r'\[\$([0-9\.]|TBD|N/A)+\]')
BUG_RE = re.compile(r'\[[0-9]+\]')
DESCRIPTION_RE = re.compile(r'[:-]{0,1} [^\.]*(\.|\s)')
CLEAN_RE = re.compile(r'(\]|\[|\: |\- |\$)')
ANNOUNCED_RE = re.compile(r'[0-9]{4}-[0-9]{2}-[0-9]{2}')
DATE_RE = re.compile(r'\w+, \w+ ([0-9]{1}|[0-9]{2}), [0-9]{4}')
def format_date(publish_date):
fields = publish_date.split()
sub = fields[1][:3]
month = str(strptime(sub,'%b').tm_mon)
if len(month) == 1:
month = '0' + month
year = fields[3]
day = fields[2][:-1]
if len(day) == 1:
day = '0' + day
return "{}-{}-{}".format(year, month, day)
SKELETON = list()
with open("../spec/data/cve-skeleton.yml", "r") as f:
SKELETON = f.readlines()
def get_skeleton(cve, description, bounty, bug, announced):
""" Return the skeleton of a CVE with the given fields filled. """
global SKELETON
skeleton = SKELETON.copy()
for i in range(len(skeleton)):
if skeleton[i] == "CVE:\n":
skeleton[i] = "CVE: {:s}\n".format(cve)
elif skeleton[i] == "description: |\n":
skeleton[i] = "description: |\n {:s}\n".format(description)
elif skeleton[i] == "bugs: []\n":
skeleton[i]= "bugs: [{:s}]\n".format(bug)
elif skeleton[i] == " amt:\n":
if bounty == "N/A":
skeleton[i] = " amt: 0\n"
elif bounty == "TBD":
skeleton[i+1] = " announced: TBD\n"
else:
skeleton[i] = " amt: {:s}\n".format(bounty)
elif skeleton[i] == "announced:\n":
skeleton[i] = "announced: {:s}\n".format(announced)
return "".join(skeleton)
def clean_line(line):
""" Decode bytestrings and string newlines. """
return line.decode().strip("\n")
def clean_match(text):
""" Clean up the text by removing matches in CLEAN_RE. """
return CLEAN_RE.sub('', text)
def get_page(url):
""" Return the raw HTML of the given URL. """
return urllib.request.urlopen(url)
if __name__ == "__main__":
url = sys.argv[1]
page = get_page(url)
contents = page.readlines()
matches = list()
publish_date = ""
for line in contents:
line = HTML_RE.sub('', clean_line(line))
if CVE_RE.search(line):
matches.append(line)
if DATE_RE.search(line) and not publish_date:
publish_date = line
matches = list(set(matches))
# For each CVE...
for cve in matches:
# Parse out the fields we care about...
try:
bounty = clean_match(BOUNTY_RE.search(cve).group(0))
except:
bounty = ""
bug_id = clean_match(BUG_RE.search(cve).group(0))
cve_id = clean_match(CVE_RE.search(cve).group(0))
try:
description = clean_match(DESCRIPTION_RE.search(cve).group(0))
except:
print("ERROR: Regex failed for Description in " + str(cve_id))
try:
announced = clean_match(ANNOUNCED_RE.search(cve).group(0))
except:
announced = format_date(publish_date)
# And write the new CVE to disk.
cve_path = "../cves/{:s}.yml".format(cve_id)
if os.path.exists(cve_path):
print("Skipping CVE: {:s}.".format(cve_id))
else:
skeleton = get_skeleton(cve_id, description, bounty, bug_id, announced)
with open("../cves/" + cve_id + ".yml", "w") as f:
f.write(skeleton)
print(" Created CVE: {:s}".format(cve_path))
|
the-stack_0_10015 | import unittest
from argparse import ArgumentTypeError
from streamlink.utils.args import (
boolean, comma_list, comma_list_filter, filesize, keyvalue, num
)
class TestUtilsArgs(unittest.TestCase):
def test_boolean_true(self):
self.assertEqual(boolean('1'), True)
self.assertEqual(boolean('on'), True)
self.assertEqual(boolean('true'), True)
self.assertEqual(boolean('yes'), True)
self.assertEqual(boolean('Yes'), True)
def test_boolean_false(self):
self.assertEqual(boolean('0'), False)
self.assertEqual(boolean('false'), False)
self.assertEqual(boolean('no'), False)
self.assertEqual(boolean('No'), False)
self.assertEqual(boolean('off'), False)
def test_boolean_error(self):
with self.assertRaises(ArgumentTypeError):
boolean('yesno')
with self.assertRaises(ArgumentTypeError):
boolean('FOO')
with self.assertRaises(ArgumentTypeError):
boolean('2')
def test_comma_list(self):
# (values, result)
test_data = [
('foo.bar,example.com', ['foo.bar', 'example.com']),
('/var/run/foo,/var/run/bar', ['/var/run/foo', '/var/run/bar']),
('foo bar,24', ['foo bar', '24']),
('hls', ['hls']),
]
for _v, _r in test_data:
self.assertEqual(comma_list(_v), _r)
def test_comma_list_filter(self):
# (acceptable, values, result)
test_data = [
(['foo', 'bar', 'com'], 'foo,bar,example.com', ['foo', 'bar']),
(['/var/run/foo', 'FO'], '/var/run/foo,/var/run/bar',
['/var/run/foo']),
(['hls', 'hls5', 'dash'], 'hls,hls5', ['hls', 'hls5']),
(['EU', 'RU'], 'DE,FR,RU,US', ['RU']),
]
for _a, _v, _r in test_data:
func = comma_list_filter(_a)
self.assertEqual(func(_v), _r)
def test_filesize(self):
self.assertEqual(filesize('2000'), 2000)
self.assertEqual(filesize('11KB'), 1024 * 11)
self.assertEqual(filesize('12MB'), 1024 * 1024 * 12)
self.assertEqual(filesize('1KB'), 1024)
self.assertEqual(filesize('1MB'), 1024 * 1024)
self.assertEqual(filesize('2KB'), 1024 * 2)
def test_filesize_error(self):
with self.assertRaises(ValueError):
filesize('FOO')
with self.assertRaises(ValueError):
filesize('0.00000')
def test_keyvalue(self):
# (value, result)
test_data = [
('X-Forwarded-For=127.0.0.1', ('X-Forwarded-For', '127.0.0.1')),
('Referer=https://foo.bar', ('Referer', 'https://foo.bar')),
(
'User-Agent=Mozilla/5.0 (X11; Linux x86_64; rv:60.0)'
' Gecko/20100101 Firefox/60.0',
('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64; rv:60.0) '
'Gecko/20100101 Firefox/60.0')
),
('domain=example.com', ('domain', 'example.com')),
]
for _v, _r in test_data:
self.assertEqual(keyvalue(_v), _r)
def test_keyvalue_error(self):
with self.assertRaises(ValueError):
keyvalue('127.0.0.1')
def test_num(self):
# (value, func, result)
test_data = [
('33', num(int, 5, 120), 33),
('234', num(int, min=10), 234),
('50.222', num(float, 10, 120), 50.222),
]
for _v, _f, _r in test_data:
self.assertEqual(_f(_v), _r)
def test_num_error(self):
with self.assertRaises(ArgumentTypeError):
func = num(int, 5, 10)
func('3')
with self.assertRaises(ArgumentTypeError):
func = num(int, max=11)
func('12')
with self.assertRaises(ArgumentTypeError):
func = num(int, min=15)
func('8')
with self.assertRaises(ArgumentTypeError):
func = num(float, 10, 20)
func('40.222')
|
the-stack_0_10016 | #!/usr/bin/env vpython
#
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import collections
import json
import logging
import os
import pipes
import re
import shutil
import signal
import socket
import sys
import tempfile
# The following non-std imports are fetched via vpython. See the list at
# //.vpython
import dateutil.parser # pylint: disable=import-error
import jsonlines # pylint: disable=import-error
import psutil # pylint: disable=import-error
import six
CHROMIUM_SRC_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..'))
# Use the android test-runner's gtest results support library for generating
# output json ourselves.
sys.path.insert(0, os.path.join(CHROMIUM_SRC_PATH, 'build', 'android'))
from pylib.base import base_test_result # pylint: disable=import-error
from pylib.base import result_sink # pylint: disable=import-error
from pylib.results import json_results # pylint: disable=import-error
if six.PY2:
import subprocess32 as subprocess # pylint: disable=import-error
else:
import subprocess # pylint: disable=import-error,wrong-import-order
DEFAULT_CROS_CACHE = os.path.abspath(
os.path.join(CHROMIUM_SRC_PATH, 'build', 'cros_cache'))
CHROMITE_PATH = os.path.abspath(
os.path.join(CHROMIUM_SRC_PATH, 'third_party', 'chromite'))
CROS_RUN_TEST_PATH = os.path.abspath(
os.path.join(CHROMITE_PATH, 'bin', 'cros_run_test'))
# This is a special hostname that resolves to a different DUT in the lab
# depending on which lab machine you're on.
LAB_DUT_HOSTNAME = 'variable_chromeos_device_hostname'
SYSTEM_LOG_LOCATIONS = [
'/var/log/chrome/',
'/var/log/messages',
'/var/log/ui/',
]
TAST_DEBUG_DOC = 'https://bit.ly/2LgvIXz'
class TestFormatError(Exception):
pass
class RemoteTest(object):
# This is a basic shell script that can be appended to in order to invoke the
# test on the device.
BASIC_SHELL_SCRIPT = [
'#!/bin/sh',
# /home and /tmp are mounted with "noexec" in the device, but some of our
# tools and tests use those dirs as a workspace (eg: vpython downloads
# python binaries to ~/.vpython-root and /tmp/vpython_bootstrap).
# /usr/local/tmp doesn't have this restriction, so change the location of
# the home and temp dirs for the duration of the test.
'export HOME=/usr/local/tmp',
'export TMPDIR=/usr/local/tmp',
]
def __init__(self, args, unknown_args):
self._additional_args = unknown_args
self._path_to_outdir = args.path_to_outdir
self._test_launcher_summary_output = args.test_launcher_summary_output
self._logs_dir = args.logs_dir
self._use_vm = args.use_vm
self._rdb_client = result_sink.TryInitClient()
self._retries = 0
self._timeout = None
self._test_launcher_shard_index = args.test_launcher_shard_index
self._test_launcher_total_shards = args.test_launcher_total_shards
# The location on disk of a shell script that can be optionally used to
# invoke the test on the device. If it's not set, we assume self._test_cmd
# contains the test invocation.
self._on_device_script = None
self._test_cmd = [
CROS_RUN_TEST_PATH,
'--board',
args.board,
'--cache-dir',
args.cros_cache,
]
if args.use_vm:
self._test_cmd += [
'--start',
          # Don't persist any filesystem changes after the VM shuts down.
'--copy-on-write',
]
else:
self._test_cmd += [
'--device', args.device if args.device else LAB_DUT_HOSTNAME
]
if args.logs_dir:
for log in SYSTEM_LOG_LOCATIONS:
self._test_cmd += ['--results-src', log]
self._test_cmd += [
'--results-dest-dir',
os.path.join(args.logs_dir, 'system_logs')
]
if args.flash:
self._test_cmd += ['--flash']
if args.public_image:
self._test_cmd += ['--public-image']
# This environment variable is set for tests that have been instrumented
# for code coverage. Its incoming value is expected to be a location
# inside a subdirectory of result_dir above. This is converted to an
# absolute path that the vm is able to write to, and passed in the
# --results-src flag to cros_run_vm_test for copying out of the vm before
# its termination.
self._llvm_profile_var = None
if os.environ.get('LLVM_PROFILE_FILE'):
_, llvm_profile_file = os.path.split(os.environ['LLVM_PROFILE_FILE'])
self._llvm_profile_var = '/tmp/profraw/%s' % llvm_profile_file
# This should make the vm test runner exfil the profiling data.
self._test_cmd += ['--results-src', '/tmp/profraw']
self._test_env = setup_env()
@property
def suite_name(self):
raise NotImplementedError('Child classes need to define suite name.')
@property
def test_cmd(self):
return self._test_cmd
def write_test_script_to_disk(self, script_contents):
# Since we're using an on_device_script to invoke the test, we'll need to
# set cwd.
self._test_cmd += [
'--remote-cmd',
'--cwd',
os.path.relpath(self._path_to_outdir, CHROMIUM_SRC_PATH),
]
logging.info('Running the following command on the device:')
logging.info('\n' + '\n'.join(script_contents))
fd, tmp_path = tempfile.mkstemp(suffix='.sh', dir=self._path_to_outdir)
os.fchmod(fd, 0o755)
with os.fdopen(fd, 'wb') as f:
f.write('\n'.join(script_contents) + '\n')
return tmp_path
def run_test(self):
# Traps SIGTERM and kills all child processes of cros_run_test when it's
# caught. This will allow us to capture logs from the device if a test hangs
# and gets timeout-killed by swarming. See also:
# https://chromium.googlesource.com/infra/luci/luci-py/+/master/appengine/swarming/doc/Bot.md#graceful-termination_aka-the-sigterm-and-sigkill-dance
test_proc = None
def _kill_child_procs(trapped_signal, _):
logging.warning('Received signal %d. Killing child processes of test.',
trapped_signal)
if not test_proc or not test_proc.pid:
# This shouldn't happen?
logging.error('Test process not running.')
return
for child in psutil.Process(test_proc.pid).children():
logging.warning('Killing process %s', child)
child.kill()
signal.signal(signal.SIGTERM, _kill_child_procs)
for i in range(self._retries + 1):
logging.info('########################################')
logging.info('Test attempt #%d', i)
logging.info('########################################')
test_proc = subprocess.Popen(
self._test_cmd,
stdout=sys.stdout,
stderr=sys.stderr,
env=self._test_env)
try:
test_proc.wait(timeout=self._timeout)
except subprocess.TimeoutExpired: # pylint: disable=no-member
logging.error('Test timed out. Sending SIGTERM.')
# SIGTERM the proc and wait 10s for it to close.
test_proc.terminate()
try:
test_proc.wait(timeout=10)
except subprocess.TimeoutExpired: # pylint: disable=no-member
# If it hasn't closed in 10s, SIGKILL it.
logging.error('Test did not exit in time. Sending SIGKILL.')
test_proc.kill()
test_proc.wait()
      logging.info('Test exited with %d.', test_proc.returncode)
if test_proc.returncode == 0:
break
ret = self.post_run(test_proc.returncode)
# Allow post_run to override test proc return code. (Useful when the host
# side Tast bin returns 0 even for failed tests.)
if ret is not None:
return ret
return test_proc.returncode
def post_run(self, return_code):
if self._on_device_script:
os.remove(self._on_device_script)
# Create a simple json results file for a test run. The results will contain
# only one test (suite_name), and will either be a PASS or FAIL depending on
# return_code.
if self._test_launcher_summary_output:
result = (
base_test_result.ResultType.FAIL
if return_code else base_test_result.ResultType.PASS)
suite_result = base_test_result.BaseTestResult(self.suite_name, result)
run_results = base_test_result.TestRunResults()
run_results.AddResult(suite_result)
with open(self._test_launcher_summary_output, 'w') as f:
json.dump(json_results.GenerateResultsDict([run_results]), f)
@staticmethod
def get_artifacts(path):
"""Crawls a given directory for file artifacts to attach to a test.
Args:
path: Path to a directory to search for artifacts.
Returns:
A dict mapping name of the artifact to its absolute filepath.
"""
artifacts = {}
for dirpath, _, filenames in os.walk(path):
for f in filenames:
artifact_path = os.path.join(dirpath, f)
artifacts[os.path.relpath(artifact_path, path)] = {
'filePath': artifact_path,
}
return artifacts
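# Added note: for a results directory containing "screenshot.png" and
# "logs/ui.log", the walk above would yield roughly
#     {'screenshot.png': {'filePath': '<path>/screenshot.png'},
#      'logs/ui.log': {'filePath': '<path>/logs/ui.log'}}
# (paths shown are placeholders).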
class TastTest(RemoteTest):
def __init__(self, args, unknown_args):
super(TastTest, self).__init__(args, unknown_args)
self._suite_name = args.suite_name
self._tast_vars = args.tast_vars
self._tests = args.tests
# The CQ passes in '--gtest_filter' when specifying tests to skip. Store it
# here and parse it later to integrate it into Tast executions.
self._gtest_style_filter = args.gtest_filter
self._conditional = args.conditional
self._should_strip = args.strip_chrome
self._deploy_lacros = args.deploy_lacros
if self._deploy_lacros and self._should_strip:
raise TestFormatError(
'--strip-chrome is only applicable to ash-chrome because '
'lacros-chrome deployment uses --nostrip by default, so it cannot '
'be specificed with --deploy-lacros.')
if not self._llvm_profile_var and not self._logs_dir:
# The host-side Tast bin returns 0 when tests fail, so we need to capture
# and parse its json results to reliably determine if tests fail.
raise TestFormatError(
'When using the host-side Tast bin, "--logs-dir" must be passed in '
'order to parse its results.')
# If the first test filter is negative, it should be safe to assume all of
# them are, so just test the first filter.
if self._gtest_style_filter and self._gtest_style_filter[0] == '-':
raise TestFormatError('Negative test filters not supported for Tast.')
@property
def suite_name(self):
return self._suite_name
def build_test_command(self):
unsupported_args = [
'--test-launcher-retry-limit',
'--test-launcher-batch-limit',
'--gtest_repeat',
]
for unsupported_arg in unsupported_args:
if any(arg.startswith(unsupported_arg) for arg in self._additional_args):
logging.info(
'%s not supported for Tast tests. The arg will be ignored.',
unsupported_arg)
self._additional_args = [
arg for arg in self._additional_args
if not arg.startswith(unsupported_arg)
]
# Lacros deployment mounts itself by default.
self._test_cmd.extend(
['--deploy-lacros'] if self._deploy_lacros else ['--deploy', '--mount'])
self._test_cmd += [
'--build-dir',
os.path.relpath(self._path_to_outdir, CHROMIUM_SRC_PATH)
] + self._additional_args
# Coverage tests require some special pre-test setup, so use an
# on_device_script in that case. For all other tests, use cros_run_test's
# built-in '--tast' option. This gives us much better results reporting.
if self._llvm_profile_var:
# Build the shell script that will be used on the device to invoke the
# test.
device_test_script_contents = self.BASIC_SHELL_SCRIPT[:]
device_test_script_contents += [
'echo "LLVM_PROFILE_FILE=%s" >> /etc/chrome_dev.conf' %
(self._llvm_profile_var)
]
local_test_runner_cmd = ['local_test_runner', '-waituntilready']
if self._use_vm:
# If we're running tests in VMs, tell the test runner to skip tests that
# aren't compatible.
local_test_runner_cmd.append('-extrauseflags=tast_vm')
if self._conditional:
local_test_runner_cmd.append(pipes.quote(self._conditional))
else:
local_test_runner_cmd.extend(self._tests)
device_test_script_contents.append(' '.join(local_test_runner_cmd))
self._on_device_script = self.write_test_script_to_disk(
device_test_script_contents)
self._test_cmd += [
'--files',
os.path.relpath(self._on_device_script), '--',
'./' + os.path.relpath(self._on_device_script, self._path_to_outdir)
]
else:
# Capture tast's results in the logs dir as well.
if self._logs_dir:
self._test_cmd += [
'--results-dir',
self._logs_dir,
]
self._test_cmd += [
'--tast-total-shards=%d' % self._test_launcher_total_shards,
'--tast-shard-index=%d' % self._test_launcher_shard_index,
]
# If we're using a test filter, replace the contents of the Tast
# conditional with a long list of "name:test" expressions, one for each
# test in the filter.
if self._gtest_style_filter:
if self._conditional or self._tests:
logging.warning(
'Presence of --gtest_filter will cause the specified Tast '
'conditional or test list to be ignored.')
names = []
for test in self._gtest_style_filter.split(':'):
names.append('"name:%s"' % test)
self._conditional = '(' + ' || '.join(names) + ')'
if self._conditional:
# Don't use pipes.quote() here. Something funky happens with the arg
# as it gets passed down from cros_run_test to tast. (Tast picks up the
# escaping single quotes and complains that the conditional "must be
# within parentheses".)
self._test_cmd.append('--tast=%s' % self._conditional)
else:
self._test_cmd.append('--tast')
self._test_cmd.extend(self._tests)
for v in self._tast_vars or []:
self._test_cmd.extend(['--tast-var', v])
# Mounting ash-chrome gives it enough disk space to not need stripping,
# but only for one not instrumented with code coverage.
# Lacros uses --nostrip by default, so there is no need to specify.
if not self._deploy_lacros and not self._should_strip:
self._test_cmd.append('--nostrip')
def post_run(self, return_code):
# If we don't need to parse the host-side Tast tool's results, fall back to
# the parent method's default behavior.
if self._llvm_profile_var:
return super(TastTest, self).post_run(return_code)
tast_results_path = os.path.join(self._logs_dir, 'streamed_results.jsonl')
if not os.path.exists(tast_results_path):
logging.error(
'Tast results not found at %s. Falling back to generic result '
'reporting.', tast_results_path)
return super(TastTest, self).post_run(return_code)
# See the link below for the format of the results:
# https://godoc.org/chromium.googlesource.com/chromiumos/platform/tast.git/src/chromiumos/cmd/tast/run#TestResult
with jsonlines.open(tast_results_path) as reader:
tast_results = collections.deque(reader)
suite_results = base_test_result.TestRunResults()
for test in tast_results:
errors = test['errors']
start, end = test['start'], test['end']
# Use dateutil to parse the timestamps since datetime can't handle
# nanosecond precision.
duration = dateutil.parser.parse(end) - dateutil.parser.parse(start)
duration_ms = duration.total_seconds() * 1000
if bool(test['skipReason']):
result = base_test_result.ResultType.SKIP
elif errors:
result = base_test_result.ResultType.FAIL
else:
result = base_test_result.ResultType.PASS
error_log = ''
if errors:
# See the link below for the format of these errors:
# https://godoc.org/chromium.googlesource.com/chromiumos/platform/tast.git/src/chromiumos/tast/testing#Error
for err in errors:
error_log += err['stack'].encode('utf-8') + '\n'
error_log += (
"\nIf you're unsure why this test failed, consult the steps "
'outlined in\n%s\n' % TAST_DEBUG_DOC)
base_result = base_test_result.BaseTestResult(
test['name'], result, duration=duration_ms, log=error_log)
suite_results.AddResult(base_result)
self._maybe_handle_perf_results(test['name'])
if self._rdb_client:
        # Walk the contents of the test's "outDir" and attach any file found
# inside as an RDB 'artifact'. (This could include system logs, screen
# shots, etc.)
artifacts = self.get_artifacts(test['outDir'])
self._rdb_client.Post(test['name'], result, error_log, artifacts)
if self._rdb_client and self._logs_dir:
# Attach artifacts from the device that don't apply to a single test.
artifacts = self.get_artifacts(
os.path.join(self._logs_dir, 'system_logs'))
artifacts.update(
self.get_artifacts(os.path.join(self._logs_dir, 'crashes')))
self._rdb_client.ReportInvocationLevelArtifacts(artifacts)
if self._test_launcher_summary_output:
with open(self._test_launcher_summary_output, 'w') as f:
json.dump(json_results.GenerateResultsDict([suite_results]), f)
if not suite_results.DidRunPass():
return 1
elif return_code:
logging.warning(
'No failed tests found, but exit code of %d was returned from '
'cros_run_test.', return_code)
return return_code
return 0
def _maybe_handle_perf_results(self, test_name):
"""Prepares any perf results from |test_name| for process_perf_results.
- process_perf_results looks for top level directories containing a
perf_results.json file and a test_results.json file. The directory names
are used as the benchmark names.
- If a perf_results.json or results-chart.json file exists in the
|test_name| results directory, a top level directory is created and the
perf results file is copied to perf_results.json.
- A trivial test_results.json file is also created to indicate that the test
succeeded (this function would not be called otherwise).
- When process_perf_results is run, it will find the expected files in the
named directory and upload the benchmark results.
"""
perf_results = os.path.join(self._logs_dir, 'tests', test_name,
'perf_results.json')
# TODO(stevenjb): Remove check for crosbolt results-chart.json file.
if not os.path.exists(perf_results):
perf_results = os.path.join(self._logs_dir, 'tests', test_name,
'results-chart.json')
if os.path.exists(perf_results):
benchmark_dir = os.path.join(self._logs_dir, test_name)
if not os.path.isdir(benchmark_dir):
os.makedirs(benchmark_dir)
shutil.copyfile(perf_results,
os.path.join(benchmark_dir, 'perf_results.json'))
# process_perf_results.py expects a test_results.json file.
test_results = {'valid': True, 'failures': []}
with open(os.path.join(benchmark_dir, 'test_results.json'), 'w') as out:
json.dump(test_results, out)
class GTestTest(RemoteTest):
# The following list corresponds to paths that should not be copied over to
# the device during tests. In other words, these files are only ever used on
# the host.
_FILE_IGNORELIST = [
re.compile(r'.*build/android.*'),
re.compile(r'.*build/chromeos.*'),
re.compile(r'.*build/cros_cache.*'),
# The following matches anything under //testing/ that isn't under
# //testing/buildbot/filters/.
re.compile(r'.*testing/(?!buildbot/filters).*'),
re.compile(r'.*third_party/chromite.*'),
]
def __init__(self, args, unknown_args):
super(GTestTest, self).__init__(args, unknown_args)
self._test_exe = args.test_exe
self._runtime_deps_path = args.runtime_deps_path
self._vpython_dir = args.vpython_dir
self._on_device_script = None
self._env_vars = args.env_var
self._stop_ui = args.stop_ui
self._trace_dir = args.trace_dir
@property
def suite_name(self):
return self._test_exe
def build_test_command(self):
# To keep things easy for us, ensure both types of output locations are
# the same.
if self._test_launcher_summary_output and self._logs_dir:
json_out_dir = os.path.dirname(self._test_launcher_summary_output) or '.'
if os.path.abspath(json_out_dir) != os.path.abspath(self._logs_dir):
raise TestFormatError(
'--test-launcher-summary-output and --logs-dir must point to '
'the same directory.')
if self._test_launcher_summary_output:
result_dir, result_file = os.path.split(
self._test_launcher_summary_output)
# If args.test_launcher_summary_output is a file in cwd, result_dir will
# be an empty string, so replace it with '.' when this is the case so
# cros_run_test can correctly handle it.
if not result_dir:
result_dir = '.'
device_result_file = '/tmp/%s' % result_file
self._test_cmd += [
'--results-src',
device_result_file,
'--results-dest-dir',
result_dir,
]
if self._trace_dir and self._logs_dir:
trace_path = os.path.dirname(self._trace_dir) or '.'
if os.path.abspath(trace_path) != os.path.abspath(self._logs_dir):
raise TestFormatError(
'--trace-dir and --logs-dir must point to the same directory.')
if self._trace_dir:
trace_path, trace_dirname = os.path.split(self._trace_dir)
device_trace_dir = '/tmp/%s' % trace_dirname
self._test_cmd += [
'--results-src',
device_trace_dir,
'--results-dest-dir',
trace_path,
]
# Build the shell script that will be used on the device to invoke the test.
# Stored here as a list of lines.
device_test_script_contents = self.BASIC_SHELL_SCRIPT[:]
if self._llvm_profile_var:
device_test_script_contents += [
'export LLVM_PROFILE_FILE=%s' % self._llvm_profile_var,
]
for var_name, var_val in self._env_vars:
device_test_script_contents += ['export %s=%s' % (var_name, var_val)]
if self._vpython_dir:
vpython_path = os.path.join(self._path_to_outdir, self._vpython_dir,
'vpython')
cpython_path = os.path.join(self._path_to_outdir, self._vpython_dir,
'bin', 'python')
if not os.path.exists(vpython_path) or not os.path.exists(cpython_path):
raise TestFormatError(
'--vpython-dir must point to a dir with both infra/python/cpython '
'and infra/tools/luci/vpython installed.')
vpython_spec_path = os.path.relpath(
os.path.join(CHROMIUM_SRC_PATH, '.vpython'), self._path_to_outdir)
# Initialize the vpython cache. This can take 10-20s, and some tests
# can't afford to wait that long on the first invocation.
device_test_script_contents.extend([
'export PATH=$PWD/%s:$PWD/%s/bin/:$PATH' %
(self._vpython_dir, self._vpython_dir),
'vpython -vpython-spec %s -vpython-tool install' %
(vpython_spec_path),
])
test_invocation = ('LD_LIBRARY_PATH=./ ./%s --test-launcher-shard-index=%d '
'--test-launcher-total-shards=%d' %
(self._test_exe, self._test_launcher_shard_index,
self._test_launcher_total_shards))
if self._test_launcher_summary_output:
test_invocation += ' --test-launcher-summary-output=%s' % (
device_result_file)
if self._trace_dir:
device_test_script_contents.extend([
'rm -rf %s' % device_trace_dir,
'su chronos -c -- "mkdir -p %s"' % device_trace_dir,
])
test_invocation += ' --trace-dir=%s' % device_trace_dir
if self._additional_args:
test_invocation += ' %s' % ' '.join(self._additional_args)
if self._stop_ui:
device_test_script_contents += [
'stop ui',
]
# The UI service on the device owns the chronos user session, so shutting
# it down as chronos kills the entire execution of the test. So we'll have
# to run as root up until the test invocation.
test_invocation = 'su chronos -c -- "%s"' % test_invocation
# And we'll need to chown everything since cros_run_test's "--as-chronos"
# option normally does that for us.
device_test_script_contents.append('chown -R chronos: ../..')
else:
self._test_cmd += [
# Some tests fail as root, so run as the less privileged user
# 'chronos'.
'--as-chronos',
]
device_test_script_contents.append(test_invocation)
self._on_device_script = self.write_test_script_to_disk(
device_test_script_contents)
runtime_files = [os.path.relpath(self._on_device_script)]
runtime_files += self._read_runtime_files()
if self._vpython_dir:
# --vpython-dir is relative to the out dir, but --files expects paths
# relative to src dir, so fix the path up a bit.
runtime_files.append(
os.path.relpath(
os.path.abspath(
os.path.join(self._path_to_outdir, self._vpython_dir)),
CHROMIUM_SRC_PATH))
# TODO(bpastene): Add the vpython spec to the test's runtime deps instead
# of handling it here.
runtime_files.append('.vpython')
for f in runtime_files:
self._test_cmd.extend(['--files', f])
self._test_cmd += [
'--',
'./' + os.path.relpath(self._on_device_script, self._path_to_outdir)
]
def _read_runtime_files(self):
if not self._runtime_deps_path:
return []
abs_runtime_deps_path = os.path.abspath(
os.path.join(self._path_to_outdir, self._runtime_deps_path))
with open(abs_runtime_deps_path) as runtime_deps_file:
files = [l.strip() for l in runtime_deps_file if l]
rel_file_paths = []
for f in files:
rel_file_path = os.path.relpath(
os.path.abspath(os.path.join(self._path_to_outdir, f)))
if not any(regex.match(rel_file_path) for regex in self._FILE_IGNORELIST):
rel_file_paths.append(rel_file_path)
return rel_file_paths
def post_run(self, _):
if self._on_device_script:
os.remove(self._on_device_script)
def device_test(args, unknown_args):
# cros_run_test has trouble with relative paths that go up directories,
# so cd to src/, which should be the root of all data deps.
os.chdir(CHROMIUM_SRC_PATH)
# pylint: disable=redefined-variable-type
# TODO: Remove the above when depot_tool's pylint is updated to include the
# fix to https://github.com/PyCQA/pylint/issues/710.
if args.test_type == 'tast':
test = TastTest(args, unknown_args)
else:
test = GTestTest(args, unknown_args)
test.build_test_command()
logging.info('Running the following command on the device:')
logging.info(' '.join(test.test_cmd))
return test.run_test()
def host_cmd(args, cmd_args):
if not cmd_args:
raise TestFormatError('Must specify command to run on the host.')
elif args.deploy_chrome and not args.path_to_outdir:
raise TestFormatError(
'--path-to-outdir must be specified if --deploy-chrome is passed.')
cros_run_test_cmd = [
CROS_RUN_TEST_PATH,
'--board',
args.board,
'--cache-dir',
args.cros_cache,
]
if args.use_vm:
cros_run_test_cmd += [
'--start',
        # Don't persist any filesystem changes after the VM shuts down.
'--copy-on-write',
]
else:
cros_run_test_cmd += [
'--device', args.device if args.device else LAB_DUT_HOSTNAME
]
if args.verbose:
cros_run_test_cmd.append('--debug')
if args.flash:
cros_run_test_cmd.append('--flash')
if args.public_image:
cros_run_test_cmd += ['--public-image']
if args.logs_dir:
for log in SYSTEM_LOG_LOCATIONS:
cros_run_test_cmd += ['--results-src', log]
cros_run_test_cmd += [
'--results-dest-dir',
os.path.join(args.logs_dir, 'system_logs')
]
test_env = setup_env()
if args.deploy_chrome:
# Mounting ash-chrome gives it enough disk space to not need stripping.
cros_run_test_cmd.extend(['--deploy-lacros'] if args.deploy_lacros else
['--deploy', '--mount', '--nostrip'])
cros_run_test_cmd += [
'--build-dir',
os.path.abspath(args.path_to_outdir),
]
cros_run_test_cmd += [
'--host-cmd',
'--',
] + cmd_args
logging.info('Running the following command:')
logging.info(' '.join(cros_run_test_cmd))
return subprocess.call(
cros_run_test_cmd, stdout=sys.stdout, stderr=sys.stderr, env=test_env)
def setup_env():
"""Returns a copy of the current env with some needed vars added."""
env = os.environ.copy()
# Some chromite scripts expect chromite/bin to be on PATH.
env['PATH'] = env['PATH'] + ':' + os.path.join(CHROMITE_PATH, 'bin')
# deploy_chrome needs a set of GN args used to build chrome to determine if
# certain libraries need to be pushed to the device. It looks for the args via
# an env var. To trigger the default deploying behavior, give it a dummy set
# of args.
# TODO(crbug.com/823996): Make the GN-dependent deps controllable via cmd
# line args.
if not env.get('GN_ARGS'):
env['GN_ARGS'] = 'enable_nacl = true'
if not env.get('USE'):
env['USE'] = 'highdpi'
return env
def add_common_args(*parsers):
for parser in parsers:
parser.add_argument('--verbose', '-v', action='store_true')
parser.add_argument(
'--board', type=str, required=True, help='Type of CrOS device.')
parser.add_argument(
'--cros-cache',
type=str,
default=DEFAULT_CROS_CACHE,
help='Path to cros cache.')
parser.add_argument(
'--path-to-outdir',
type=str,
required=True,
help='Path to output directory, all of whose contents will be '
'deployed to the device.')
parser.add_argument(
'--runtime-deps-path',
type=str,
help='Runtime data dependency file from GN.')
parser.add_argument(
'--vpython-dir',
type=str,
help='Location on host of a directory containing a vpython binary to '
'deploy to the device before the test starts. The location of '
'this dir will be added onto PATH in the device. WARNING: The '
'arch of the device might not match the arch of the host, so '
'avoid using "${platform}" when downloading vpython via CIPD.')
parser.add_argument(
'--logs-dir',
type=str,
dest='logs_dir',
help='Will copy everything under /var/log/ from the device after the '
'test into the specified dir.')
# Shard args are parsed here since we might also specify them via env vars.
parser.add_argument(
'--test-launcher-shard-index',
type=int,
default=os.environ.get('GTEST_SHARD_INDEX', 0),
help='Index of the external shard to run.')
parser.add_argument(
'--test-launcher-total-shards',
type=int,
default=os.environ.get('GTEST_TOTAL_SHARDS', 1),
help='Total number of external shards.')
parser.add_argument(
'--flash',
action='store_true',
help='Will flash the device to the current SDK version before running '
'the test.')
parser.add_argument(
'--public-image',
action='store_true',
help='Will flash a public "full" image to the device.')
vm_or_device_group = parser.add_mutually_exclusive_group()
vm_or_device_group.add_argument(
'--use-vm',
action='store_true',
help='Will run the test in the VM instead of a device.')
vm_or_device_group.add_argument(
'--device',
type=str,
help='Hostname (or IP) of device to run the test on. This arg is not '
'required if --use-vm is set.')
def main():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='test_type')
# Host-side test args.
host_cmd_parser = subparsers.add_parser(
'host-cmd',
help='Runs a host-side test. Pass the host-side command to run after '
'"--". If --use-vm is passed, hostname and port for the device '
'will be 127.0.0.1:9222.')
host_cmd_parser.set_defaults(func=host_cmd)
host_cmd_parser.add_argument(
'--deploy-chrome',
action='store_true',
help='Will deploy a locally built ash-chrome binary to the device before '
'running the host-cmd.')
host_cmd_parser.add_argument(
'--deploy-lacros',
action='store_true',
help='Deploy a lacros-chrome instead of ash-chrome.')
# GTest args.
# TODO(bpastene): Rename 'vm-test' arg to 'gtest'.
gtest_parser = subparsers.add_parser(
'vm-test', help='Runs a device-side gtest.')
gtest_parser.set_defaults(func=device_test)
gtest_parser.add_argument(
'--test-exe',
type=str,
required=True,
help='Path to test executable to run inside the device.')
# GTest args. Some are passed down to the test binary in the device. Others
# are parsed here since they might need tweaking or special handling.
gtest_parser.add_argument(
'--test-launcher-summary-output',
type=str,
help='When set, will pass the same option down to the test and retrieve '
'its result file at the specified location.')
gtest_parser.add_argument(
'--stop-ui',
action='store_true',
help='Will stop the UI service in the device before running the test.')
gtest_parser.add_argument(
'--trace-dir',
type=str,
help='When set, will pass down to the test to generate the trace and '
'retrieve the trace files to the specified location.')
gtest_parser.add_argument(
'--env-var',
nargs=2,
action='append',
default=[],
help='Env var to set on the device for the duration of the test. '
'Expected format is "--env-var SOME_VAR_NAME some_var_value". Specify '
'multiple times for more than one var.')
# Tast test args.
# pylint: disable=line-too-long
tast_test_parser = subparsers.add_parser(
'tast',
help='Runs a device-side set of Tast tests. For more details, see: '
'https://chromium.googlesource.com/chromiumos/platform/tast/+/master/docs/running_tests.md'
)
tast_test_parser.set_defaults(func=device_test)
tast_test_parser.add_argument(
'--suite-name',
type=str,
required=True,
help='Name to apply to the set of Tast tests to run. This has no effect '
'on what is executed, but is used mainly for test results reporting '
'and tracking (eg: flakiness dashboard).')
tast_test_parser.add_argument(
'--test-launcher-summary-output',
type=str,
help='Generates a simple GTest-style JSON result file for the test run.')
# TODO(bpastene): Change all uses of "--conditional" to use "--attr-expr".
tast_test_parser.add_argument(
'--conditional',
'--attr-expr',
type=str,
dest='conditional',
help='A boolean expression whose matching tests will run '
'(eg: ("dep:chrome")).')
tast_test_parser.add_argument(
'--strip-chrome',
action='store_true',
help='Strips symbols from ash-chrome before deploying to the device.')
tast_test_parser.add_argument(
'--deploy-lacros',
action='store_true',
help='Deploy a lacros-chrome instead of ash-chrome.')
tast_test_parser.add_argument(
'--tast-var',
action='append',
dest='tast_vars',
help='Runtime variables for Tast tests, and the format are expected to '
'be "key=value" pairs.')
tast_test_parser.add_argument(
'--test',
'-t',
action='append',
dest='tests',
help='A Tast test to run in the device (eg: "ui.ChromeLogin").')
tast_test_parser.add_argument(
'--gtest_filter',
type=str,
help="Similar to GTest's arg of the same name, this will filter out the "
"specified tests from the Tast run. However, due to the nature of Tast's "
'cmd-line API, this will overwrite the value(s) of "--test" above.')
add_common_args(gtest_parser, tast_test_parser, host_cmd_parser)
args = sys.argv[1:]
unknown_args = []
# If a '--' is present in the args, treat everything to the right of it as
# args to the test and everything to the left as args to this test runner.
# Otherwise treat all known args as args to this test runner and all unknown
# args as test args.
if '--' in args:
unknown_args = args[args.index('--') + 1:]
args = args[0:args.index('--')]
if unknown_args:
args = parser.parse_args(args=args)
else:
args, unknown_args = parser.parse_known_args()
logging.basicConfig(level=logging.DEBUG if args.verbose else logging.WARN)
if not args.use_vm and not args.device:
# If we're not running on a VM, but haven't specified a hostname, assume
# we're on a lab bot and are trying to run a test on a lab DUT. See if the
# magic lab DUT hostname resolves to anything. (It will in the lab and will
# not on dev machines.)
try:
socket.getaddrinfo(LAB_DUT_HOSTNAME, None)
except socket.gaierror:
logging.error('The default DUT hostname of %s is unreachable.',
LAB_DUT_HOSTNAME)
return 1
args.cros_cache = os.path.abspath(args.cros_cache)
return args.func(args, unknown_args)
if __name__ == '__main__':
sys.exit(main())
|
the-stack_0_10017 | # импортируем специальные поля Алхимии для инициализации полей таблицы
from sqlalchemy import Column, Float, String, Integer
# import the declarative Base class initialized in DB.dbcore
from DB.dbcore import Base
class Client(Base):
__tablename__ = 'clients'
id = Column(Integer, primary_key = True)
address = Column(String)
chat_id = Column(Integer)
email = Column(String)
latitude = Column(Float)
longitude = Column(Float)
phone = Column(String)
title = Column(String)
user_name = Column(String)
def __init__(self, address, chat_id, email, latitude, longitude, phone, title, user_name):
self.address = address
self.chat_id = chat_id
self.email = email
self.latitude = latitude
self.longitude = longitude
self.phone = phone
self.title = title
self.user_name = user_name
def __repr__(self):
return f'<Client: {self.id}, {self.title}, {self.address}>' |
the-stack_0_10020 | import cv2
import numpy as np
from PIL import Image
def anahtarOlustur(gorsel, gelen):
    # Build an image-sized key: sample bytes from `gelen` (e.g. a hash digest),
    # then XOR them with a freshly generated random array of the same shape.
    r, c, t = gorsel.shape
    keyGen = np.random.randint(0, 256, size=(r, c, t), dtype=np.uint8)
    key = np.random.choice(gelen, size=(r, c, t))
    mylist = []
    for i in key:
        arr = np.array(i, dtype=np.uint8)
        mylist.append(arr)
    fth = np.array(mylist)
    return cv2.bitwise_xor(fth, keyGen)
def xor(gorsel, anahtar):
    # XOR the image with the key; applying the same key twice restores the image.
    return cv2.bitwise_xor(gorsel, anahtar)
def hexToUint8(hash):
    # Convert a hex string into a list of byte values (two hex chars per byte).
    return [int(hash[i:i + 2], 16) for i in range(0, len(hash), 2)]
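# Hedged usage sketch (the image path is a placeholder; names as defined above):
#
#     img = cv2.imread("input.png")
#     key = anahtarOlustur(img, hexToUint8("a1b2c3d4"))
#     cipher = xor(img, key)    # encrypt
#     plain = xor(cipher, key)  # XOR with the same key restores the original
#
# Decryption works because (img ^ key) ^ key == img for bitwise XOR.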
|
the-stack_0_10022 | _base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py',
'../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
# actual epoch = 3 * 3 = 9
lr_config = dict(policy='step', step=[3])
# runtime settings
total_epochs = 4 # actual epoch = 4 * 3 = 12
|
the-stack_0_10023 | import torch
from torch import Tensor
from torch.nn import Parameter as Param
from torch_geometric.nn.conv import MessagePassing
from ..inits import uniform
class GatedGraphConv(MessagePassing):
r"""The gated graph convolution operator from the `"Gated Graph Sequence
Neural Networks" <https://arxiv.org/abs/1511.05493>`_ paper
.. math::
\mathbf{h}_i^{(0)} &= \mathbf{x}_i \, \Vert \, \mathbf{0}
\mathbf{m}_i^{(l+1)} &= \sum_{j \in \mathcal{N}(i)} \mathbf{\Theta}
\cdot \mathbf{h}_j^{(l)}
\mathbf{h}_i^{(l+1)} &= \textrm{GRU} (\mathbf{m}_i^{(l+1)},
\mathbf{h}_i^{(l)})
up to representation :math:`\mathbf{h}_i^{(L)}`.
The number of input channels of :math:`\mathbf{x}_i` needs to be less or
equal than :obj:`out_channels`.
Args:
out_channels (int): Size of each input sample.
num_layers (int): The sequence length :math:`L`.
aggr (string, optional): The aggregation scheme to use
(:obj:`"add"`, :obj:`"mean"`, :obj:`"max"`).
(default: :obj:`"add"`)
bias (bool, optional): If set to :obj:`False`, the layer will not learn
an additive bias. (default: :obj:`True`)
**kwargs (optional): Additional arguments of
:class:`torch_geometric.nn.conv.MessagePassing`.
"""
def __init__(self,
out_channels,
num_layers,
aggr='add',
bias=True,
**kwargs):
super(GatedGraphConv, self).__init__(aggr=aggr, **kwargs)
self.out_channels = out_channels
self.num_layers = num_layers
self.weight = Param(Tensor(num_layers, out_channels, out_channels))
self.rnn = torch.nn.GRUCell(out_channels, out_channels, bias=bias)
self.reset_parameters()
def reset_parameters(self):
uniform(self.out_channels, self.weight)
self.rnn.reset_parameters()
def forward(self, x, edge_index, edge_weight=None):
""""""
h = x if x.dim() == 2 else x.unsqueeze(-1)
if h.size(1) > self.out_channels:
raise ValueError('The number of input channels is not allowed to '
'be larger than the number of output channels')
if h.size(1) < self.out_channels:
zero = h.new_zeros(h.size(0), self.out_channels - h.size(1))
h = torch.cat([h, zero], dim=1)
for i in range(self.num_layers):
m = torch.matmul(h, self.weight[i])
m = self.propagate(edge_index, x=m, edge_weight=edge_weight)
h = self.rnn(m, h)
return h
def message(self, x_j, edge_weight):
if edge_weight is not None:
return edge_weight.view(-1, 1) * x_j
return x_j
def __repr__(self):
return '{}({}, num_layers={})'.format(
self.__class__.__name__, self.out_channels, self.num_layers)
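# Hedged usage sketch (shapes are illustrative):
#
#     conv = GatedGraphConv(out_channels=64, num_layers=3)
#     out = conv(x, edge_index)  # x: [num_nodes, <=64], edge_index: [2, num_edges]
#
# Inputs narrower than `out_channels` are zero-padded inside forward() above.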
|
the-stack_0_10024 | import os
from pathlib import Path
from allennlp.data.iterators import BasicIterator
from allennlp.nn.util import move_to_device
from pytorch_pretrained_bert import BertTokenizer, BertModel, BertAdam
import config
from bert_model_variances.bert_multilayer_output import BertMultiLayerSeqClassification
from data_utils.exvocab import ExVocabulary
from data_utils.readers.bert_reader_fever_sent_selection import BertContentSelectionReader
# from evaluation import ext_hotpot_eval
from evaluation import fever_scorer
from fever_sampler.ss_sampler import build_full_wiki_document_forward_item, down_sample_neg
from fever_utils import fever_db
from flint import torch_util
# from hotpot_data_analysis.fullwiki_provided_upperbound import append_gt_downstream_to_get_upperbound_from_doc_retri
from utils import common, list_dict_data_tool
import torch
from tqdm import tqdm
import numpy as np
import copy
import allennlp
from utils import save_tool
import torch.nn.functional as F
def eval_model(model, data_iter, device_num, with_probs=False, make_int=False, show_progress=False):
print("Evaluating ...")
tqdm_disable = not show_progress
with torch.no_grad():
model.eval()
totoal_size = 0
y_pred_list = []
y_fid_list = []
y_pid_list = []
y_element_list = []
y_logits_list = []
y_probs_list = []
for batch_idx, batch in tqdm(enumerate(data_iter), disable=tqdm_disable):
batch = move_to_device(batch, device_num)
eval_paired_sequence = batch['paired_sequence']
eval_paired_segments_ids = batch['paired_segments_ids']
eval_labels_ids = batch['label']
eval_att_mask, _ = torch_util.get_length_and_mask(eval_paired_sequence)
s1_span = batch['bert_s1_span']
s2_span = batch['bert_s2_span']
out = model(eval_paired_sequence, token_type_ids=eval_paired_segments_ids, attention_mask=eval_att_mask,
mode=BertMultiLayerSeqClassification.ForwardMode.EVAL,
labels=eval_labels_ids)
y_pid_list.extend(list(batch['oid']))
y_fid_list.extend(list(batch['fid']))
y_element_list.extend(list(batch['item']))
y_pred_list.extend(torch.max(out, 1)[1].view(out.size(0)).tolist())
y_logits_list.extend(out.view(out.size(0)).tolist())
if with_probs:
y_probs_list.extend(torch.sigmoid(out).view(out.size(0)).tolist())
            total_size += out.size(0)
result_items_list = []
assert len(y_pred_list) == len(y_fid_list)
assert len(y_pred_list) == len(y_pid_list)
assert len(y_pred_list) == len(y_element_list)
assert len(y_pred_list) == len(y_logits_list)
if with_probs:
assert len(y_pred_list) == len(y_probs_list)
for i in range(len(y_pred_list)):
r_item = dict()
r_item['fid'] = y_fid_list[i]
r_item['oid'] = y_pid_list[i] if not make_int else int(y_pid_list[i])
r_item['score'] = y_logits_list[i]
r_item['element'] = y_element_list[i]
if with_probs:
r_item['prob'] = y_probs_list[i]
result_items_list.append(r_item)
return result_items_list
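# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original script): a never-called helper that
# shows how `eval_model` is typically wired to an AllenNLP iterator. The model,
# reader, vocab and device number are assumed to be built as in `model_go()`
# below; the batch size is an arbitrary example value.
def _example_eval_model_call(model, bert_cs_reader, vocab, dev_fitems_list, device_num):
    instances = bert_cs_reader.read(dev_fitems_list)
    biterator = BasicIterator(batch_size=128)
    biterator.index_with(vocab)
    data_iter = biterator(instances, num_epochs=1, shuffle=False)
    # Each returned item is a dict with 'fid', 'oid', 'score', 'element' (and 'prob').
    return eval_model(model, data_iter, device_num, with_probs=True, make_int=True)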
def select_top_k_and_to_results_dict(scored_dict, merged_field_name='merged_field',
score_field_name='prob', item_field_name='element',
top_k=5, threshold=None):
results_dict = dict()
for key, value in scored_dict.items():
if key not in results_dict:
results_dict[key] = dict()
# if merged_field_name not in value:
# results_dict[key]['scored_results'] = []
# results_dict[key]['predicated_evidence'] = []
# continue
fitems_dict = value[merged_field_name]
scored_element_list = []
for item in fitems_dict.values():
score = item[score_field_name]
element = item[item_field_name]
scored_element_list.append((score, element)) # score is index 0.
results_dict[key]['scored_results'] = scored_element_list
sorted_e_list = sorted(scored_element_list, key=lambda x: x[0], reverse=True)
evidence_sid = []
scored_evidence_sid = []
for s, e in sorted_e_list:
if threshold is not None:
if s >= threshold:
evidence_sid.append(e)
scored_evidence_sid.append([s, e])
else:
evidence_sid.append(e)
scored_evidence_sid.append([s, e])
evidence_sid = evidence_sid[:top_k]
scored_evidence_sid = scored_evidence_sid[:top_k]
assert len(evidence_sid) == len(scored_evidence_sid)
results_dict[key]['predicted_evidence'] = []
for sid in evidence_sid:
doc_id, ln = sid.split('(-.-)')[0], int(sid.split('(-.-)')[1])
results_dict[key]['predicted_evidence'].append([doc_id, ln])
results_dict[key]['predicted_scored_evidence'] = []
for score, sid in scored_evidence_sid:
doc_id, ln = sid.split('(-.-)')[0], int(sid.split('(-.-)')[1])
results_dict[key]['predicted_scored_evidence'].append((score, [doc_id, ln]))
# predicted_sentids
# results_dict[key]['predicted_sentids'] = results_dict[key]['predicted_evidence']
return results_dict
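# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original script): a toy, never-called
# helper showing the input shape `select_top_k_and_to_results_dict` expects
# and what it returns. The claim id, sentence ids and probabilities below are
# invented for illustration.
def _example_select_top_k():
    toy_scored_dict = {
        137334: {
            'merged_field': {
                'fid_a': {'prob': 0.93, 'element': 'Some_Page(-.-)0'},
                'fid_b': {'prob': 0.12, 'element': 'Some_Page(-.-)3'},
            }
        }
    }
    results = select_top_k_and_to_results_dict(toy_scored_dict, top_k=5, threshold=0.5)
    # results[137334]['predicted_evidence']        -> [['Some_Page', 0]]
    # results[137334]['predicted_scored_evidence'] -> [(0.93, ['Some_Page', 0])]
    return results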
def get_sentences(tag, is_training, debug=False):
if tag == 'dev':
d_list = common.load_jsonl(config.FEVER_DEV)
elif tag == 'train':
d_list = common.load_jsonl(config.FEVER_TRAIN)
elif tag == 'test':
d_list = common.load_jsonl(config.FEVER_TEST)
else:
raise ValueError(f"Tag:{tag} not supported.")
if debug:
# d_list = d_list[:10]
d_list = d_list[:50]
# d_list = d_list[:200]
doc_results = common.load_jsonl(
config.RESULT_PATH / f"doc_retri_results/fever_results/merged_doc_results/m_doc_{tag}.jsonl")
doc_results_dict = list_dict_data_tool.list_to_dict(doc_results, 'id')
fever_db_cursor = fever_db.get_cursor(config.FEVER_DB)
forward_items = build_full_wiki_document_forward_item(doc_results_dict, d_list, is_training=is_training,
db_cursor=fever_db_cursor)
return forward_items
def set_gt_nli_label(d_list, delete_label=False):
for item in d_list:
item['predicted_label'] = item['label']
if delete_label:
del item['label']
return d_list
def model_go():
seed = 12
torch.manual_seed(seed)
# bert_model_name = 'bert-large-uncased'
bert_model_name = 'bert-base-uncased'
experiment_name = 'fever_v0_cs_ratio_001'
# lazy = False
lazy = True
forward_size = 128
# batch_size = 64
# batch_size = 192
batch_size = 128
gradient_accumulate_step = int(batch_size / forward_size)
warmup_proportion = 0.1
learning_rate = 5e-5
num_train_epochs = 5
eval_frequency = 20000
pos_ratio = 0.01
do_lower_case = True
# debug_mode = True
debug_mode = False
# est_datasize = 900_000
num_class = 1
# num_train_optimization_steps
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device_num = 0 if torch.cuda.is_available() else -1
n_gpu = torch.cuda.device_count()
    unk_token_num = {'tokens': 1}  # workaround for initializing the vocabulary.
vocab = ExVocabulary(unk_token_num=unk_token_num)
vocab.add_token_to_namespace("false", namespace="labels") # 0
vocab.add_token_to_namespace("true", namespace="labels") # 1
vocab.add_token_to_namespace("hidden", namespace="labels")
vocab.change_token_with_index_to_namespace("hidden", -2, namespace='labels')
# Load Dataset
# train_list = common.load_jsonl(config.FEVER_TRAIN)
dev_list = common.load_jsonl(config.FEVER_DEV)
set_gt_nli_label(dev_list)
# dev_fitems_list = common.load_jsonl(
# config.PDATA_ROOT / "content_selection_forward" / "hotpot_dev_p_level_unlabeled.jsonl")
# train_fitems_list = common.load_jsonl(
# config.PDATA_ROOT / "content_selection_forward" / "hotpot_train_p_level_labeled.jsonl")
dev_fitems_list = get_sentences('dev', is_training=False, debug=debug_mode)
train_fitems_list = get_sentences('train', is_training=True, debug=debug_mode)
if debug_mode:
dev_list = dev_list[:50]
eval_frequency = 1
# print(dev_list[-1]['_id'])
# exit(0)
sampled_train_list = down_sample_neg(train_fitems_list, ratio=pos_ratio)
est_datasize = len(sampled_train_list)
dev_o_dict = list_dict_data_tool.list_to_dict(dev_list, 'id')
# print(dev_o_dict)
bert_tokenizer = BertTokenizer.from_pretrained(bert_model_name, do_lower_case=do_lower_case)
bert_cs_reader = BertContentSelectionReader(bert_tokenizer, lazy, is_paired=True,
example_filter=lambda x: len(x['context']) == 0, max_l=128)
bert_encoder = BertModel.from_pretrained(bert_model_name)
model = BertMultiLayerSeqClassification(bert_encoder, num_labels=num_class, num_of_pooling_layer=1,
act_type='tanh', use_pretrained_pooler=True, use_sigmoid=True)
#
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
num_train_optimization_steps = int(est_datasize / forward_size / gradient_accumulate_step) * \
num_train_epochs
if debug_mode:
num_train_optimization_steps = 100
print("Estimated training size", est_datasize)
print("Number of optimization steps:", num_train_optimization_steps)
optimizer = BertAdam(optimizer_grouped_parameters,
lr=learning_rate,
warmup=warmup_proportion,
t_total=num_train_optimization_steps)
dev_instances = bert_cs_reader.read(dev_fitems_list)
biterator = BasicIterator(batch_size=forward_size)
biterator.index_with(vocab)
model.to(device)
if n_gpu > 1:
model = torch.nn.DataParallel(model)
forbackward_step = 0
update_step = 0
logging_agent = save_tool.ScoreLogger({})
file_path_prefix = '.'
if not debug_mode:
file_path_prefix, date = save_tool.gen_file_prefix(f"{experiment_name}")
# # # Create Log File
# Save the source code.
script_name = os.path.basename(__file__)
with open(os.path.join(file_path_prefix, script_name), 'w') as out_f, open(__file__, 'r') as it:
out_f.write(it.read())
out_f.flush()
# # # Log File end
for epoch_i in range(num_train_epochs):
print("Epoch:", epoch_i)
sampled_train_list = down_sample_neg(train_fitems_list, ratio=pos_ratio)
train_instance = bert_cs_reader.read(sampled_train_list)
train_iter = biterator(train_instance, num_epochs=1, shuffle=True)
for batch in tqdm(train_iter):
model.train()
batch = move_to_device(batch, device_num)
paired_sequence = batch['paired_sequence']
paired_segments_ids = batch['paired_segments_ids']
labels_ids = batch['label']
att_mask, _ = torch_util.get_length_and_mask(paired_sequence)
s1_span = batch['bert_s1_span']
s2_span = batch['bert_s2_span']
loss = model(paired_sequence, token_type_ids=paired_segments_ids, attention_mask=att_mask,
mode=BertMultiLayerSeqClassification.ForwardMode.TRAIN,
labels=labels_ids)
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if gradient_accumulate_step > 1:
loss = loss / gradient_accumulate_step
loss.backward()
forbackward_step += 1
if forbackward_step % gradient_accumulate_step == 0:
optimizer.step()
optimizer.zero_grad()
update_step += 1
if update_step % eval_frequency == 0:
print("Update steps:", update_step)
dev_iter = biterator(dev_instances, num_epochs=1, shuffle=False)
cur_eval_results_list = eval_model(model, dev_iter, device_num, with_probs=True, make_int=True)
copied_dev_o_dict = copy.deepcopy(dev_o_dict)
copied_dev_d_list = copy.deepcopy(dev_list)
list_dict_data_tool.append_subfield_from_list_to_dict(cur_eval_results_list, copied_dev_o_dict,
'oid', 'fid', check=True)
print("Threshold 0.5:")
cur_results_dict_th0_5 = select_top_k_and_to_results_dict(copied_dev_o_dict,
top_k=5, threshold=0.5)
list_dict_data_tool.append_item_from_dict_to_list(copied_dev_d_list, cur_results_dict_th0_5,
'id', 'predicted_evidence')
mode = {'standard': True, 'check_sent_id_correct': True}
strict_score, acc_score, pr, rec, f1 = fever_scorer.fever_score(copied_dev_d_list, dev_list,
mode=mode, max_evidence=5)
score_05 = {
'ss': strict_score, 'as': acc_score,
'pr': pr, 'rec': rec, 'f1': f1,
}
print("Threshold 0.1:")
cur_results_dict_th0_1 = select_top_k_and_to_results_dict(copied_dev_o_dict,
top_k=5, threshold=0.1)
list_dict_data_tool.append_item_from_dict_to_list(copied_dev_d_list, cur_results_dict_th0_1,
'id', 'predicted_evidence')
mode = {'standard': True, 'check_sent_id_correct': True}
strict_score, acc_score, pr, rec, f1 = fever_scorer.fever_score(copied_dev_d_list, dev_list,
mode=mode, max_evidence=5)
score_01 = {
'ss': strict_score, 'as': acc_score,
'pr': pr, 'rec': rec, 'f1': f1,
}
logging_item = {
'score_01': score_01,
'score_05': score_05,
}
print(logging_item)
s01_ss_score = score_01['ss']
s05_ss_score = score_05['ss']
#
# exit(0)
# print(logging_item)
save_file_name = f'i({update_step})|e({epoch_i})' \
f'|s01({s01_ss_score})|s05({s05_ss_score})' \
f'|seed({seed})'
common.save_jsonl(cur_eval_results_list, Path(file_path_prefix) /
f"{save_file_name}_dev_sent_results.json")
# print(save_file_name)
logging_agent.incorporate_results({}, save_file_name, logging_item)
logging_agent.logging_to_file(Path(file_path_prefix) / "log.json")
model_to_save = model.module if hasattr(model, 'module') else model
output_model_file = Path(file_path_prefix) / save_file_name
torch.save(model_to_save.state_dict(), str(output_model_file))
# print(logging_agent.logging_item_list)
# Epoch eval:
print("Update steps:", update_step)
dev_iter = biterator(dev_instances, num_epochs=1, shuffle=False)
cur_eval_results_list = eval_model(model, dev_iter, device_num, with_probs=True, make_int=True)
copied_dev_o_dict = copy.deepcopy(dev_o_dict)
copied_dev_d_list = copy.deepcopy(dev_list)
list_dict_data_tool.append_subfield_from_list_to_dict(cur_eval_results_list, copied_dev_o_dict,
'oid', 'fid', check=True)
print("Threshold 0.5:")
cur_results_dict_th0_5 = select_top_k_and_to_results_dict(copied_dev_o_dict,
top_k=5, threshold=0.5)
list_dict_data_tool.append_item_from_dict_to_list(copied_dev_d_list, cur_results_dict_th0_5,
'id', 'predicted_evidence')
mode = {'standard': True, 'check_sent_id_correct': True}
strict_score, acc_score, pr, rec, f1 = fever_scorer.fever_score(copied_dev_d_list, dev_list,
mode=mode, max_evidence=5)
score_05 = {
'ss': strict_score, 'as': acc_score,
'pr': pr, 'rec': rec, 'f1': f1,
}
print("Threshold 0.1:")
cur_results_dict_th0_1 = select_top_k_and_to_results_dict(copied_dev_o_dict,
top_k=5, threshold=0.1)
list_dict_data_tool.append_item_from_dict_to_list(copied_dev_d_list, cur_results_dict_th0_1,
'id', 'predicted_evidence')
mode = {'standard': True, 'check_sent_id_correct': True}
strict_score, acc_score, pr, rec, f1 = fever_scorer.fever_score(copied_dev_d_list, dev_list,
mode=mode, max_evidence=5)
score_01 = {
'ss': strict_score, 'as': acc_score,
'pr': pr, 'rec': rec, 'f1': f1,
}
logging_item = {
'score_01': score_01,
'score_05': score_05,
}
print(logging_item)
s01_ss_score = score_01['ss']
s05_ss_score = score_05['ss']
#
# exit(0)
# print(logging_item)
save_file_name = f'i({update_step})|e({epoch_i})' \
f'|s01({s01_ss_score})|s05({s05_ss_score})' \
f'|seed({seed})'
common.save_jsonl(cur_eval_results_list, Path(file_path_prefix) /
f"{save_file_name}_dev_sent_results.jsonl")
# print(save_file_name)
logging_agent.incorporate_results({}, save_file_name, logging_item)
logging_agent.logging_to_file(Path(file_path_prefix) / "log.json")
model_to_save = model.module if hasattr(model, 'module') else model
output_model_file = Path(file_path_prefix) / save_file_name
torch.save(model_to_save.state_dict(), str(output_model_file))
def eval_trainset_for_train_nli(model_path):
tag = 'test'
is_training = False
seed = 12
torch.manual_seed(seed)
bert_model_name = 'bert-base-uncased'
lazy = False
# lazy = True
forward_size = 128
# batch_size = 64
# batch_size = 192
batch_size = 128
do_lower_case = True
debug_mode = False
# debug_mode = True
num_class = 1
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device_num = 0 if torch.cuda.is_available() else -1
n_gpu = torch.cuda.device_count()
    unk_token_num = {'tokens': 1}  # workaround for initializing the vocabulary.
vocab = ExVocabulary(unk_token_num=unk_token_num)
vocab.add_token_to_namespace("false", namespace="labels") # 0
vocab.add_token_to_namespace("true", namespace="labels") # 1
vocab.add_token_to_namespace("hidden", namespace="labels")
vocab.change_token_with_index_to_namespace("hidden", -2, namespace='labels')
# Load Dataset
train_fitems_list = get_sentences(tag, is_training=is_training, debug=debug_mode)
est_datasize = len(train_fitems_list)
bert_tokenizer = BertTokenizer.from_pretrained(bert_model_name, do_lower_case=do_lower_case)
bert_cs_reader = BertContentSelectionReader(bert_tokenizer, lazy, is_paired=True,
example_filter=lambda x: len(x['context']) == 0, max_l=128)
bert_encoder = BertModel.from_pretrained(bert_model_name)
model = BertMultiLayerSeqClassification(bert_encoder, num_labels=num_class, num_of_pooling_layer=1,
act_type='tanh', use_pretrained_pooler=True, use_sigmoid=True)
model.load_state_dict(torch.load(model_path))
print("Estimated training size", est_datasize)
print("Estimated forward steps:", est_datasize / forward_size)
biterator = BasicIterator(batch_size=forward_size)
biterator.index_with(vocab)
model.to(device)
if n_gpu > 1:
model = torch.nn.DataParallel(model)
train_instance = bert_cs_reader.read(train_fitems_list)
train_iter = biterator(train_instance, num_epochs=1, shuffle=False)
cur_eval_results_list = eval_model(model, train_iter, device_num, with_probs=True, make_int=True, show_progress=True)
if debug_mode:
train_list = common.load_jsonl(config.FEVER_TRAIN)
train_list = train_list[:50]
set_gt_nli_label(train_list)
train_o_dict = list_dict_data_tool.list_to_dict(train_list, 'id')
copied_dev_o_dict = copy.deepcopy(train_o_dict)
copied_dev_d_list = copy.deepcopy(train_list)
list_dict_data_tool.append_subfield_from_list_to_dict(cur_eval_results_list, copied_dev_o_dict,
'oid', 'fid', check=True)
print("Threshold 0.5:")
cur_results_dict_th0_5 = select_top_k_and_to_results_dict(copied_dev_o_dict,
top_k=5, threshold=0.1)
list_dict_data_tool.append_item_from_dict_to_list(copied_dev_d_list, cur_results_dict_th0_5,
'id', 'predicted_evidence')
mode = {'standard': True, 'check_sent_id_correct': True}
strict_score, acc_score, pr, rec, f1 = fever_scorer.fever_score(copied_dev_d_list, train_list,
mode=mode, max_evidence=5)
print(strict_score, acc_score, pr, rec, f1)
common.save_jsonl(cur_eval_results_list, f'{tag}_sent_results_labeled:{is_training}.jsonl')
if __name__ == '__main__':
model_go()
# model_path = config.PRO_ROOT / "saved_models/04-13-16:37:29_fever_v0_cs/i(5000)|e(0)|s01(0.9170917091709171)|s05(0.8842384238423843)|seed(12)"
#
# model_path = config.PRO_ROOT / "saved_models/04-13-16:37:29_fever_v0_cs/i(15000)|e(1)|s01(0.9013901390139014)|s05(0.8517851785178517)|seed(12)"
# eval_trainset_for_train_nli(model_path)
# dev_sent_list = get_sentences('dev', is_training=False)
# print(len(dev_sent_list))
#
# train_sent_list = get_sentences('dev', is_training=True)
# sampled_sent_list = down_sample_neg(train_sent_list, ratio=0.2)
# print(len(sampled_sent_list))