Code (string, lengths 103 to 85.9k) | Summary (sequence, lengths 0 to 94) |
---|---|
Please provide a description of the function:def notify_observers(self, which=None, min_priority=None):
if self._update_on:
if which is None:
which = self
if min_priority is None:
[callble(self, which=which) for _, _, callble in self.observers]
else:
for p, _, callble in self.observers:
if p <= min_priority:
break
callble(self, which=which) | [
"\n Notifies all observers. Which is the element, which kicked off this\n notification loop. The first argument will be self, the second `which`.\n\n .. note::\n \n notifies only observers with priority p > min_priority!\n \n :param min_priority: only notify observers with priority > min_priority\n if min_priority is None, notify all observers in order\n "
] |
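
The priority logic above is easiest to see in isolation. Below is a minimal, self-contained sketch of the same notification loop (plain Python, not the paramz observer API itself): observers are kept sorted by descending priority, and notification stops once the priority drops to min_priority or below.

    # Hypothetical stand-in for self.observers: (priority, observer, callable) tuples,
    # kept sorted by descending priority.
    observers = [(10, None, lambda obj, which: print("high-priority observer saw", which)),
                 (0, None, lambda obj, which: print("low-priority observer saw", which))]

    def notify(which, min_priority=None):
        for p, _, callble in observers:
            if min_priority is not None and p <= min_priority:
                break  # everything after this point has priority <= min_priority
            callble(None, which=which)

    notify("some_param", min_priority=0)  # only the priority-10 observer fires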
Please provide a description of the function:def constrain_fixed(self, value=None, warning=True, trigger_parent=True):
if value is not None:
self[:] = value
#index = self.unconstrain()
index = self._add_to_index_operations(self.constraints, np.empty(0), __fixed__, warning)
self._highest_parent_._set_fixed(self, index)
self.notify_observers(self, None if trigger_parent else -np.inf)
return index | [
"\n Constrain this parameter to be fixed to the current value it carries.\n\n This does not override the previous constraints, so unfixing will\n restore the constraint set before fixing.\n\n :param warning: print a warning for overwriting constraints.\n "
] |
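
A hedged usage sketch for fixing and unfixing, assuming a standalone paramz Parameterized container is enough to exercise the constraint machinery (the names used here are illustrative, not from the source).

    import numpy as np
    from paramz import Parameterized, Param  # assumed top-level exports

    p = Parameterized('example')
    variance = Param('variance', np.ones(1))
    p.link_parameter(variance)

    variance.constrain_fixed(2.0)   # pin to 2.0; returns the fixed indices
    variance.unconstrain_fixed()    # unfix; any constraint set before fixing is restored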
Please provide a description of the function:def unconstrain_fixed(self):
unconstrained = self.unconstrain(__fixed__)
self._highest_parent_._set_unfixed(self, unconstrained)
#if self._default_constraint_ is not None:
# return self.constrain(self._default_constraint_)
return unconstrained | [
"\n This parameter will no longer be fixed.\n\n If there was a constraint on this parameter when fixing it,\n it will be constraint with that previous constraint.\n "
] |
Please provide a description of the function:def constrain(self, transform, warning=True, trigger_parent=True):
if isinstance(transform, Transformation):
self.param_array[...] = transform.initialize(self.param_array)
elif transform == __fixed__:
return self.fix(warning=warning, trigger_parent=trigger_parent)
else:
raise ValueError('Can only constrain with paramz.transformations.Transformation object')
reconstrained = self.unconstrain()
added = self._add_to_index_operations(self.constraints, reconstrained, transform, warning)
self.trigger_update(trigger_parent)
return added | [
"\n :param transform: the :py:class:`paramz.transformations.Transformation`\n to constrain the this parameter to.\n :param warning: print a warning if re-constraining parameters.\n\n Constrain the parameter to the given\n :py:class:`paramz.transformations.Transformation`.\n "
] |
Please provide a description of the function:def constrain_positive(self, warning=True, trigger_parent=True):
self.constrain(Logexp(), warning=warning, trigger_parent=trigger_parent) | [
"\n :param warning: print a warning if re-constraining parameters.\n\n Constrain this parameter to the default positive constraint.\n "
] |
Please provide a description of the function:def constrain_negative(self, warning=True, trigger_parent=True):
self.constrain(NegativeLogexp(), warning=warning, trigger_parent=trigger_parent) | [
"\n :param warning: print a warning if re-constraining parameters.\n\n Constrain this parameter to the default negative constraint.\n "
] |
Please provide a description of the function:def constrain_bounded(self, lower, upper, warning=True, trigger_parent=True):
self.constrain(Logistic(lower, upper), warning=warning, trigger_parent=trigger_parent) | [
"\n :param lower, upper: the limits to bound this parameter to\n :param warning: print a warning if re-constraining parameters.\n\n Constrain this parameter to lie within the given range.\n "
] |
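
The three helpers above are thin wrappers around constrain() with a default Transformation (Logexp, NegativeLogexp, Logistic). A small, hedged sketch of typical use (import paths and container setup are assumptions):

    import numpy as np
    from paramz import Parameterized, Param  # assumed top-level exports

    m = Parameterized('kern')
    lengthscale = Param('lengthscale', np.array([0.5]))
    m.link_parameter(lengthscale)

    lengthscale.constrain_positive()         # Logexp transformation under the hood
    lengthscale.constrain_bounded(0.1, 2.0)  # re-constrains; warns unless warning=False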
Please provide a description of the function:def load(file_or_path):
from pickle import UnpicklingError
_python3 = True
try:
import cPickle as pickle
_python3 = False
except ImportError: #python3
import pickle
try:
if _python3:
strcl = str
p3kw = dict(encoding='latin1')
return _unpickle(file_or_path, pickle, strcl, p3kw)
else:
strcl = basestring
p3kw = {}
return _unpickle(file_or_path, pickle, strcl, p3kw)
except UnpicklingError: # pragma: no coverage
import pickle
return _unpickle(file_or_path, pickle, strcl, p3kw) | [
"\n Load a previously pickled model, using `m.pickle('path/to/file.pickle)'`\n\n :param file_name: path/to/file.pickle\n "
] |
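
A hedged round-trip sketch; the exact import location of load() is an assumption (it may sit in a submodule rather than the package root), and m stands for any paramz model or parameter handle.

    from paramz import load          # assumption: import path may differ per version

    m.pickle('model.pickle')         # save (see the pickle() method further below)
    m2 = load('model.pickle')        # restore; a path or an open file object is accepted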
Please provide a description of the function:def checkgrad(self, verbose=0, step=1e-6, tolerance=1e-3, df_tolerance=1e-12):
# Make sure we always call the gradcheck on the highest parent
# This ensures the assumption of the highest parent to hold the fixes
# In the checkgrad function we take advantage of that, so it needs
# to be set in place here.
if self.has_parent():
return self._highest_parent_._checkgrad(self, verbose=verbose, step=step, tolerance=tolerance, df_tolerance=df_tolerance)
return self._checkgrad(self, verbose=verbose, step=step, tolerance=tolerance, df_tolerance=df_tolerance) | [
"\n Check the gradient of this parameter with respect to the highest parent's\n objective function.\n This is a three point estimate of the gradient, wiggling at the parameters\n with a stepsize step.\n The check passes if either the ratio or the difference between numerical and\n analytical gradient is smaller then tolerance.\n\n :param bool verbose: whether each parameter shall be checked individually.\n :param float step: the stepsize for the numerical three point gradient estimate.\n :param float tolerance: the tolerance for the gradient ratio or difference.\n :param float df_tolerance: the tolerance for df_tolerance\n\n .. note::\n The *dF_ratio* indicates the limit of accuracy of numerical gradients.\n If it is too small, e.g., smaller than 1e-12, the numerical gradients\n are usually not accurate enough for the tests (shown with blue).\n "
] |
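
Typical usage on any paramz model m, mirroring the docstring above:

    assert m.checkgrad()        # quiet overall pass/fail for the whole model
    m.checkgrad(verbose=True)   # per-parameter table with Ratio/Difference/dF_ratio columns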
Please provide a description of the function:def opt(self, x_init, f_fp=None, f=None, fp=None):
tnc_rcstrings = ['Local minimum', 'Converged', 'XConverged', 'Maximum number of f evaluations reached',
'Line search failed', 'Function is constant']
assert f_fp != None, "TNC requires f_fp"
opt_dict = {}
if self.xtol is not None:
opt_dict['xtol'] = self.xtol
if self.ftol is not None:
opt_dict['ftol'] = self.ftol
if self.gtol is not None:
opt_dict['pgtol'] = self.gtol
opt_result = optimize.fmin_tnc(f_fp, x_init, messages=self.messages,
maxfun=self.max_f_eval, **opt_dict)
self.x_opt = opt_result[0]
self.f_opt = f_fp(self.x_opt)[0]
self.funct_eval = opt_result[1]
self.status = tnc_rcstrings[opt_result[2]] | [
"\n Run the TNC optimizer\n\n "
] |
Please provide a description of the function:def opt(self, x_init, f_fp=None, f=None, fp=None):
rcstrings = ['Converged', 'Maximum number of f evaluations reached', 'Error']
assert f_fp != None, "BFGS requires f_fp"
opt_dict = {}
if self.xtol is not None:
print("WARNING: l-bfgs-b doesn't have an xtol arg, so I'm going to ignore it")
if self.ftol is not None:
print("WARNING: l-bfgs-b doesn't have an ftol arg, so I'm going to ignore it")
if self.gtol is not None:
opt_dict['pgtol'] = self.gtol
if self.bfgs_factor is not None:
opt_dict['factr'] = self.bfgs_factor
opt_result = optimize.fmin_l_bfgs_b(f_fp, x_init, maxfun=self.max_iters, maxiter=self.max_iters, **opt_dict)
self.x_opt = opt_result[0]
self.f_opt = f_fp(self.x_opt)[0]
self.funct_eval = opt_result[2]['funcalls']
self.status = rcstrings[opt_result[2]['warnflag']]
#a more helpful error message is available in opt_result in the Error case
if opt_result[2]['warnflag']==2: # pragma: no coverage, this is not needed to be covered
self.status = 'Error' + str(opt_result[2]['task']) | [
"\n Run the optimizer\n\n "
] |
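
The wrapper above ultimately hands a combined objective-and-gradient callable to scipy.optimize.fmin_l_bfgs_b. A self-contained sketch of that underlying call, using a toy quadratic instead of a model:

    import numpy as np
    from scipy import optimize

    def f_fp(x):
        # returns (objective, gradient), the form the wrapper expects for f_fp
        return np.sum((x - 3.0) ** 2), 2.0 * (x - 3.0)

    x_opt, f_opt, info = optimize.fmin_l_bfgs_b(f_fp, np.zeros(2), maxiter=100)
    print(x_opt, info['warnflag'])  # warnflag 0 maps to 'Converged' in the wrapper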
Please provide a description of the function:def opt(self, x_init, f_fp=None, f=None, fp=None):
rcstrings = ['','Maximum number of iterations exceeded', 'Gradient and/or function calls not changing']
opt_dict = {}
if self.xtol is not None:
print("WARNING: bfgs doesn't have an xtol arg, so I'm going to ignore it")
if self.ftol is not None:
print("WARNING: bfgs doesn't have an ftol arg, so I'm going to ignore it")
if self.gtol is not None:
opt_dict['gtol'] = self.gtol
opt_result = optimize.fmin_bfgs(f, x_init, fp, disp=self.messages,
maxiter=self.max_iters, full_output=True, **opt_dict)
self.x_opt = opt_result[0]
self.f_opt = f_fp(self.x_opt)[0]
self.funct_eval = opt_result[4]
self.status = rcstrings[opt_result[6]] | [
"\n Run the optimizer\n\n "
] |
Please provide a description of the function:def opt(self, x_init, f_fp=None, f=None, fp=None):
statuses = ['Converged', 'Maximum number of function evaluations made', 'Maximum number of iterations reached']
opt_dict = {}
if self.xtol is not None:
opt_dict['xtol'] = self.xtol
if self.ftol is not None:
opt_dict['ftol'] = self.ftol
if self.gtol is not None:
print("WARNING: simplex doesn't have an gtol arg, so I'm going to ignore it")
opt_result = optimize.fmin(f, x_init, (), disp=self.messages,
maxfun=self.max_f_eval, full_output=True, **opt_dict)
self.x_opt = opt_result[0]
self.f_opt = opt_result[1]
self.funct_eval = opt_result[3]
self.status = statuses[opt_result[4]]
self.trace = None | [
"\n The simplex optimizer does not require gradients.\n "
] |
Please provide a description of the function:def combine_inputs(self, args, kw, ignore_args):
"Combines the args and kw in a unique way, such that ordering of kwargs does not lead to recompute"
inputs= args + tuple(c[1] for c in sorted(kw.items(), key=lambda x: x[0]))
# REMOVE the ignored arguments from input and PREVENT it from being checked!!!
return [a for i,a in enumerate(inputs) if i not in ignore_args] | [] |
Please provide a description of the function:def prepare_cache_id(self, combined_args_kw):
"get the cacheid (conc. string of argument self.ids in order)"
cache_id = "".join(self.id(a) for a in combined_args_kw)
return cache_id | [] |
Please provide a description of the function:def ensure_cache_length(self):
"Ensures the cache is within its limits and has one place free"
if len(self.order) == self.limit:
# we have reached the limit, so lets release one element
cache_id = self.order.popleft()
combined_args_kw = self.cached_inputs[cache_id]
for ind in combined_args_kw:
ind_id = self.id(ind)
tmp = self.cached_input_ids.get(ind_id, None)
if tmp is not None:
ref, cache_ids = tmp
if len(cache_ids) == 1 and ref() is not None:
ref().remove_observer(self, self.on_cache_changed)
del self.cached_input_ids[ind_id]
else:
cache_ids.remove(cache_id)
self.cached_input_ids[ind_id] = [ref, cache_ids]
try:
del self.cached_outputs[cache_id]
except KeyError:
# Was not cached before, possibly a keyboard interrupt
pass
try:
del self.inputs_changed[cache_id]
except KeyError:
# Was not cached before, possibly a keyboard interrupt
pass
try:
del self.cached_inputs[cache_id]
except KeyError:
# Was not cached before, possibly a keyboard interrupt
pass | [] |
Please provide a description of the function:def add_to_cache(self, cache_id, inputs, output):
self.inputs_changed[cache_id] = False
self.cached_outputs[cache_id] = output
self.order.append(cache_id)
self.cached_inputs[cache_id] = inputs
for a in inputs:
if a is not None and not isinstance(a, Number) and not isinstance(a, str):
ind_id = self.id(a)
v = self.cached_input_ids.get(ind_id, [weakref.ref(a), []])
v[1].append(cache_id)
if len(v[1]) == 1:
a.add_observer(self, self.on_cache_changed)
self.cached_input_ids[ind_id] = v | [
"This adds cache_id to the cache, with inputs and output"
] |
Please provide a description of the function:def on_cache_changed(self, direct, which=None):
for what in [direct, which]:
ind_id = self.id(what)
_, cache_ids = self.cached_input_ids.get(ind_id, [None, []])
for cache_id in cache_ids:
self.inputs_changed[cache_id] = True | [
"\n A callback funtion, which sets local flags when the elements of some cached inputs change\n\n this function gets 'hooked up' to the inputs when we cache them, and upon their elements being changed we update here.\n "
] |
Please provide a description of the function:def reset(self):
[a().remove_observer(self, self.on_cache_changed) if (a() is not None) else None for [a, _] in self.cached_input_ids.values()]
self.order = collections.deque()
self.cached_inputs = {} # point from cache_ids to a list of [ind_ids], which where used in cache cache_id
#=======================================================================
# point from each ind_id to [ref(obj), cache_ids]
# 0: a weak reference to the object itself
# 1: the cache_ids in which this ind_id is used (len will be how many times we have seen this ind_id)
self.cached_input_ids = {}
#=======================================================================
self.cached_outputs = {} # point from cache_ids to outputs
self.inputs_changed = {} | [
"\n Totally reset the cache\n "
] |
Please provide a description of the function:def disable_caching(self):
"Disable the cache of this object. This also removes previously cached results"
self.caching_enabled = False
for c in self.values():
c.disable_cacher() | [] |
Please provide a description of the function:def enable_caching(self):
"Enable the cache of this object."
self.caching_enabled = True
for c in self.values():
c.enable_cacher() | [] |
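
The Cacher machinery above is usually driven through a decorator rather than used directly. A hedged sketch, assuming the decorator is exposed as paramz.caching.Cache_this and that the cached inputs are observable arrays (non-observable inputs are typically passed through uncached):

    from paramz.caching import Cache_this   # assumed decorator name and module

    class Expensive(object):
        @Cache_this(limit=3, ignore_args=())
        def compute(self, X):
            # recomputed only when X (an observable array) changes or is evicted
            return (X ** 2).sum()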
Please provide a description of the function:def SCG(f, gradf, x, optargs=(), maxiters=500, max_f_eval=np.inf, xtol=None, ftol=None, gtol=None):
if xtol is None:
xtol = 1e-6
if ftol is None:
ftol = 1e-6
if gtol is None:
gtol = 1e-5
sigma0 = 1.0e-7
fold = f(x, *optargs) # Initial function value.
function_eval = 1
fnow = fold
gradnew = gradf(x, *optargs) # Initial gradient.
function_eval += 1
#if any(np.isnan(gradnew)):
# raise UnexpectedInfOrNan, "Gradient contribution resulted in a NaN value"
current_grad = np.dot(gradnew, gradnew)
gradold = gradnew.copy()
d = -gradnew # Initial search direction.
success = True # Force calculation of directional derivs.
nsuccess = 0 # nsuccess counts number of successes.
beta = 1.0 # Initial scale parameter.
betamin = 1.0e-15 # Lower bound on scale.
betamax = 1.0e15 # Upper bound on scale.
status = "Not converged"
flog = [fold]
iteration = 0
# Main optimization loop.
while iteration < maxiters:
# Calculate first and second directional derivatives.
if success:
mu = np.dot(d, gradnew)
if mu >= 0: # pragma: no cover
d = -gradnew
mu = np.dot(d, gradnew)
kappa = np.dot(d, d)
sigma = sigma0 / np.sqrt(kappa)
xplus = x + sigma * d
gplus = gradf(xplus, *optargs)
function_eval += 1
theta = np.dot(d, (gplus - gradnew)) / sigma
# Increase effective curvature and evaluate step size alpha.
delta = theta + beta * kappa
if delta <= 0: # pragma: no cover
delta = beta * kappa
beta = beta - theta / kappa
alpha = -mu / delta
# Calculate the comparison ratio.
xnew = x + alpha * d
fnew = f(xnew, *optargs)
function_eval += 1
Delta = 2.*(fnew - fold) / (alpha * mu)
if Delta >= 0.:
success = True
nsuccess += 1
x = xnew
fnow = fnew
else:
success = False
fnow = fold
# Store relevant variables
flog.append(fnow) # Current function value
iteration += 1
if success:
# Test for termination
if (np.abs(fnew - fold) < ftol):
status = 'converged - relative reduction in objective'
break
# return x, flog, function_eval, status
elif (np.max(np.abs(alpha * d)) < xtol):
status = 'converged - relative stepsize'
break
else:
# Update variables for new position
gradold = gradnew
gradnew = gradf(x, *optargs)
function_eval += 1
current_grad = np.dot(gradnew, gradnew)
fold = fnew
# If the gradient is zero then we are done.
if current_grad <= gtol:
status = 'converged - relative reduction in gradient'
break
# return x, flog, function_eval, status
# Adjust beta according to comparison ratio.
if Delta < 0.25:
beta = min(4.0 * beta, betamax)
if Delta > 0.75:
beta = max(0.25 * beta, betamin)
# Update search direction using Polak-Ribiere formula, or re-start
# in direction of negative gradient after nparams steps.
if nsuccess == x.size:
d = -gradnew
beta = 1. # This is not in the original paper
nsuccess = 0
elif success:
Gamma = np.dot(gradold - gradnew, gradnew) / (mu)
d = Gamma * d - gradnew
else:
# If we get here, then we haven't terminated in the given number of
# iterations.
status = "maxiter exceeded"
return x, flog, function_eval, status | [
"\n Optimisation through Scaled Conjugate Gradients (SCG)\n\n f: the objective function\n gradf : the gradient function (should return a 1D np.ndarray)\n x : the initial condition\n\n Returns\n x the optimal value for x\n flog : a list of all the objective values\n function_eval number of fn evaluations\n status: string describing convergence status\n "
] |
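
SCG only needs the objective and its gradient, so it can be exercised on a toy problem directly (a minimal sketch using the signature shown above):

    import numpy as np

    def f(x):
        return np.sum((x - 1.0) ** 2)

    def gradf(x):
        return 2.0 * (x - 1.0)

    x_opt, flog, n_evals, status = SCG(f, gradf, np.zeros(3), maxiters=200)
    print(status, x_opt)  # e.g. 'converged - relative reduction in objective'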
Please provide a description of the function:def remove(self, priority, observer, callble):
self.flush()
for i in range(len(self) - 1, -1, -1):
p,o,c = self[i]
if priority==p and observer==o and callble==c:
del self._poc[i] | [
"\n Remove one observer, which had priority and callble.\n "
] |
Please provide a description of the function:def add(self, priority, observer, callble):
#if observer is not None:
ins = 0
for pr, _, _ in self:
if priority > pr:
break
ins += 1
self._poc.insert(ins, (priority, weakref.ref(observer), callble)) | [
"\n Add an observer with priority and callble\n "
] |
Please provide a description of the function:def properties_for(self, index):
return vectorize(lambda i: [prop for prop in self.properties() if i in self[prop]], otypes=[list])(index) | [
"\n Returns a list of properties, such that each entry in the list corresponds\n to the element of the index given.\n\n Example:\n let properties: 'one':[1,2,3,4], 'two':[3,5,6]\n\n >>> properties_for([2,3,5])\n [['one'], ['one', 'two'], ['two']]\n "
] |
Please provide a description of the function:def properties_dict_for(self, index):
props = self.properties_for(index)
prop_index = extract_properties_to_index(index, props)
return prop_index | [
"\n Return a dictionary, containing properties as keys and indices as index\n Thus, the indices for each constraint, which is contained will be collected as\n one dictionary\n\n Example:\n let properties: 'one':[1,2,3,4], 'two':[3,5,6]\n\n >>> properties_dict_for([2,3,5])\n {'one':[2,3], 'two':[3,5]}\n "
] |
Please provide a description of the function:def optimize(self, optimizer=None, start=None, messages=False, max_iters=1000, ipython_notebook=True, clear_after_finish=False, **kwargs):
if self.is_fixed or self.size == 0:
print('nothing to optimize')
return
if not self.update_model():
print("updates were off, setting updates on again")
self.update_model(True)
if start is None:
start = self.optimizer_array
if optimizer is None:
optimizer = self.preferred_optimizer
if isinstance(optimizer, optimization.Optimizer):
opt = optimizer
opt.model = self
else:
optimizer = optimization.get_optimizer(optimizer)
opt = optimizer(max_iters=max_iters, **kwargs)
with VerboseOptimization(self, opt, maxiters=max_iters, verbose=messages, ipython_notebook=ipython_notebook, clear_after_finish=clear_after_finish) as vo:
opt.run(start, f_fp=self._objective_grads, f=self._objective, fp=self._grads)
self.optimizer_array = opt.x_opt
self.optimization_runs.append(opt)
return opt | [
"\n Optimize the model using self.log_likelihood and self.log_likelihood_gradient, as well as self.priors.\n\n kwargs are passed to the optimizer. They can be:\n\n :param max_iters: maximum number of function evaluations\n :type max_iters: int\n :messages: True: Display messages during optimisation, \"ipython_notebook\":\n :type messages: bool\"string\n :param optimizer: which optimizer to use (defaults to self.preferred optimizer)\n :type optimizer: string\n\n Valid optimizers are:\n - 'scg': scaled conjugate gradient method, recommended for stability.\n See also GPy.inference.optimization.scg\n - 'fmin_tnc': truncated Newton method (see scipy.optimize.fmin_tnc)\n - 'simplex': the Nelder-Mead simplex method (see scipy.optimize.fmin),\n - 'lbfgsb': the l-bfgs-b method (see scipy.optimize.fmin_l_bfgs_b),\n - 'lbfgs': the bfgs method (see scipy.optimize.fmin_bfgs),\n - 'sgd': stochastic gradient decsent (see scipy.optimize.sgd). For experts only!\n\n\n "
] |
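
For optimize() to have something to work on, a model needs an objective and gradients. The sketch below loosely follows the paramz README pattern; the hooks used (objective_function, parameters_changed, link_parameter) are assumptions to check against your paramz version.

    import numpy as np
    from paramz import Model, Param   # assumed top-level exports

    class Quadratic(Model):
        """Minimise (x - 1)^2 -- just enough structure to drive optimize()."""
        def __init__(self):
            super(Quadratic, self).__init__('quadratic')
            self.x = Param('x', np.array([5.0]))
            self.link_parameter(self.x)

        def objective_function(self):
            return float(np.sum((self.x.values - 1.0) ** 2))

        def parameters_changed(self):
            # gradients written into .gradient are collected by _grads()
            self.x.gradient = 2.0 * (self.x.values - 1.0)

    m = Quadratic()
    m.optimize('lbfgsb', messages=False)
    print(m.x)   # close to 1.0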
Please provide a description of the function:def optimize_restarts(self, num_restarts=10, robust=False, verbose=True, parallel=False, num_processes=None, **kwargs):
initial_length = len(self.optimization_runs)
initial_parameters = self.optimizer_array.copy()
if parallel: #pragma: no cover
try:
pool = mp.Pool(processes=num_processes)
obs = [self.copy() for i in range(num_restarts)]
[obs[i].randomize() for i in range(num_restarts-1)]
jobs = pool.map(opt_wrapper, [(o,kwargs) for o in obs])
pool.close()
pool.join()
except KeyboardInterrupt:
print("Ctrl+c received, terminating and joining pool.")
pool.terminate()
pool.join()
for i in range(num_restarts):
try:
if not parallel:
if i > 0:
self.randomize()
self.optimize(**kwargs)
else:#pragma: no cover
self.optimization_runs.append(jobs[i])
if verbose:
print(("Optimization restart {0}/{1}, f = {2}".format(i + 1, num_restarts, self.optimization_runs[-1].f_opt)))
except Exception as e:
if robust:
print(("Warning - optimization restart {0}/{1} failed".format(i + 1, num_restarts)))
else:
raise e
if len(self.optimization_runs) > initial_length:
# This works, since failed jobs don't get added to the optimization_runs.
i = np.argmin([o.f_opt for o in self.optimization_runs[initial_length:]])
self.optimizer_array = self.optimization_runs[initial_length + i].x_opt
else:
self.optimizer_array = initial_parameters
return self.optimization_runs | [
"\n Perform random restarts of the model, and set the model to the best\n seen solution.\n\n If the robust flag is set, exceptions raised during optimizations will\n be handled silently. If _all_ runs fail, the model is reset to the\n existing parameter values.\n\n \\*\\*kwargs are passed to the optimizer.\n\n :param num_restarts: number of restarts to use (default 10)\n :type num_restarts: int\n :param robust: whether to handle exceptions silently or not (default False)\n :type robust: bool\n :param parallel: whether to run each restart as a separate process. It relies on the multiprocessing module.\n :type parallel: bool\n :param num_processes: number of workers in the multiprocessing pool\n :type numprocesses: int\n :param max_f_eval: maximum number of function evaluations\n :type max_f_eval: int\n :param max_iters: maximum number of iterations\n :type max_iters: int\n :param messages: whether to display during optimisation\n :type messages: bool\n\n .. note::\n\n If num_processes is None, the number of workes in the\n multiprocessing pool is automatically set to the number of processors\n on the current machine.\n\n "
] |
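
Usage sketch, continuing the hypothetical Quadratic model m from the earlier example:

    m.optimize_restarts(num_restarts=5, robust=True, max_iters=200)
    print(len(m.optimization_runs), m.objective_function())  # the best run is kept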
Please provide a description of the function:def _grads(self, x):
try:
# self._set_params_transformed(x)
self.optimizer_array = x
self.obj_grads = self._transform_gradients(self.objective_function_gradients())
self._fail_count = 0
except (LinAlgError, ZeroDivisionError, ValueError): #pragma: no cover
if self._fail_count >= self._allowed_failures:
raise
self._fail_count += 1
self.obj_grads = np.clip(self._transform_gradients(self.objective_function_gradients()), -1e100, 1e100)
return self.obj_grads | [
"\n Gets the gradients from the likelihood and the priors.\n\n Failures are handled robustly. The algorithm will try several times to\n return the gradients, and will raise the original exception if\n the objective cannot be computed.\n\n :param x: the parameters of the model.\n :type x: np.array\n "
] |
Please provide a description of the function:def _objective(self, x):
try:
self.optimizer_array = x
obj = self.objective_function()
self._fail_count = 0
except (LinAlgError, ZeroDivisionError, ValueError):#pragma: no cover
if self._fail_count >= self._allowed_failures:
raise
self._fail_count += 1
return np.inf
return obj | [
"\n The objective function passed to the optimizer. It combines\n the likelihood and the priors.\n\n Failures are handled robustly. The algorithm will try several times to\n return the objective, and will raise the original exception if\n the objective cannot be computed.\n\n :param x: the parameters of the model.\n :parameter type: np.array\n "
] |
Please provide a description of the function:def _checkgrad(self, target_param=None, verbose=False, step=1e-6, tolerance=1e-3, df_tolerance=1e-12):
if not self._model_initialized_:
import warnings
warnings.warn("This model has not been initialized, try model.inititialize_model()", RuntimeWarning)
return False
x = self.optimizer_array.copy()
if not verbose:
# make sure only to test the selected parameters
if target_param is None:
transformed_index = np.arange(len(x))
else:
transformed_index = self._raveled_index_for_transformed(target_param)
if transformed_index.size == 0:
print("No free parameters to check")
return True
# just check the global ratio
dx = np.zeros(x.shape)
dx[transformed_index] = step * (np.sign(np.random.uniform(-1, 1, transformed_index.size)) if transformed_index.size != 2 else 1.)
# evaulate around the point x
f1 = self._objective(x + dx)
f2 = self._objective(x - dx)
gradient = self._grads(x)
dx = dx[transformed_index]
gradient = gradient[transformed_index]
denominator = (2 * np.dot(dx, gradient))
global_ratio = (f1 - f2) / np.where(denominator == 0., 1e-32, denominator)
global_diff = np.abs(f1 - f2) < tolerance and np.allclose(gradient, 0, atol=tolerance)
if global_ratio is np.nan: # pragma: no cover
global_ratio = 0
return np.abs(1. - global_ratio) < tolerance or global_diff
else:
# check the gradient of each parameter individually, and do some pretty printing
try:
names = self.parameter_names_flat()
except NotImplementedError:
names = ['Variable %i' % i for i in range(len(x))]
# Prepare for pretty-printing
header = ['Name', 'Ratio', 'Difference', 'Analytical', 'Numerical', 'dF_ratio']
max_names = max([len(names[i]) for i in range(len(names))] + [len(header[0])])
float_len = 10
cols = [max_names]
cols.extend([max(float_len, len(header[i])) for i in range(1, len(header))])
cols = np.array(cols) + 5
header_string = ["{h:^{col}}".format(h=header[i], col=cols[i]) for i in range(len(cols))]
header_string = list(map(lambda x: '|'.join(x), [header_string]))
separator = '-' * len(header_string[0])
print('\n'.join([header_string[0], separator]))
if target_param is None:
target_param = self
transformed_index = self._raveled_index_for_transformed(target_param)
if transformed_index.size == 0:
print("No free parameters to check")
return True
gradient = self._grads(x).copy()
np.where(gradient == 0, 1e-312, gradient)
ret = True
for xind in zip(transformed_index):
xx = x.copy()
xx[xind] += step
f1 = float(self._objective(xx))
xx[xind] -= 2.*step
f2 = float(self._objective(xx))
#Avoid divide by zero, if any of the values are above 1e-15; otherwise both values are essentially
#the same
if f1 > 1e-15 or f1 < -1e-15 or f2 > 1e-15 or f2 < -1e-15:
df_ratio = np.abs((f1 - f2) / min(f1, f2))
else: # pragma: no cover
df_ratio = 1.0
df_unstable = df_ratio < df_tolerance
numerical_gradient = (f1 - f2) / (2. * step)
if np.all(gradient[xind] == 0): # pragma: no cover
ratio = (f1 - f2) == gradient[xind]
else:
ratio = (f1 - f2) / (2. * step * gradient[xind])
difference = np.abs(numerical_gradient - gradient[xind])
if (np.abs(1. - ratio) < tolerance) or np.abs(difference) < tolerance:
formatted_name = "\033[92m {0} \033[0m".format(names[xind])
ret &= True
else: # pragma: no cover
formatted_name = "\033[91m {0} \033[0m".format(names[xind])
ret &= False
if df_unstable: # pragma: no cover
formatted_name = "\033[94m {0} \033[0m".format(names[xind])
r = '%.6f' % float(ratio)
d = '%.6f' % float(difference)
g = '%.6f' % gradient[xind]
ng = '%.6f' % float(numerical_gradient)
df = '%1.e' % float(df_ratio)
grad_string = "{0:<{c0}}|{1:^{c1}}|{2:^{c2}}|{3:^{c3}}|{4:^{c4}}|{5:^{c5}}".format(formatted_name, r, d, g, ng, df, c0=cols[0] + 9, c1=cols[1], c2=cols[2], c3=cols[3], c4=cols[4], c5=cols[5])
print(grad_string)
self.optimizer_array = x
return ret | [
"\n Check the gradient of the ,odel by comparing to a numerical\n estimate. If the verbose flag is passed, individual\n components are tested (and printed)\n\n :param verbose: If True, print a \"full\" checking of each parameter\n :type verbose: bool\n :param step: The size of the step around which to linearise the objective\n :type step: float (default 1e-6)\n :param tolerance: the tolerance allowed (see note)\n :type tolerance: float (default 1e-3)\n\n Note:-\n The gradient is considered correct if the ratio of the analytical\n and numerical gradients is within <tolerance> of unity.\n\n The *dF_ratio* indicates the limit of numerical accuracy of numerical gradients.\n If it is too small, e.g., smaller than 1e-12, the numerical gradients are usually\n not accurate enough for the tests (shown with blue).\n "
] |
Please provide a description of the function:def _repr_html_(self):
model_details = [['<b>Model</b>', self.name + '<br>'],
['<b>Objective</b>', '{}<br>'.format(float(self.objective_function()))],
["<b>Number of Parameters</b>", '{}<br>'.format(self.size)],
["<b>Number of Optimization Parameters</b>", '{}<br>'.format(self._size_transformed())],
["<b>Updates</b>", '{}<br>'.format(self._update_on)],
]
from operator import itemgetter
to_print = [] + ["<p class=pd>"] + ["{}: {}".format(name, detail) for name, detail in model_details] + ["</p>"]
to_print.append(super(Model, self)._repr_html_())
return "\n".join(to_print) | [
"Representation of the model in html for notebook display.",
"<style type=\"text/css\">\n.pd{\n font-family: \"Courier New\", Courier, monospace !important;\n width: 100%;\n padding: 3px;\n}\n</style>\\n"
] |
Please provide a description of the function:def add_index_operation(self, name, operations):
if name not in self._index_operations:
self._add_io(name, operations)
else:
raise AttributeError("An index operation with the name {} was already taken".format(name)) | [
"\n Add index operation with name to the operations given.\n\n raises: attribute error if operations exist.\n "
] |
Please provide a description of the function:def _disconnect_parent(self, *args, **kw):
for name, iop in list(self._index_operations.items()):
iopc = iop.copy()
iop.clear()
self.remove_index_operation(name)
self.add_index_operation(name, iopc)
#self.constraints.clear()
#self.constraints = constr
self._parent_ = None
self._parent_index_ = None
self._connect_fixes()
self._notify_parent_change() | [
"\n From Parentable:\n disconnect the parent and set the new constraints to constr\n "
] |
Please provide a description of the function:def _offset_for(self, param):
if param.has_parent():
p = param._parent_._get_original(param)
if p in self.parameters:
return reduce(lambda a,b: a + b.size, self.parameters[:p._parent_index_], 0)
return self._offset_for(param._parent_) + param._parent_._offset_for(param)
return 0 | [
"\n Return the offset of the param inside this parameterized object.\n This does not need to account for shaped parameters, as it\n basically just sums up the parameter sizes which come before param.\n "
] |
Please provide a description of the function:def _raveled_index_for(self, param):
from ..param import ParamConcatenation
if isinstance(param, ParamConcatenation):
return np.hstack((self._raveled_index_for(p) for p in param.params))
return param._raveled_index() + self._offset_for(param) | [
"\n get the raveled index for a param\n that is an int array, containing the indexes for the flattened\n param inside this parameterized logic.\n\n !Warning! be sure to call this method on the highest parent of a hierarchy,\n as it uses the fixes to do its work\n "
] |
Please provide a description of the function:def _raveled_index_for_transformed(self, param):
ravi = self._raveled_index_for(param)
if self._has_fixes():
fixes = self._fixes_
### Transformed indices, handling the offsets of previous fixes
transformed = (np.r_[:self.size] - (~fixes).cumsum())
return transformed[ravi[fixes[ravi]]]
else:
return ravi | [
"\n get the raveled index for a param for the transformed parameter array\n (optimizer array).\n\n that is an int array, containing the indexes for the flattened\n param inside this parameterized logic.\n\n !Warning! be sure to call this method on the highest parent of a hierarchy,\n as it uses the fixes to do its work. If you do not know\n what you are doing, do not use this method, it will have\n unexpected returns!\n "
] |
Please provide a description of the function:def _parent_changed(self, parent):
from .index_operations import ParameterIndexOperationsView
#if getattr(self, "_in_init_"):
#import ipdb;ipdb.set_trace()
#self.constraints.update(param.constraints, start)
#self.priors.update(param.priors, start)
offset = parent._offset_for(self)
for name, iop in list(self._index_operations.items()):
self.remove_index_operation(name)
self.add_index_operation(name, ParameterIndexOperationsView(parent._index_operations[name], offset, self.size))
self._fixes_ = None
for p in self.parameters:
p._parent_changed(parent) | [
"\n From Parentable:\n Called when the parent changed\n\n update the constraints and priors view, so that\n constraining is automized for the parent.\n "
] |
Please provide a description of the function:def _add_to_index_operations(self, which, reconstrained, what, warning):
if warning and reconstrained.size > 0:
# TODO: figure out which parameters have changed and only print those
logging.getLogger(self.name).warning("reconstraining parameters {}".format(self.hierarchy_name() or self.name))
index = self._raveled_index()
which.add(what, index)
return index | [
"\n Helper preventing copy code.\n This adds the given what (transformation, prior etc) to parameter index operations which.\n reconstrained are reconstrained indices.\n warn when reconstraining parameters if warning is True.\n TODO: find out which parameters have changed specifically\n "
] |
Please provide a description of the function:def _remove_from_index_operations(self, which, transforms):
if len(transforms) == 0:
transforms = which.properties()
removed = np.empty((0,), dtype=int)
for t in list(transforms):
unconstrained = which.remove(t, self._raveled_index())
removed = np.union1d(removed, unconstrained)
if t is __fixed__:
self._highest_parent_._set_unfixed(self, unconstrained)
return removed | [
"\n Helper preventing copy code.\n Remove given what (transform prior etc) from which param index ops.\n "
] |
Please provide a description of the function:def copy(self):
from .lists_and_dicts import ObserverList
memo = {}
memo[id(self)] = self
memo[id(self.observers)] = ObserverList()
return self.__deepcopy__(memo) | [
"\n Make a copy. This means, we delete all observers and return a copy of this\n array. It will still be an ObsAr!\n "
] |
Please provide a description of the function:def update_model(self, updates=None):
if updates is None:
return self._update_on
assert isinstance(updates, bool), "updates are either on (True) or off (False)"
p = getattr(self, '_highest_parent_', None)
def turn_updates(s):
s._update_on = updates
p.traverse(turn_updates)
self.trigger_update() | [
"\n Get or set, whether automatic updates are performed. When updates are\n off, the model might be in a non-working state. To make the model work\n turn updates on again.\n\n :param bool|None updates:\n\n bool: whether to do updates\n None: get the current update state\n "
] |
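
This is the hook for batching several parameter edits without recomputing after each one (again using the hypothetical model m from the Quadratic sketch):

    m.update_model(False)   # switch automatic updates off
    m.x[:] = 3.0            # edit as many parameters as needed
    m.update_model(True)    # switch back on; a single update is triggered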
Please provide a description of the function:def trigger_update(self, trigger_parent=True):
if not self.update_model() or (hasattr(self, "_in_init_") and self._in_init_):
#print "Warning: updates are off, updating the model will do nothing"
return
self._trigger_params_changed(trigger_parent) | [
"\n Update the model from the current state.\n Make sure that updates are on, otherwise this\n method will do nothing\n\n :param bool trigger_parent: Whether to trigger the parent, after self has updated\n "
] |
Please provide a description of the function:def optimizer_array(self):
if self.__dict__.get('_optimizer_copy_', None) is None or self.size != self._optimizer_copy_.size:
self._optimizer_copy_ = np.empty(self.size)
if not self._optimizer_copy_transformed:
self._optimizer_copy_.flat = self.param_array.flat
#py3 fix
#[np.put(self._optimizer_copy_, ind, c.finv(self.param_array[ind])) for c, ind in self.constraints.iteritems() if c != __fixed__]
[np.put(self._optimizer_copy_, ind, c.finv(self.param_array[ind])) for c, ind in self.constraints.items() if c != __fixed__]
self._optimizer_copy_transformed = True
if self._has_fixes():# or self._has_ties()):
self._ensure_fixes()
return self._optimizer_copy_[self._fixes_]
return self._optimizer_copy_ | [
"\n Array for the optimizer to work on.\n This array always lives in the space for the optimizer.\n Thus, it is untransformed, going from Transformations.\n\n Setting this array, will make sure the transformed parameters for this model\n will be set accordingly. It has to be set with an array, retrieved from\n this method, as e.g. fixing will resize the array.\n\n The optimizer should only interfere with this array, such that transformations\n are secured.\n "
] |
Please provide a description of the function:def optimizer_array(self, p):
f = None
if self.has_parent() and self.constraints[__fixed__].size != 0:
f = np.ones(self.size).astype(bool)
f[self.constraints[__fixed__]] = FIXED
elif self._has_fixes():
f = self._fixes_
if f is None:
self.param_array.flat = p
[np.put(self.param_array, ind, c.f(self.param_array.flat[ind]))
#py3 fix
#for c, ind in self.constraints.iteritems() if c != __fixed__]
for c, ind in self.constraints.items() if c != __fixed__]
else:
self.param_array.flat[f] = p
[np.put(self.param_array, ind[f[ind]], c.f(self.param_array.flat[ind[f[ind]]]))
#py3 fix
#for c, ind in self.constraints.iteritems() if c != __fixed__]
for c, ind in self.constraints.items() if c != __fixed__]
#self._highest_parent_.tie.propagate_val()
self._optimizer_copy_transformed = False
self.trigger_update() | [
"\n Make sure the optimizer copy does not get touched, thus, we only want to\n set the values *inside* not the array itself.\n\n Also we want to update param_array in here.\n "
] |
Please provide a description of the function:def _trigger_params_changed(self, trigger_parent=True):
[p._trigger_params_changed(trigger_parent=False) for p in self.parameters if not p.is_fixed]
self.notify_observers(None, None if trigger_parent else -np.inf) | [
"\n First tell all children to update,\n then update yourself.\n\n If trigger_parent is True, we will tell the parent, otherwise not.\n "
] |
Please provide a description of the function:def _transform_gradients(self, g):
#py3 fix
#[np.put(g, i, c.gradfactor(self.param_array[i], g[i])) for c, i in self.constraints.iteritems() if c != __fixed__]
[np.put(g, i, c.gradfactor(self.param_array[i], g[i])) for c, i in self.constraints.items() if c != __fixed__]
if self._has_fixes(): return g[self._fixes_]
return g | [
"\n Transform the gradients by multiplying the gradient factor for each\n constraint to it.\n "
] |
Please provide a description of the function:def parameter_names(self, add_self=False, adjust_for_printing=False, recursive=True, intermediate=False):
if adjust_for_printing: adjust = adjust_name_for_printing
else: adjust = lambda x: x
names = []
if intermediate or (not recursive):
names.extend([adjust(x.name) for x in self.parameters])
if intermediate or recursive: names.extend([
xi for x in self.parameters for xi in
x.parameter_names(add_self=True,
adjust_for_printing=adjust_for_printing,
recursive=True,
intermediate=False)])
if add_self: names = map(lambda x: adjust(self.name) + "." + x, names)
return names | [
"\n Get the names of all parameters of this model or parameter. It starts\n from the parameterized object you are calling this method on.\n\n Note: This does not unravel multidimensional parameters,\n use parameter_names_flat to unravel parameters!\n\n :param bool add_self: whether to add the own name in front of names\n :param bool adjust_for_printing: whether to call `adjust_name_for_printing` on names\n :param bool recursive: whether to traverse through hierarchy and append leaf node names\n :param bool intermediate: whether to add intermediate names, that is parameterized objects\n "
] |
Please provide a description of the function:def parameter_names_flat(self, include_fixed=False):
name_list = []
for p in self.flattened_parameters:
name = p.hierarchy_name()
if p.size > 1:
name_list.extend(["{}[{!s}]".format(name, i) for i in p._indices()])
else:
name_list.append(name)
name_list = np.array(name_list)
if not include_fixed and self._has_fixes():
return name_list[self._fixes_]
return name_list | [
"\n Return the flattened parameter names for all subsequent parameters\n of this parameter. We do not include the name for self here!\n\n If you want the names for fixed parameters as well in this list,\n set include_fixed to True.\n if not hasattr(obj, 'cache'):\n obj.cache = FunctionCacher()\n :param bool include_fixed: whether to include fixed names here.\n "
] |
Please provide a description of the function:def randomize(self, rand_gen=None, *args, **kwargs):
if rand_gen is None:
rand_gen = np.random.normal
# first take care of all parameters (from N(0,1))
x = rand_gen(size=self._size_transformed(), *args, **kwargs)
updates = self.update_model()
self.update_model(False) # Switch off the updates
self.optimizer_array = x # makes sure all of the tied parameters get the same init (since there's only one prior object...)
# now draw from prior where possible
x = self.param_array.copy()
unfixlist = np.ones((self.size,),dtype=np.bool)
unfixlist[self.constraints[__fixed__]] = False
self.param_array.flat[unfixlist] = x.view(np.ndarray).ravel()[unfixlist]
self.update_model(updates) | [
"\n Randomize the model.\n Make this draw from the rand_gen if one exists, else draw random normal(0,1)\n\n :param rand_gen: np random number generator which takes args and kwargs\n :param flaot loc: loc parameter for random number generator\n :param float scale: scale parameter for random number generator\n :param args, kwargs: will be passed through to random number generator\n "
] |
Please provide a description of the function:def _propagate_param_grad(self, parray, garray):
#if self.param_array.size != self.size:
# self._param_array_ = np.empty(self.size, dtype=np.float64)
#if self.gradient.size != self.size:
# self._gradient_array_ = np.empty(self.size, dtype=np.float64)
pi_old_size = 0
for pi in self.parameters:
pislice = slice(pi_old_size, pi_old_size + pi.size)
self.param_array[pislice] = pi.param_array.flat # , requirements=['C', 'W']).flat
self.gradient_full[pislice] = pi.gradient_full.flat # , requirements=['C', 'W']).flat
pi.param_array.data = parray[pislice].data
pi.gradient_full.data = garray[pislice].data
pi._propagate_param_grad(parray[pislice], garray[pislice])
pi_old_size += pi.size
self._model_initialized_ = True | [
"\n For propagating the param_array and gradient_array.\n This ensures the in memory view of each subsequent array.\n\n 1.) connect param_array of children to self.param_array\n 2.) tell all children to propagate further\n "
] |
Please provide a description of the function:def initialize_parameter(self):
#logger.debug("connecting parameters")
self._highest_parent_._notify_parent_change()
self._highest_parent_._connect_parameters() #logger.debug("calling parameters changed")
self._highest_parent_._connect_fixes()
self.trigger_update() | [
"\n Call this function to initialize the model, if you built it without initialization.\n\n This HAS to be called manually before optmizing or it will be causing\n unexpected behaviour, if not errors!\n "
] |
Please provide a description of the function:def param_array(self):
if (self.__dict__.get('_param_array_', None) is None) or (self._param_array_.size != self.size):
self._param_array_ = np.empty(self.size, dtype=np.float64)
return self._param_array_ | [
"\n Array representing the parameters of this class.\n There is only one copy of all parameters in memory, two during optimization.\n\n !WARNING!: setting the parameter array MUST always be done in memory:\n m.param_array[:] = m_copy.param_array\n "
] |
Please provide a description of the function:def unfixed_param_array(self):
if self.constraints[__fixed__].size !=0:
fixes = np.ones(self.size).astype(bool)
fixes[self.constraints[__fixed__]] = FIXED
return self._param_array_[fixes]
else:
return self._param_array_ | [
"\n Array representing the parameters of this class.\n There is only one copy of all parameters in memory, two during optimization.\n\n !WARNING!: setting the parameter array MUST always be done in memory:\n m.param_array[:] = m_copy.param_array\n "
] |
Please provide a description of the function:def traverse(self, visit, *args, **kwargs):
if not self.__visited:
visit(self, *args, **kwargs)
self.__visited = True
self._traverse(visit, *args, **kwargs)
self.__visited = False | [
"\n Traverse the hierarchy performing `visit(self, *args, **kwargs)`\n at every node passed by downwards. This function includes self!\n\n See *visitor pattern* in literature. This is implemented in pre-order fashion.\n\n Example::\n\n #Collect all children:\n\n children = []\n self.traverse(children.append)\n print children\n\n "
] |
Please provide a description of the function:def traverse_parents(self, visit, *args, **kwargs):
if self.has_parent():
self.__visited = True
self._parent_.traverse_parents(visit, *args, **kwargs)
self._parent_.traverse(visit, *args, **kwargs)
self.__visited = False | [
"\n Traverse the hierarchy upwards, visiting all parents and their children except self.\n See \"visitor pattern\" in literature. This is implemented in pre-order fashion.\n\n Example:\n\n parents = []\n self.traverse_parents(parents.append)\n print parents\n "
] |
Please provide a description of the function:def save(self, filename, ftype='HDF5'): # pragma: no coverage
from ..param import Param
def gather_params(self, plist):
if isinstance(self,Param):
plist.append(self)
plist = []
self.traverse(gather_params, plist)
names = self.parameter_names(adjust_for_printing=True)
if ftype=='HDF5':
try:
import h5py
f = h5py.File(filename,'w')
for p,n in zip(plist,names):
n = n.replace('.','_')
p = p.values
d = f.create_dataset(n,p.shape,dtype=p.dtype)
d[:] = p
if hasattr(self, 'param_array'):
d = f.create_dataset('param_array',self.param_array.shape, dtype=self.param_array.dtype)
d[:] = self.param_array
f.close()
except:
raise RuntimeError('Failed to write the parameters into an HDF5 file!')
"\n Save all the model parameters into a file (HDF5 by default).\n\n This is not supported yet. We are working on having a consistent,\n human readable way of saving and loading GPy models. This only\n saves the parameter array to a hdf5 file. In order\n to load the model again, use the same script for building the model\n you used to build this model. Then load the param array from this hdf5\n file and set the parameters of the created model:\n\n >>> m[:] = h5_file['param_array']\n\n This is less then optimal, we are working on a better solution to that.\n "
] |
Please provide a description of the function:def phi(self, Xpred, degrees=None):
assert Xpred.shape[1] == self.X.shape[1], "Need to predict with same shape as training data."
if degrees is None:
degrees = range(self.basis.degree+1)
tmp_phi = np.empty((len(degrees), Xpred.shape[0], Xpred.shape[1]))
for i, w in enumerate(degrees):
# Objective function
tmpX = self._phi(Xpred, w)
tmp_phi[i] = tmpX * self.weights[[w], :]
return tmp_phi | [
"\n Compute the design matrix for this model\n using the degrees given by the index array\n in degrees\n\n :param array-like Xpred: inputs to compute the design matrix for\n :param array-like degrees: array of degrees to use [default=range(self.degree+1)]\n :returns array-like phi: The design matrix [degree x #samples x #dimensions]\n "
] |
Please provide a description of the function:def pickle(self, f, protocol=-1):
try: #Py2
import cPickle as pickle
if isinstance(f, basestring):
with open(f, 'wb') as f:
pickle.dump(self, f, protocol)
else:
pickle.dump(self, f, protocol)
except ImportError: #python3
import pickle
if isinstance(f, str):
with open(f, 'wb') as f:
pickle.dump(self, f, protocol)
else:
pickle.dump(self, f, protocol) | [
"\n :param f: either filename or open file object to write to.\n if it is an open buffer, you have to make sure to close\n it properly.\n :param protocol: pickling protocol to use, python-pickle for details.\n "
] |
Please provide a description of the function:def copy(self, memo=None, which=None):
#raise NotImplementedError, "Copy is not yet implemented, TODO: Observable hierarchy"
if memo is None:
memo = {}
import copy
# the next part makes sure that we do not include parents in any form:
parents = []
if which is None:
which = self
which.traverse_parents(parents.append) # collect parents
for p in parents:
if not id(p) in memo :memo[id(p)] = None # set all parents to be None, so they will not be copied
if not id(self.gradient) in memo:memo[id(self.gradient)] = None # reset the gradient
if not id(self._fixes_) in memo :memo[id(self._fixes_)] = None # fixes have to be reset, as this is now highest parent
copy = copy.deepcopy(self, memo) # and start the copy
copy._parent_index_ = None
copy._trigger_params_changed()
return copy | [
"\n Returns a (deep) copy of the current parameter handle.\n\n All connections to parents of the copy will be cut.\n\n :param dict memo: memo for deepcopy\n :param Parameterized which: parameterized object which started the copy process [default: self]\n "
] |
Please provide a description of the function:def consolidate_dependencies(needs_ipython, child_program,
requirement_files, manual_dependencies):
# We get the logger here because it's not defined at module level
logger = logging.getLogger('fades')
if needs_ipython:
logger.debug("Adding ipython dependency because --ipython was detected")
ipython_dep = parsing.parse_manual(['ipython'])
else:
ipython_dep = {}
if child_program:
srcfile_deps = parsing.parse_srcfile(child_program)
logger.debug("Dependencies from source file: %s", srcfile_deps)
docstring_deps = parsing.parse_docstring(child_program)
logger.debug("Dependencies from docstrings: %s", docstring_deps)
else:
srcfile_deps = {}
docstring_deps = {}
all_dependencies = [ipython_dep, srcfile_deps, docstring_deps]
if requirement_files is not None:
for rf_path in requirement_files:
rf_deps = parsing.parse_reqfile(rf_path)
logger.debug('Dependencies from requirements file %r: %s', rf_path, rf_deps)
all_dependencies.append(rf_deps)
manual_deps = parsing.parse_manual(manual_dependencies)
logger.debug("Dependencies from parameters: %s", manual_deps)
all_dependencies.append(manual_deps)
# Merge dependencies
indicated_deps = {}
for dep in all_dependencies:
for repo, info in dep.items():
indicated_deps.setdefault(repo, set()).update(info)
return indicated_deps | [
"Parse files, get deps and merge them. Deps read later overwrite those read earlier."
] |
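
For reference, the source-file dependencies parsed here come from inline comment markers in the child script. A hedged example of such a script (the "# fades" marker is the documented convention; the version-pin and alternative-package-name forms shown should be checked against the fades docs):

    # example_script.py
    import requests   # fades >= 2.5
    import bs4        # fades beautifulsoup4

    print(requests.get('https://example.com').status_code)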
Please provide a description of the function:def decide_child_program(args_executable, args_child_program):
# We get the logger here because it's not defined at module level
logger = logging.getLogger('fades')
if args_executable:
# if --exec given, check that it's just the executable name,
# not absolute or relative paths
if os.path.sep in args_child_program:
logger.error(
"The parameter to --exec must be a file name (to be found "
"inside venv's bin directory), not a file path: %r",
args_child_program)
raise FadesError("File path given to --exec parameter")
# indicated --execute, local and not analyzable for dependencies
analyzable_child_program = None
child_program = args_child_program
elif args_child_program is not None:
# normal case, the child program is to be analyzed (being it local or remote)
if args_child_program.startswith(("http://", "https://")):
args_child_program = helpers.download_remote_script(args_child_program)
else:
if not os.access(args_child_program, os.R_OK):
logger.error("'%s' not found. If you want to run an executable "
"file from a library installed in the virtualenv "
"check the `--exec` option in the help.",
args_child_program)
raise FadesError("child program not found.")
analyzable_child_program = args_child_program
child_program = args_child_program
else:
# not indicated executable, not child program, "interpreter" mode
analyzable_child_program = None
child_program = None
return analyzable_child_program, child_program | [
"Decide which the child program really is (if any)."
] |
Please provide a description of the function:def detect_inside_virtualenv(prefix, real_prefix, base_prefix):
if real_prefix is not None:
return True
if base_prefix is None:
return False
# if prefix is different than base_prefix, it's a venv
return prefix != base_prefix | [
"Tell if fades is running inside a virtualenv.\n\n The params 'real_prefix' and 'base_prefix' may be None.\n\n This is copied from pip code (slightly modified), see\n\n https://github.com/pypa/pip/blob/281eb61b09d87765d7c2b92f6982b3fe76ccb0af/\n pip/locations.py#L39\n "
] |
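
This mirrors the call made later in go(); a quick way to reproduce the check interactively:

    import sys

    inside = detect_inside_virtualenv(sys.prefix,
                                      getattr(sys, 'real_prefix', None),
                                      getattr(sys, 'base_prefix', None))
    print(inside)   # True when running from a virtualenv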
Please provide a description of the function:def _get_normalized_args(parser):
env = os.environ
if '_' in env and env['_'] != sys.argv[0] and len(sys.argv) >= 1 and " " in sys.argv[1]:
return parser.parse_args(shlex.split(sys.argv[1]) + sys.argv[2:])
else:
return parser.parse_args() | [
"Return the parsed command line arguments.\n\n Support the case when executed from a shebang, where all the\n parameters come in sys.argv[1] in a single string separated\n by spaces (in this case, the third parameter is what is being\n executed)\n "
] |
Please provide a description of the function:def go():
parser = argparse.ArgumentParser(prog='PROG', epilog=help_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-V', '--version', action='store_true',
help="show version and info about the system, and exit")
parser.add_argument('-v', '--verbose', action='store_true',
help="send all internal debugging lines to stderr, which may be very "
"useful to debug any problem that may arise.")
parser.add_argument('-q', '--quiet', action='store_true',
help="don't show anything (unless it has a real problem), so the "
"original script stderr is not polluted at all.")
parser.add_argument('-d', '--dependency', action='append',
help="specify dependencies through command line (this option can be "
"used multiple times)")
parser.add_argument('-r', '--requirement', action='append',
help="indicate files to read dependencies from (this option can be "
"used multiple times)")
parser.add_argument('-p', '--python', action='store',
help=("Specify the Python interpreter to use.\n"
" Default is: %s") % (sys.executable,))
parser.add_argument('-x', '--exec', dest='executable', action='store_true',
help=("Indicate that the child_program should be looked up in the "
"virtualenv."))
parser.add_argument('-i', '--ipython', action='store_true', help="use IPython shell.")
parser.add_argument('--system-site-packages', action='store_true', default=False,
help=("Give the virtual environment access to the "
"system site-packages dir."))
parser.add_argument('--virtualenv-options', action='append', default=[],
help=("Extra options to be supplied to virtualenv. (this option can be "
"used multiple times)"))
parser.add_argument('--check-updates', action='store_true',
help=("check for packages updates"))
parser.add_argument('--no-precheck-availability', action='store_true',
help=("Don't check if the packages exists in PyPI before actually try "
"to install them."))
parser.add_argument('--pip-options', action='append', default=[],
help=("Extra options to be supplied to pip. (this option can be "
"used multiple times)"))
parser.add_argument('--python-options', action='append', default=[],
help=("Extra options to be supplied to python. (this option can be "
"used multiple times)"))
parser.add_argument('--rm', dest='remove', metavar='UUID',
help=("Remove a virtualenv by UUID. See --get-venv-dir option to "
"easily find out the UUID."))
parser.add_argument('--clean-unused-venvs', action='store',
help=("This option remove venvs that haven't been used for more than "
"CLEAN_UNUSED_VENVS days. Appart from that, will compact usage "
"stats file.\n"
"When this option is present, the cleaning takes place at the "
"beginning of the execution."))
parser.add_argument('--get-venv-dir', action='store_true',
help=("Show the virtualenv base directory (which includes the "
"virtualenv UUID) and quit."))
parser.add_argument('child_program', nargs='?', default=None)
parser.add_argument('child_options', nargs=argparse.REMAINDER)
cli_args = _get_normalized_args(parser)
# update args from config file (if needed).
args = file_options.options_from_file(cli_args)
# validate input, parameters, and support some special options
if args.version:
print("Running 'fades' version", fades.__version__)
print(" Python:", sys.version_info)
print(" System:", platform.platform())
return 0
# set up logger and dump basic version info
logger = fades_logger.set_up(args.verbose, args.quiet)
logger.debug("Running Python %s on %r", sys.version_info, platform.platform())
logger.debug("Starting fades v. %s", fades.__version__)
logger.debug("Arguments: %s", args)
# verify that the module is NOT being used from a virtualenv
if detect_inside_virtualenv(sys.prefix, getattr(sys, 'real_prefix', None),
getattr(sys, 'base_prefix', None)):
logger.error(
"fades is running from inside a virtualenv (%r), which is not supported", sys.prefix)
raise FadesError("Cannot run from a virtualenv")
if args.verbose and args.quiet:
logger.warning("Overriding 'quiet' option ('verbose' also requested)")
# start the virtualenvs manager
venvscache = cache.VEnvsCache(os.path.join(helpers.get_basedir(), 'venvs.idx'))
# start usage manager
usage_manager = envbuilder.UsageManager(os.path.join(helpers.get_basedir(), 'usage_stats'),
venvscache)
if args.clean_unused_venvs:
try:
max_days_to_keep = int(args.clean_unused_venvs)
except ValueError:
logger.error("clean_unused_venvs must be an integer.")
raise FadesError('clean_unused_venvs not an integer')
usage_manager.clean_unused_venvs(max_days_to_keep)
return 0
uuid = args.remove
if uuid:
venv_data = venvscache.get_venv(uuid=uuid)
if venv_data:
# remove this venv from the cache
env_path = venv_data.get('env_path')
if env_path:
envbuilder.destroy_venv(env_path, venvscache)
else:
logger.warning("Invalid 'env_path' found in virtualenv metadata: %r. "
"Not removing virtualenv.", env_path)
else:
logger.warning('No virtualenv found with uuid: %s.', uuid)
return 0
# decide which the child program really is
analyzable_child_program, child_program = decide_child_program(
args.executable, args.child_program)
# Group and merge dependencies
indicated_deps = consolidate_dependencies(args.ipython,
analyzable_child_program,
args.requirement,
args.dependency)
# Check for packages updates
if args.check_updates:
helpers.check_pypi_updates(indicated_deps)
# get the interpreter version requested for the child_program
interpreter, is_current = helpers.get_interpreter_version(args.python)
# options
pip_options = args.pip_options # pip_options mustn't be stored.
python_options = args.python_options
options = {}
options['pyvenv_options'] = []
options['virtualenv_options'] = args.virtualenv_options
if args.system_site_packages:
options['virtualenv_options'].append("--system-site-packages")
options['pyvenv_options'] = ["--system-site-packages"]
create_venv = False
venv_data = venvscache.get_venv(indicated_deps, interpreter, uuid, options)
if venv_data:
env_path = venv_data['env_path']
# A venv was found in the cache; check if it's valid or re-generate it.
if not os.path.exists(env_path):
logger.warning("Missing directory (the virtualenv will be re-created): %r", env_path)
venvscache.remove(env_path)
create_venv = True
else:
create_venv = True
if create_venv:
# Check if the requested packages exist in PyPI.
if not args.no_precheck_availability and indicated_deps.get('pypi'):
logger.info("Checking the availabilty of dependencies in PyPI. "
"You can use '--no-precheck-availability' to avoid it.")
if not helpers.check_pypi_exists(indicated_deps):
logger.error("An indicated dependency doesn't exist. Exiting")
raise FadesError("Required dependency does not exist")
# Create a new venv
venv_data, installed = envbuilder.create_venv(indicated_deps, args.python, is_current,
options, pip_options)
# store this new venv in the cache
venvscache.store(installed, venv_data, interpreter, options)
if args.get_venv_dir:
# all that was requested is the virtualenv's path, so show it and quit (don't run anything)
print(venv_data['env_path'])
return 0
# run forest run!!
python_exe = 'ipython' if args.ipython else 'python'
python_exe = os.path.join(venv_data['env_bin_path'], python_exe)
# add the virtualenv /bin path to the child PATH.
environ_path = venv_data['env_bin_path']
if 'PATH' in os.environ:
environ_path += os.pathsep + os.environ['PATH']
os.environ['PATH'] = environ_path
# store usage information
usage_manager.store_usage_stat(venv_data, venvscache)
if child_program is None:
interactive = True
logger.debug(
"Calling the interactive Python interpreter with arguments %r", python_options)
cmd = [python_exe] + python_options
p = subprocess.Popen(cmd)
else:
interactive = False
if args.executable:
cmd = [os.path.join(venv_data['env_bin_path'], child_program)]
logger.debug("Calling child program %r with options %s",
child_program, args.child_options)
else:
cmd = [python_exe] + python_options + [child_program]
logger.debug(
"Calling Python interpreter with arguments %s to execute the child program"
" %r with options %s", python_options, child_program, args.child_options)
try:
p = subprocess.Popen(cmd + args.child_options)
except FileNotFoundError:
logger.error("Command not found: %s", child_program)
raise FadesError("Command not found")
def _signal_handler(signum, _):
if interactive and signum == signal.SIGINT:
logger.debug("Swallowing signal %s", signum)
else:
logger.debug("Redirecting signal %s to child", signum)
os.kill(p.pid, signum)
# redirect the useful signals
for s in REDIRECTED_SIGNALS:
signal.signal(s, _signal_handler)
# wait for the child to finish, then end
rc = p.wait()
if rc:
logger.debug("Child process not finished correctly: returncode=%d", rc)
return rc | [
"Make the magic happen.",
"Handle signals received by parent process, send them to child.\n\n The only exception is CTRL-C, that is generated *from* the interactive\n interpreter (it's a keyboard combination!), so we swallow it for the\n interpreter to not see it twice.\n "
] |
Please provide a description of the function:def set_up(verbose, quiet):
logger = logging.getLogger('fades')
logger.setLevel(logging.DEBUG)
# select logging level according to user desire; also use a simpler
# formatting for non-verbose logging
if verbose:
log_level = logging.DEBUG
log_format = FMT_DETAILED
elif quiet:
log_level = logging.WARNING
log_format = FMT_SIMPLE
else:
log_level = logging.INFO
log_format = FMT_SIMPLE
# all to the stdout
handler = SalutingStreamHandler(logger)
handler.setLevel(log_level)
logger.addHandler(handler)
formatter = logging.Formatter(log_format)
handler.setFormatter(formatter)
# and to the syslog
for syslog_path in ('/dev/log', '/var/run/syslog'):
if not os.path.exists(syslog_path):
continue
try:
handler = logging.handlers.SysLogHandler(address=syslog_path)
except Exception:
# silently ignore that the user doesn't have a syslog active; can
# see all the info with "-v" anyway
pass
else:
logger.addHandler(handler)
formatter = logging.Formatter(FMT_SYSLOG)
handler.setFormatter(formatter)
break
return logger | [
"Set up the logging."
] |
Please provide a description of the function:def emit(self, record):
if not self._already_saluted:
self._already_saluted = True
self._logger.info(SALUTATION)
super().emit(record) | [
"Call father's emit, but salute first (just once)."
] |
Please provide a description of the function:def parse_fade_requirement(text):
text = text.strip()
if "::" in text:
repo_raw, requirement = text.split("::", 1)
try:
repo = {'pypi': REPO_PYPI, 'vcs': REPO_VCS}[repo_raw]
except KeyError:
logger.warning("Not understood fades repository: %r", repo_raw)
return
else:
if ":" in text and "/" in text:
repo = REPO_VCS
else:
repo = REPO_PYPI
requirement = text
if repo == REPO_VCS:
dependency = VCSDependency(requirement)
else:
dependency = list(parse_requirements(requirement))[0]
return repo, dependency | [
"Return a requirement and repo from the given text, already parsed and converted."
] |
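A minimal standalone sketch of the repo-detection rules above (it returns plain labels instead of the real dependency objects; the package names and URLs are illustrative):

def guess_repo(text):
    text = text.strip()
    if '::' in text:
        prefix, req = text.split('::', 1)
        return {'pypi': 'pypi', 'vcs': 'vcs'}.get(prefix), req
    # a ':' together with a '/' is treated as a VCS url, anything else as a PyPI requirement
    repo = 'vcs' if (':' in text and '/' in text) else 'pypi'
    return repo, text

print(guess_repo('requests>=2.20'))                                    # ('pypi', 'requests>=2.20')
print(guess_repo('vcs::git+https://github.com/example/project.git'))   # ('vcs', 'git+https://github.com/example/project.git')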
Please provide a description of the function:def _parse_content(fh):
content = iter(fh)
deps = {}
for line in content:
# quickly discard most of the lines
if 'fades' not in line:
continue
# discard other strings with 'fades' that aren't in a comment
if '#' not in line:
continue
# ensure that it's a properly commented line and nothing else
line = line.strip()
index_of_last_fades = line.rfind('fades')
index_of_first_hash = line.index('#')
# discard when fades does not appear after #
if index_of_first_hash > index_of_last_fades:
continue
import_part, fades_part = line.rsplit("#", 1)
# discard other comments in the same line that aren't for fades
if "fades" not in fades_part:
import_part, fades_part = import_part.rsplit("#", 1)
fades_part = fades_part.strip()
if not fades_part.startswith("fades"):
continue
if not import_part:
# the fades comment was done at the beginning of the line,
# which means that the import info is in the next one
import_part = next(content).strip()
if import_part.startswith('#'):
continue
# get module
import_tokens = import_part.split()
if import_tokens[0] == 'import':
module_path = import_tokens[1]
elif import_tokens[0] == 'from' and import_tokens[2] == 'import':
module_path = import_tokens[1]
else:
logger.debug("Not understood import info: %s", import_tokens)
continue
module = module_path.split(".")[0]
# If fades knows the real name of the pkg, replace it!
if module in PKG_NAMES_DB:
module = PKG_NAMES_DB[module]
# To match the "safe" name that pkg_resources creates:
module = module.replace('_', '-')
# get the fades info after 'fades' mark, if any
if len(fades_part) == 5 or fades_part[5:].strip()[0] in "<>=!":
# just the 'fades' mark, and maybe a version specification, the requirement is what
# was imported (maybe with that version comparison)
requirement = module + fades_part[5:]
elif fades_part[5] != " ":
# starts with fades but it's part of a longer weird word
logger.warning("Not understood fades info: %r", fades_part)
continue
else:
# more complex stuff, to be parsed as a normal requirement
requirement = fades_part[5:]
# parse and convert the requirement
parsed_req = parse_fade_requirement(requirement)
if parsed_req is None:
continue
repo, dependency = parsed_req
deps.setdefault(repo, []).append(dependency)
return deps | [
"Parse the content of a script to find marked dependencies."
] |
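For reference, a sketch of the comment formats this parser understands (the package names are made up and the expected result is only an approximation):

EXAMPLE_SOURCE = '''
import requests        # fades>=2.20
from helpers import x  # fades helper-pkg<3
# fades some_lib
import some_lib
'''
# Roughly: {'pypi': ['requests>=2.20', 'helper-pkg<3', 'some_lib']}
print(EXAMPLE_SOURCE)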
Please provide a description of the function:def _parse_docstring(fh):
find_fades = re.compile(r'\b(fades)\b:').search
for line in fh:
if line.startswith("'"):
quote = "'"
break
if line.startswith('"'):
quote = '"'
break
else:
return {}
if line[1] == quote:
# docstring starts with triple quotes
endquote = quote * 3
else:
endquote = quote
if endquote in line[len(endquote):]:
docstring_lines = [line[:line.index(endquote)]]
else:
docstring_lines = [line]
for line in fh:
if endquote in line:
docstring_lines.append(line[:line.index(endquote)])
break
docstring_lines.append(line)
docstring_lines = iter(docstring_lines)
for doc_line in docstring_lines:
if find_fades(doc_line):
break
else:
return {}
return _parse_requirement(list(docstring_lines)) | [
"Parse the docstrings of a script to find marked dependencies."
] |
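A hedged example of a module docstring this parser would pick up; everything after the 'fades:' marker is handed to _parse_requirement (the requirement lines are illustrative):

EXAMPLE_DOCSTRING = '"""Do something useful.\n\nfades:\n    requests>=2.20\n    vcs::git+https://github.com/example/project.git\n"""\n'
print(EXAMPLE_DOCSTRING)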
Please provide a description of the function:def _parse_requirement(iterable):
deps = {}
for line in iterable:
line = line.strip()
if not line or line[0] == '#':
continue
parsed_req = parse_fade_requirement(line)
if parsed_req is None:
continue
repo, dependency = parsed_req
deps.setdefault(repo, []).append(dependency)
return deps | [
"Actually parse the requirements, from file or manually specified."
] |
Please provide a description of the function:def _read_lines(filepath):
with open(filepath, 'rt', encoding='utf8') as fh:
for line in fh:
line = line.strip()
if line.startswith("-r"):
logger.debug("Reading deps from nested requirement file: %s", line)
try:
nested_filename = line.split()[1]
except IndexError:
logger.warning(
"Invalid format to indicate a nested requirements file: '%r'", line)
else:
nested_filepath = os.path.join(
os.path.dirname(filepath), nested_filename)
yield from _read_lines(nested_filepath)
else:
yield line | [
"Read a req file to a list to support nested requirement files."
] |
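A quick, self-contained way to exercise the nested '-r' handling above with throw-away files (the file names are invented for the demo):

import os
import tempfile

tmpdir = tempfile.mkdtemp()
with open(os.path.join(tmpdir, 'extra.txt'), 'wt', encoding='utf8') as fh:
    fh.write('flask\n')
with open(os.path.join(tmpdir, 'reqs.txt'), 'wt', encoding='utf8') as fh:
    fh.write('requests>=2.20\n-r extra.txt\n')
# list(_read_lines(os.path.join(tmpdir, 'reqs.txt'))) -> ['requests>=2.20', 'flask']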
Please provide a description of the function:def create_venv(requested_deps, interpreter, is_current, options, pip_options):
# create virtualenv
env = _FadesEnvBuilder()
env_path, env_bin_path, pip_installed = env.create_env(interpreter, is_current, options)
venv_data = {}
venv_data['env_path'] = env_path
venv_data['env_bin_path'] = env_bin_path
venv_data['pip_installed'] = pip_installed
# install deps
installed = {}
for repo in requested_deps.keys():
if repo in (REPO_PYPI, REPO_VCS):
mgr = PipManager(env_bin_path, pip_installed=pip_installed, options=pip_options)
else:
logger.warning("Install from %r not implemented", repo)
continue
installed[repo] = {}
repo_requested = requested_deps[repo]
logger.debug("Installing dependencies for repo %r: requested=%s", repo, repo_requested)
for dependency in repo_requested:
try:
mgr.install(dependency)
except Exception:
logger.debug("Installation Step failed, removing virtualenv")
destroy_venv(env_path)
raise FadesError('Dependency installation failed')
if repo == REPO_VCS:
# no need to request the installed version, as we'll always compare
# to the url itself
project = dependency.url
version = None
else:
# always store the installed dependency, as in the future we'll select the venv
# based on what is installed, not what the user requested (remember that the user may
# request >, >=, etc!)
project = dependency.project_name
version = mgr.get_version(project)
installed[repo][project] = version
logger.debug("Installed dependencies: %s", installed)
return venv_data, installed | [
"Create a new virtualvenv with the requirements of this script."
] |
Please provide a description of the function:def destroy_venv(env_path, venvscache=None):
# remove the venv itself in disk
logger.debug("Destroying virtualenv at: %s", env_path)
shutil.rmtree(env_path, ignore_errors=True)
# remove venv from cache
if venvscache is not None:
venvscache.remove(env_path) | [
"Destroy a venv."
] |
Please provide a description of the function:def create_with_virtualenv(self, interpreter, virtualenv_options):
args = ['virtualenv', '--python', interpreter, self.env_path]
args.extend(virtualenv_options)
if not self.pip_installed:
args.insert(3, '--no-pip')
try:
helpers.logged_exec(args)
self.env_bin_path = os.path.join(self.env_path, 'bin')
except FileNotFoundError as error:
logger.error('Virtualenv is not installed. It is needed to create a virtualenv with '
'a different python version than fades (got {})'.format(error))
raise FadesError('virtualenv not found')
except helpers.ExecutionError as error:
error.dump_to_log(logger)
raise FadesError('virtualenv could not be run')
except Exception as error:
logger.exception("Error creating virtualenv: %s", error)
raise FadesError('General error while running virtualenv') | [
"Create a virtualenv using the virtualenv lib."
] |
Please provide a description of the function:def create_env(self, interpreter, is_current, options):
if is_current:
# apply pyvenv options
pyvenv_options = options['pyvenv_options']
if "--system-site-packages" in pyvenv_options:
self.system_site_packages = True
logger.debug("Creating virtualenv with pyvenv. options=%s", pyvenv_options)
self.create(self.env_path)
else:
virtualenv_options = options['virtualenv_options']
logger.debug("Creating virtualenv with virtualenv")
self.create_with_virtualenv(interpreter, virtualenv_options)
logger.debug("env_bin_path: %s", self.env_bin_path)
# Re-check if pip was installed (supporting both the binary and .exe for Windows)
pip_bin = os.path.join(self.env_bin_path, "pip")
pip_exe = os.path.join(self.env_bin_path, "pip.exe")
if not (os.path.exists(pip_bin) or os.path.exists(pip_exe)):
logger.debug("pip isn't installed in the venv, setting pip_installed=False")
self.pip_installed = False
return self.env_path, self.env_bin_path, self.pip_installed | [
"Create the virtualenv and return its info."
] |
Please provide a description of the function:def store_usage_stat(self, venv_data, cache):
with open(self.stat_file_path, 'at') as f:
self._write_venv_usage(f, venv_data) | [
"Log an usage record for venv_data."
] |
Please provide a description of the function:def clean_unused_venvs(self, max_days_to_keep):
with filelock(self.stat_file_lock):
now = datetime.utcnow()
venvs_dict = self._get_compacted_dict_usage_from_file()
for venv_uuid, usage_date in venvs_dict.copy().items():
usage_date = self._str_to_datetime(usage_date)
if (now - usage_date).days > max_days_to_keep:
# remove venv from usage dict
del venvs_dict[venv_uuid]
venv_meta = self.venvscache.get_venv(uuid=venv_uuid)
if venv_meta is None:
# if meta isn't found, it means that something failed previously and
# usage_file wasn't updated.
continue
env_path = venv_meta['env_path']
logger.info("Destroying virtualenv at: %s", env_path) # #256
destroy_venv(env_path, self.venvscache)
self._write_compacted_dict_usage_to_file(venvs_dict) | [
"Compact usage stats and remove venvs.\n\n This method loads the complete file usage in memory, for every venv compact all records in\n one (the lastest), updates this info for every env deleted and, finally, write the entire\n file to disk.\n\n If something failed during this steps, usage file remains unchanged and can contain some\n data about some deleted env. This is not a problem, the next time this function it's\n called, this records will be deleted.\n "
] |
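The core of the cleaning decision is the age check; a minimal sketch (datetime objects are used directly here, while the real code first converts the stored string with _str_to_datetime):

from datetime import datetime

def too_old(usage_date, max_days_to_keep, now=None):
    # mirrors the (now - usage_date).days comparison above
    now = now or datetime.utcnow()
    return (now - usage_date).days > max_days_to_keep

print(too_old(datetime(2020, 1, 1), 30))  # True: well past 30 days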
Please provide a description of the function:def logged_exec(cmd):
logger = logging.getLogger('fades.exec')
logger.debug("Executing external command: %r", cmd)
p = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
stdout = []
for line in p.stdout:
line = line[:-1]
stdout.append(line)
logger.debug(STDOUT_LOG_PREFIX + line)
retcode = p.wait()
if retcode:
raise ExecutionError(retcode, cmd, stdout)
return stdout | [
"Execute a command, redirecting the output to the log."
] |
Please provide a description of the function:def _get_specific_dir(dir_type):
if SNAP_BASEDIR_NAME in os.environ:
logger.debug("Getting base dir information from SNAP_BASEDIR_NAME env var.")
direct = os.path.join(os.environ[SNAP_BASEDIR_NAME], dir_type)
else:
try:
basedirectory = _get_basedirectory()
except ImportError:
logger.debug("Using last resort base dir: ~/.fades")
from os.path import expanduser
direct = os.path.join(expanduser("~"), ".fades")
else:
xdg_attrib = 'xdg_{}_home'.format(dir_type)
base = getattr(basedirectory, xdg_attrib)
direct = os.path.join(base, 'fades')
if not os.path.exists(direct):
os.makedirs(direct)
return direct | [
"Get a specific directory, using some XDG base, with sensible default."
] |
Please provide a description of the function:def _get_interpreter_info(interpreter=None):
if interpreter is None:
# If interpreter is None, return the current interpreter's data by default.
major, minor = sys.version_info[:2]
executable = sys.executable
else:
args = [interpreter, '-c', SHOW_VERSION_CMD]
try:
requested_interpreter_info = logged_exec(args)
except Exception as error:
logger.error("Error getting requested interpreter version: %s", error)
raise FadesError("Could not get interpreter version")
requested_interpreter_info = json.loads(requested_interpreter_info[0])
executable = requested_interpreter_info['path']
major = requested_interpreter_info['major']
minor = requested_interpreter_info['minor']
if executable[-1].isdigit():
executable = executable.split(".")[0][:-1]
interpreter = "{}{}.{}".format(executable, major, minor)
return interpreter | [
"Return the interpreter's full path using pythonX.Y format."
] |
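SHOW_VERSION_CMD is defined elsewhere in fades; a plausible shape for it (an assumption for illustration only) is a one-liner that makes the child interpreter print its own info as JSON:

SHOW_VERSION_CMD_SKETCH = (
    "import sys, json; "
    "print(json.dumps({'path': sys.executable, "
    "'major': sys.version_info.major, 'minor': sys.version_info.minor}))"
)
# e.g. logged_exec(['python3', '-c', SHOW_VERSION_CMD_SKETCH])
# -> ['{"path": "/usr/bin/python3", "major": 3, "minor": 9}']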
Please provide a description of the function:def get_interpreter_version(requested_interpreter):
logger.debug('Getting interpreter version for: %s', requested_interpreter)
current_interpreter = _get_interpreter_info()
logger.debug('Current interpreter is %s', current_interpreter)
if requested_interpreter is None:
return (current_interpreter, True)
else:
requested_interpreter = _get_interpreter_info(requested_interpreter)
is_current = requested_interpreter == current_interpreter
logger.debug('Interpreter=%s. It is the same as fades?=%s',
requested_interpreter, is_current)
return (requested_interpreter, is_current) | [
"Return a 'sanitized' interpreter and indicates if it is the current one."
] |
Please provide a description of the function:def check_pypi_updates(dependencies):
dependencies_up_to_date = []
for dependency in dependencies.get('pypi', []):
# get latest version from PyPI api
try:
latest_version = get_latest_version_number(dependency.project_name)
except Exception as error:
logger.warning("--check-updates command will be aborted. Error: %s", error)
return dependencies
# get required version
required_version = None
if dependency.specs:
_, required_version = dependency.specs[0]
if required_version:
dependencies_up_to_date.append(dependency)
if latest_version > required_version:
logger.info("There is a new version of %s: %s",
dependency.project_name, latest_version)
elif latest_version < required_version:
logger.warning("The requested version for %s is greater "
"than latest found in PyPI: %s",
dependency.project_name, latest_version)
else:
logger.info("The requested version for %s is the latest one in PyPI: %s",
dependency.project_name, latest_version)
else:
project_name_plus = "{}=={}".format(dependency.project_name, latest_version)
dependencies_up_to_date.append(pkg_resources.Requirement.parse(project_name_plus))
logger.info("The latest version of %r is %s and will use it.",
dependency.project_name, latest_version)
dependencies["pypi"] = dependencies_up_to_date
return dependencies | [
"Return a list of dependencies to upgrade."
] |
Please provide a description of the function:def _pypi_head_package(dependency):
if dependency.specs:
_, version = dependency.specs[0]
url = BASE_PYPI_URL_WITH_VERSION.format(name=dependency.project_name, version=version)
else:
url = BASE_PYPI_URL.format(name=dependency.project_name)
logger.debug("Doing HEAD requests against %s", url)
req = request.Request(url, method='HEAD')
try:
response = request.urlopen(req)
except HTTPError as http_error:
if http_error.code == HTTP_STATUS_NOT_FOUND:
return False
else:
raise
if response.status == HTTP_STATUS_OK:
logger.debug("%r exists in PyPI.", dependency)
return True
else:
# Maybe we are getting something like a redirect. In this case we only
# warn the user and try to install the dependency anyway.
# In the worst-case scenario fades will fail to install it.
logger.warning("Got a (unexpected) HTTP_STATUS=%r and reason=%r checking if %r exists",
response.status, response.reason, dependency)
return True | [
"Hit pypi with a http HEAD to check if pkg_name exists."
] |
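The URL templates used above are defined elsewhere; plausible shapes (assumptions for illustration, pointing at PyPI's JSON API):

BASE_PYPI_URL_SKETCH = 'https://pypi.org/pypi/{name}/json'
BASE_PYPI_URL_WITH_VERSION_SKETCH = 'https://pypi.org/pypi/{name}/{version}/json'
print(BASE_PYPI_URL_WITH_VERSION_SKETCH.format(name='requests', version='2.25.1'))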
Please provide a description of the function:def check_pypi_exists(dependencies):
for dependency in dependencies.get('pypi', []):
logger.debug("Checking if %r exists in PyPI", dependency)
try:
exists = _pypi_head_package(dependency)
except Exception as error:
logger.error("Error checking %s in PyPI: %r", dependency, error)
raise FadesError("Could not check if dependency exists in PyPI")
else:
if not exists:
logger.error("%s doesn't exists in PyPI.", dependency)
return False
return True | [
"Check if the indicated dependencies actually exists in pypi."
] |
Please provide a description of the function:def download_remote_script(url):
temp_fh = tempfile.NamedTemporaryFile('wt', encoding='utf8', suffix=".py", delete=False)
downloader = _ScriptDownloader(url)
logger.info(
"Downloading remote script from %r using (%r downloader) to %r",
url, downloader.name, temp_fh.name)
content = downloader.get()
temp_fh.write(content)
temp_fh.close()
return temp_fh.name | [
"Download the content of a remote script to a local temp file."
] |
Please provide a description of the function:def dump_to_log(self, logger):
logger.error("Execution ended in %s for cmd %s", self._retcode, self._cmd)
for line in self._collected_stdout:
logger.error(STDOUT_LOG_PREFIX + line) | [
"Send the cmd info and collected stdout to logger."
] |
Please provide a description of the function:def _decide(self):
netloc = parse.urlparse(self.url).netloc
name = self.NETLOCS.get(netloc, 'raw')
return name | [
"Find out which method should be applied to download that URL."
] |
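NETLOCS is defined on the class elsewhere; an illustrative mapping (its exact contents are an assumption) showing how the URL's netloc selects a downloader name, falling back to 'raw':

from urllib import parse

NETLOCS_SKETCH = {
    'linkode.org': 'linkode',
    'pastebin.com': 'pastebin',
    'gist.github.com': 'gist',
}
url = 'https://gist.github.com/someuser/abc123'
print(NETLOCS_SKETCH.get(parse.urlparse(url).netloc, 'raw'))  # gist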
Please provide a description of the function:def get(self):
method_name = "_download_" + self.name
method = getattr(self, method_name)
return method() | [
"Get the script content from the URL using the decided downloader."
] |
Please provide a description of the function:def _download_raw(self, url=None):
if url is None:
url = self.url
req = request.Request(url, headers=self.HEADERS_PLAIN)
return request.urlopen(req).read().decode("utf8") | [
"Download content from URL directly."
] |
Please provide a description of the function:def _download_linkode(self):
# build the API url
linkode_id = self.url.split("/")[-1]
if linkode_id.startswith("#"):
linkode_id = linkode_id[1:]
url = "https://linkode.org/api/1/linkodes/" + linkode_id
req = request.Request(url, headers=self.HEADERS_JSON)
resp = request.urlopen(req)
raw = resp.read()
data = json.loads(raw.decode("utf8"))
content = data['content']
return content | [
"Download content from Linkode pastebin."
] |
Please provide a description of the function:def _download_pastebin(self):
paste_id = self.url.split("/")[-1]
url = "https://pastebin.com/raw/" + paste_id
return self._download_raw(url) | [
"Download content from Pastebin itself."
] |
Please provide a description of the function:def _download_gist(self):
parts = parse.urlparse(self.url)
url = "https://gist.github.com" + parts.path + "/raw"
return self._download_raw(url) | [
"Download content from github's pastebin."
] |
Please provide a description of the function:def get_version():
with open('fades/_version.py') as fh:
m = re.search(r"\(([^']*)\)", fh.read())
if m is None:
raise ValueError("Unrecognized version in 'fades/_version.py'")
return m.groups()[0].replace(', ', '.') | [
"Retrieves package version from the file."
] |
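What the regex above extracts, assuming fades/_version.py contains a version tuple roughly like the sample below (the exact file content is an assumption):

import re

sample = '__version__ = (9, 0, 1)'
m = re.search(r"\(([^']*)\)", sample)
print(m.groups()[0].replace(', ', '.'))  # 9.0.1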
Please provide a description of the function:def initialize_options(self):
install.initialize_options(self)
# leave the proper script according to the platform
script = SCRIPT_WIN if sys.platform == "win32" else SCRIPT_REST
self.distribution.scripts = [script] | [
"Run parent initialization and then fix the scripts var."
] |
Please provide a description of the function:def run(self):
install.run(self)
# man directory
if self._custom_man_dir is not None:
if not os.path.exists(self._custom_man_dir):
os.makedirs(self._custom_man_dir)
shutil.copy("man/fades.1", self._custom_man_dir) | [
"Run parent install, and then save the man file."
] |
Please provide a description of the function:def finalize_options(self):
install.finalize_options(self)
if self.prefix is None:
# no place for man page (like in a 'snap')
man_dir = None
else:
man_dir = os.path.join(self.prefix, "share", "man", "man1")
# if we have 'root', put the building path also under it (used normally
# by pbuilder)
if self.root is not None:
man_dir = os.path.join(self.root, man_dir[1:])
self._custom_man_dir = man_dir | [
"Alter the installation path."
] |
Please provide a description of the function:def options_from_file(args):
logger.debug("updating options from config files")
updated_from_file = []
for config_file in CONFIG_FILES:
logger.debug("updating from: %s", config_file)
parser = ConfigParser()
parser.read(config_file)
try:
items = parser.items('fades')
except NoSectionError:
continue
for config_key, config_value in items:
if config_value in ['true', 'false']:
config_value = config_value == 'true'
if config_key in MERGEABLE_CONFIGS:
current_value = getattr(args, config_key, [])
if current_value is None:
current_value = []
current_value.append(config_value)
setattr(args, config_key, current_value)
if not getattr(args, config_key, False) or config_key in updated_from_file:
# By default all 'store-true' arguments are False. So we only
# override them if they are False. If they are True means that the
# user is setting those on the CLI.
setattr(args, config_key, config_value)
updated_from_file.append(config_key)
logger.debug("updating %s to %s from file settings", config_key, config_value)
return args | [
"Get a argparse.Namespace and return it updated with options from config files.\n\n Config files will be parsed with priority equal to his order in CONFIG_FILES.\n "
] |
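An illustrative config file that this parser would understand (the option names mirror the CLI flags shown earlier; the values are made up):

from configparser import ConfigParser

EXAMPLE_CONFIG = '''
[fades]
ipython = true
python = python3.9
dependency = requests
'''
parser = ConfigParser()
parser.read_string(EXAMPLE_CONFIG)
print(parser.items('fades'))
# 'true'/'false' strings become booleans; mergeable keys such as 'dependency' accumulate into a list.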
Please provide a description of the function:def _venv_match(self, installed, requirements):
if not requirements:
# special case for no requirements, where we can't actually
# check anything: the venv is only useful if nothing is installed in it either
return None if installed else []
satisfying_deps = []
for repo, req_deps in requirements.items():
useful_inst = set()
if repo not in installed:
# the venv doesn't even have the repo
return None
if repo == REPO_VCS:
inst_deps = {VCSDependency(url) for url in installed[repo].keys()}
else:
inst_deps = {Distribution(project_name=dep, version=ver)
for (dep, ver) in installed[repo].items()}
for req in req_deps:
for inst in inst_deps:
if inst in req:
useful_inst.add(inst)
break
else:
# nothing installed satisfied that requirement
return None
# assure *all* that is installed is useful for the requirements
if useful_inst == inst_deps:
satisfying_deps.extend(inst_deps)
else:
return None
# it made it through!
return satisfying_deps | [
"Return True if what is installed satisfies the requirements.\n\n This method has multiple exit-points, but only for False (because\n if *anything* is not satisified, the venv is no good). Only after\n all was checked, and it didn't exit, the venv is ok so return True.\n "
] |
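Rough shapes of the two arguments, with illustrative values ('installed' comes from the cached venv metadata, 'requirements' from the current run):

installed = {'pypi': {'requests': '2.25.1'}}
requirements_match = {'pypi': ['requests>=2.20']}  # the installed dep satisfies it -> deps returned
requirements_fail = {'pypi': ['requests>=3']}      # nothing satisfies it -> None
requirements_empty = {}                            # no requirements but something installed -> None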
Please provide a description of the function:def _match_by_uuid(self, current_venvs, uuid):
for venv_str in current_venvs:
venv = json.loads(venv_str)
env_path = venv.get('metadata', {}).get('env_path')
_, env_uuid = os.path.split(env_path)
if env_uuid == uuid:
return venv | [
"Select a venv matching exactly by uuid."
] |