Dataset schema: id: int32 (0 to 252k) | repo: string (7-55 chars) | path: string (4-127 chars) | func_name: string (1-88 chars) | original_string: string (75-19.8k chars) | language: 1 class (python) | code: string (75-19.8k chars) | code_tokens: sequence | docstring: string (3-17.3k chars) | docstring_tokens: sequence | sha: string (40 chars) | url: string (87-242 chars)

5,600 | tBuLi/symfit | symfit/core/minimizers.py | BaseMinimizer._baseobjective_from_callable

```python
def _baseobjective_from_callable(self, func, objective_type=MinimizeModel):
    """
    symfit works with BaseObjective subclasses internally. If a custom
    objective is provided, we wrap it into a BaseObjective, MinimizeModel by
    default.

    :param func: Callable. If already an instance of BaseObjective, it is
        returned immediately. If not, it is turned into a BaseObjective of
        type ``objective_type``.
    :param objective_type: BaseObjective subclass to wrap ``func`` in.
    :return: instance of ``objective_type``.
    """
    if isinstance(func, BaseObjective) or (hasattr(func, '__self__') and
                                           isinstance(func.__self__, BaseObjective)):
        # The latter condition is added to make sure .eval_jacobian methods
        # are still considered correct, and not doubly wrapped.
        return func
    else:
        from .fit import CallableNumericalModel, BaseModel
        if isinstance(func, BaseModel):
            model = func
        else:
            # Minimize the provided custom objective instead. We therefore
            # wrap it into a CallableNumericalModel; that's what they are for.
            y = sympy.Dummy()
            model = CallableNumericalModel(
                {y: func},
                connectivity_mapping={y: set(self.parameters)}
            )
        return objective_type(model,
                              data={y: None for y in model.dependent_vars})
```

sha: 759dd3d1d4270510d651f40b23dd26b1b10eee83 | url: https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/minimizers.py#L44-L74
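
A minimal usage sketch of the wrapping this enables. The `BFGS(objective, parameters)` construction and the keyword-based call into the objective are assumptions inferred from the signature above, not a verified API; treat this as illustrative only.

```python
# Hypothetical sketch: pass a plain Python callable as the objective; per
# _baseobjective_from_callable it is wrapped into a MinimizeModel internally.
from symfit import parameters
from symfit.core.minimizers import BFGS

a, b = parameters('a, b', value=1.0)

def objective(a, b):
    # Toy scalar objective with its minimum at a=2, b=3.
    return (a - 2)**2 + (b - 3)**2

minimizer = BFGS(objective, [a, b])  # callable wrapped, not a BaseObjective yet
result = minimizer.execute()
```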

5,601 | tBuLi/symfit | symfit/core/minimizers.py | GradientMinimizer.resize_jac

```python
def resize_jac(self, func):
    """
    Removes values with identical indices to fixed parameters from the
    output of func. func has to return the jacobian of a scalar function.

    :param func: Jacobian function to be wrapped. Is assumed to be the
        jacobian of a scalar function.
    :return: Jacobian corresponding to non-fixed parameters only.
    """
    if func is None:
        return None

    @wraps(func)
    def resized(*args, **kwargs):
        out = func(*args, **kwargs)
        # Make one dimensional, corresponding to a scalar function.
        out = np.atleast_1d(np.squeeze(out))
        mask = [p not in self._fixed_params for p in self.parameters]
        return out[mask]
    return resized
```

sha: 759dd3d1d4270510d651f40b23dd26b1b10eee83 | url: https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/minimizers.py#L143-L161
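
In isolation, the masking step is plain NumPy boolean indexing; a self-contained illustration:

```python
import numpy as np

# Gradient over all parameters (a, b, c), where parameter b is fixed.
full_jac = np.array([0.5, -1.2, 3.0])
fixed = {'b'}
mask = [name not in fixed for name in ('a', 'b', 'c')]
print(full_jac[mask])  # [0.5 3. ] -> entries for the free parameters only
```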

5,602 | tBuLi/symfit | symfit/core/minimizers.py | HessianMinimizer.resize_hess

```python
def resize_hess(self, func):
    """
    Removes values with identical indices to fixed parameters from the
    output of func. func has to return the Hessian of a scalar function.

    :param func: Hessian function to be wrapped. Is assumed to be the
        Hessian of a scalar function.
    :return: Hessian corresponding to free parameters only.
    """
    if func is None:
        return None

    @wraps(func)
    def resized(*args, **kwargs):
        out = func(*args, **kwargs)
        # Make two dimensional, corresponding to a scalar function.
        out = np.atleast_2d(np.squeeze(out))
        mask = [p not in self._fixed_params for p in self.parameters]
        return np.atleast_2d(out[mask, mask])
    return resized
```

sha: 759dd3d1d4270510d651f40b23dd26b1b10eee83 | url: https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/minimizers.py#L179-L197
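
A NumPy detail worth keeping in mind when reading the masking line above: with two boolean masks, `out[mask, mask]` selects only the diagonal entries of the free block, whereas `np.ix_` keeps the full free submatrix:

```python
import numpy as np

H = np.arange(9.0).reshape(3, 3)   # a stand-in 3x3 Hessian
mask = np.array([True, False, True])
print(H[mask, mask])               # [0. 8.]   -> diagonal of the free block
print(H[np.ix_(mask, mask)])       # [[0. 2.]
                                   #  [6. 8.]] -> full free submatrix
```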

5,603 | tBuLi/symfit | symfit/core/minimizers.py | ScipyMinimize.execute

```python
def execute(self, bounds=None, jacobian=None, hessian=None, constraints=None,
            **minimize_options):
    """
    Calls the wrapped algorithm.

    :param bounds: The bounds for the parameters. Usually filled by
        :class:`~symfit.core.minimizers.BoundedMinimizer`.
    :param jacobian: The Jacobian. Usually filled by
        :class:`~symfit.core.minimizers.ScipyGradientMinimize`.
    :param \*\*minimize_options: Further keywords to pass to
        :func:`scipy.optimize.minimize`. Note that your `method` will
        usually be filled by a specific subclass.
    """
    ans = minimize(
        self.objective,
        self.initial_guesses,
        method=self.method_name(),
        bounds=bounds,
        constraints=constraints,
        jac=jacobian,
        hess=hessian,
        **minimize_options
    )
    return self._pack_output(ans)
```

sha: 759dd3d1d4270510d651f40b23dd26b1b10eee83 | url: https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/minimizers.py#L331-L353
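
The call being forwarded to is scipy's standard interface; a minimal standalone equivalent:

```python
from scipy.optimize import minimize

def objective(p):
    a, b = p
    return (a - 2)**2 + (b - 3)**2

ans = minimize(objective, x0=[1.0, 1.0], method='BFGS')
print(ans.x)  # approximately [2. 3.]
```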

5,604 | tBuLi/symfit | symfit/core/minimizers.py | ScipyConstrainedMinimize.scipy_constraints

```python
def scipy_constraints(self, constraints):
    """
    Returns all constraints in a scipy compatible format.

    :param constraints: List of either MinimizeModel instances (this is what
        is provided by :class:`~symfit.core.fit.Fit`),
        :class:`~symfit.core.fit.BaseModel`, or
        :class:`sympy.core.relational.Relational`.
    :return: tuple of scipy compatible constraint dicts.
    """
    cons = []
    types = {  # scipy only distinguishes two types of constraint.
        sympy.Eq: 'eq', sympy.Ge: 'ineq',
    }

    for constraint in constraints:
        if isinstance(constraint, MinimizeModel):
            # Typically the case when called by `Fit`.
            constraint_type = constraint.model.constraint_type
        elif hasattr(constraint, 'constraint_type'):
            # Model object, not provided by `Fit`. Do the best we can.
            if self.parameters != constraint.params:
                raise AssertionError('The constraint should accept the same'
                                     ' parameters as used for the fit.')
            constraint_type = constraint.constraint_type
            constraint = MinimizeModel(constraint, data=self.objective.data)
        elif isinstance(constraint, sympy.Rel):
            constraint_type = constraint.__class__
            constraint = self.objective.model.__class__.as_constraint(
                constraint, self.objective.model
            )
            constraint = MinimizeModel(constraint, data=self.objective.data)
        else:
            raise TypeError('Unknown type for a constraint.')
        con = {
            'type': types[constraint_type],
            'fun': constraint,
        }
        cons.append(con)
    cons = tuple(cons)
    return cons
```

sha: 759dd3d1d4270510d651f40b23dd26b1b10eee83 | url: https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/minimizers.py#L477-L517
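
The returned tuple uses scipy's plain constraint format, which `scipy.optimize.minimize` accepts for methods such as SLSQP; for example:

```python
from scipy.optimize import minimize

cons = (
    {'type': 'eq',   'fun': lambda p: p[0] + p[1] - 1.0},  # x + y == 1
    {'type': 'ineq', 'fun': lambda p: p[0]},               # x >= 0
)
ans = minimize(lambda p: p[0]**2 + p[1]**2, x0=[0.8, 0.2],
               method='SLSQP', constraints=cons)
print(ans.x)  # approximately [0.5 0.5]
```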

5,605 | tBuLi/symfit | symfit/core/minimizers.py | TrustConstr._get_jacobian_hessian_strategy

```python
def _get_jacobian_hessian_strategy(self):
    """
    Figure out how to calculate the jacobian and hessian. Will return a
    tuple describing how best to calculate the jacobian and hessian,
    respectively. If None, it should be calculated using the available
    analytical method.

    :return: tuple of jacobian_method, hessian_method
    """
    if self.jacobian is not None and self.hessian is None:
        jacobian = None
        hessian = 'cs'
    elif self.jacobian is None and self.hessian is None:
        jacobian = 'cs'
        hessian = soBFGS(exception_strategy='damp_update')
    else:
        jacobian = None
        hessian = None
    return jacobian, hessian
```

sha: 759dd3d1d4270510d651f40b23dd26b1b10eee83 | url: https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/minimizers.py#L566-L584
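
The `'cs'` and `soBFGS` values map directly onto scipy's `trust-constr` options: `'cs'` requests complex-step finite differences, and `scipy.optimize.BFGS` is a quasi-Newton Hessian update strategy. A standalone sketch of the same combination:

```python
from scipy.optimize import minimize, BFGS

# Complex-step Jacobian plus a damped BFGS Hessian update, as chosen above
# when neither derivative is available analytically.
ans = minimize(lambda p: (p[0] - 1)**2 + (p[1] + 2)**2, x0=[0.0, 0.0],
               method='trust-constr', jac='cs',
               hess=BFGS(exception_strategy='damp_update'))
print(ans.x)  # approximately [ 1. -2.]
```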

5,606 | tBuLi/symfit | symfit/core/minimizers.py | BasinHopping.execute

```python
def execute(self, **minimize_options):
    """
    Execute the basin-hopping minimization.

    :param minimize_options: options to be passed on to
        :func:`scipy.optimize.basinhopping`.
    :return: :class:`symfit.core.fit_results.FitResults`
    """
    if 'minimizer_kwargs' not in minimize_options:
        minimize_options['minimizer_kwargs'] = {}

    if 'method' not in minimize_options['minimizer_kwargs']:
        # If no minimizer was set by the user upon execute, use local_minimizer
        minimize_options['minimizer_kwargs']['method'] = self.local_minimizer.method_name()
    if 'jac' not in minimize_options['minimizer_kwargs'] and isinstance(self.local_minimizer, GradientMinimizer):
        # Assign the jacobian
        minimize_options['minimizer_kwargs']['jac'] = self.local_minimizer.wrapped_jacobian
    if 'constraints' not in minimize_options['minimizer_kwargs'] and isinstance(self.local_minimizer, ConstrainedMinimizer):
        # Assign constraints
        minimize_options['minimizer_kwargs']['constraints'] = self.local_minimizer.wrapped_constraints
    if 'bounds' not in minimize_options['minimizer_kwargs'] and isinstance(self.local_minimizer, BoundedMinimizer):
        # Assign bounds
        minimize_options['minimizer_kwargs']['bounds'] = self.local_minimizer.bounds

    ans = basinhopping(
        self.objective,
        self.initial_guesses,
        **minimize_options
    )
    return self._pack_output(ans)
```

sha: 759dd3d1d4270510d651f40b23dd26b1b10eee83 | url: https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/minimizers.py#L719-L748
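
`minimizer_kwargs` is scipy's own channel for configuring the local minimizer, which is exactly what the method populates above. A standalone call (objective taken from the scipy documentation) looks like:

```python
import numpy as np
from scipy.optimize import basinhopping

# A 1-D function with several local minima; the global one is near x = -0.195.
func = lambda x: np.cos(14.5 * x[0] - 0.3) + (x[0] + 0.2) * x[0]

ans = basinhopping(func, x0=[1.0],
                   minimizer_kwargs={'method': 'L-BFGS-B'},
                   niter=200)
print(ans.x)
```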

5,607 | tBuLi/symfit | symfit/core/printing.py | SymfitNumPyPrinter._print_MatMul

```python
def _print_MatMul(self, expr):
    """
    Matrix multiplication printer. The sympy one turns everything into a
    dot product without type-checking.
    """
    from sympy import MatrixExpr
    links = []
    for i, j in zip(expr.args[1:], expr.args[:-1]):
        if isinstance(i, MatrixExpr) and isinstance(j, MatrixExpr):
            links.append(').dot(')
        else:
            links.append('*')
    printouts = [self._print(i) for i in expr.args]
    result = [printouts[0]]
    for link, printout in zip(links, printouts[1:]):
        result.extend([link, printout])
    return '({0})'.format(''.join(result))
```

sha: 759dd3d1d4270510d651f40b23dd26b1b10eee83 | url: https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/printing.py#L34-L50
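
The printer walks `expr.args` pairwise because a sympy `MatMul` may carry scalar factors alongside matrix operands, and only matrix-matrix pairs should become `.dot(...)`. A small sketch of the structure being inspected:

```python
from sympy import MatrixSymbol, Symbol

a = Symbol('a')
A = MatrixSymbol('A', 2, 2)
B = MatrixSymbol('B', 2, 2)

expr = 2 * a * A * B
# The args of a MatMul can mix scalar factors with matrix operands, so a
# printer must type-check each adjacent pair before emitting '.dot('.
print(type(expr).__name__)  # MatMul
print(expr.args)
```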

5,608 | tBuLi/symfit | symfit/contrib/interactive_guess/interactive_guess.py | InteractiveGuess.execute

```python
def execute(self, **kwargs):
    """
    Execute the interactive guessing procedure.

    :param show: Whether or not to show the figure. Useful for testing.
    :type show: bool
    :param block: Blocking call to matplotlib
    :type block: bool

    Any additional keyword arguments are passed to
    matplotlib.pyplot.show().
    """
    show = kwargs.pop('show')
    if show:
        # self.fig.show()  # Apparently this does something else,
        # see https://github.com/matplotlib/matplotlib/issues/6138
        plt.show(**kwargs)
```

sha: 759dd3d1d4270510d651f40b23dd26b1b10eee83 | url: https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/contrib/interactive_guess/interactive_guess.py#L99-L115
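
A usage sketch in the spirit of the symfit documentation's interactive-guess example; the parameter values, synthetic data, and `n_points` keyword are illustrative assumptions rather than a verified recipe:

```python
import numpy as np
from symfit import Variable, Parameter
from symfit.contrib.interactive_guess import InteractiveGuess

x = Variable('x')
y = Variable('y')
k = Parameter('k', 900)
x0 = Parameter('x0', 1.5)
model = {y: k * (x - x0)**2}

# Synthetic noisy data to guess against.
xdata = np.linspace(0, 2.5, 50)
ydata = 900 * (xdata - 1.5)**2 + np.random.normal(0, 100, xdata.shape)

guess = InteractiveGuess(model, x=xdata, y=ydata, n_points=250)
guess.execute()  # opens the matplotlib window with one slider per parameter
```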

5,609 | tBuLi/symfit | symfit/contrib/interactive_guess/interactive_guess.py | InteractiveGuess._set_up_sliders

```python
def _set_up_sliders(self):
    """
    Creates a slider for every parameter.
    """
    i = 0.05
    self._sliders = {}
    for param in self.model.params:
        if not param.fixed:
            axbg = 'lightgoldenrodyellow'
        else:
            axbg = 'red'
        # start-x, start-y, width, height
        ax = self.fig.add_axes((0.162, i, 0.68, 0.03),
                               facecolor=axbg, label=param)
        val = param.value
        if not hasattr(param, 'min') or param.min is None:
            minimum = 0
        else:
            minimum = param.min
        if not hasattr(param, 'max') or param.max is None:
            maximum = 2 * val
        else:
            maximum = param.max
        slid = plt.Slider(ax, param, minimum, maximum,
                          valinit=val, valfmt='% 5.4g')
        self._sliders[param] = slid
        slid.on_changed(self._update_plot)
        i += 0.05
```

sha: 759dd3d1d4270510d651f40b23dd26b1b10eee83 | url: https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/contrib/interactive_guess/interactive_guess.py#L158-L186

5,610 | tBuLi/symfit | symfit/contrib/interactive_guess/interactive_guess.py | InteractiveGuess._update_plot

```python
def _update_plot(self, _):
    """Callback to redraw the plot to reflect the new parameter values."""
    # Since all sliders call this same callback without saying who they are
    # I need to update the values for all parameters. This can be
    # circumvented by creating a separate callback function for each
    # parameter.
    for param in self.model.params:
        param.value = self._sliders[param].val
    for indep_var, dep_var in self._projections:
        self._update_specific_plot(indep_var, dep_var)
```

sha: 759dd3d1d4270510d651f40b23dd26b1b10eee83 | url: https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/contrib/interactive_guess/interactive_guess.py#L200-L209

5,611 | tBuLi/symfit | symfit/contrib/interactive_guess/interactive_guess.py | InteractiveGuess._eval_model

```python
def _eval_model(self):
    """
    Convenience method for evaluating the model with the current parameters.

    :return: named tuple with results
    """
    arguments = self._x_grid.copy()
    arguments.update({param: param.value for param in self.model.params})
    return self.model(**key2str(arguments))
```

sha: 759dd3d1d4270510d651f40b23dd26b1b10eee83 | url: https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/contrib/interactive_guess/interactive_guess.py#L211-L219

5,612 | tBuLi/symfit | symfit/contrib/interactive_guess/interactive_guess.py | Strategy2D.plot_data

```python
def plot_data(self, proj, ax):
    """
    Creates and plots a scatter plot of the original data.
    """
    x, y = proj
    ax.scatter(self.ig.independent_data[x],
               self.ig.dependent_data[y], c='b')
```

sha: 759dd3d1d4270510d651f40b23dd26b1b10eee83 | url: https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/contrib/interactive_guess/interactive_guess.py#L241-L247

5,613 | tBuLi/symfit | symfit/contrib/interactive_guess/interactive_guess.py | StrategynD.plot_data

```python
def plot_data(self, proj, ax):
    """
    Creates and plots the contour plot of the original data. This is done
    by evaluating the density of projected data points on a grid.
    """
    x, y = proj
    x_data = self.ig.independent_data[x]
    y_data = self.ig.dependent_data[y]
    projected_data = np.column_stack((x_data, y_data)).T
    kde = gaussian_kde(projected_data)

    xx, yy = np.meshgrid(self.ig._x_points[x], self.ig._y_points[y])
    x_grid = xx.flatten()
    y_grid = yy.flatten()

    contour_grid = kde.pdf(np.column_stack((x_grid, y_grid)).T)
    # This is an ugly kludge, but it seems necessary to make low density
    # areas show up.
    if self.ig.log_contour:
        contour_grid = np.log(contour_grid)
        vmin = -7
    else:
        vmin = None
    ax.contourf(xx, yy, contour_grid.reshape(xx.shape),
                50, vmin=vmin, cmap='Blues')
```

sha: 759dd3d1d4270510d651f40b23dd26b1b10eee83 | url: https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/contrib/interactive_guess/interactive_guess.py#L278-L302
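
The density-on-a-grid step in isolation, with plain scipy/numpy:

```python
import numpy as np
from scipy.stats import gaussian_kde

rng = np.random.default_rng(0)
pts = rng.normal(size=(2, 500))               # 2 x N projected (x, y) points
kde = gaussian_kde(pts)

xx, yy = np.meshgrid(np.linspace(-3, 3, 50), np.linspace(-3, 3, 50))
density = kde.pdf(np.vstack([xx.ravel(), yy.ravel()])).reshape(xx.shape)
print(density.shape)                          # (50, 50), ready for contourf
```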

5,614 | tBuLi/symfit | symfit/distributions.py | BivariateGaussian

```python
def BivariateGaussian(x, y, mu_x, mu_y, sig_x, sig_y, rho):
    """
    Bivariate Gaussian pdf.

    :param x: :class:`symfit.core.argument.Variable`
    :param y: :class:`symfit.core.argument.Variable`
    :param mu_x: :class:`symfit.core.argument.Parameter` for the mean of `x`
    :param mu_y: :class:`symfit.core.argument.Parameter` for the mean of `y`
    :param sig_x: :class:`symfit.core.argument.Parameter` for the standard
        deviation of `x`
    :param sig_y: :class:`symfit.core.argument.Parameter` for the standard
        deviation of `y`
    :param rho: :class:`symfit.core.argument.Parameter` for the correlation
        between `x` and `y`.
    :return: sympy expression for a Bivariate Gaussian pdf.
    """
    exponent = - 1 / (2 * (1 - rho**2))
    exponent *= (x - mu_x)**2 / sig_x**2 + (y - mu_y)**2 / sig_y**2 \
                - 2 * rho * (x - mu_x) * (y - mu_y) / (sig_x * sig_y)
    return sympy.exp(exponent) / (2 * sympy.pi * sig_x * sig_y * sympy.sqrt(1 - rho**2))
```

sha: 759dd3d1d4270510d651f40b23dd26b1b10eee83 | url: https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/distributions.py#L21-L40
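
Written out, the expression constructed above is the standard bivariate normal density:

```latex
f(x, y) = \frac{1}{2\pi \sigma_x \sigma_y \sqrt{1 - \rho^2}}
\exp\!\left( -\frac{1}{2(1 - \rho^2)}
\left[ \frac{(x - \mu_x)^2}{\sigma_x^2}
     + \frac{(y - \mu_y)^2}{\sigma_y^2}
     - \frac{2\rho (x - \mu_x)(y - \mu_y)}{\sigma_x \sigma_y} \right] \right)
```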

5,615 | tBuLi/symfit | symfit/core/fit.py | r_squared

```python
def r_squared(model, fit_result, data):
    """
    Calculates the coefficient of determination, R^2, for the fit.

    (Is not defined properly for vector valued functions.)

    :param model: Model instance
    :param fit_result: FitResults instance
    :param data: data with which the fit was performed.
    """
    # First filter out the dependent vars
    y_is = [data[var] for var in model if var in data]
    x_is = [value for var, value in data.items()
            if var.name in model.__signature__.parameters]
    y_bars = [np.mean(y_i) if y_i is not None else None for y_i in y_is]
    f_is = model(*x_is, **fit_result.params)
    SS_res = np.sum([np.sum((y_i - f_i)**2)
                     for y_i, f_i in zip(y_is, f_is) if y_i is not None])
    SS_tot = np.sum([np.sum((y_i - y_bar)**2)
                     for y_i, y_bar in zip(y_is, y_bars) if y_i is not None])
    return 1 - SS_res/SS_tot
```

sha: 759dd3d1d4270510d651f40b23dd26b1b10eee83 | url: https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/fit.py#L1431-L1448
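
The quantity computed above, R^2 = 1 - SS_res / SS_tot, spelled out for a single dependent variable:

```python
import numpy as np

y = np.array([1.0, 2.0, 3.0, 4.0])   # observed data
f = np.array([1.1, 1.9, 3.2, 3.8])   # model evaluated at the fitted parameters

ss_res = np.sum((y - f)**2)          # residual sum of squares
ss_tot = np.sum((y - y.mean())**2)   # total sum of squares
print(1 - ss_res / ss_tot)           # 0.98
```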

5,616 | tBuLi/symfit | symfit/core/fit.py | _partial_subs

```python
def _partial_subs(func, func2vars):
    """
    Partial-bug proof substitution. Works by making the substitutions on
    the expression inside the derivative first, and then rebuilding the
    derivative safely without evaluating it using `_partial_diff`.
    """
    if isinstance(func, sympy.Derivative):
        new_func = func.expr.xreplace(func2vars)
        new_variables = tuple(var.xreplace(func2vars)
                              for var in func.variables)
        return _partial_diff(new_func, *new_variables)
    else:
        return func.xreplace(func2vars)
```

sha: 759dd3d1d4270510d651f40b23dd26b1b10eee83 | url: https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/fit.py#L1693-L1705
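
The sympy attributes it relies on, shown standalone: an unevaluated `sympy.Derivative` exposes the inner expression and the differentiation variables, so both can be substituted and the derivative rebuilt without triggering evaluation:

```python
import sympy

x, y = sympy.symbols('x y')
d = sympy.Derivative(x**2, x)
print(d.expr, d.variables)  # x**2 (x,)

rebuilt = sympy.Derivative(d.expr.xreplace({x: y}), y, evaluate=False)
print(rebuilt)              # Derivative(y**2, y), still unevaluated
```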

5,617 | tBuLi/symfit | symfit/core/fit.py | BaseModel._init_from_dict

```python
def _init_from_dict(self, model_dict):
    """
    Initiate self from a model_dict to make sure attributes such as vars,
    params are available. Creates lists of alphabetically sorted independent
    vars, dependent vars, sigma vars, and parameters. Finally it creates a
    signature for this model so it can be called nicely. This signature only
    contains independent vars and params, as one would expect.

    :param model_dict: dict of (dependent_var, expression) pairs.
    """
    sort_func = lambda symbol: symbol.name
    self.model_dict = OrderedDict(sorted(model_dict.items(),
                                         key=lambda i: sort_func(i[0])))
    # Everything at the bottom of the toposort is independent, at the top
    # dependent, and the rest interdependent.
    ordered = list(toposort(self.connectivity_mapping))
    independent = sorted(ordered.pop(0), key=sort_func)
    self.dependent_vars = sorted(ordered.pop(-1), key=sort_func)
    self.interdependent_vars = sorted(
        [item for items in ordered for item in items],
        key=sort_func
    )
    # `independent` contains both params and vars, needs to be separated
    self.independent_vars = [s for s in independent if
                             not isinstance(s, Parameter) and not s in self]
    self.params = [s for s in independent if isinstance(s, Parameter)]

    try:
        assert not any(isinstance(var, Parameter)
                       for var in self.dependent_vars)
        assert not any(isinstance(var, Parameter)
                       for var in self.interdependent_vars)
    except AssertionError:
        raise ModelError('`Parameter`\'s can not feature in the role '
                         'of `Variable`')
    # Make Variable object corresponding to each dependent var.
    self.sigmas = {var: Variable(name='sigma_{}'.format(var.name))
                   for var in self.dependent_vars}
```

sha: 759dd3d1d4270510d651f40b23dd26b1b10eee83 | url: https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/fit.py#L273-L310
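
The layering that `toposort` provides, shown on a toy connectivity mapping (string keys used purely for illustration):

```python
from toposort import toposort

# z depends on y and a; y depends on x and b.
connectivity = {'z': {'y', 'a'}, 'y': {'x', 'b'}}
print(list(toposort(connectivity)))
# [{'a', 'b', 'x'}, {'y'}, {'z'}] (set ordering may vary):
# fully independent symbols first, dependent variables last.
```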
5,618 | tBuLi/symfit | symfit/core/fit.py | BaseModel.function_dict | def function_dict(self):
"""
Equivalent to ``self.model_dict``, but with all variables replaced by
functions if applicable. Sorted by the evaluation order according to
``self.ordered_symbols``, not alphabetical like ``self.model_dict``!
"""
func_dict = OrderedDict()
for var, func in self.vars_as_functions.items():
expr = self.model_dict[var].xreplace(self.vars_as_functions)
func_dict[func] = expr
return func_dict | python | def function_dict(self):
"""
Equivalent to ``self.model_dict``, but with all variables replaced by
functions if applicable. Sorted by the evaluation order according to
``self.ordered_symbols``, not alphabetical like ``self.model_dict``!
"""
func_dict = OrderedDict()
for var, func in self.vars_as_functions.items():
expr = self.model_dict[var].xreplace(self.vars_as_functions)
func_dict[func] = expr
return func_dict | [
"def",
"function_dict",
"(",
"self",
")",
":",
"func_dict",
"=",
"OrderedDict",
"(",
")",
"for",
"var",
",",
"func",
"in",
"self",
".",
"vars_as_functions",
".",
"items",
"(",
")",
":",
"expr",
"=",
"self",
".",
"model_dict",
"[",
"var",
"]",
".",
"xreplace",
"(",
"self",
".",
"vars_as_functions",
")",
"func_dict",
"[",
"func",
"]",
"=",
"expr",
"return",
"func_dict"
] | Equivalent to ``self.model_dict``, but with all variables replaced by
functions if applicable. Sorted by the evaluation order according to
``self.ordered_symbols``, not alphabetical like ``self.model_dict``! | [
"Equivalent",
"to",
"self",
".",
"model_dict",
"but",
"with",
"all",
"variables",
"replaced",
"by",
"functions",
"if",
"applicable",
".",
"Sorted",
"by",
"the",
"evaluation",
"order",
"according",
"to",
"self",
".",
"ordered_symbols",
"not",
"alphabetical",
"like",
"self",
".",
"model_dict",
"!"
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/fit.py#L340-L350 |
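A hedged sketch of what `function_dict` exposes for a model whose components depend on each other; it assumes a symfit version where one dependent variable may appear in another component's expression:

```python
from symfit import Model, parameters, variables

x, y, z = variables('x, y, z')
a, b = parameters('a, b')

# z's expression refers to y, so y is replaced by a function y(x, a)
model = Model({y: a * x**2, z: b * y + x})
for func, expr in model.function_dict.items():
    print(func, '=', expr)  # printed in evaluation order, y before z
```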
5,619 | tBuLi/symfit | symfit/core/fit.py | TakesData._model_sanity | def _model_sanity(model):
"""
Perform some basic sanity checking on the model to warn users when they
might be trying something ill advised.
:param model: model instance.
"""
if not isinstance(model, ODEModel) and not isinstance(model, BaseNumericalModel):
# Such a model should probably not contain derivatives
for var, expr in model.items():
if isinstance(var, sympy.Derivative) or expr.has(sympy.Derivative):
warnings.warn(RuntimeWarning(
'The model contains derivatives in its definition. '
'Are you sure you don\'t mean to use `symfit.ODEModel`?'
)) | python | def _model_sanity(model):
"""
Perform some basic sanity checking on the model to warn users when they
might be trying something ill advised.
:param model: model instance.
"""
if not isinstance(model, ODEModel) and not isinstance(model, BaseNumericalModel):
# Such a model should probably not contain derivatives
for var, expr in model.items():
if isinstance(var, sympy.Derivative) or expr.has(sympy.Derivative):
warnings.warn(RuntimeWarning(
'The model contains derivatives in its definition. '
'Are you sure you don\'t mean to use `symfit.ODEModel`?'
)) | [
"def",
"_model_sanity",
"(",
"model",
")",
":",
"if",
"not",
"isinstance",
"(",
"model",
",",
"ODEModel",
")",
"and",
"not",
"isinstance",
"(",
"model",
",",
"BaseNumericalModel",
")",
":",
"# Such a model should probably not contain derivatives",
"for",
"var",
",",
"expr",
"in",
"model",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"var",
",",
"sympy",
".",
"Derivative",
")",
"or",
"expr",
".",
"has",
"(",
"sympy",
".",
"Derivative",
")",
":",
"warnings",
".",
"warn",
"(",
"RuntimeWarning",
"(",
"'The model contains derivatives in its definition. '",
"'Are you sure you don\\'t mean to use `symfit.ODEModel`?'",
")",
")"
] | Perform some basic sanity checking on the model to warn users when they
might be trying something ill advised.
:param model: model instance. | [
"Perform",
"some",
"basic",
"sanity",
"checking",
"on",
"the",
"model",
"to",
"warn",
"users",
"when",
"they",
"might",
"be",
"trying",
"something",
"ill",
"advised",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/fit.py#L1014-L1028 |
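The check above boils down to scanning each component for `sympy.Derivative`; a small, self-contained restatement of that condition (illustrative, not the library's API):

```python
import sympy
from symfit import Model, parameters, variables

x, y = variables('x, y')
a, = parameters('a')

model = Model({y: a * x})
has_derivatives = any(
    isinstance(var, sympy.Derivative) or expr.has(sympy.Derivative)
    for var, expr in model.items()
)
print(has_derivatives)  # False here; True would trigger the RuntimeWarning
```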
5,620 | tBuLi/symfit | symfit/core/fit.py | TakesData.data_shapes | def data_shapes(self):
"""
Returns the shape of the data. In most cases this will be the same for
all variables of the same type; if not, this raises an Exception.
Ignores variables which are set to None by design, so we know that those
None variables can be assumed to have the same shape as the others in
calculations where this is needed, such as the covariance matrix.
:return: Tuple of all independent var shapes, dependent var shapes.
"""
independent_shapes = []
for var, data in self.independent_data.items():
if data is not None:
independent_shapes.append(data.shape)
dependent_shapes = []
for var, data in self.dependent_data.items():
if data is not None:
dependent_shapes.append(data.shape)
return list(set(independent_shapes)), list(set(dependent_shapes)) | python | def data_shapes(self):
"""
Returns the shape of the data. In most cases this will be the same for
all variables of the same type; if not, this raises an Exception.
Ignores variables which are set to None by design, so we know that those
None variables can be assumed to have the same shape as the others in
calculations where this is needed, such as the covariance matrix.
:return: Tuple of all independent var shapes, dependent var shapes.
"""
independent_shapes = []
for var, data in self.independent_data.items():
if data is not None:
independent_shapes.append(data.shape)
dependent_shapes = []
for var, data in self.dependent_data.items():
if data is not None:
dependent_shapes.append(data.shape)
return list(set(independent_shapes)), list(set(dependent_shapes)) | [
"def",
"data_shapes",
"(",
"self",
")",
":",
"independent_shapes",
"=",
"[",
"]",
"for",
"var",
",",
"data",
"in",
"self",
".",
"independent_data",
".",
"items",
"(",
")",
":",
"if",
"data",
"is",
"not",
"None",
":",
"independent_shapes",
".",
"append",
"(",
"data",
".",
"shape",
")",
"dependent_shapes",
"=",
"[",
"]",
"for",
"var",
",",
"data",
"in",
"self",
".",
"dependent_data",
".",
"items",
"(",
")",
":",
"if",
"data",
"is",
"not",
"None",
":",
"dependent_shapes",
".",
"append",
"(",
"data",
".",
"shape",
")",
"return",
"list",
"(",
"set",
"(",
"independent_shapes",
")",
")",
",",
"list",
"(",
"set",
"(",
"dependent_shapes",
")",
")"
] | Returns the shape of the data. In most cases this will be the same for
all variables of the same type; if not, this raises an Exception.
Ignores variables which are set to None by design, so we know that those
None variables can be assumed to have the same shape as the others in
calculations where this is needed, such as the covariance matrix.
:return: Tuple of all independent var shapes, dependent var shapes. | [
"Returns",
"the",
"shape",
"of",
"the",
"data",
".",
"In",
"most",
"cases",
"this",
"will",
"be",
"the",
"same",
"for",
"all",
"variables",
"of",
"the",
"same",
"type",
"if",
"not",
"this",
"raises",
"an",
"Exception",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/fit.py#L1067-L1088 |
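A usage sketch, assuming `data_shapes` is exposed as a property on the fit object (as in recent symfit releases):

```python
import numpy as np
from symfit import Fit, parameters, variables

x, y = variables('x, y')
a, b = parameters('a, b')

xdata = np.linspace(0, 10, 25)
fit = Fit({y: a * x + b}, x=xdata, y=3 * xdata + 1)
print(fit.data_shapes)  # ([(25,)], [(25,)])
```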
5,621 | tBuLi/symfit | symfit/core/fit.py | Fit.execute | def execute(self, **minimize_options):
"""
Execute the fit.
:param minimize_options: keyword arguments to be passed to the specified
minimizer.
:return: FitResults instance
"""
minimizer_ans = self.minimizer.execute(**minimize_options)
try: # to build covariance matrix
cov_matrix = minimizer_ans.covariance_matrix
except AttributeError:
cov_matrix = self.covariance_matrix(dict(zip(self.model.params, minimizer_ans._popt)))
else:
if cov_matrix is None:
cov_matrix = self.covariance_matrix(dict(zip(self.model.params, minimizer_ans._popt)))
finally:
minimizer_ans.covariance_matrix = cov_matrix
# Overwrite the DummyModel with the current model
minimizer_ans.model = self.model
minimizer_ans.gof_qualifiers['r_squared'] = r_squared(self.model, minimizer_ans, self.data)
return minimizer_ans | python | def execute(self, **minimize_options):
"""
Execute the fit.
:param minimize_options: keyword arguments to be passed to the specified
minimizer.
:return: FitResults instance
"""
minimizer_ans = self.minimizer.execute(**minimize_options)
try: # to build covariance matrix
cov_matrix = minimizer_ans.covariance_matrix
except AttributeError:
cov_matrix = self.covariance_matrix(dict(zip(self.model.params, minimizer_ans._popt)))
else:
if cov_matrix is None:
cov_matrix = self.covariance_matrix(dict(zip(self.model.params, minimizer_ans._popt)))
finally:
minimizer_ans.covariance_matrix = cov_matrix
# Overwrite the DummyModel with the current model
minimizer_ans.model = self.model
minimizer_ans.gof_qualifiers['r_squared'] = r_squared(self.model, minimizer_ans, self.data)
return minimizer_ans | [
"def",
"execute",
"(",
"self",
",",
"*",
"*",
"minimize_options",
")",
":",
"minimizer_ans",
"=",
"self",
".",
"minimizer",
".",
"execute",
"(",
"*",
"*",
"minimize_options",
")",
"try",
":",
"# to build covariance matrix",
"cov_matrix",
"=",
"minimizer_ans",
".",
"covariance_matrix",
"except",
"AttributeError",
":",
"cov_matrix",
"=",
"self",
".",
"covariance_matrix",
"(",
"dict",
"(",
"zip",
"(",
"self",
".",
"model",
".",
"params",
",",
"minimizer_ans",
".",
"_popt",
")",
")",
")",
"else",
":",
"if",
"cov_matrix",
"is",
"None",
":",
"cov_matrix",
"=",
"self",
".",
"covariance_matrix",
"(",
"dict",
"(",
"zip",
"(",
"self",
".",
"model",
".",
"params",
",",
"minimizer_ans",
".",
"_popt",
")",
")",
")",
"finally",
":",
"minimizer_ans",
".",
"covariance_matrix",
"=",
"cov_matrix",
"# Overwrite the DummyModel with the current model",
"minimizer_ans",
".",
"model",
"=",
"self",
".",
"model",
"minimizer_ans",
".",
"gof_qualifiers",
"[",
"'r_squared'",
"]",
"=",
"r_squared",
"(",
"self",
".",
"model",
",",
"minimizer_ans",
",",
"self",
".",
"data",
")",
"return",
"minimizer_ans"
] | Execute the fit.
:param minimize_options: keyword arguments to be passed to the specified
minimizer.
:return: FitResults instance | [
"Execute",
"the",
"fit",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/fit.py#L1407-L1428 |
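A standard end-to-end sketch of `execute`; the noise level and parameter names are illustrative:

```python
import numpy as np
from symfit import Fit, parameters, variables

x, y = variables('x, y')
a, b = parameters('a, b')

xdata = np.linspace(0, 10, 50)
ydata = 2.5 * xdata + 0.3 + np.random.normal(0, 0.1, 50)

fit_result = Fit({y: a * x + b}, x=xdata, y=ydata).execute()
print(fit_result.params)                       # fitted values for a and b
print(fit_result.gof_qualifiers['r_squared'])  # set by execute(), see above
```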
5,622 | tBuLi/symfit | symfit/core/fit.py | ODEModel.eval_components | def eval_components(self, *args, **kwargs):
"""
Numerically integrate the system of ODEs.
:param args: Ordered arguments for the parameters and independent
variables
:param kwargs: Keyword arguments for the parameters and independent
variables
:return:
"""
bound_arguments = self.__signature__.bind(*args, **kwargs)
t_like = bound_arguments.arguments[self.independent_vars[0].name]
# System of functions to be integrated
f = lambda ys, t, *a: [c(t, *(list(ys) + list(a))) for c in self._ncomponents]
Dfun = lambda ys, t, *a: [[c(t, *(list(ys) + list(a))) for c in row] for row in self._njacobian]
initial_dependent = [self.initial[var] for var in self.dependent_vars]
t_initial = self.initial[self.independent_vars[0]] # Assuming there's only one
# Check if the time-like data includes the initial value, because integration should start there.
try:
t_like[0]
except (TypeError, IndexError): # Python scalar gives TypeError, numpy scalars IndexError
t_like = np.array([t_like]) # Allow evaluation at one point.
# The strategy is to split the time axis into a part above and a part below
# the initial value, and to integrate those separately. At the end we rejoin them.
# The reversal ([::-1]) is needed because odeint wants the first point to be
# t_initial, and so t_smaller is a decreasing series.
if t_initial in t_like:
t_bigger = t_like[t_like >= t_initial]
t_smaller = t_like[t_like <= t_initial][::-1]
else:
t_bigger = np.concatenate(
(np.array([t_initial]), t_like[t_like > t_initial])
)
t_smaller = np.concatenate(
(np.array([t_initial]), t_like[t_like < t_initial][::-1])
)
# Properly ordered time axis containing t_initial
t_total = np.concatenate((t_smaller[::-1][:-1], t_bigger))
ans_bigger = odeint(
f,
initial_dependent,
t_bigger,
args=tuple(
bound_arguments.arguments[param.name] for param in self.params),
Dfun=Dfun,
*self.lsoda_args, **self.lsoda_kwargs
)
ans_smaller = odeint(
f,
initial_dependent,
t_smaller,
args=tuple(
bound_arguments.arguments[param.name] for param in self.params),
Dfun=Dfun,
*self.lsoda_args, **self.lsoda_kwargs
)
ans = np.concatenate((ans_smaller[1:][::-1], ans_bigger))
if t_initial in t_like:
# The user also requested to know the value at t_initial, so keep it.
return ans.T
else:
# The user didn't ask for the value at t_initial, so exclude it.
# (t_total contains all the t-points used for the integration,
# and so is t_like with t_initial inserted at the right position).
return ans[t_total != t_initial].T | python | def eval_components(self, *args, **kwargs):
"""
Numerically integrate the system of ODEs.
:param args: Ordered arguments for the parameters and independent
variables
:param kwargs: Keyword arguments for the parameters and independent
variables
:return:
"""
bound_arguments = self.__signature__.bind(*args, **kwargs)
t_like = bound_arguments.arguments[self.independent_vars[0].name]
# System of functions to be integrated
f = lambda ys, t, *a: [c(t, *(list(ys) + list(a))) for c in self._ncomponents]
Dfun = lambda ys, t, *a: [[c(t, *(list(ys) + list(a))) for c in row] for row in self._njacobian]
initial_dependent = [self.initial[var] for var in self.dependent_vars]
t_initial = self.initial[self.independent_vars[0]] # Assuming there's only one
# Check if the time-like data includes the initial value, because integration should start there.
try:
t_like[0]
except (TypeError, IndexError): # Python scalar gives TypeError, numpy scalars IndexError
t_like = np.array([t_like]) # Allow evaluation at one point.
# The strategy is to split the time axis into a part above and a part below
# the initial value, and to integrate those separately. At the end we rejoin them.
# The reversal ([::-1]) is needed because odeint wants the first point to be
# t_initial, and so t_smaller is a decreasing series.
if t_initial in t_like:
t_bigger = t_like[t_like >= t_initial]
t_smaller = t_like[t_like <= t_initial][::-1]
else:
t_bigger = np.concatenate(
(np.array([t_initial]), t_like[t_like > t_initial])
)
t_smaller = np.concatenate(
(np.array([t_initial]), t_like[t_like < t_initial][::-1])
)
# Properly ordered time axis containing t_initial
t_total = np.concatenate((t_smaller[::-1][:-1], t_bigger))
ans_bigger = odeint(
f,
initial_dependent,
t_bigger,
args=tuple(
bound_arguments.arguments[param.name] for param in self.params),
Dfun=Dfun,
*self.lsoda_args, **self.lsoda_kwargs
)
ans_smaller = odeint(
f,
initial_dependent,
t_smaller,
args=tuple(
bound_arguments.arguments[param.name] for param in self.params),
Dfun=Dfun,
*self.lsoda_args, **self.lsoda_kwargs
)
ans = np.concatenate((ans_smaller[1:][::-1], ans_bigger))
if t_initial in t_like:
# The user also requested to know the value at t_initial, so keep it.
return ans.T
else:
# The user didn't ask for the value at t_initial, so exclude it.
# (t_total contains all the t-points used for the integration,
# and so is t_like with t_initial inserted at the right position).
return ans[t_total != t_initial].T | [
"def",
"eval_components",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"bound_arguments",
"=",
"self",
".",
"__signature__",
".",
"bind",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"t_like",
"=",
"bound_arguments",
".",
"arguments",
"[",
"self",
".",
"independent_vars",
"[",
"0",
"]",
".",
"name",
"]",
"# System of functions to be integrated",
"f",
"=",
"lambda",
"ys",
",",
"t",
",",
"*",
"a",
":",
"[",
"c",
"(",
"t",
",",
"*",
"(",
"list",
"(",
"ys",
")",
"+",
"list",
"(",
"a",
")",
")",
")",
"for",
"c",
"in",
"self",
".",
"_ncomponents",
"]",
"Dfun",
"=",
"lambda",
"ys",
",",
"t",
",",
"*",
"a",
":",
"[",
"[",
"c",
"(",
"t",
",",
"*",
"(",
"list",
"(",
"ys",
")",
"+",
"list",
"(",
"a",
")",
")",
")",
"for",
"c",
"in",
"row",
"]",
"for",
"row",
"in",
"self",
".",
"_njacobian",
"]",
"initial_dependent",
"=",
"[",
"self",
".",
"initial",
"[",
"var",
"]",
"for",
"var",
"in",
"self",
".",
"dependent_vars",
"]",
"t_initial",
"=",
"self",
".",
"initial",
"[",
"self",
".",
"independent_vars",
"[",
"0",
"]",
"]",
"# Assuming there's only one",
"# Check if the time-like data includes the initial value, because integration should start there.",
"try",
":",
"t_like",
"[",
"0",
"]",
"except",
"(",
"TypeError",
",",
"IndexError",
")",
":",
"# Python scalar gives TypeError, numpy scalars IndexError",
"t_like",
"=",
"np",
".",
"array",
"(",
"[",
"t_like",
"]",
")",
"# Allow evaluation at one point.",
"# The strategy is to split the time axis in a part above and below the",
"# initial value, and to integrate those seperately. At the end we rejoin them.",
"# np.flip is needed because odeint wants the first point to be t_initial",
"# and so t_smaller is a declining series.",
"if",
"t_initial",
"in",
"t_like",
":",
"t_bigger",
"=",
"t_like",
"[",
"t_like",
">=",
"t_initial",
"]",
"t_smaller",
"=",
"t_like",
"[",
"t_like",
"<=",
"t_initial",
"]",
"[",
":",
":",
"-",
"1",
"]",
"else",
":",
"t_bigger",
"=",
"np",
".",
"concatenate",
"(",
"(",
"np",
".",
"array",
"(",
"[",
"t_initial",
"]",
")",
",",
"t_like",
"[",
"t_like",
">",
"t_initial",
"]",
")",
")",
"t_smaller",
"=",
"np",
".",
"concatenate",
"(",
"(",
"np",
".",
"array",
"(",
"[",
"t_initial",
"]",
")",
",",
"t_like",
"[",
"t_like",
"<",
"t_initial",
"]",
"[",
":",
":",
"-",
"1",
"]",
")",
")",
"# Properly ordered time axis containing t_initial",
"t_total",
"=",
"np",
".",
"concatenate",
"(",
"(",
"t_smaller",
"[",
":",
":",
"-",
"1",
"]",
"[",
":",
"-",
"1",
"]",
",",
"t_bigger",
")",
")",
"ans_bigger",
"=",
"odeint",
"(",
"f",
",",
"initial_dependent",
",",
"t_bigger",
",",
"args",
"=",
"tuple",
"(",
"bound_arguments",
".",
"arguments",
"[",
"param",
".",
"name",
"]",
"for",
"param",
"in",
"self",
".",
"params",
")",
",",
"Dfun",
"=",
"Dfun",
",",
"*",
"self",
".",
"lsoda_args",
",",
"*",
"*",
"self",
".",
"lsoda_kwargs",
")",
"ans_smaller",
"=",
"odeint",
"(",
"f",
",",
"initial_dependent",
",",
"t_smaller",
",",
"args",
"=",
"tuple",
"(",
"bound_arguments",
".",
"arguments",
"[",
"param",
".",
"name",
"]",
"for",
"param",
"in",
"self",
".",
"params",
")",
",",
"Dfun",
"=",
"Dfun",
",",
"*",
"self",
".",
"lsoda_args",
",",
"*",
"*",
"self",
".",
"lsoda_kwargs",
")",
"ans",
"=",
"np",
".",
"concatenate",
"(",
"(",
"ans_smaller",
"[",
"1",
":",
"]",
"[",
":",
":",
"-",
"1",
"]",
",",
"ans_bigger",
")",
")",
"if",
"t_initial",
"in",
"t_like",
":",
"# The user also requested to know the value at t_initial, so keep it.",
"return",
"ans",
".",
"T",
"else",
":",
"# The user didn't ask for the value at t_initial, so exclude it.",
"# (t_total contains all the t-points used for the integration,",
"# and so is t_like with t_initial inserted at the right position).",
"return",
"ans",
"[",
"t_total",
"!=",
"t_initial",
"]",
".",
"T"
] | Numerically integrate the system of ODEs.
:param args: Ordered arguments for the parameters and independent
variables
:param kwargs: Keyword arguments for the parameters and independent
variables
:return: | [
"Numerically",
"integrate",
"the",
"system",
"of",
"ODEs",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/fit.py#L1590-L1660 |
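A hedged sketch of calling an `ODEModel`; note the evaluation points may lie on either side of the initial value, which is exactly the case the splitting logic above handles:

```python
import numpy as np
from symfit import D, ODEModel, Parameter, Variable

t = Variable('t'); y = Variable('y')
k = Parameter('k', value=0.5)

# dy/dt = -k*y with y(0) = 1
model = ODEModel({D(y, t): -k * y}, initial={t: 0.0, y: 1.0})

tdata = np.array([-1.0, 0.5, 2.0])  # points below and above t_initial
y_values, = model(t=tdata, k=0.5)   # integrated piecewise around t = 0
```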
5,623 | tBuLi/symfit | symfit/core/operators.py | call | def call(self, *values, **named_values):
"""
Call an expression to evaluate it at the given point.
Future improvements: I would like it if func and signature could be buffered after the
first call so they don't have to be recalculated for every call. However, nothing
can be stored on self as sympy uses __slots__ for efficiency. This means there is no
instance dict to put stuff in! And I'm pretty sure it's ill advised to hack into the
__slots__ of Expr.
However, for the moment I don't really notice a performance penalty in running tests.
p.s. In the current setup signature is not even needed since no introspection is possible
on the Expr before calling it anyway, which makes calculating the signature absolutely useless.
However, I hope that someday some monkey patching expert in shining armour comes by and finds
a way to store it in __signature__ upon __init__ of any ``symfit`` expr such that calling
inspect_sig.signature on a symbolic expression will tell you which arguments to provide.
:param self: Any subclass of sympy.Expr
:param values: Values for the Parameters and Variables of the Expr.
:param named_values: Values for the vars and params by name. ``named_values`` is
allowed to contain too many values, as this sometimes happens when using
\*\*fit_result.params on a submodel. The irrelevant params are simply ignored.
:return: The function evaluated at ``values``. The type depends entirely on the input.
Typically an array or a float but nothing is enforced.
"""
independent_vars, params = seperate_symbols(self)
# Convert to a pythonic function
func = sympy_to_py(self, independent_vars + params)
# Handle args and kwargs according to the allowed names.
parameters = [ # Note that these are inspect_sig.Parameter's, not symfit parameters!
inspect_sig.Parameter(arg.name, inspect_sig.Parameter.POSITIONAL_OR_KEYWORD)
for arg in independent_vars + params
]
arg_names = [arg.name for arg in independent_vars + params]
relevant_named_values = {
name: value for name, value in named_values.items() if name in arg_names
}
signature = inspect_sig.Signature(parameters=parameters)
bound_arguments = signature.bind(*values, **relevant_named_values)
return func(**bound_arguments.arguments) | python | def call(self, *values, **named_values):
"""
Call an expression to evaluate it at the given point.
Future improvements: I would like it if func and signature could be buffered after the
first call so they don't have to be recalculated for every call. However, nothing
can be stored on self as sympy uses __slots__ for efficiency. This means there is no
instance dict to put stuff in! And I'm pretty sure it's ill advised to hack into the
__slots__ of Expr.
However, for the moment I don't really notice a performance penalty in running tests.
p.s. In the current setup signature is not even needed since no introspection is possible
on the Expr before calling it anyway, which makes calculating the signature absolutely useless.
However, I hope that someday some monkey patching expert in shining armour comes by and finds
a way to store it in __signature__ upon __init__ of any ``symfit`` expr such that calling
inspect_sig.signature on a symbolic expression will tell you which arguments to provide.
:param self: Any subclass of sympy.Expr
:param values: Values for the Parameters and Variables of the Expr.
:param named_values: Values for the vars and params by name. ``named_values`` is
allowed to contain too many values, as this sometimes happens when using
\*\*fit_result.params on a submodel. The irrelevant params are simply ignored.
:return: The function evaluated at ``values``. The type depends entirely on the input.
Typically an array or a float but nothing is enforced.
"""
independent_vars, params = seperate_symbols(self)
# Convert to a pythonic function
func = sympy_to_py(self, independent_vars + params)
# Handle args and kwargs according to the allowed names.
parameters = [ # Note that these are inspect_sig.Parameter's, not symfit parameters!
inspect_sig.Parameter(arg.name, inspect_sig.Parameter.POSITIONAL_OR_KEYWORD)
for arg in independent_vars + params
]
arg_names = [arg.name for arg in independent_vars + params]
relevant_named_values = {
name: value for name, value in named_values.items() if name in arg_names
}
signature = inspect_sig.Signature(parameters=parameters)
bound_arguments = signature.bind(*values, **relevant_named_values)
return func(**bound_arguments.arguments) | [
"def",
"call",
"(",
"self",
",",
"*",
"values",
",",
"*",
"*",
"named_values",
")",
":",
"independent_vars",
",",
"params",
"=",
"seperate_symbols",
"(",
"self",
")",
"# Convert to a pythonic function",
"func",
"=",
"sympy_to_py",
"(",
"self",
",",
"independent_vars",
"+",
"params",
")",
"# Handle args and kwargs according to the allowed names.",
"parameters",
"=",
"[",
"# Note that these are inspect_sig.Parameter's, not symfit parameters!",
"inspect_sig",
".",
"Parameter",
"(",
"arg",
".",
"name",
",",
"inspect_sig",
".",
"Parameter",
".",
"POSITIONAL_OR_KEYWORD",
")",
"for",
"arg",
"in",
"independent_vars",
"+",
"params",
"]",
"arg_names",
"=",
"[",
"arg",
".",
"name",
"for",
"arg",
"in",
"independent_vars",
"+",
"params",
"]",
"relevant_named_values",
"=",
"{",
"name",
":",
"value",
"for",
"name",
",",
"value",
"in",
"named_values",
".",
"items",
"(",
")",
"if",
"name",
"in",
"arg_names",
"}",
"signature",
"=",
"inspect_sig",
".",
"Signature",
"(",
"parameters",
"=",
"parameters",
")",
"bound_arguments",
"=",
"signature",
".",
"bind",
"(",
"*",
"values",
",",
"*",
"*",
"relevant_named_values",
")",
"return",
"func",
"(",
"*",
"*",
"bound_arguments",
".",
"arguments",
")"
] | Call an expression to evaluate it at the given point.
Future improvements: I would like it if func and signature could be buffered after the
first call so they don't have to be recalculated for every call. However, nothing
can be stored on self as sympy uses __slots__ for efficiency. This means there is no
instance dict to put stuff in! And I'm pretty sure it's ill advised to hack into the
__slots__ of Expr.
However, for the moment I don't really notice a performance penalty in running tests.
p.s. In the current setup signature is not even needed since no introspection is possible
on the Expr before calling it anyway, which makes calculating the signature absolutely useless.
However, I hope that someday some monkey patching expert in shining armour comes by and finds
a way to store it in __signature__ upon __init__ of any ``symfit`` expr such that calling
inspect_sig.signature on a symbolic expression will tell you which arguments to provide.
:param self: Any subclass of sympy.Expr
:param values: Values for the Parameters and Variables of the Expr.
:param named_values: Values for the vars and params by name. ``named_values`` is
allowed to contain too many values, as this sometimes happens when using
\*\*fit_result.params on a submodel. The irrelevant params are simply ignored.
:return: The function evaluated at ``values``. The type depends entirely on the input.
Typically an array or a float but nothing is enforced. | [
"Call",
"an",
"expression",
"to",
"evaluate",
"it",
"at",
"the",
"given",
"point",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/operators.py#L48-L92 |
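Because symfit patches `__call__` onto `sympy.Expr`, bare expressions can be evaluated directly; a short sketch, including the documented tolerance for surplus keyword values:

```python
from symfit import parameters, variables

x, = variables('x')
a, b = parameters('a, b')

expr = a * x**2 + b
print(expr(2, a=3, b=1))          # 13; x is bound positionally
print(expr(x=2, a=3, b=1, c=9))   # still 13; the irrelevant 'c' is ignored
```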
5,624 | tBuLi/symfit | symfit/core/fit_results.py | FitResults.variance | def variance(self, param):
"""
Return the variance in a given parameter as found by the fit.
:param param: ``Parameter`` Instance.
:return: Variance of ``param``.
"""
param_number = self.model.params.index(param)
try:
return self.covariance_matrix[param_number, param_number]
except TypeError:
# covariance_matrix can be None
return None | python | def variance(self, param):
"""
Return the variance in a given parameter as found by the fit.
:param param: ``Parameter`` Instance.
:return: Variance of ``param``.
"""
param_number = self.model.params.index(param)
try:
return self.covariance_matrix[param_number, param_number]
except TypeError:
# covariance_matrix can be None
return None | [
"def",
"variance",
"(",
"self",
",",
"param",
")",
":",
"param_number",
"=",
"self",
".",
"model",
".",
"params",
".",
"index",
"(",
"param",
")",
"try",
":",
"return",
"self",
".",
"covariance_matrix",
"[",
"param_number",
",",
"param_number",
"]",
"except",
"TypeError",
":",
"# covariance_matrix can be None",
"return",
"None"
] | Return the variance in a given parameter as found by the fit.
:param param: ``Parameter`` Instance.
:return: Variance of ``param``. | [
"Return",
"the",
"variance",
"in",
"a",
"given",
"parameter",
"as",
"found",
"by",
"the",
"fit",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/fit_results.py#L99-L111 |
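A sketch of reading a parameter's variance off a finished fit; the data here is noiseless, so the variance will be near zero:

```python
import numpy as np
from symfit import Fit, parameters, variables

x, y = variables('x, y')
a, b = parameters('a, b')

xdata = np.linspace(0, 10, 25)
fit_result = Fit({y: a * x + b}, x=xdata, y=2 * xdata + 1).execute()

var_a = fit_result.variance(a)  # diagonal entry of the covariance matrix
stdev_a = np.sqrt(var_a) if var_a is not None else None  # None if no matrix
```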
5,625 | tBuLi/symfit | symfit/core/fit_results.py | FitResults.covariance | def covariance(self, param_1, param_2):
"""
Return the covariance between param_1 and param_2.
:param param_1: ``Parameter`` Instance.
:param param_2: ``Parameter`` Instance.
:return: Covariance of the two params.
"""
param_1_number = self.model.params.index(param_1)
param_2_number = self.model.params.index(param_2)
return self.covariance_matrix[param_1_number, param_2_number] | python | def covariance(self, param_1, param_2):
"""
Return the covariance between param_1 and param_2.
:param param_1: ``Parameter`` Instance.
:param param_2: ``Parameter`` Instance.
:return: Covariance of the two params.
"""
param_1_number = self.model.params.index(param_1)
param_2_number = self.model.params.index(param_2)
return self.covariance_matrix[param_1_number, param_2_number] | [
"def",
"covariance",
"(",
"self",
",",
"param_1",
",",
"param_2",
")",
":",
"param_1_number",
"=",
"self",
".",
"model",
".",
"params",
".",
"index",
"(",
"param_1",
")",
"param_2_number",
"=",
"self",
".",
"model",
".",
"params",
".",
"index",
"(",
"param_2",
")",
"return",
"self",
".",
"covariance_matrix",
"[",
"param_1_number",
",",
"param_2_number",
"]"
] | Return the covariance between param_1 and param_2.
:param param_1: ``Parameter`` Instance.
:param param_2: ``Parameter`` Instance.
:return: Covariance of the two params. | [
"Return",
"the",
"covariance",
"between",
"param_1",
"and",
"param_2",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/fit_results.py#L113-L123 |
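Reusing `fit_result`, `a`, and `b` from the variance sketch earlier, the off-diagonal entries are read the same way:

```python
print(fit_result.covariance(a, b))  # cov(a, b); the matrix is symmetric
```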
5,626 | tBuLi/symfit | symfit/core/fit_results.py | FitResults._array_safe_dict_eq | def _array_safe_dict_eq(one_dict, other_dict):
"""
Dicts containing arrays are hard to compare. This function uses
numpy.allclose to compare arrays, and does normal comparison for all
other types.
:param one_dict:
:param other_dict:
:return: bool
"""
for key in one_dict:
try:
assert one_dict[key] == other_dict[key]
except ValueError as err:
# When dealing with arrays, we need to use numpy for comparison
if isinstance(one_dict[key], dict):
assert FitResults._array_safe_dict_eq(one_dict[key], other_dict[key])
else:
assert np.allclose(one_dict[key], other_dict[key])
except AssertionError:
return False
else: return True | python | def _array_safe_dict_eq(one_dict, other_dict):
"""
Dicts containing arrays are hard to compare. This function uses
numpy.allclose to compare arrays, and does normal comparison for all
other types.
:param one_dict:
:param other_dict:
:return: bool
"""
for key in one_dict:
try:
assert one_dict[key] == other_dict[key]
except ValueError as err:
# When dealing with arrays, we need to use numpy for comparison
if isinstance(one_dict[key], dict):
assert FitResults._array_safe_dict_eq(one_dict[key], other_dict[key])
else:
assert np.allclose(one_dict[key], other_dict[key])
except AssertionError:
return False
else: return True | [
"def",
"_array_safe_dict_eq",
"(",
"one_dict",
",",
"other_dict",
")",
":",
"for",
"key",
"in",
"one_dict",
":",
"try",
":",
"assert",
"one_dict",
"[",
"key",
"]",
"==",
"other_dict",
"[",
"key",
"]",
"except",
"ValueError",
"as",
"err",
":",
"# When dealing with arrays, we need to use numpy for comparison",
"if",
"isinstance",
"(",
"one_dict",
"[",
"key",
"]",
",",
"dict",
")",
":",
"assert",
"FitResults",
".",
"_array_safe_dict_eq",
"(",
"one_dict",
"[",
"key",
"]",
",",
"other_dict",
"[",
"key",
"]",
")",
"else",
":",
"assert",
"np",
".",
"allclose",
"(",
"one_dict",
"[",
"key",
"]",
",",
"other_dict",
"[",
"key",
"]",
")",
"except",
"AssertionError",
":",
"return",
"False",
"else",
":",
"return",
"True"
] | Dicts containing arrays are hard to compare. This function uses
numpy.allclose to compare arrays, and does normal comparison for all
other types.
:param one_dict:
:param other_dict:
:return: bool | [
"Dicts",
"containing",
"arrays",
"are",
"hard",
"to",
"compare",
".",
"This",
"function",
"uses",
"numpy",
".",
"allclose",
"to",
"compare",
"arrays",
"and",
"does",
"normal",
"comparison",
"for",
"all",
"other",
"types",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/fit_results.py#L126-L147 |
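A small illustration of why this helper exists: plain `==` on dicts containing arrays raises, because the array comparison yields an array rather than a bool. This assumes `FitResults` is importable from `symfit.core.fit_results`:

```python
import numpy as np
from symfit.core.fit_results import FitResults

d1 = {'popt': np.array([1.0, 2.0]), 'name': 'fit'}
d2 = {'popt': np.array([1.0, 2.0]), 'name': 'fit'}

# bool(d1['popt'] == d2['popt']) would raise ValueError; allclose does not.
print(FitResults._array_safe_dict_eq(d1, d2))  # True
```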
5,627 | tBuLi/symfit | examples/callable_numerical_model.py | nonanalytical_func | def nonanalytical_func(x, a, b):
"""
This can be any pythonic function which should be fitted, typically one
which is not easily written or supported as an analytical expression.
"""
# Do your non-trivial magic here. In this case a Piecewise, although this
# could also be done symbolically.
y = np.zeros_like(x)
y[x > b] = (a * (x - b) + b)[x > b]
y[x <= b] = b
return y | python | def nonanalytical_func(x, a, b):
"""
This can be any pythonic function which should be fitted, typically one
which is not easily written or supported as an analytical expression.
"""
# Do your non-trivial magic here. In this case a Piecewise, although this
# could also be done symbolically.
y = np.zeros_like(x)
y[x > b] = (a * (x - b) + b)[x > b]
y[x <= b] = b
return y | [
"def",
"nonanalytical_func",
"(",
"x",
",",
"a",
",",
"b",
")",
":",
"# Do your non-trivial magic here. In this case a Piecewise, although this",
"# could also be done symbolically.",
"y",
"=",
"np",
".",
"zeros_like",
"(",
"x",
")",
"y",
"[",
"x",
">",
"b",
"]",
"=",
"(",
"a",
"*",
"(",
"x",
"-",
"b",
")",
"+",
"b",
")",
"[",
"x",
">",
"b",
"]",
"y",
"[",
"x",
"<=",
"b",
"]",
"=",
"b",
"return",
"y"
] | This can be any pythonic function which should be fitted, typically one
which is not easily written or supported as an analytical expression. | [
"This",
"can",
"be",
"any",
"pythonic",
"function",
"which",
"should",
"be",
"fitted",
"typically",
"one",
"which",
"is",
"not",
"easily",
"written",
"or",
"supported",
"as",
"an",
"analytical",
"expression",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/examples/callable_numerical_model.py#L5-L15 |
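A hedged sketch of how such a function is typically fitted, wrapping it in a `CallableNumericalModel` using the `connectivity_mapping` style of constructor:

```python
import numpy as np
from symfit import CallableNumericalModel, Fit, parameters, variables

x, y = variables('x, y')
a, b = parameters('a, b')

model = CallableNumericalModel({y: nonanalytical_func},
                               connectivity_mapping={y: {x, a, b}})

xdata = np.linspace(0, 10, 100)
ydata = nonanalytical_func(xdata, a=2.0, b=3.0)
fit_result = Fit(model, x=xdata, y=ydata).execute()
```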
5,628 | mixcloud/django-experiments | experiments/admin.py | ExperimentAdmin.get_form | def get_form(self, request, obj=None, **kwargs):
"""
Add the default alternative dropdown with appropriate choices
"""
if obj:
if obj.alternatives:
choices = [(alternative, alternative) for alternative in obj.alternatives.keys()]
else:
choices = [(conf.CONTROL_GROUP, conf.CONTROL_GROUP)]
class ExperimentModelForm(forms.ModelForm):
default_alternative = forms.ChoiceField(choices=choices,
initial=obj.default_alternative,
required=False)
kwargs['form'] = ExperimentModelForm
return super(ExperimentAdmin, self).get_form(request, obj=obj, **kwargs) | python | def get_form(self, request, obj=None, **kwargs):
"""
Add the default alternative dropdown with appropriate choices
"""
if obj:
if obj.alternatives:
choices = [(alternative, alternative) for alternative in obj.alternatives.keys()]
else:
choices = [(conf.CONTROL_GROUP, conf.CONTROL_GROUP)]
class ExperimentModelForm(forms.ModelForm):
default_alternative = forms.ChoiceField(choices=choices,
initial=obj.default_alternative,
required=False)
kwargs['form'] = ExperimentModelForm
return super(ExperimentAdmin, self).get_form(request, obj=obj, **kwargs) | [
"def",
"get_form",
"(",
"self",
",",
"request",
",",
"obj",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"obj",
":",
"if",
"obj",
".",
"alternatives",
":",
"choices",
"=",
"[",
"(",
"alternative",
",",
"alternative",
")",
"for",
"alternative",
"in",
"obj",
".",
"alternatives",
".",
"keys",
"(",
")",
"]",
"else",
":",
"choices",
"=",
"[",
"(",
"conf",
".",
"CONTROL_GROUP",
",",
"conf",
".",
"CONTROL_GROUP",
")",
"]",
"class",
"ExperimentModelForm",
"(",
"forms",
".",
"ModelForm",
")",
":",
"default_alternative",
"=",
"forms",
".",
"ChoiceField",
"(",
"choices",
"=",
"choices",
",",
"initial",
"=",
"obj",
".",
"default_alternative",
",",
"required",
"=",
"False",
")",
"kwargs",
"[",
"'form'",
"]",
"=",
"ExperimentModelForm",
"return",
"super",
"(",
"ExperimentAdmin",
",",
"self",
")",
".",
"get_form",
"(",
"request",
",",
"obj",
"=",
"obj",
",",
"*",
"*",
"kwargs",
")"
] | Add the default alternative dropdown with appropriate choices | [
"Add",
"the",
"default",
"alternative",
"dropdown",
"with",
"appropriate",
"choices"
] | 1f45e9f8a108b51e44918daa647269b2b8d43f1d | https://github.com/mixcloud/django-experiments/blob/1f45e9f8a108b51e44918daa647269b2b8d43f1d/experiments/admin.py#L46-L61 |
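The choice-building logic above, restated as a standalone helper (illustrative only; the `experiment` argument and control-group name are assumptions):

```python
def default_alternative_choices(experiment, control_group='control'):
    """Choices for the default_alternative dropdown, per the logic above."""
    if experiment.alternatives:
        return [(alt, alt) for alt in experiment.alternatives.keys()]
    return [(control_group, control_group)]
```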
5,629 | mixcloud/django-experiments | experiments/admin.py | ExperimentAdmin.set_alternative_view | def set_alternative_view(self, request):
"""
Allows the admin user to change their assigned alternative
"""
if not request.user.has_perm('experiments.change_experiment'):
return HttpResponseForbidden()
experiment_name = request.POST.get("experiment")
alternative_name = request.POST.get("alternative")
if not (experiment_name and alternative_name):
return HttpResponseBadRequest()
participant(request).set_alternative(experiment_name, alternative_name)
return JsonResponse({
'success': True,
'alternative': participant(request).get_alternative(experiment_name)
}) | python | def set_alternative_view(self, request):
"""
Allows the admin user to change their assigned alternative
"""
if not request.user.has_perm('experiments.change_experiment'):
return HttpResponseForbidden()
experiment_name = request.POST.get("experiment")
alternative_name = request.POST.get("alternative")
if not (experiment_name and alternative_name):
return HttpResponseBadRequest()
participant(request).set_alternative(experiment_name, alternative_name)
return JsonResponse({
'success': True,
'alternative': participant(request).get_alternative(experiment_name)
}) | [
"def",
"set_alternative_view",
"(",
"self",
",",
"request",
")",
":",
"if",
"not",
"request",
".",
"user",
".",
"has_perm",
"(",
"'experiments.change_experiment'",
")",
":",
"return",
"HttpResponseForbidden",
"(",
")",
"experiment_name",
"=",
"request",
".",
"POST",
".",
"get",
"(",
"\"experiment\"",
")",
"alternative_name",
"=",
"request",
".",
"POST",
".",
"get",
"(",
"\"alternative\"",
")",
"if",
"not",
"(",
"experiment_name",
"and",
"alternative_name",
")",
":",
"return",
"HttpResponseBadRequest",
"(",
")",
"participant",
"(",
"request",
")",
".",
"set_alternative",
"(",
"experiment_name",
",",
"alternative_name",
")",
"return",
"JsonResponse",
"(",
"{",
"'success'",
":",
"True",
",",
"'alternative'",
":",
"participant",
"(",
"request",
")",
".",
"get_alternative",
"(",
"experiment_name",
")",
"}",
")"
] | Allows the admin user to change their assigned alternative | [
"Allows",
"the",
"admin",
"user",
"to",
"change",
"their",
"assigned",
"alternative"
] | 1f45e9f8a108b51e44918daa647269b2b8d43f1d | https://github.com/mixcloud/django-experiments/blob/1f45e9f8a108b51e44918daa647269b2b8d43f1d/experiments/admin.py#L112-L128 |
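A hedged sketch of exercising this view from a Django test; the URL path is an assumption (use whatever the admin's `get_urls()` actually registers), and the user must hold `experiments.change_experiment`:

```python
from django.contrib.auth import get_user_model
from django.test import Client

admin_user = get_user_model().objects.create_superuser('admin', 'a@example.com', 'pw')
client = Client()
client.force_login(admin_user)

response = client.post('/admin/experiments/experiment/set_alternative/',
                       {'experiment': 'signup_flow', 'alternative': 'variant_a'})
print(response.json())  # {'success': True, 'alternative': 'variant_a'}
```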
5,630 | mixcloud/django-experiments | experiments/admin.py | ExperimentAdmin.set_state_view | def set_state_view(self, request):
"""
Changes the experiment state
"""
if not request.user.has_perm('experiments.change_experiment'):
return HttpResponseForbidden()
try:
state = int(request.POST.get("state", ""))
except ValueError:
return HttpResponseBadRequest()
try:
experiment = Experiment.objects.get(name=request.POST.get("experiment"))
except Experiment.DoesNotExist:
return HttpResponseBadRequest()
experiment.state = state
if state == 0:
experiment.end_date = timezone.now()
else:
experiment.end_date = None
experiment.save()
return HttpResponse() | python | def set_state_view(self, request):
"""
Changes the experiment state
"""
if not request.user.has_perm('experiments.change_experiment'):
return HttpResponseForbidden()
try:
state = int(request.POST.get("state", ""))
except ValueError:
return HttpResponseBadRequest()
try:
experiment = Experiment.objects.get(name=request.POST.get("experiment"))
except Experiment.DoesNotExist:
return HttpResponseBadRequest()
experiment.state = state
if state == 0:
experiment.end_date = timezone.now()
else:
experiment.end_date = None
experiment.save()
return HttpResponse() | [
"def",
"set_state_view",
"(",
"self",
",",
"request",
")",
":",
"if",
"not",
"request",
".",
"user",
".",
"has_perm",
"(",
"'experiments.change_experiment'",
")",
":",
"return",
"HttpResponseForbidden",
"(",
")",
"try",
":",
"state",
"=",
"int",
"(",
"request",
".",
"POST",
".",
"get",
"(",
"\"state\"",
",",
"\"\"",
")",
")",
"except",
"ValueError",
":",
"return",
"HttpResponseBadRequest",
"(",
")",
"try",
":",
"experiment",
"=",
"Experiment",
".",
"objects",
".",
"get",
"(",
"name",
"=",
"request",
".",
"POST",
".",
"get",
"(",
"\"experiment\"",
")",
")",
"except",
"Experiment",
".",
"DoesNotExist",
":",
"return",
"HttpResponseBadRequest",
"(",
")",
"experiment",
".",
"state",
"=",
"state",
"if",
"state",
"==",
"0",
":",
"experiment",
".",
"end_date",
"=",
"timezone",
".",
"now",
"(",
")",
"else",
":",
"experiment",
".",
"end_date",
"=",
"None",
"experiment",
".",
"save",
"(",
")",
"return",
"HttpResponse",
"(",
")"
] | Changes the experiment state | [
"Changes",
"the",
"experiment",
"state"
] | 1f45e9f8a108b51e44918daa647269b2b8d43f1d | https://github.com/mixcloud/django-experiments/blob/1f45e9f8a108b51e44918daa647269b2b8d43f1d/experiments/admin.py#L130-L156 |
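Continuing the test-client sketch from the previous record; the path is again an assumption. Note that state `0` also stamps `end_date`:

```python
response = client.post('/admin/experiments/experiment/set_state/',
                       {'experiment': 'signup_flow', 'state': 0})
print(response.status_code)  # 200; a non-integer state returns 400
```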
5,631 | mixcloud/django-experiments | experiments/utils.py | WebUser.get_alternative | def get_alternative(self, experiment_name):
"""
Get the alternative this user is enrolled in.
"""
experiment = None
try:
# catching the KeyError instead of using .get so that the experiment is auto created if desired
experiment = experiment_manager[experiment_name]
except KeyError:
pass
if experiment:
if experiment.is_displaying_alternatives():
alternative = self._get_enrollment(experiment)
if alternative is not None:
return alternative
else:
return experiment.default_alternative
return conf.CONTROL_GROUP | python | def get_alternative(self, experiment_name):
"""
Get the alternative this user is enrolled in.
"""
experiment = None
try:
# catching the KeyError instead of using .get so that the experiment is auto created if desired
experiment = experiment_manager[experiment_name]
except KeyError:
pass
if experiment:
if experiment.is_displaying_alternatives():
alternative = self._get_enrollment(experiment)
if alternative is not None:
return alternative
else:
return experiment.default_alternative
return conf.CONTROL_GROUP | [
"def",
"get_alternative",
"(",
"self",
",",
"experiment_name",
")",
":",
"experiment",
"=",
"None",
"try",
":",
"# catching the KeyError instead of using .get so that the experiment is auto created if desired",
"experiment",
"=",
"experiment_manager",
"[",
"experiment_name",
"]",
"except",
"KeyError",
":",
"pass",
"if",
"experiment",
":",
"if",
"experiment",
".",
"is_displaying_alternatives",
"(",
")",
":",
"alternative",
"=",
"self",
".",
"_get_enrollment",
"(",
"experiment",
")",
"if",
"alternative",
"is",
"not",
"None",
":",
"return",
"alternative",
"else",
":",
"return",
"experiment",
".",
"default_alternative",
"return",
"conf",
".",
"CONTROL_GROUP"
] | Get the alternative this user is enrolled in. | [
"Get",
"the",
"alternative",
"this",
"user",
"is",
"enrolled",
"in",
"."
] | 1f45e9f8a108b51e44918daa647269b2b8d43f1d | https://github.com/mixcloud/django-experiments/blob/1f45e9f8a108b51e44918daa647269b2b8d43f1d/experiments/utils.py#L102-L119 |
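A typical view-level sketch; the experiment name and templates are illustrative:

```python
from django.shortcuts import render
from experiments.utils import participant

def signup(request):
    alternative = participant(request).get_alternative('signup_flow')
    template = ('signup_variant_a.html' if alternative == 'variant_a'
                else 'signup_control.html')  # control group is the fallback
    return render(request, template)
```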
5,632 | mixcloud/django-experiments | experiments/utils.py | WebUser.set_alternative | def set_alternative(self, experiment_name, alternative):
"""Explicitly set the alternative the user is enrolled in for the specified experiment.
This allows you to change a user between alternatives. The user and goal counts for the new
alternative will be incremented, but those for the old one will not be decremented. The user will
be enrolled in the experiment even if the experiment would not normally accept this user."""
experiment = experiment_manager.get_experiment(experiment_name)
if experiment:
self._set_enrollment(experiment, alternative) | python | def set_alternative(self, experiment_name, alternative):
"""Explicitly set the alternative the user is enrolled in for the specified experiment.
This allows you to change a user between alternatives. The user and goal counts for the new
alternative will be incremented, but those for the old one will not be decremented. The user will
be enrolled in the experiment even if the experiment would not normally accept this user."""
experiment = experiment_manager.get_experiment(experiment_name)
if experiment:
self._set_enrollment(experiment, alternative) | [
"def",
"set_alternative",
"(",
"self",
",",
"experiment_name",
",",
"alternative",
")",
":",
"experiment",
"=",
"experiment_manager",
".",
"get_experiment",
"(",
"experiment_name",
")",
"if",
"experiment",
":",
"self",
".",
"_set_enrollment",
"(",
"experiment",
",",
"alternative",
")"
] | Explicitly set the alternative the user is enrolled in for the specified experiment.
This allows you to change a user between alternatives. The user and goal counts for the new
alternative will be incremented, but those for the old one will not be decremented. The user will
be enrolled in the experiment even if the experiment would not normally accept this user. | [
"Explicitly",
"set",
"the",
"alternative",
"the",
"user",
"is",
"enrolled",
"in",
"for",
"the",
"specified",
"experiment",
"."
] | 1f45e9f8a108b51e44918daa647269b2b8d43f1d | https://github.com/mixcloud/django-experiments/blob/1f45e9f8a108b51e44918daa647269b2b8d43f1d/experiments/utils.py#L121-L129 |
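Inside a view, continuing the pattern shown earlier, forcing a participant into a specific alternative (e.g. for QA) is a one-liner:

```python
# Counts for the new alternative are incremented; old ones are not decremented.
participant(request).set_alternative('signup_flow', 'variant_a')
```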
5,633 | mixcloud/django-experiments | experiments/utils.py | WebUser.goal | def goal(self, goal_name, count=1):
"""Record that this user has performed a particular goal
This will update the goal stats for all experiments the user is enrolled in."""
for enrollment in self._get_all_enrollments():
if enrollment.experiment.is_displaying_alternatives():
self._experiment_goal(enrollment.experiment, enrollment.alternative, goal_name, count) | python | def goal(self, goal_name, count=1):
"""Record that this user has performed a particular goal
This will update the goal stats for all experiments the user is enrolled in."""
for enrollment in self._get_all_enrollments():
if enrollment.experiment.is_displaying_alternatives():
self._experiment_goal(enrollment.experiment, enrollment.alternative, goal_name, count) | [
"def",
"goal",
"(",
"self",
",",
"goal_name",
",",
"count",
"=",
"1",
")",
":",
"for",
"enrollment",
"in",
"self",
".",
"_get_all_enrollments",
"(",
")",
":",
"if",
"enrollment",
".",
"experiment",
".",
"is_displaying_alternatives",
"(",
")",
":",
"self",
".",
"_experiment_goal",
"(",
"enrollment",
".",
"experiment",
",",
"enrollment",
".",
"alternative",
",",
"goal_name",
",",
"count",
")"
] | Record that this user has performed a particular goal
This will update the goal stats for all experiments the user is enrolled in. | [
"Record",
"that",
"this",
"user",
"has",
"performed",
"a",
"particular",
"goal"
] | 1f45e9f8a108b51e44918daa647269b2b8d43f1d | https://github.com/mixcloud/django-experiments/blob/1f45e9f8a108b51e44918daa647269b2b8d43f1d/experiments/utils.py#L131-L137 |
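Recording conversions follows the same pattern; a short continuation of the same view-level sketch:

```python
participant(request).goal('completed_signup')          # count defaults to 1
participant(request).goal('items_purchased', count=3)  # weighted goal
```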
5,634 | mixcloud/django-experiments | experiments/utils.py | WebUser.incorporate | def incorporate(self, other_user):
"""Incorporate all enrollments and goals performed by the other user
If this user is not enrolled in a given experiment, the results for the
other user are incorporated. For experiments this user is already
enrolled in, the results of the other user are discarded.
This takes a relatively large amount of time for each experiment the other
user is enrolled in."""
for enrollment in other_user._get_all_enrollments():
if not self._get_enrollment(enrollment.experiment):
self._set_enrollment(enrollment.experiment, enrollment.alternative, enrollment.enrollment_date, enrollment.last_seen)
goals = self.experiment_counter.participant_goal_frequencies(enrollment.experiment, enrollment.alternative, other_user._participant_identifier())
for goal_name, count in goals:
self.experiment_counter.increment_goal_count(enrollment.experiment, enrollment.alternative, goal_name, self._participant_identifier(), count)
other_user._cancel_enrollment(enrollment.experiment) | python | def incorporate(self, other_user):
"""Incorporate all enrollments and goals performed by the other user
If this user is not enrolled in a given experiment, the results for the
other user are incorporated. For experiments this user is already
enrolled in, the results of the other user are discarded.
This takes a relatively large amount of time for each experiment the other
user is enrolled in."""
for enrollment in other_user._get_all_enrollments():
if not self._get_enrollment(enrollment.experiment):
self._set_enrollment(enrollment.experiment, enrollment.alternative, enrollment.enrollment_date, enrollment.last_seen)
goals = self.experiment_counter.participant_goal_frequencies(enrollment.experiment, enrollment.alternative, other_user._participant_identifier())
for goal_name, count in goals:
self.experiment_counter.increment_goal_count(enrollment.experiment, enrollment.alternative, goal_name, self._participant_identifier(), count)
other_user._cancel_enrollment(enrollment.experiment) | [
"def",
"incorporate",
"(",
"self",
",",
"other_user",
")",
":",
"for",
"enrollment",
"in",
"other_user",
".",
"_get_all_enrollments",
"(",
")",
":",
"if",
"not",
"self",
".",
"_get_enrollment",
"(",
"enrollment",
".",
"experiment",
")",
":",
"self",
".",
"_set_enrollment",
"(",
"enrollment",
".",
"experiment",
",",
"enrollment",
".",
"alternative",
",",
"enrollment",
".",
"enrollment_date",
",",
"enrollment",
".",
"last_seen",
")",
"goals",
"=",
"self",
".",
"experiment_counter",
".",
"participant_goal_frequencies",
"(",
"enrollment",
".",
"experiment",
",",
"enrollment",
".",
"alternative",
",",
"other_user",
".",
"_participant_identifier",
"(",
")",
")",
"for",
"goal_name",
",",
"count",
"in",
"goals",
":",
"self",
".",
"experiment_counter",
".",
"increment_goal_count",
"(",
"enrollment",
".",
"experiment",
",",
"enrollment",
".",
"alternative",
",",
"goal_name",
",",
"self",
".",
"_participant_identifier",
"(",
")",
",",
"count",
")",
"other_user",
".",
"_cancel_enrollment",
"(",
"enrollment",
".",
"experiment",
")"
] | Incorporate all enrollments and goals performed by the other user
If this user is not enrolled in a given experiment, the results for the
other user are incorporated. For experiments this user is already
enrolled in, the results of the other user are discarded.
This takes a relatively large amount of time for each experiment the other
user is enrolled in. | [
"Incorporate",
"all",
"enrollments",
"and",
"goals",
"performed",
"by",
"the",
"other",
"user"
] | 1f45e9f8a108b51e44918daa647269b2b8d43f1d | https://github.com/mixcloud/django-experiments/blob/1f45e9f8a108b51e44918daa647269b2b8d43f1d/experiments/utils.py#L143-L158 |
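A hedged sketch of the usual place to call `incorporate`: merging the anonymous session participant into the authenticated user at login. Whether `participant()` accepts `session=`/`user=` keywords is an assumption about this version of the helper:

```python
from experiments.utils import participant

def merge_on_login(request, user):
    session_participant = participant(session=request.session)
    user_participant = participant(user=user)
    # Enrollments the user already has win; the session's others are copied.
    user_participant.incorporate(session_participant)
```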
5,635 | mixcloud/django-experiments | experiments/utils.py | WebUser.visit | def visit(self):
"""Record that the user has visited the site for the purposes of retention tracking"""
for enrollment in self._get_all_enrollments():
if enrollment.experiment.is_displaying_alternatives():
# We have two different goals, VISIT_NOT_PRESENT_COUNT_GOAL and VISIT_PRESENT_COUNT_GOAL.
# VISIT_PRESENT_COUNT_GOAL will avoid firing on the first time we set last_seen as it is assumed that the user is
# on the page and therefore it would automatically trigger and be valueless.
# This should be used for experiments when we enroll the user as part of the pageview,
# alternatively we can use the NOT_PRESENT GOAL which will increment on the first pageview,
# this is mainly useful for notification actions when the user isn't initially present.
if not enrollment.last_seen:
self._experiment_goal(enrollment.experiment, enrollment.alternative, conf.VISIT_NOT_PRESENT_COUNT_GOAL, 1)
self._set_last_seen(enrollment.experiment, now())
elif now() - enrollment.last_seen >= timedelta(hours=conf.SESSION_LENGTH):
self._experiment_goal(enrollment.experiment, enrollment.alternative, conf.VISIT_NOT_PRESENT_COUNT_GOAL, 1)
self._experiment_goal(enrollment.experiment, enrollment.alternative, conf.VISIT_PRESENT_COUNT_GOAL, 1)
self._set_last_seen(enrollment.experiment, now()) | python | def visit(self):
"""Record that the user has visited the site for the purposes of retention tracking"""
for enrollment in self._get_all_enrollments():
if enrollment.experiment.is_displaying_alternatives():
# We have two different goals, VISIT_NOT_PRESENT_COUNT_GOAL and VISIT_PRESENT_COUNT_GOAL.
# VISIT_PRESENT_COUNT_GOAL will avoid firing on the first time we set last_seen as it is assumed that the user is
# on the page and therefore it would automatically trigger and be valueless.
# This should be used for experiments when we enroll the user as part of the pageview,
# alternatively we can use the NOT_PRESENT GOAL which will increment on the first pageview,
# this is mainly useful for notification actions when the user isn't initially present.
if not enrollment.last_seen:
self._experiment_goal(enrollment.experiment, enrollment.alternative, conf.VISIT_NOT_PRESENT_COUNT_GOAL, 1)
self._set_last_seen(enrollment.experiment, now())
elif now() - enrollment.last_seen >= timedelta(hours=conf.SESSION_LENGTH):
self._experiment_goal(enrollment.experiment, enrollment.alternative, conf.VISIT_NOT_PRESENT_COUNT_GOAL, 1)
self._experiment_goal(enrollment.experiment, enrollment.alternative, conf.VISIT_PRESENT_COUNT_GOAL, 1)
self._set_last_seen(enrollment.experiment, now()) | [
"def",
"visit",
"(",
"self",
")",
":",
"for",
"enrollment",
"in",
"self",
".",
"_get_all_enrollments",
"(",
")",
":",
"if",
"enrollment",
".",
"experiment",
".",
"is_displaying_alternatives",
"(",
")",
":",
"# We have two different goals, VISIT_NOT_PRESENT_COUNT_GOAL and VISIT_PRESENT_COUNT_GOAL.",
"# VISIT_PRESENT_COUNT_GOAL will avoid firing on the first time we set last_seen as it is assumed that the user is",
"# on the page and therefore it would automatically trigger and be valueless.",
"# This should be used for experiments when we enroll the user as part of the pageview,",
"# alternatively we can use the NOT_PRESENT GOAL which will increment on the first pageview,",
"# this is mainly useful for notification actions when the users isn't initially present.",
"if",
"not",
"enrollment",
".",
"last_seen",
":",
"self",
".",
"_experiment_goal",
"(",
"enrollment",
".",
"experiment",
",",
"enrollment",
".",
"alternative",
",",
"conf",
".",
"VISIT_NOT_PRESENT_COUNT_GOAL",
",",
"1",
")",
"self",
".",
"_set_last_seen",
"(",
"enrollment",
".",
"experiment",
",",
"now",
"(",
")",
")",
"elif",
"now",
"(",
")",
"-",
"enrollment",
".",
"last_seen",
">=",
"timedelta",
"(",
"hours",
"=",
"conf",
".",
"SESSION_LENGTH",
")",
":",
"self",
".",
"_experiment_goal",
"(",
"enrollment",
".",
"experiment",
",",
"enrollment",
".",
"alternative",
",",
"conf",
".",
"VISIT_NOT_PRESENT_COUNT_GOAL",
",",
"1",
")",
"self",
".",
"_experiment_goal",
"(",
"enrollment",
".",
"experiment",
",",
"enrollment",
".",
"alternative",
",",
"conf",
".",
"VISIT_PRESENT_COUNT_GOAL",
",",
"1",
")",
"self",
".",
"_set_last_seen",
"(",
"enrollment",
".",
"experiment",
",",
"now",
"(",
")",
")"
] | Record that the user has visited the site for the purposes of retention tracking | [
"Record",
"that",
"the",
"user",
"has",
"visited",
"the",
"site",
"for",
"the",
"purposes",
"of",
"retention",
"tracking"
] | 1f45e9f8a108b51e44918daa647269b2b8d43f1d | https://github.com/mixcloud/django-experiments/blob/1f45e9f8a108b51e44918daa647269b2b8d43f1d/experiments/utils.py#L160-L177 |
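A minimal middleware sketch that records retention visits on every request (django-experiments ships its own middleware; this is purely illustrative):

```python
from experiments.utils import participant

class VisitTrackingMiddleware:
    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        participant(request).visit()  # fires the VISIT_* goals described above
        return self.get_response(request)
```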
5,636 | jgrassler/mkdocs-pandoc | mkdocs_pandoc/pandoc_converter.py | PandocConverter.flatten_pages | def flatten_pages(self, pages, level=1):
"""Recursively flattens pages data structure into a one-dimensional data structure"""
flattened = []
for page in pages:
if type(page) is list:
flattened.append(
{
'file': page[0],
'title': page[1],
'level': level,
})
if type(page) is dict:
if type(list(page.values())[0]) is str:
flattened.append(
{
'file': list(page.values())[0],
'title': list(page.keys())[0],
'level': level,
})
if type(list(page.values())[0]) is list:
flattened.extend(
self.flatten_pages(
list(page.values())[0],
level + 1)
)
return flattened | python | def flatten_pages(self, pages, level=1):
"""Recursively flattens pages data structure into a one-dimensional data structure"""
flattened = []
for page in pages:
if type(page) is list:
flattened.append(
{
'file': page[0],
'title': page[1],
'level': level,
})
if type(page) is dict:
if type(list(page.values())[0]) is str:
flattened.append(
{
'file': list(page.values())[0],
'title': list(page.keys())[0],
'level': level,
})
if type(list(page.values())[0]) is list:
flattened.extend(
self.flatten_pages(
list(page.values())[0],
level + 1)
)
return flattened | [
"def",
"flatten_pages",
"(",
"self",
",",
"pages",
",",
"level",
"=",
"1",
")",
":",
"flattened",
"=",
"[",
"]",
"for",
"page",
"in",
"pages",
":",
"if",
"type",
"(",
"page",
")",
"is",
"list",
":",
"flattened",
".",
"append",
"(",
"{",
"'file'",
":",
"page",
"[",
"0",
"]",
",",
"'title'",
":",
"page",
"[",
"1",
"]",
",",
"'level'",
":",
"level",
",",
"}",
")",
"if",
"type",
"(",
"page",
")",
"is",
"dict",
":",
"if",
"type",
"(",
"list",
"(",
"page",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
")",
"is",
"str",
":",
"flattened",
".",
"append",
"(",
"{",
"'file'",
":",
"list",
"(",
"page",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
",",
"'title'",
":",
"list",
"(",
"page",
".",
"keys",
"(",
")",
")",
"[",
"0",
"]",
",",
"'level'",
":",
"level",
",",
"}",
")",
"if",
"type",
"(",
"list",
"(",
"page",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
")",
"is",
"list",
":",
"flattened",
".",
"extend",
"(",
"self",
".",
"flatten_pages",
"(",
"list",
"(",
"page",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
",",
"level",
"+",
"1",
")",
")",
"return",
"flattened"
] | Recursively flattens pages data structure into a one-dimensional data structure | [
"Recursively",
"flattens",
"pages",
"data",
"structure",
"into",
"a",
"one",
"-",
"dimensional",
"data",
"structure"
] | 11edfb90830325dca85bd0369bb8e2da8d6815b3 | https://github.com/jgrassler/mkdocs-pandoc/blob/11edfb90830325dca85bd0369bb8e2da8d6815b3/mkdocs_pandoc/pandoc_converter.py#L68-L96 |
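A worked example of the flattening, derivable directly from the code above; note that the section titles of nested groups (here 'User Guide') are not emitted themselves, only their leaf pages at a deeper level:

```python
pages = [
    ['index.md', 'Home'],                       # list form
    {'Install': 'install.md'},                  # dict form, str value
    {'User Guide': [{'Tuning': 'tuning.md'}]},  # dict form, nested list
]
# converter.flatten_pages(pages) returns:
# [{'file': 'index.md',   'title': 'Home',    'level': 1},
#  {'file': 'install.md', 'title': 'Install', 'level': 1},
#  {'file': 'tuning.md',  'title': 'Tuning',  'level': 2}]
```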
5,637 | jgrassler/mkdocs-pandoc | mkdocs_pandoc/pandoc_converter.py | PandocConverter.convert | def convert(self):
"""User-facing conversion method. Returns pandoc document as a list of
lines."""
lines = []
pages = self.flatten_pages(self.config['pages'])
f_exclude = mkdocs_pandoc.filters.exclude.ExcludeFilter(
exclude=self.exclude)
f_include = mkdocs_pandoc.filters.include.IncludeFilter(
base_path=self.config['docs_dir'],
encoding=self.encoding)
# First, do the processing that must be done on a per-file basis:
# Adjust header levels, insert chapter headings and adjust image paths.
f_headlevel = mkdocs_pandoc.filters.headlevels.HeadlevelFilter(pages)
for page in pages:
fname = os.path.join(self.config['docs_dir'], page['file'])
try:
p = codecs.open(fname, 'r', self.encoding)
except IOError as e:
raise FatalError("Couldn't open %s for reading: %s" % (fname,
e.strerror), 1)
f_chapterhead = mkdocs_pandoc.filters.chapterhead.ChapterheadFilter(
headlevel=page['level'],
title=page['title']
)
f_image = mkdocs_pandoc.filters.images.ImageFilter(
filename=page['file'],
image_path=self.config['site_dir'],
image_ext=self.image_ext)
lines_tmp = []
for line in p.readlines():
lines_tmp.append(line.rstrip())
if self.exclude:
lines_tmp = f_exclude.run(lines_tmp)
if self.filter_include:
lines_tmp = f_include.run(lines_tmp)
lines_tmp = f_headlevel.run(lines_tmp)
lines_tmp = f_chapterhead.run(lines_tmp)
lines_tmp = f_image.run(lines_tmp)
lines.extend(lines_tmp)
# Add an empty line between pages to prevent text from a previous
# file from butting up against headers in a subsequent file.
lines.append('')
# Strip anchor tags
if self.strip_anchors:
lines = mkdocs_pandoc.filters.anchors.AnchorFilter().run(lines)
# Fix cross references
if self.filter_xrefs:
lines = mkdocs_pandoc.filters.xref.XrefFilter().run(lines)
if self.filter_toc:
lines = mkdocs_pandoc.filters.toc.TocFilter().run(lines)
if self.filter_tables:
lines = mkdocs_pandoc.filters.tables.TableFilter().run(lines)
return(lines) | python | def convert(self):
"""User-facing conversion method. Returns pandoc document as a list of
lines."""
lines = []
pages = self.flatten_pages(self.config['pages'])
f_exclude = mkdocs_pandoc.filters.exclude.ExcludeFilter(
exclude=self.exclude)
f_include = mkdocs_pandoc.filters.include.IncludeFilter(
base_path=self.config['docs_dir'],
encoding=self.encoding)
# First, do the processing that must be done on a per-file basis:
# Adjust header levels, insert chapter headings and adjust image paths.
f_headlevel = mkdocs_pandoc.filters.headlevels.HeadlevelFilter(pages)
for page in pages:
fname = os.path.join(self.config['docs_dir'], page['file'])
try:
p = codecs.open(fname, 'r', self.encoding)
except IOError as e:
raise FatalError("Couldn't open %s for reading: %s" % (fname,
e.strerror), 1)
f_chapterhead = mkdocs_pandoc.filters.chapterhead.ChapterheadFilter(
headlevel=page['level'],
title=page['title']
)
f_image = mkdocs_pandoc.filters.images.ImageFilter(
filename=page['file'],
image_path=self.config['site_dir'],
image_ext=self.image_ext)
lines_tmp = []
for line in p.readlines():
lines_tmp.append(line.rstrip())
if self.exclude:
lines_tmp = f_exclude.run(lines_tmp)
if self.filter_include:
lines_tmp = f_include.run(lines_tmp)
lines_tmp = f_headlevel.run(lines_tmp)
lines_tmp = f_chapterhead.run(lines_tmp)
lines_tmp = f_image.run(lines_tmp)
lines.extend(lines_tmp)
# Add an empty line between pages to prevent text from a previous
# file from butting up against headers in a subsequent file.
lines.append('')
# Strip anchor tags
if self.strip_anchors:
lines = mkdocs_pandoc.filters.anchors.AnchorFilter().run(lines)
# Fix cross references
if self.filter_xrefs:
lines = mkdocs_pandoc.filters.xref.XrefFilter().run(lines)
if self.filter_toc:
lines = mkdocs_pandoc.filters.toc.TocFilter().run(lines)
if self.filter_tables:
lines = mkdocs_pandoc.filters.tables.TableFilter().run(lines)
return(lines) | [
"def",
"convert",
"(",
"self",
")",
":",
"lines",
"=",
"[",
"]",
"pages",
"=",
"self",
".",
"flatten_pages",
"(",
"self",
".",
"config",
"[",
"'pages'",
"]",
")",
"f_exclude",
"=",
"mkdocs_pandoc",
".",
"filters",
".",
"exclude",
".",
"ExcludeFilter",
"(",
"exclude",
"=",
"self",
".",
"exclude",
")",
"f_include",
"=",
"mkdocs_pandoc",
".",
"filters",
".",
"include",
".",
"IncludeFilter",
"(",
"base_path",
"=",
"self",
".",
"config",
"[",
"'docs_dir'",
"]",
",",
"encoding",
"=",
"self",
".",
"encoding",
")",
"# First, do the processing that must be done on a per-file basis:",
"# Adjust header levels, insert chapter headings and adjust image paths.",
"f_headlevel",
"=",
"mkdocs_pandoc",
".",
"filters",
".",
"headlevels",
".",
"HeadlevelFilter",
"(",
"pages",
")",
"for",
"page",
"in",
"pages",
":",
"fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"config",
"[",
"'docs_dir'",
"]",
",",
"page",
"[",
"'file'",
"]",
")",
"try",
":",
"p",
"=",
"codecs",
".",
"open",
"(",
"fname",
",",
"'r'",
",",
"self",
".",
"encoding",
")",
"except",
"IOError",
"as",
"e",
":",
"raise",
"FatalError",
"(",
"\"Couldn't open %s for reading: %s\"",
"%",
"(",
"fname",
",",
"e",
".",
"strerror",
")",
",",
"1",
")",
"f_chapterhead",
"=",
"mkdocs_pandoc",
".",
"filters",
".",
"chapterhead",
".",
"ChapterheadFilter",
"(",
"headlevel",
"=",
"page",
"[",
"'level'",
"]",
",",
"title",
"=",
"page",
"[",
"'title'",
"]",
")",
"f_image",
"=",
"mkdocs_pandoc",
".",
"filters",
".",
"images",
".",
"ImageFilter",
"(",
"filename",
"=",
"page",
"[",
"'file'",
"]",
",",
"image_path",
"=",
"self",
".",
"config",
"[",
"'site_dir'",
"]",
",",
"image_ext",
"=",
"self",
".",
"image_ext",
")",
"lines_tmp",
"=",
"[",
"]",
"for",
"line",
"in",
"p",
".",
"readlines",
"(",
")",
":",
"lines_tmp",
".",
"append",
"(",
"line",
".",
"rstrip",
"(",
")",
")",
"if",
"self",
".",
"exclude",
":",
"lines_tmp",
"=",
"f_exclude",
".",
"run",
"(",
"lines_tmp",
")",
"if",
"self",
".",
"filter_include",
":",
"lines_tmp",
"=",
"f_include",
".",
"run",
"(",
"lines_tmp",
")",
"lines_tmp",
"=",
"f_headlevel",
".",
"run",
"(",
"lines_tmp",
")",
"lines_tmp",
"=",
"f_chapterhead",
".",
"run",
"(",
"lines_tmp",
")",
"lines_tmp",
"=",
"f_image",
".",
"run",
"(",
"lines_tmp",
")",
"lines",
".",
"extend",
"(",
"lines_tmp",
")",
"# Add an empty line between pages to prevent text from a previous",
"# file from butting up against headers in a subsequent file.",
"lines",
".",
"append",
"(",
"''",
")",
"# Strip anchor tags",
"if",
"self",
".",
"strip_anchors",
":",
"lines",
"=",
"mkdocs_pandoc",
".",
"filters",
".",
"anchors",
".",
"AnchorFilter",
"(",
")",
".",
"run",
"(",
"lines",
")",
"# Fix cross references",
"if",
"self",
".",
"filter_xrefs",
":",
"lines",
"=",
"mkdocs_pandoc",
".",
"filters",
".",
"xref",
".",
"XrefFilter",
"(",
")",
".",
"run",
"(",
"lines",
")",
"if",
"self",
".",
"filter_toc",
":",
"lines",
"=",
"mkdocs_pandoc",
".",
"filters",
".",
"toc",
".",
"TocFilter",
"(",
")",
".",
"run",
"(",
"lines",
")",
"if",
"self",
".",
"filter_tables",
":",
"lines",
"=",
"mkdocs_pandoc",
".",
"filters",
".",
"tables",
".",
"TableFilter",
"(",
")",
".",
"run",
"(",
"lines",
")",
"return",
"(",
"lines",
")"
] | User-facing conversion method. Returns pandoc document as a list of
lines. | [
"User",
"-",
"facing",
"conversion",
"method",
".",
"Returns",
"pandoc",
"document",
"as",
"a",
"list",
"of",
"lines",
"."
] | 11edfb90830325dca85bd0369bb8e2da8d6815b3 | https://github.com/jgrassler/mkdocs-pandoc/blob/11edfb90830325dca85bd0369bb8e2da8d6815b3/mkdocs_pandoc/pandoc_converter.py#L98-L167 |
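A hedged usage sketch: join the returned lines and write them to a single pandoc source file. The constructor argument below is an assumption for illustration only; check the real `PandocConverter` signature before relying on it.

```python
from mkdocs_pandoc.pandoc_converter import PandocConverter

converter = PandocConverter(config_file='mkdocs.yml')  # assumed constructor argument
with open('book.pd', 'w', encoding='utf-8') as out:
    out.write('\n'.join(converter.convert()))
```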
5,638 | jgrassler/mkdocs-pandoc | mkdocs_pandoc/filters/tables.py | TableFilter.blocks | def blocks(self, lines):
"""Groups lines into markdown blocks"""
state = markdown.blockparser.State()
blocks = []
# We use three states: start, ``` and '\n'
state.set('start')
# index of current block
currblock = 0
for line in lines:
line += '\n'
if state.isstate('start'):
if line[:3] == '```':
state.set('```')
else:
state.set('\n')
blocks.append('')
currblock = len(blocks) - 1
else:
marker = line[:3] # Will capture either '\n' or '```'
if state.isstate(marker):
state.reset()
blocks[currblock] += line
return blocks | python | def blocks(self, lines):
"""Groups lines into markdown blocks"""
state = markdown.blockparser.State()
blocks = []
# We use three states: start, ``` and '\n'
state.set('start')
# index of current block
currblock = 0
for line in lines:
line += '\n'
if state.isstate('start'):
if line[:3] == '```':
state.set('```')
else:
state.set('\n')
blocks.append('')
currblock = len(blocks) - 1
else:
marker = line[:3] # Will capture either '\n' or '```'
if state.isstate(marker):
state.reset()
blocks[currblock] += line
return blocks | [
"def",
"blocks",
"(",
"self",
",",
"lines",
")",
":",
"state",
"=",
"markdown",
".",
"blockparser",
".",
"State",
"(",
")",
"blocks",
"=",
"[",
"]",
"# We use three states: start, ``` and '\\n'",
"state",
".",
"set",
"(",
"'start'",
")",
"# index of current block",
"currblock",
"=",
"0",
"for",
"line",
"in",
"lines",
":",
"line",
"+=",
"'\\n'",
"if",
"state",
".",
"isstate",
"(",
"'start'",
")",
":",
"if",
"line",
"[",
":",
"3",
"]",
"==",
"'```'",
":",
"state",
".",
"set",
"(",
"'```'",
")",
"else",
":",
"state",
".",
"set",
"(",
"'\\n'",
")",
"blocks",
".",
"append",
"(",
"''",
")",
"currblock",
"=",
"len",
"(",
"blocks",
")",
"-",
"1",
"else",
":",
"marker",
"=",
"line",
"[",
":",
"3",
"]",
"# Will capture either '\\n' or '```'",
"if",
"state",
".",
"isstate",
"(",
"marker",
")",
":",
"state",
".",
"reset",
"(",
")",
"blocks",
"[",
"currblock",
"]",
"+=",
"line",
"return",
"blocks"
] | Groups lines into markdown blocks | [
"Groups",
"lines",
"into",
"markdown",
"blocks"
] | 11edfb90830325dca85bd0369bb8e2da8d6815b3 | https://github.com/jgrassler/mkdocs-pandoc/blob/11edfb90830325dca85bd0369bb8e2da8d6815b3/mkdocs_pandoc/filters/tables.py#L31-L57 |
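The three-state machine is easier to trace without the `markdown.blockparser.State` dependency. This standalone sketch implements the same logic with a plain string state and shows how a fenced code block survives intact while blank lines split paragraphs:

```python
def blocks(lines):
    # Same three states as above: 'start', '```' and '\n'.
    result, state = [], 'start'
    for line in lines:
        line += '\n'
        if state == 'start':
            # A fence opens a code block; anything else opens a paragraph.
            state = '```' if line[:3] == '```' else '\n'
            result.append('')
        elif line[:3] == state:
            state = 'start'  # blank line or closing fence ends the block
        result[-1] += line
    return result

sample = ['First paragraph.', '', '```', 'code line', '```', 'Second one.']
for block in blocks(sample):
    print(repr(block))
# 'First paragraph.\n\n'
# '```\ncode line\n```\n'
# 'Second one.\n'
```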
5,639 | jgrassler/mkdocs-pandoc | mkdocs_pandoc/filters/tables.py | TableFilter.ruler_line | def ruler_line(self, widths, linetype='-'):
"""Generates a ruler line for separating rows from each other"""
cells = []
for w in widths:
cells.append(linetype * (w+2))
return '+' + '+'.join(cells) + '+' | python | def ruler_line(self, widths, linetype='-'):
"""Generates a ruler line for separating rows from each other"""
cells = []
for w in widths:
cells.append(linetype * (w+2))
return '+' + '+'.join(cells) + '+' | [
"def",
"ruler_line",
"(",
"self",
",",
"widths",
",",
"linetype",
"=",
"'-'",
")",
":",
"cells",
"=",
"[",
"]",
"for",
"w",
"in",
"widths",
":",
"cells",
".",
"append",
"(",
"linetype",
"*",
"(",
"w",
"+",
"2",
")",
")",
"return",
"'+'",
"+",
"'+'",
".",
"join",
"(",
"cells",
")",
"+",
"'+'"
] | Generates a ruler line for separating rows from each other | [
"Generates",
"a",
"ruler",
"line",
"for",
"separating",
"rows",
"from",
"each",
"other"
] | 11edfb90830325dca85bd0369bb8e2da8d6815b3 | https://github.com/jgrassler/mkdocs-pandoc/blob/11edfb90830325dca85bd0369bb8e2da8d6815b3/mkdocs_pandoc/filters/tables.py#L182-L187 |
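Each column is padded by two (for the spaces around the cell content) and the pieces are joined with `+`. A standalone check, with `self` dropped:

```python
def ruler_line(widths, linetype='-'):
    # Standalone copy of the method above.
    return '+' + '+'.join(linetype * (w + 2) for w in widths) + '+'

print(ruler_line([4, 6]))       # +------+--------+
print(ruler_line([4, 6], '='))  # +======+========+
```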
5,640 | jgrassler/mkdocs-pandoc | mkdocs_pandoc/filters/tables.py | TableFilter.wrap_row | def wrap_row(self, widths, row, width_default=None):
"""Wraps a single line table row into a fixed width, multi-line table."""
lines = []
longest = 0 # longest wrapped column in row
if not width_default:
width_default = self.width_default
# Wrap column contents
for i in range(0, len(row)):
w=width_default # column width
# Only set column width dynamically for non-rogue rows
if i < len(widths):
w = widths[i]
tw = textwrap.TextWrapper(width=w, break_on_hyphens=False)
# Wrap and left-justify
row[i] = tw.wrap(textwrap.dedent(row[i]))
# Pad with spaces up to the fixed column width
for l in range(0, len(row[i])):
row[i][l] += (w - len(row[i][l])) * ' '
if len(row[i]) > longest:
longest = len(row[i])
# Pad all columns to have the same number of lines
for i in range(0, len(row)):
w=width_default # column width
# Only set column width dynamically for non-rogue rows
if i < len(widths):
w = widths[i]
if len(row[i]) < longest:
for j in range(len(row[i]), longest):
row[i].append(w * ' ')
for l in range(0,longest):
line = []
for c in range(len(row)):
line.append(row[c][l])
line = '| ' + ' | '.join(line) + ' |'
lines.append(line)
return lines | python | def wrap_row(self, widths, row, width_default=None):
"""Wraps a single line table row into a fixed width, multi-line table."""
lines = []
longest = 0 # longest wrapped column in row
if not width_default:
width_default = self.width_default
# Wrap column contents
for i in range(0, len(row)):
w=width_default # column width
# Only set column width dynamically for non-rogue rows
if i < len(widths):
w = widths[i]
tw = textwrap.TextWrapper(width=w, break_on_hyphens=False)
# Wrap and left-justify
row[i] = tw.wrap(textwrap.dedent(row[i]))
# Pad with spaces up to the fixed column width
for l in range(0, len(row[i])):
row[i][l] += (w - len(row[i][l])) * ' '
if len(row[i]) > longest:
longest = len(row[i])
# Pad all columns to have the same number of lines
for i in range(0, len(row)):
w=width_default # column width
# Only set column width dynamically for non-rogue rows
if i < len(widths):
w = widths[i]
if len(row[i]) < longest:
for j in range(len(row[i]), longest):
row[i].append(w * ' ')
for l in range(0,longest):
line = []
for c in range(len(row)):
line.append(row[c][l])
line = '| ' + ' | '.join(line) + ' |'
lines.append(line)
return lines | [
"def",
"wrap_row",
"(",
"self",
",",
"widths",
",",
"row",
",",
"width_default",
"=",
"None",
")",
":",
"lines",
"=",
"[",
"]",
"longest",
"=",
"0",
"# longest wrapped column in row",
"if",
"not",
"width_default",
":",
"width_default",
"=",
"self",
".",
"width_default",
"# Wrap column contents",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"row",
")",
")",
":",
"w",
"=",
"width_default",
"# column width",
"# Only set column width dynamicaly for non-rogue rows",
"if",
"i",
"<",
"len",
"(",
"widths",
")",
":",
"w",
"=",
"widths",
"[",
"i",
"]",
"tw",
"=",
"textwrap",
".",
"TextWrapper",
"(",
"width",
"=",
"w",
",",
"break_on_hyphens",
"=",
"False",
")",
"# Wrap and left-justify",
"row",
"[",
"i",
"]",
"=",
"tw",
".",
"wrap",
"(",
"textwrap",
".",
"dedent",
"(",
"row",
"[",
"i",
"]",
")",
")",
"# Pad with spaces up to to fixed column width",
"for",
"l",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"row",
"[",
"i",
"]",
")",
")",
":",
"row",
"[",
"i",
"]",
"[",
"l",
"]",
"+=",
"(",
"w",
"-",
"len",
"(",
"row",
"[",
"i",
"]",
"[",
"l",
"]",
")",
")",
"*",
"' '",
"if",
"len",
"(",
"row",
"[",
"i",
"]",
")",
">",
"longest",
":",
"longest",
"=",
"len",
"(",
"row",
"[",
"i",
"]",
")",
"# Pad all columns to have the same number of lines",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"row",
")",
")",
":",
"w",
"=",
"width_default",
"# column width",
"# Only set column width dynamicaly for non-rogue rows",
"if",
"i",
"<",
"len",
"(",
"widths",
")",
":",
"w",
"=",
"widths",
"[",
"i",
"]",
"if",
"len",
"(",
"row",
"[",
"i",
"]",
")",
"<",
"longest",
":",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"row",
"[",
"i",
"]",
")",
",",
"longest",
")",
":",
"row",
"[",
"i",
"]",
".",
"append",
"(",
"w",
"*",
"' '",
")",
"for",
"l",
"in",
"range",
"(",
"0",
",",
"longest",
")",
":",
"line",
"=",
"[",
"]",
"for",
"c",
"in",
"range",
"(",
"len",
"(",
"row",
")",
")",
":",
"line",
".",
"append",
"(",
"row",
"[",
"c",
"]",
"[",
"l",
"]",
")",
"line",
"=",
"'| '",
"+",
"' | '",
".",
"join",
"(",
"line",
")",
"+",
"' |'",
"lines",
".",
"append",
"(",
"line",
")",
"return",
"lines"
] | Wraps a single line table row into a fixed width, multi-line table. | [
"Wraps",
"a",
"single",
"line",
"table",
"row",
"into",
"a",
"fixed",
"width",
"multi",
"-",
"line",
"table",
"."
] | 11edfb90830325dca85bd0369bb8e2da8d6815b3 | https://github.com/jgrassler/mkdocs-pandoc/blob/11edfb90830325dca85bd0369bb8e2da8d6815b3/mkdocs_pandoc/filters/tables.py#L190-L234 |
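Called on one logical row, the method emits the multi-line grid-table body for that row. The sketch below assumes `TableFilter` can be constructed without arguments (an assumption), and note that `wrap_row` mutates the `row` list in place:

```python
from mkdocs_pandoc.filters.tables import TableFilter

tf = TableFilter()  # assumption: a no-argument constructor works
for line in tf.wrap_row([6, 12], ['name', 'a fairly long description']):
    print(line)
# | name   | a fairly     |
# |        | long         |
# |        | description  |
```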
5,641 | mishbahr/djangocms-forms | djangocms_forms/admin.py | FormSubmissionAdmin.render_export_form | def render_export_form(self, request, context, form_url=''):
"""
Render the form submission export form.
"""
context.update({
'has_change_permission': self.has_change_permission(request),
'form_url': mark_safe(form_url),
'opts': self.opts,
'add': True,
'save_on_top': self.save_on_top,
})
return TemplateResponse(request, self.export_form_template, context) | python | def render_export_form(self, request, context, form_url=''):
"""
Render the form submission export form.
"""
context.update({
'has_change_permission': self.has_change_permission(request),
'form_url': mark_safe(form_url),
'opts': self.opts,
'add': True,
'save_on_top': self.save_on_top,
})
return TemplateResponse(request, self.export_form_template, context) | [
"def",
"render_export_form",
"(",
"self",
",",
"request",
",",
"context",
",",
"form_url",
"=",
"''",
")",
":",
"context",
".",
"update",
"(",
"{",
"'has_change_permission'",
":",
"self",
".",
"has_change_permission",
"(",
"request",
")",
",",
"'form_url'",
":",
"mark_safe",
"(",
"form_url",
")",
",",
"'opts'",
":",
"self",
".",
"opts",
",",
"'add'",
":",
"True",
",",
"'save_on_top'",
":",
"self",
".",
"save_on_top",
",",
"}",
")",
"return",
"TemplateResponse",
"(",
"request",
",",
"self",
".",
"export_form_template",
",",
"context",
")"
] | Render the form submission export form. | [
"Render",
"the",
"from",
"submission",
"export",
"form",
"."
] | 9d7a4ef9769fd5e1526921c084d6da7b8070a2c1 | https://github.com/mishbahr/djangocms-forms/blob/9d7a4ef9769fd5e1526921c084d6da7b8070a2c1/djangocms_forms/admin.py#L260-L272 |
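A hedged sketch of how a custom admin view might hand off to this helper; the view name and context keys are invented for illustration:

```python
# Hypothetical method on FormSubmissionAdmin; names and context keys are assumptions.
def export_view(self, request):
    context = {'title': 'Export form submissions'}
    return self.render_export_form(request, context, form_url='')
```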
5,642 | mishbahr/djangocms-forms | djangocms_forms/forms.py | FormDefinitionAdminForm.clean_form_template | def clean_form_template(self):
""" Check if template exists """
form_template = self.cleaned_data.get('form_template', '')
if form_template:
try:
get_template(form_template)
except TemplateDoesNotExist:
msg = _('Selected Form Template does not exist.')
raise forms.ValidationError(msg)
return form_template | python | def clean_form_template(self):
""" Check if template exists """
form_template = self.cleaned_data.get('form_template', '')
if form_template:
try:
get_template(form_template)
except TemplateDoesNotExist:
msg = _('Selected Form Template does not exist.')
raise forms.ValidationError(msg)
return form_template | [
"def",
"clean_form_template",
"(",
"self",
")",
":",
"form_template",
"=",
"self",
".",
"cleaned_data",
".",
"get",
"(",
"'form_template'",
",",
"''",
")",
"if",
"form_template",
":",
"try",
":",
"get_template",
"(",
"form_template",
")",
"except",
"TemplateDoesNotExist",
":",
"msg",
"=",
"_",
"(",
"'Selected Form Template does not exist.'",
")",
"raise",
"forms",
".",
"ValidationError",
"(",
"msg",
")",
"return",
"form_template"
] | Check if template exists | [
"Check",
"if",
"template",
"exists"
] | 9d7a4ef9769fd5e1526921c084d6da7b8070a2c1 | https://github.com/mishbahr/djangocms-forms/blob/9d7a4ef9769fd5e1526921c084d6da7b8070a2c1/djangocms_forms/forms.py#L75-L84 |
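The same `clean_<field>` pattern fits any field that should name a loadable template. A minimal self-contained sketch (the form and field names are invented; running the clean method needs a configured Django template engine):

```python
from django import forms
from django.template import TemplateDoesNotExist
from django.template.loader import get_template

class EmailSettingsForm(forms.Form):
    email_template = forms.CharField(required=False)  # hypothetical field

    def clean_email_template(self):
        path = self.cleaned_data.get('email_template', '')
        if path:
            try:
                get_template(path)  # raises if the template cannot be loaded
            except TemplateDoesNotExist:
                raise forms.ValidationError('Selected template does not exist.')
        return path
```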
5,643 | nikdon/pyEntropy | pyentrp/entropy.py | _embed | def _embed(x, order=3, delay=1):
"""Time-delay embedding.
Parameters
----------
x : 1d-array, shape (n_times)
Time series
order : int
Embedding dimension (order)
delay : int
Delay.
Returns
-------
embedded : ndarray, shape (n_times - (order - 1) * delay, order)
Embedded time-series.
"""
N = len(x)
Y = np.empty((order, N - (order - 1) * delay))
for i in range(order):
Y[i] = x[i * delay:i * delay + Y.shape[1]]
return Y.T | python | def _embed(x, order=3, delay=1):
"""Time-delay embedding.
Parameters
----------
x : 1d-array, shape (n_times)
Time series
order : int
Embedding dimension (order)
delay : int
Delay.
Returns
-------
embedded : ndarray, shape (n_times - (order - 1) * delay, order)
Embedded time-series.
"""
N = len(x)
Y = np.empty((order, N - (order - 1) * delay))
for i in range(order):
Y[i] = x[i * delay:i * delay + Y.shape[1]]
return Y.T | [
"def",
"_embed",
"(",
"x",
",",
"order",
"=",
"3",
",",
"delay",
"=",
"1",
")",
":",
"N",
"=",
"len",
"(",
"x",
")",
"Y",
"=",
"np",
".",
"empty",
"(",
"(",
"order",
",",
"N",
"-",
"(",
"order",
"-",
"1",
")",
"*",
"delay",
")",
")",
"for",
"i",
"in",
"range",
"(",
"order",
")",
":",
"Y",
"[",
"i",
"]",
"=",
"x",
"[",
"i",
"*",
"delay",
":",
"i",
"*",
"delay",
"+",
"Y",
".",
"shape",
"[",
"1",
"]",
"]",
"return",
"Y",
".",
"T"
] | Time-delay embedding.
Parameters
----------
x : 1d-array, shape (n_times)
Time series
order : int
Embedding dimension (order)
delay : int
Delay.
Returns
-------
embedded : ndarray, shape (n_times - (order - 1) * delay, order)
Embedded time-series. | [
"Time",
"-",
"delay",
"embedding",
"."
] | ae2bf71c2e5b6edb2e468ff52183b30acf7073e6 | https://github.com/nikdon/pyEntropy/blob/ae2bf71c2e5b6edb2e468ff52183b30acf7073e6/pyentrp/entropy.py#L10-L31 |
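A quick shape check; `_embed` is module-private, so the call below assumes direct access to the `pyentrp.entropy` internals:

```python
import numpy as np

x = np.array([1, 2, 3, 4, 5, 6])
print(_embed(x, order=3, delay=1))
# [[1. 2. 3.]
#  [2. 3. 4.]
#  [3. 4. 5.]
#  [4. 5. 6.]]   shape: (6 - (3 - 1) * 1, 3) == (4, 3)
```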
5,644 | nikdon/pyEntropy | pyentrp/entropy.py | util_pattern_space | def util_pattern_space(time_series, lag, dim):
"""Create a set of sequences with given lag and dimension
Args:
time_series: Vector or string of the sample data
lag: Lag between beginning of sequences
dim: Dimension (number of patterns)
Returns:
2D array of vectors
"""
n = len(time_series)
if lag * dim > n:
raise Exception('Result matrix exceeded size limit, try to change lag or dim.')
elif lag < 1:
raise Exception('Lag should be greater or equal to 1.')
pattern_space = np.empty((n - lag * (dim - 1), dim))
for i in range(n - lag * (dim - 1)):
for j in range(dim):
pattern_space[i][j] = time_series[i + j * lag]
return pattern_space | python | def util_pattern_space(time_series, lag, dim):
"""Create a set of sequences with given lag and dimension
Args:
time_series: Vector or string of the sample data
lag: Lag between beginning of sequences
dim: Dimension (number of patterns)
Returns:
2D array of vectors
"""
n = len(time_series)
if lag * dim > n:
raise Exception('Result matrix exceeded size limit, try to change lag or dim.')
elif lag < 1:
raise Exception('Lag should be greater or equal to 1.')
pattern_space = np.empty((n - lag * (dim - 1), dim))
for i in range(n - lag * (dim - 1)):
for j in range(dim):
pattern_space[i][j] = time_series[i + j * lag]
return pattern_space | [
"def",
"util_pattern_space",
"(",
"time_series",
",",
"lag",
",",
"dim",
")",
":",
"n",
"=",
"len",
"(",
"time_series",
")",
"if",
"lag",
"*",
"dim",
">",
"n",
":",
"raise",
"Exception",
"(",
"'Result matrix exceeded size limit, try to change lag or dim.'",
")",
"elif",
"lag",
"<",
"1",
":",
"raise",
"Exception",
"(",
"'Lag should be greater or equal to 1.'",
")",
"pattern_space",
"=",
"np",
".",
"empty",
"(",
"(",
"n",
"-",
"lag",
"*",
"(",
"dim",
"-",
"1",
")",
",",
"dim",
")",
")",
"for",
"i",
"in",
"range",
"(",
"n",
"-",
"lag",
"*",
"(",
"dim",
"-",
"1",
")",
")",
":",
"for",
"j",
"in",
"range",
"(",
"dim",
")",
":",
"pattern_space",
"[",
"i",
"]",
"[",
"j",
"]",
"=",
"time_series",
"[",
"i",
"+",
"j",
"*",
"lag",
"]",
"return",
"pattern_space"
] | Create a set of sequences with given lag and dimension
Args:
time_series: Vector or string of the sample data
lag: Lag between beginning of sequences
dim: Dimension (number of patterns)
Returns:
2D array of vectors | [
"Create",
"a",
"set",
"of",
"sequences",
"with",
"given",
"lag",
"and",
"dimension"
] | ae2bf71c2e5b6edb2e468ff52183b30acf7073e6 | https://github.com/nikdon/pyEntropy/blob/ae2bf71c2e5b6edb2e468ff52183b30acf7073e6/pyentrp/entropy.py#L34-L57 |
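For example (the `from pyentrp import entropy as ent` import path is inferred from the repository layout):

```python
from pyentrp import entropy as ent

print(ent.util_pattern_space([1, 2, 3, 4, 5], lag=1, dim=3))
# [[1. 2. 3.]
#  [2. 3. 4.]
#  [3. 4. 5.]]
```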
5,645 | nikdon/pyEntropy | pyentrp/entropy.py | util_granulate_time_series | def util_granulate_time_series(time_series, scale):
"""Extract coarse-grained time series
Args:
time_series: Time series
scale: Scale factor
Returns:
Vector of coarse-grained time series with given scale factor
"""
n = len(time_series)
b = int(np.fix(n / scale))
temp = np.reshape(time_series[0:b*scale], (b, scale))
cts = np.mean(temp, axis = 1)
return cts | python | def util_granulate_time_series(time_series, scale):
"""Extract coarse-grained time series
Args:
time_series: Time series
scale: Scale factor
Returns:
Vector of coarse-grained time series with given scale factor
"""
n = len(time_series)
b = int(np.fix(n / scale))
temp = np.reshape(time_series[0:b*scale], (b, scale))
cts = np.mean(temp, axis = 1)
return cts | [
"def",
"util_granulate_time_series",
"(",
"time_series",
",",
"scale",
")",
":",
"n",
"=",
"len",
"(",
"time_series",
")",
"b",
"=",
"int",
"(",
"np",
".",
"fix",
"(",
"n",
"/",
"scale",
")",
")",
"temp",
"=",
"np",
".",
"reshape",
"(",
"time_series",
"[",
"0",
":",
"b",
"*",
"scale",
"]",
",",
"(",
"b",
",",
"scale",
")",
")",
"cts",
"=",
"np",
".",
"mean",
"(",
"temp",
",",
"axis",
"=",
"1",
")",
"return",
"cts"
] | Extract coarse-grained time series
Args:
time_series: Time series
scale: Scale factor
Returns:
Vector of coarse-grained time series with given scale factor | [
"Extract",
"coarse",
"-",
"grained",
"time",
"series"
] | ae2bf71c2e5b6edb2e468ff52183b30acf7073e6 | https://github.com/nikdon/pyEntropy/blob/ae2bf71c2e5b6edb2e468ff52183b30acf7073e6/pyentrp/entropy.py#L64-L78 |
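Coarse-graining averages consecutive non-overlapping windows and drops any remainder (import path inferred from the repository layout):

```python
from pyentrp import entropy as ent

print(ent.util_granulate_time_series([1, 2, 3, 4, 5, 6, 7], scale=2))
# [1.5 3.5 5.5]   (7 // 2 == 3 windows; the trailing 7 is discarded)
```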
5,646 | nikdon/pyEntropy | pyentrp/entropy.py | shannon_entropy | def shannon_entropy(time_series):
"""Return the Shannon Entropy of the sample data.
Args:
time_series: Vector or string of the sample data
Returns:
The Shannon Entropy as float value
"""
# Check if string
if not isinstance(time_series, str):
time_series = list(time_series)
# Create frequency data
data_set = list(set(time_series))
freq_list = []
for entry in data_set:
counter = 0.
for i in time_series:
if i == entry:
counter += 1
freq_list.append(float(counter) / len(time_series))
# Shannon entropy
ent = 0.0
for freq in freq_list:
ent += freq * np.log2(freq)
ent = -ent
return ent | python | def shannon_entropy(time_series):
"""Return the Shannon Entropy of the sample data.
Args:
time_series: Vector or string of the sample data
Returns:
The Shannon Entropy as float value
"""
# Check if string
if not isinstance(time_series, str):
time_series = list(time_series)
# Create frequency data
data_set = list(set(time_series))
freq_list = []
for entry in data_set:
counter = 0.
for i in time_series:
if i == entry:
counter += 1
freq_list.append(float(counter) / len(time_series))
# Shannon entropy
ent = 0.0
for freq in freq_list:
ent += freq * np.log2(freq)
ent = -ent
return ent | [
"def",
"shannon_entropy",
"(",
"time_series",
")",
":",
"# Check if string",
"if",
"not",
"isinstance",
"(",
"time_series",
",",
"str",
")",
":",
"time_series",
"=",
"list",
"(",
"time_series",
")",
"# Create a frequency data",
"data_set",
"=",
"list",
"(",
"set",
"(",
"time_series",
")",
")",
"freq_list",
"=",
"[",
"]",
"for",
"entry",
"in",
"data_set",
":",
"counter",
"=",
"0.",
"for",
"i",
"in",
"time_series",
":",
"if",
"i",
"==",
"entry",
":",
"counter",
"+=",
"1",
"freq_list",
".",
"append",
"(",
"float",
"(",
"counter",
")",
"/",
"len",
"(",
"time_series",
")",
")",
"# Shannon entropy",
"ent",
"=",
"0.0",
"for",
"freq",
"in",
"freq_list",
":",
"ent",
"+=",
"freq",
"*",
"np",
".",
"log2",
"(",
"freq",
")",
"ent",
"=",
"-",
"ent",
"return",
"ent"
] | Return the Shannon Entropy of the sample data.
Args:
time_series: Vector or string of the sample data
Returns:
The Shannon Entropy as float value | [
"Return",
"the",
"Shannon",
"Entropy",
"of",
"the",
"sample",
"data",
"."
] | ae2bf71c2e5b6edb2e468ff52183b30acf7073e6 | https://github.com/nikdon/pyEntropy/blob/ae2bf71c2e5b6edb2e468ff52183b30acf7073e6/pyentrp/entropy.py#L81-L110 |
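Two boundary cases make the definition concrete (import path inferred from the repository layout):

```python
from pyentrp import entropy as ent

print(ent.shannon_entropy('aabb'))     # 1.0, two equiprobable symbols = 1 bit
print(ent.shannon_entropy([7, 7, 7]))  # -0.0, a constant series carries no information
```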
5,647 | nikdon/pyEntropy | pyentrp/entropy.py | sample_entropy | def sample_entropy(time_series, sample_length, tolerance = None):
"""Calculates the sample entropy of degree m of a time_series.
This method uses the Chebyshev norm.
It is quite fast for random data, but can be slower if there is
structure in the input time series.
Args:
time_series: numpy array of time series
sample_length: length of longest template vector
tolerance: tolerance (defaults to 0.1 * std(time_series)))
Returns:
Array of sample entropies:
SE[k] is ratio "#templates of length k+1" / "#templates of length k"
where #templates of length 0" = n*(n - 1) / 2, by definition
Note:
The parameter 'sample_length' is equal to m + 1 in Ref[1].
References:
[1] http://en.wikipedia.org/wiki/Sample_Entropy
[2] http://physionet.incor.usp.br/physiotools/sampen/
[3] Madalena Costa, Ary Goldberger, CK Peng. Multiscale entropy analysis
of biological signals
"""
#The code below follows the sample length convention of Ref [1] so:
M = sample_length - 1
time_series = np.array(time_series)
if tolerance is None:
tolerance = 0.1*np.std(time_series)
n = len(time_series)
#Ntemp is a vector that holds the number of matches. Ntemp[k] holds the number of matching templates of length k
Ntemp = np.zeros(M + 2)
#Templates of length 0 match by definition:
Ntemp[0] = n*(n - 1) / 2
for i in range(n - M - 1):
template = time_series[i:(i+M+1)]  #We have 'M+1' elements in the template
rem_time_series = time_series[i+1:]
searchlist = np.nonzero(np.abs(rem_time_series - template[0]) < tolerance)[0]
go = len(searchlist) > 0
length = 1
Ntemp[length] += len(searchlist)
while go:
length += 1
nextindxlist = searchlist + 1
nextindxlist = nextindxlist[nextindxlist < n - 1 - i]  #Remove candidates too close to the end
nextcandidates = rem_time_series[nextindxlist]
hitlist = np.abs(nextcandidates - template[length-1]) < tolerance
searchlist = nextindxlist[hitlist]
Ntemp[length] += np.sum(hitlist)
go = any(hitlist) and length < M + 1
sampen = - np.log(Ntemp[1:] / Ntemp[:-1])
return sampen | python | def sample_entropy(time_series, sample_length, tolerance = None):
"""Calculates the sample entropy of degree m of a time_series.
This method uses the Chebyshev norm.
It is quite fast for random data, but can be slower if there is
structure in the input time series.
Args:
time_series: numpy array of time series
sample_length: length of longest template vector
tolerance: tolerance (defaults to 0.1 * std(time_series)))
Returns:
Array of sample entropies:
SE[k] is ratio "#templates of length k+1" / "#templates of length k"
where #templates of length 0" = n*(n - 1) / 2, by definition
Note:
The parameter 'sample_length' is equal to m + 1 in Ref[1].
References:
[1] http://en.wikipedia.org/wiki/Sample_Entropy
[2] http://physionet.incor.usp.br/physiotools/sampen/
[3] Madalena Costa, Ary Goldberger, CK Peng. Multiscale entropy analysis
of biological signals
"""
#The code below follows the sample length convention of Ref [1] so:
M = sample_length - 1
time_series = np.array(time_series)
if tolerance is None:
tolerance = 0.1*np.std(time_series)
n = len(time_series)
#Ntemp is a vector that holds the number of matches. Ntemp[k] holds the number of matching templates of length k
Ntemp = np.zeros(M + 2)
#Templates of length 0 match by definition:
Ntemp[0] = n*(n - 1) / 2
for i in range(n - M - 1):
template = time_series[i:(i+M+1)]  #We have 'M+1' elements in the template
rem_time_series = time_series[i+1:]
searchlist = np.nonzero(np.abs(rem_time_series - template[0]) < tolerance)[0]
go = len(searchlist) > 0
length = 1
Ntemp[length] += len(searchlist)
while go:
length += 1
nextindxlist = searchlist + 1
nextindxlist = nextindxlist[nextindxlist < n - 1 - i]  #Remove candidates too close to the end
nextcandidates = rem_time_series[nextindxlist]
hitlist = np.abs(nextcandidates - template[length-1]) < tolerance
searchlist = nextindxlist[hitlist]
Ntemp[length] += np.sum(hitlist)
go = any(hitlist) and length < M + 1
sampen = - np.log(Ntemp[1:] / Ntemp[:-1])
return sampen | [
"def",
"sample_entropy",
"(",
"time_series",
",",
"sample_length",
",",
"tolerance",
"=",
"None",
")",
":",
"#The code below follows the sample length convention of Ref [1] so:",
"M",
"=",
"sample_length",
"-",
"1",
"time_series",
"=",
"np",
".",
"array",
"(",
"time_series",
")",
"if",
"tolerance",
"is",
"None",
":",
"tolerance",
"=",
"0.1",
"*",
"np",
".",
"std",
"(",
"time_series",
")",
"n",
"=",
"len",
"(",
"time_series",
")",
"#Ntemp is a vector that holds the number of matches. N[k] holds matches templates of length k",
"Ntemp",
"=",
"np",
".",
"zeros",
"(",
"M",
"+",
"2",
")",
"#Templates of length 0 matches by definition:",
"Ntemp",
"[",
"0",
"]",
"=",
"n",
"*",
"(",
"n",
"-",
"1",
")",
"/",
"2",
"for",
"i",
"in",
"range",
"(",
"n",
"-",
"M",
"-",
"1",
")",
":",
"template",
"=",
"time_series",
"[",
"i",
":",
"(",
"i",
"+",
"M",
"+",
"1",
")",
"]",
"#We have 'M+1' elements in the template",
"rem_time_series",
"=",
"time_series",
"[",
"i",
"+",
"1",
":",
"]",
"searchlist",
"=",
"np",
".",
"nonzero",
"(",
"np",
".",
"abs",
"(",
"rem_time_series",
"-",
"template",
"[",
"0",
"]",
")",
"<",
"tolerance",
")",
"[",
"0",
"]",
"go",
"=",
"len",
"(",
"searchlist",
")",
">",
"0",
"length",
"=",
"1",
"Ntemp",
"[",
"length",
"]",
"+=",
"len",
"(",
"searchlist",
")",
"while",
"go",
":",
"length",
"+=",
"1",
"nextindxlist",
"=",
"searchlist",
"+",
"1",
"nextindxlist",
"=",
"nextindxlist",
"[",
"nextindxlist",
"<",
"n",
"-",
"1",
"-",
"i",
"]",
"#Remove candidates too close to the end",
"nextcandidates",
"=",
"rem_time_series",
"[",
"nextindxlist",
"]",
"hitlist",
"=",
"np",
".",
"abs",
"(",
"nextcandidates",
"-",
"template",
"[",
"length",
"-",
"1",
"]",
")",
"<",
"tolerance",
"searchlist",
"=",
"nextindxlist",
"[",
"hitlist",
"]",
"Ntemp",
"[",
"length",
"]",
"+=",
"np",
".",
"sum",
"(",
"hitlist",
")",
"go",
"=",
"any",
"(",
"hitlist",
")",
"and",
"length",
"<",
"M",
"+",
"1",
"sampen",
"=",
"-",
"np",
".",
"log",
"(",
"Ntemp",
"[",
"1",
":",
"]",
"/",
"Ntemp",
"[",
":",
"-",
"1",
"]",
")",
"return",
"sampen"
] | Calculates the sample entropy of degree m of a time_series.
This method uses the Chebyshev norm.
It is quite fast for random data, but can be slower if there is
structure in the input time series.
Args:
time_series: numpy array of time series
sample_length: length of longest template vector
tolerance: tolerance (defaults to 0.1 * std(time_series)))
Returns:
Array of sample entropies:
SE[k] is ratio "#templates of length k+1" / "#templates of length k"
where #templates of length 0" = n*(n - 1) / 2, by definition
Note:
The parameter 'sample_length' is equal to m + 1 in Ref[1].
References:
[1] http://en.wikipedia.org/wiki/Sample_Entropy
[2] http://physionet.incor.usp.br/physiotools/sampen/
[3] Madalena Costa, Ary Goldberger, CK Peng. Multiscale entropy analysis
of biological signals | [
"Calculates",
"the",
"sample",
"entropy",
"of",
"degree",
"m",
"of",
"a",
"time_series",
"."
] | ae2bf71c2e5b6edb2e468ff52183b30acf7073e6 | https://github.com/nikdon/pyEntropy/blob/ae2bf71c2e5b6edb2e468ff52183b30acf7073e6/pyentrp/entropy.py#L113-L179 |
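A typical call on noisy data; the exact values depend on the random draw, so none are asserted here (import path inferred from the repository layout):

```python
import numpy as np
from pyentrp import entropy as ent

np.random.seed(0)
noise = np.random.randn(1000)
# Returns `sample_length` values; the last one is the usual SampEn
# for template length m = sample_length - 1.
print(ent.sample_entropy(noise, sample_length=2))
```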
5,648 | nikdon/pyEntropy | pyentrp/entropy.py | multiscale_entropy | def multiscale_entropy(time_series, sample_length, tolerance = None, maxscale = None):
"""Calculate the Multiscale Entropy of the given time series considering
different time-scales of the time series.
Args:
time_series: Time series for analysis
sample_length: Bandwidth or group of points
tolerance: Tolerance (default = 0.1*std(time_series))
Returns:
Vector containing Multiscale Entropy
Reference:
[1] http://en.pudn.com/downloads149/sourcecode/math/detail646216_en.html
"""
if tolerance is None:
#we need to fix the tolerance at this level. If it remains 'None' it will be changed in call to sample_entropy()
tolerance = 0.1*np.std(time_series)
if maxscale is None:
maxscale = len(time_series)
mse = np.zeros(maxscale)
for i in range(maxscale):
temp = util_granulate_time_series(time_series, i+1)
mse[i] = sample_entropy(temp, sample_length, tolerance)[-1]
return mse | python | def multiscale_entropy(time_series, sample_length, tolerance = None, maxscale = None):
"""Calculate the Multiscale Entropy of the given time series considering
different time-scales of the time series.
Args:
time_series: Time series for analysis
sample_length: Bandwidth or group of points
tolerance: Tolerance (default = 0.1*std(time_series))
Returns:
Vector containing Multiscale Entropy
Reference:
[1] http://en.pudn.com/downloads149/sourcecode/math/detail646216_en.html
"""
if tolerance is None:
# We need to fix the tolerance at this level; if it remains 'None' it will be changed in the call to sample_entropy()
tolerance = 0.1*np.std(time_series)
if maxscale is None:
maxscale = len(time_series)
mse = np.zeros(maxscale)
for i in range(maxscale):
temp = util_granulate_time_series(time_series, i+1)
mse[i] = sample_entropy(temp, sample_length, tolerance)[-1]
return mse | [
"def",
"multiscale_entropy",
"(",
"time_series",
",",
"sample_length",
",",
"tolerance",
"=",
"None",
",",
"maxscale",
"=",
"None",
")",
":",
"if",
"tolerance",
"is",
"None",
":",
"#we need to fix the tolerance at this level. If it remains 'None' it will be changed in call to sample_entropy()",
"tolerance",
"=",
"0.1",
"*",
"np",
".",
"std",
"(",
"time_series",
")",
"if",
"maxscale",
"is",
"None",
":",
"maxscale",
"=",
"len",
"(",
"time_series",
")",
"mse",
"=",
"np",
".",
"zeros",
"(",
"maxscale",
")",
"for",
"i",
"in",
"range",
"(",
"maxscale",
")",
":",
"temp",
"=",
"util_granulate_time_series",
"(",
"time_series",
",",
"i",
"+",
"1",
")",
"mse",
"[",
"i",
"]",
"=",
"sample_entropy",
"(",
"temp",
",",
"sample_length",
",",
"tolerance",
")",
"[",
"-",
"1",
"]",
"return",
"mse"
] | Calculate the Multiscale Entropy of the given time series considering
different time-scales of the time series.
Args:
time_series: Time series for analysis
sample_length: Bandwidth or group of points
tolerance: Tolerance (default = 0.1*std(time_series))
Returns:
Vector containing Multiscale Entropy
Reference:
[1] http://en.pudn.com/downloads149/sourcecode/math/detail646216_en.html | [
"Calculate",
"the",
"Multiscale",
"Entropy",
"of",
"the",
"given",
"time",
"series",
"considering",
"different",
"time",
"-",
"scales",
"of",
"the",
"time",
"series",
"."
] | ae2bf71c2e5b6edb2e468ff52183b30acf7073e6 | https://github.com/nikdon/pyEntropy/blob/ae2bf71c2e5b6edb2e468ff52183b30acf7073e6/pyentrp/entropy.py#L182-L209 |
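Passing `maxscale` explicitly is advisable: the default of `len(time_series)` coarse-grains all the way down to a single point, which is both slow and meaningless at the largest scales. A sketch (import path inferred from the repository layout):

```python
import numpy as np
from pyentrp import entropy as ent

np.random.seed(0)
noise = np.random.randn(500)
print(ent.multiscale_entropy(noise, sample_length=2, maxscale=5))
# five SampEn values, one per coarse-graining scale 1..5; for white
# noise the curve typically decreases as the scale grows
```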
5,649 | nikdon/pyEntropy | pyentrp/entropy.py | permutation_entropy | def permutation_entropy(time_series, order=3, delay=1, normalize=False):
"""Permutation Entropy.
Parameters
----------
time_series : list or np.array
Time series
order : int
Order of permutation entropy
delay : int
Time delay
normalize : bool
If True, divide by log2(factorial(m)) to normalize the entropy
between 0 and 1. Otherwise, return the permutation entropy in bits.
Returns
-------
pe : float
Permutation Entropy
References
----------
.. [1] Massimiliano Zanin et al. Permutation Entropy and Its Main
Biomedical and Econophysics Applications: A Review.
http://www.mdpi.com/1099-4300/14/8/1553/pdf
.. [2] Christoph Bandt and Bernd Pompe. Permutation entropy — a natural
complexity measure for time series.
http://stubber.math-inf.uni-greifswald.de/pub/full/prep/2001/11.pdf
Notes
-----
Last updated (Oct 2018) by Raphael Vallat ([email protected]):
- Major speed improvements
- Use of base 2 instead of base e
- Added normalization
Examples
--------
1. Permutation entropy with order 2
>>> x = [4, 7, 9, 10, 6, 11, 3]
>>> # Return a value between 0 and log2(factorial(order))
>>> print(permutation_entropy(x, order=2))
0.918
2. Normalized permutation entropy with order 3
>>> x = [4, 7, 9, 10, 6, 11, 3]
>>> # Return a value comprised between 0 and 1.
>>> print(permutation_entropy(x, order=3, normalize=True))
0.589
"""
x = np.array(time_series)
hashmult = np.power(order, np.arange(order))
# Embed x and sort the order of permutations
sorted_idx = _embed(x, order=order, delay=delay).argsort(kind='quicksort')
# Associate unique integer to each permutations
hashval = (np.multiply(sorted_idx, hashmult)).sum(1)
# Return the counts
_, c = np.unique(hashval, return_counts=True)
# Use np.true_divide for Python 2 compatibility
p = np.true_divide(c, c.sum())
pe = -np.multiply(p, np.log2(p)).sum()
if normalize:
pe /= np.log2(factorial(order))
return pe | python | def permutation_entropy(time_series, order=3, delay=1, normalize=False):
"""Permutation Entropy.
Parameters
----------
time_series : list or np.array
Time series
order : int
Order of permutation entropy
delay : int
Time delay
normalize : bool
If True, divide by log2(factorial(m)) to normalize the entropy
between 0 and 1. Otherwise, return the permutation entropy in bits.
Returns
-------
pe : float
Permutation Entropy
References
----------
.. [1] Massimiliano Zanin et al. Permutation Entropy and Its Main
Biomedical and Econophysics Applications: A Review.
http://www.mdpi.com/1099-4300/14/8/1553/pdf
.. [2] Christoph Bandt and Bernd Pompe. Permutation entropy — a natural
complexity measure for time series.
http://stubber.math-inf.uni-greifswald.de/pub/full/prep/2001/11.pdf
Notes
-----
Last updated (Oct 2018) by Raphael Vallat ([email protected]):
- Major speed improvements
- Use of base 2 instead of base e
- Added normalization
Examples
--------
1. Permutation entropy with order 2
>>> x = [4, 7, 9, 10, 6, 11, 3]
>>> # Return a value between 0 and log2(factorial(order))
>>> print(permutation_entropy(x, order=2))
0.918
2. Normalized permutation entropy with order 3
>>> x = [4, 7, 9, 10, 6, 11, 3]
>>> # Return a value between 0 and 1.
>>> print(permutation_entropy(x, order=3, normalize=True))
0.589
"""
x = np.array(time_series)
hashmult = np.power(order, np.arange(order))
# Embed x and sort the order of permutations
sorted_idx = _embed(x, order=order, delay=delay).argsort(kind='quicksort')
# Associate unique integer to each permutations
hashval = (np.multiply(sorted_idx, hashmult)).sum(1)
# Return the counts
_, c = np.unique(hashval, return_counts=True)
# Use np.true_divide for Python 2 compatibility
p = np.true_divide(c, c.sum())
pe = -np.multiply(p, np.log2(p)).sum()
if normalize:
pe /= np.log2(factorial(order))
return pe | [
"def",
"permutation_entropy",
"(",
"time_series",
",",
"order",
"=",
"3",
",",
"delay",
"=",
"1",
",",
"normalize",
"=",
"False",
")",
":",
"x",
"=",
"np",
".",
"array",
"(",
"time_series",
")",
"hashmult",
"=",
"np",
".",
"power",
"(",
"order",
",",
"np",
".",
"arange",
"(",
"order",
")",
")",
"# Embed x and sort the order of permutations",
"sorted_idx",
"=",
"_embed",
"(",
"x",
",",
"order",
"=",
"order",
",",
"delay",
"=",
"delay",
")",
".",
"argsort",
"(",
"kind",
"=",
"'quicksort'",
")",
"# Associate unique integer to each permutations",
"hashval",
"=",
"(",
"np",
".",
"multiply",
"(",
"sorted_idx",
",",
"hashmult",
")",
")",
".",
"sum",
"(",
"1",
")",
"# Return the counts",
"_",
",",
"c",
"=",
"np",
".",
"unique",
"(",
"hashval",
",",
"return_counts",
"=",
"True",
")",
"# Use np.true_divide for Python 2 compatibility",
"p",
"=",
"np",
".",
"true_divide",
"(",
"c",
",",
"c",
".",
"sum",
"(",
")",
")",
"pe",
"=",
"-",
"np",
".",
"multiply",
"(",
"p",
",",
"np",
".",
"log2",
"(",
"p",
")",
")",
".",
"sum",
"(",
")",
"if",
"normalize",
":",
"pe",
"/=",
"np",
".",
"log2",
"(",
"factorial",
"(",
"order",
")",
")",
"return",
"pe"
] | Permutation Entropy.
Parameters
----------
time_series : list or np.array
Time series
order : int
Order of permutation entropy
delay : int
Time delay
normalize : bool
If True, divide by log2(factorial(m)) to normalize the entropy
between 0 and 1. Otherwise, return the permutation entropy in bits.
Returns
-------
pe : float
Permutation Entropy
References
----------
.. [1] Massimiliano Zanin et al. Permutation Entropy and Its Main
Biomedical and Econophysics Applications: A Review.
http://www.mdpi.com/1099-4300/14/8/1553/pdf
.. [2] Christoph Bandt and Bernd Pompe. Permutation entropy — a natural
complexity measure for time series.
http://stubber.math-inf.uni-greifswald.de/pub/full/prep/2001/11.pdf
Notes
-----
Last updated (Oct 2018) by Raphael Vallat ([email protected]):
- Major speed improvements
- Use of base 2 instead of base e
- Added normalization
Examples
--------
1. Permutation entropy with order 2
>>> x = [4, 7, 9, 10, 6, 11, 3]
>>> # Return a value between 0 and log2(factorial(order))
>>> print(permutation_entropy(x, order=2))
0.918
2. Normalized permutation entropy with order 3
>>> x = [4, 7, 9, 10, 6, 11, 3]
>>> # Return a value between 0 and 1.
>>> print(permutation_entropy(x, order=3, normalize=True))
0.589 | [
"Permutation",
"Entropy",
"."
] | ae2bf71c2e5b6edb2e468ff52183b30acf7073e6 | https://github.com/nikdon/pyEntropy/blob/ae2bf71c2e5b6edb2e468ff52183b30acf7073e6/pyentrp/entropy.py#L212-L278 |
5,650 | nikdon/pyEntropy | pyentrp/entropy.py | multiscale_permutation_entropy | def multiscale_permutation_entropy(time_series, m, delay, scale):
"""Calculate the Multiscale Permutation Entropy
Args:
time_series: Time series for analysis
m: Order of permutation entropy
delay: Time delay
scale: Scale factor
Returns:
Vector containing Multiscale Permutation Entropy
Reference:
[1] Francesco Carlo Morabito et al. Multivariate Multi-Scale Permutation Entropy for
Complexity Analysis of Alzheimer’s Disease EEG. www.mdpi.com/1099-4300/14/7/1186
[2] http://www.mathworks.com/matlabcentral/fileexchange/37288-multiscale-permutation-entropy-mpe/content/MPerm.m
"""
mspe = []
for i in range(scale):
coarse_time_series = util_granulate_time_series(time_series, i + 1)
pe = permutation_entropy(coarse_time_series, order=m, delay=delay)
mspe.append(pe)
return mspe | python | def multiscale_permutation_entropy(time_series, m, delay, scale):
"""Calculate the Multiscale Permutation Entropy
Args:
time_series: Time series for analysis
m: Order of permutation entropy
delay: Time delay
scale: Scale factor
Returns:
Vector containing Multiscale Permutation Entropy
Reference:
[1] Francesco Carlo Morabito et al. Multivariate Multi-Scale Permutation Entropy for
Complexity Analysis of Alzheimer’s Disease EEG. www.mdpi.com/1099-4300/14/7/1186
[2] http://www.mathworks.com/matlabcentral/fileexchange/37288-multiscale-permutation-entropy-mpe/content/MPerm.m
"""
mspe = []
for i in range(scale):
coarse_time_series = util_granulate_time_series(time_series, i + 1)
pe = permutation_entropy(coarse_time_series, order=m, delay=delay)
mspe.append(pe)
return mspe | [
"def",
"multiscale_permutation_entropy",
"(",
"time_series",
",",
"m",
",",
"delay",
",",
"scale",
")",
":",
"mspe",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"scale",
")",
":",
"coarse_time_series",
"=",
"util_granulate_time_series",
"(",
"time_series",
",",
"i",
"+",
"1",
")",
"pe",
"=",
"permutation_entropy",
"(",
"coarse_time_series",
",",
"order",
"=",
"m",
",",
"delay",
"=",
"delay",
")",
"mspe",
".",
"append",
"(",
"pe",
")",
"return",
"mspe"
] | Calculate the Multiscale Permutation Entropy
Args:
time_series: Time series for analysis
m: Order of permutation entropy
delay: Time delay
scale: Scale factor
Returns:
Vector containing Multiscale Permutation Entropy
Reference:
[1] Francesco Carlo Morabito et al. Multivariate Multi-Scale Permutation Entropy for
Complexity Analysis of Alzheimer’s Disease EEG. www.mdpi.com/1099-4300/14/7/1186
[2] http://www.mathworks.com/matlabcentral/fileexchange/37288-multiscale-permutation-entropy-mpe/content/MPerm.m | [
"Calculate",
"the",
"Multiscale",
"Permutation",
"Entropy"
] | ae2bf71c2e5b6edb2e468ff52183b30acf7073e6 | https://github.com/nikdon/pyEntropy/blob/ae2bf71c2e5b6edb2e468ff52183b30acf7073e6/pyentrp/entropy.py#L281-L303 |
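Usage mirrors `multiscale_entropy`, with permutation entropy computed at each coarse-graining scale (import path inferred from the repository layout):

```python
import numpy as np
from pyentrp import entropy as ent

np.random.seed(0)
noise = np.random.randn(500)
print(ent.multiscale_permutation_entropy(noise, m=3, delay=1, scale=5))
# a list of five permutation entropies, one per coarse-graining scale
```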
5,651 | nikdon/pyEntropy | pyentrp/entropy.py | composite_multiscale_entropy | def composite_multiscale_entropy(time_series, sample_length, scale, tolerance=None):
"""Calculate the Composite Multiscale Entropy of the given time series.
Args:
time_series: Time series for analysis
sample_length: Number of sequential points of the time series
scale: Scale factor
tolerance: Tolerance (default = 0.1...0.2 * std(time_series))
Returns:
Vector containing Composite Multiscale Entropy
Reference:
[1] Wu, Shuen-De, et al. "Time series analysis using
composite multiscale entropy." Entropy 15.3 (2013): 1069-1084.
"""
cmse = np.zeros((1, scale))
for i in range(scale):
for j in range(i):
tmp = util_granulate_time_series(time_series[j:], i + 1)
cmse[i] += sample_entropy(tmp, sample_length, tolerance) / (i + 1)
return cmse | python | def composite_multiscale_entropy(time_series, sample_length, scale, tolerance=None):
"""Calculate the Composite Multiscale Entropy of the given time series.
Args:
time_series: Time series for analysis
sample_length: Number of sequential points of the time series
scale: Scale factor
tolerance: Tolerance (default = 0.1...0.2 * std(time_series))
Returns:
Vector containing Composite Multiscale Entropy
Reference:
[1] Wu, Shuen-De, et al. "Time series analysis using
composite multiscale entropy." Entropy 15.3 (2013): 1069-1084.
"""
cmse = np.zeros((1, scale))
for i in range(scale):
for j in range(i):
tmp = util_granulate_time_series(time_series[j:], i + 1)
cmse[i] += sample_entropy(tmp, sample_length, tolerance) / (i + 1)
return cmse | [
"def",
"composite_multiscale_entropy",
"(",
"time_series",
",",
"sample_length",
",",
"scale",
",",
"tolerance",
"=",
"None",
")",
":",
"cmse",
"=",
"np",
".",
"zeros",
"(",
"(",
"1",
",",
"scale",
")",
")",
"for",
"i",
"in",
"range",
"(",
"scale",
")",
":",
"for",
"j",
"in",
"range",
"(",
"i",
")",
":",
"tmp",
"=",
"util_granulate_time_series",
"(",
"time_series",
"[",
"j",
":",
"]",
",",
"i",
"+",
"1",
")",
"cmse",
"[",
"i",
"]",
"+=",
"sample_entropy",
"(",
"tmp",
",",
"sample_length",
",",
"tolerance",
")",
"/",
"(",
"i",
"+",
"1",
")",
"return",
"cmse"
] | Calculate the Composite Multiscale Entropy of the given time series.
Args:
time_series: Time series for analysis
sample_length: Number of sequential points of the time series
scale: Scale factor
tolerance: Tolerance (default = 0.1...0.2 * std(time_series))
Returns:
Vector containing Composite Multiscale Entropy
Reference:
[1] Wu, Shuen-De, et al. "Time series analysis using
composite multiscale entropy." Entropy 15.3 (2013): 1069-1084. | [
"Calculate",
"the",
"Composite",
"Multiscale",
"Entropy",
"of",
"the",
"given",
"time",
"series",
"."
] | ae2bf71c2e5b6edb2e468ff52183b30acf7073e6 | https://github.com/nikdon/pyEntropy/blob/ae2bf71c2e5b6edb2e468ff52183b30acf7073e6/pyentrp/entropy.py#L307-L329 |
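Note that as written the function cannot run for `scale > 1`: `cmse` has shape `(1, scale)`, so `cmse[i]` raises `IndexError` once `i >= 1`; the inner loop `range(i)` also skips scale 1 entirely, and `sample_entropy` returns an array rather than a scalar. Below is a corrected standalone sketch, following the usual CMSE definition (Wu et al.) and the `[-1]` indexing used by `multiscale_entropy` above; it assumes the two helper functions from the earlier entries are in scope.

```python
import numpy as np

def composite_multiscale_entropy_fixed(time_series, sample_length, scale, tolerance=None):
    if tolerance is None:
        # Fix the tolerance once so it does not vary per coarse-grained series.
        tolerance = 0.1 * np.std(time_series)
    cmse = np.zeros(scale)                    # 1-D: one value per scale
    for i in range(scale):
        for j in range(i + 1):                # all i + 1 coarse-graining offsets
            tmp = util_granulate_time_series(time_series[j:], i + 1)
            cmse[i] += sample_entropy(tmp, sample_length, tolerance)[-1] / (i + 1)
    return cmse
```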
5,652 | kyan001/ping3 | ping3.py | ones_comp_sum16 | def ones_comp_sum16(num1: int, num2: int) -> int:
"""Calculates the 1's complement sum for 16-bit numbers.
Args:
num1: 16-bit number.
num2: 16-bit number.
Returns:
The calculated result.
"""
carry = 1 << 16
result = num1 + num2
return result if result < carry else result + 1 - carry | python | def ones_comp_sum16(num1: int, num2: int) -> int:
"""Calculates the 1's complement sum for 16-bit numbers.
Args:
num1: 16-bit number.
num2: 16-bit number.
Returns:
The calculated result.
"""
carry = 1 << 16
result = num1 + num2
return result if result < carry else result + 1 - carry | [
"def",
"ones_comp_sum16",
"(",
"num1",
":",
"int",
",",
"num2",
":",
"int",
")",
"->",
"int",
":",
"carry",
"=",
"1",
"<<",
"16",
"result",
"=",
"num1",
"+",
"num2",
"return",
"result",
"if",
"result",
"<",
"carry",
"else",
"result",
"+",
"1",
"-",
"carry"
] | Calculates the 1's complement sum for 16-bit numbers.
Args:
num1: 16-bit number.
num2: 16-bit number.
Returns:
The calculated result. | [
"Calculates",
"the",
"1",
"s",
"complement",
"sum",
"for",
"16",
"-",
"bit",
"numbers",
"."
] | fc9e8a4b828965a800036dfbd019e97114ad80b3 | https://github.com/kyan001/ping3/blob/fc9e8a4b828965a800036dfbd019e97114ad80b3/ping3.py#L34-L47 |
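The end-around carry is the defining behavior; assuming the function above is in scope:

```python
print(ones_comp_sum16(0x1234, 0x4321))  # 21845 == 0x5555, no carry produced
print(ones_comp_sum16(0xFFFF, 0x0001))  # 1, the carry wraps back into the low bits
```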
5,653 | kyan001/ping3 | ping3.py | checksum | def checksum(source: bytes) -> int:
"""Calculates the checksum of the input bytes.
RFC1071: https://tools.ietf.org/html/rfc1071
RFC792: https://tools.ietf.org/html/rfc792
Args:
source: The input to be calculated.
Returns:
Calculated checksum.
"""
if len(source) % 2: # if the total length is odd, padding with one octet of zeros for computing the checksum
source += b'\x00'
sum = 0
for i in range(0, len(source), 2):
sum = ones_comp_sum16(sum, (source[i + 1] << 8) + source[i])
return ~sum & 0xffff | python | def checksum(source: bytes) -> int:
"""Calculates the checksum of the input bytes.
RFC1071: https://tools.ietf.org/html/rfc1071
RFC792: https://tools.ietf.org/html/rfc792
Args:
source: The input to be calculated.
Returns:
Calculated checksum.
"""
if len(source) % 2: # if the total length is odd, padding with one octet of zeros for computing the checksum
source += b'\x00'
sum = 0
for i in range(0, len(source), 2):
sum = ones_comp_sum16(sum, (source[i + 1] << 8) + source[i])
return ~sum & 0xffff | [
"def",
"checksum",
"(",
"source",
":",
"bytes",
")",
"->",
"int",
":",
"if",
"len",
"(",
"source",
")",
"%",
"2",
":",
"# if the total length is odd, padding with one octet of zeros for computing the checksum",
"source",
"+=",
"b'\\x00'",
"sum",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"source",
")",
",",
"2",
")",
":",
"sum",
"=",
"ones_comp_sum16",
"(",
"sum",
",",
"(",
"source",
"[",
"i",
"+",
"1",
"]",
"<<",
"8",
")",
"+",
"source",
"[",
"i",
"]",
")",
"return",
"~",
"sum",
"&",
"0xffff"
] | Calculates the checksum of the input bytes.
RFC1071: https://tools.ietf.org/html/rfc1071
RFC792: https://tools.ietf.org/html/rfc792
Args:
source: The input to be calculated.
Returns:
Calculated checksum. | [
"Calculates",
"the",
"checksum",
"of",
"the",
"input",
"bytes",
"."
] | fc9e8a4b828965a800036dfbd019e97114ad80b3 | https://github.com/kyan001/ping3/blob/fc9e8a4b828965a800036dfbd019e97114ad80b3/ping3.py#L50-L67 |
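The defining RFC 1071 property: append the checksum to the data in the same little-endian word order the loop uses, and the whole buffer verifies to zero. Assuming `checksum` from above is in scope:

```python
import struct

payload = b'\x12\x34\x56\x78'  # even length keeps the 16-bit word alignment
c = checksum(payload)          # 0x5397 for this payload
print(checksum(payload + struct.pack('<H', c)))  # 0, the packet verifies
```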
5,654 | kyan001/ping3 | ping3.py | send_one_ping | def send_one_ping(sock: socket, dest_addr: str, icmp_id: int, seq: int, size: int):
"""Sends one ping to the given destination.
ICMP Header (bits): type (8), code (8), checksum (16), id (16), sequence (16)
ICMP Payload: time (double), data
ICMP Wikipedia: https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol
Args:
sock: Socket.
dest_addr: The destination address, can be an IP address or a domain name. Ex. "192.168.1.1"/"example.com"
icmp_id: ICMP packet id, usually the same as the pid.
seq: ICMP packet sequence, usually increases from 0 in the same process.
size: The ICMP packet payload size in bytes. Note this is only for the payload part.
Raises:
HostUnknown: If the destination address is a domain name and cannot be resolved.
"""
try:
dest_addr = socket.gethostbyname(dest_addr) # Domain names are translated into IP addresses; IP addresses are left unchanged.
except socket.gaierror as e:
print("Cannot resolve {}: Unknown host".format(dest_addr))
raise errors.HostUnknown(dest_addr) from e
pseudo_checksum = 0 # Pseudo checksum is used to calculate the real checksum.
icmp_header = struct.pack(ICMP_HEADER_FORMAT, IcmpType.ECHO_REQUEST, ICMP_DEFAULT_CODE, pseudo_checksum, icmp_id, seq)
padding = (size - struct.calcsize(ICMP_TIME_FORMAT) - struct.calcsize(ICMP_HEADER_FORMAT)) * "Q" # Using double to store current time.
icmp_payload = struct.pack(ICMP_TIME_FORMAT, default_timer()) + padding.encode()
real_checksum = checksum(icmp_header + icmp_payload) # Calculates the checksum on the dummy header and the icmp_payload.
# Don't know why I need socket.htons() on real_checksum since ICMP_HEADER_FORMAT is already in network byte order (big-endian)
icmp_header = struct.pack(ICMP_HEADER_FORMAT, IcmpType.ECHO_REQUEST, ICMP_DEFAULT_CODE, socket.htons(real_checksum), icmp_id, seq) # Put real checksum into ICMP header.
packet = icmp_header + icmp_payload
sock.sendto(packet, (dest_addr, 0)) | python | def send_one_ping(sock: socket, dest_addr: str, icmp_id: int, seq: int, size: int):
"""Sends one ping to the given destination.
ICMP Header (bits): type (8), code (8), checksum (16), id (16), sequence (16)
ICMP Payload: time (double), data
ICMP Wikipedia: https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol
Args:
sock: Socket.
dest_addr: The destination address, can be an IP address or a domain name. Ex. "192.168.1.1"/"example.com"
icmp_id: ICMP packet id, usually the same as the pid.
seq: ICMP packet sequence, usually increases from 0 in the same process.
size: The ICMP packet payload size in bytes. Note this is only for the payload part.
Raises:
HostUnknown: If the destination address is a domain name and cannot be resolved.
"""
try:
dest_addr = socket.gethostbyname(dest_addr) # Domain names are translated into IP addresses; IP addresses are left unchanged.
except socket.gaierror as e:
print("Cannot resolve {}: Unknown host".format(dest_addr))
raise errors.HostUnknown(dest_addr) from e
pseudo_checksum = 0 # Pseudo checksum is used to calculate the real checksum.
icmp_header = struct.pack(ICMP_HEADER_FORMAT, IcmpType.ECHO_REQUEST, ICMP_DEFAULT_CODE, pseudo_checksum, icmp_id, seq)
padding = (size - struct.calcsize(ICMP_TIME_FORMAT) - struct.calcsize(ICMP_HEADER_FORMAT)) * "Q" # Using double to store current time.
icmp_payload = struct.pack(ICMP_TIME_FORMAT, default_timer()) + padding.encode()
real_checksum = checksum(icmp_header + icmp_payload) # Calculates the checksum on the dummy header and the icmp_payload.
# Don't know why I need socket.htons() on real_checksum since ICMP_HEADER_FORMAT is already in network byte order (big-endian)
icmp_header = struct.pack(ICMP_HEADER_FORMAT, IcmpType.ECHO_REQUEST, ICMP_DEFAULT_CODE, socket.htons(real_checksum), icmp_id, seq) # Put real checksum into ICMP header.
packet = icmp_header + icmp_payload
sock.sendto(packet, (dest_addr, 0)) | [
"def",
"send_one_ping",
"(",
"sock",
":",
"socket",
",",
"dest_addr",
":",
"str",
",",
"icmp_id",
":",
"int",
",",
"seq",
":",
"int",
",",
"size",
":",
"int",
")",
":",
"try",
":",
"dest_addr",
"=",
"socket",
".",
"gethostbyname",
"(",
"dest_addr",
")",
"# Domain name will translated into IP address, and IP address leaves unchanged.",
"except",
"socket",
".",
"gaierror",
"as",
"e",
":",
"print",
"(",
"\"Cannot resolve {}: Unknown host\"",
".",
"format",
"(",
"dest_addr",
")",
")",
"raise",
"errors",
".",
"HostUnknown",
"(",
"dest_addr",
")",
"from",
"e",
"pseudo_checksum",
"=",
"0",
"# Pseudo checksum is used to calculate the real checksum.",
"icmp_header",
"=",
"struct",
".",
"pack",
"(",
"ICMP_HEADER_FORMAT",
",",
"IcmpType",
".",
"ECHO_REQUEST",
",",
"ICMP_DEFAULT_CODE",
",",
"pseudo_checksum",
",",
"icmp_id",
",",
"seq",
")",
"padding",
"=",
"(",
"size",
"-",
"struct",
".",
"calcsize",
"(",
"ICMP_TIME_FORMAT",
")",
"-",
"struct",
".",
"calcsize",
"(",
"ICMP_HEADER_FORMAT",
")",
")",
"*",
"\"Q\"",
"# Using double to store current time.",
"icmp_payload",
"=",
"struct",
".",
"pack",
"(",
"ICMP_TIME_FORMAT",
",",
"default_timer",
"(",
")",
")",
"+",
"padding",
".",
"encode",
"(",
")",
"real_checksum",
"=",
"checksum",
"(",
"icmp_header",
"+",
"icmp_payload",
")",
"# Calculates the checksum on the dummy header and the icmp_payload.",
"# Don't know why I need socket.htons() on real_checksum since ICMP_HEADER_FORMAT already in Network Bytes Order (big-endian)",
"icmp_header",
"=",
"struct",
".",
"pack",
"(",
"ICMP_HEADER_FORMAT",
",",
"IcmpType",
".",
"ECHO_REQUEST",
",",
"ICMP_DEFAULT_CODE",
",",
"socket",
".",
"htons",
"(",
"real_checksum",
")",
",",
"icmp_id",
",",
"seq",
")",
"# Put real checksum into ICMP header.",
"packet",
"=",
"icmp_header",
"+",
"icmp_payload",
"sock",
".",
"sendto",
"(",
"packet",
",",
"(",
"dest_addr",
",",
"0",
")",
")"
] | Sends one ping to the given destination.
ICMP Header (bits): type (8), code (8), checksum (16), id (16), sequence (16)
ICMP Payload: time (double), data
ICMP Wikipedia: https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol
Args:
sock: Socket.
dest_addr: The destination address, can be an IP address or a domain name. Ex. "192.168.1.1"/"example.com"
icmp_id: ICMP packet id, usually the same as the pid.
seq: ICMP packet sequence, usually increases from 0 in the same process.
size: The ICMP packet payload size in bytes. Note this is only for the payload part.
Raises:
HostUnknown: If the destination address is a domain name and cannot be resolved. | [
"Sends",
"one",
"ping",
"to",
"the",
"given",
"destination",
"."
] | fc9e8a4b828965a800036dfbd019e97114ad80b3 | https://github.com/kyan001/ping3/blob/fc9e8a4b828965a800036dfbd019e97114ad80b3/ping3.py#L70-L100 |
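The entry above relies on a checksum() helper that is not included in this record. As a reference point, a minimal RFC 1071 Internet-checksum sketch follows; it is an assumption about what such a helper does, not ping3's actual implementation, and byte-order conventions of such helpers vary (which is why the code above still passes the result through socket.htons()).

def rfc1071_checksum(data: bytes) -> int:
    # Hypothetical stand-in for the checksum() helper used above.
    if len(data) % 2:
        data += b"\x00"  # pad odd-length input with a trailing zero byte
    total = 0
    for i in range(0, len(data), 2):
        total += (data[i] << 8) + data[i + 1]  # sum 16-bit words
        total = (total & 0xFFFF) + (total >> 16)  # fold the carry back in
    return ~total & 0xFFFF  # one's complement of the folded sum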
5,655 | kyan001/ping3 | ping3.py | receive_one_ping | def receive_one_ping(sock: socket, icmp_id: int, seq: int, timeout: int) -> float or None:
"""Receives the ping from the socket.
IP Header (bits): version (8), type of service (8), length (16), id (16), flags (16), time to live (8), protocol (8), checksum (16), source ip (32), destination ip (32).
ICMP Packet (bytes): IP Header (20), ICMP Header (8), ICMP Payload (*).
Ping Wikipedia: https://en.wikipedia.org/wiki/Ping_(networking_utility)
ToS (Type of Service) in IP header for ICMP is 0. Protocol in IP header for ICMP is 1.
Args:
sock: The same socket used to send the ping.
icmp_id: ICMP packet id. Sent packet id should be identical with received packet id.
seq: ICMP packet sequence. Sent packet sequence should be identical with received packet sequence.
timeout: Timeout in seconds.
Returns:
The delay in seconds.
Raises:
Timeout: If no matching reply is received within the timeout period.
TimeToLiveExpired: If the Time-To-Live in the IP header is not large enough to reach the destination.
TimeExceeded: If time is exceeded but the Time-To-Live has not expired.
"""
ip_header_slice = slice(0, struct.calcsize(IP_HEADER_FORMAT)) # [0:20]
icmp_header_slice = slice(ip_header_slice.stop, ip_header_slice.stop + struct.calcsize(ICMP_HEADER_FORMAT)) # [20:28]
ip_header_keys = ('version', 'tos', 'len', 'id', 'flags', 'ttl', 'protocol', 'checksum', 'src_addr', 'dest_addr')
icmp_header_keys = ('type', 'code', 'checksum', 'id', 'seq')
while True:
selected = select.select([sock], [], [], timeout)
if selected[0] == []: # Timeout
raise errors.Timeout(timeout)
time_recv = default_timer()
recv_data, addr = sock.recvfrom(1024)
ip_header_raw, icmp_header_raw, icmp_payload_raw = recv_data[ip_header_slice], recv_data[icmp_header_slice], recv_data[icmp_header_slice.stop:]
ip_header = dict(zip(ip_header_keys, struct.unpack(IP_HEADER_FORMAT, ip_header_raw)))
_debug("IP HEADER:", ip_header)
icmp_header = dict(zip(icmp_header_keys, struct.unpack(ICMP_HEADER_FORMAT, icmp_header_raw)))
_debug("ICMP HEADER:", icmp_header)
if icmp_header['type'] == IcmpType.TIME_EXCEEDED: # TIME_EXCEEDED has no icmp_id and icmp_seq. Usually they are 0.
if icmp_header['code'] == IcmpTimeExceededCode.TTL_EXPIRED:
raise errors.TimeToLiveExpired() # Some routers do not report TTL expiry; in that case the ping simply times out.
raise errors.TimeExceeded()
if icmp_header['id'] == icmp_id and icmp_header['seq'] == seq: # ECHO_REPLY should match the request's icmp_id and seq.
if icmp_header['type'] == IcmpType.ECHO_REQUEST: # filters out the ECHO_REQUEST itself.
_debug("ECHO_REQUEST filtered out.")
continue
if icmp_header['type'] == IcmpType.ECHO_REPLY:
time_sent = struct.unpack(ICMP_TIME_FORMAT, icmp_payload_raw[0:struct.calcsize(ICMP_TIME_FORMAT)])[0]
return time_recv - time_sent | python | def receive_one_ping(sock: socket, icmp_id: int, seq: int, timeout: int) -> float or None:
"""Receives the ping from the socket.
IP Header (bits): version (8), type of service (8), length (16), id (16), flags (16), time to live (8), protocol (8), checksum (16), source ip (32), destination ip (32).
ICMP Packet (bytes): IP Header (20), ICMP Header (8), ICMP Payload (*).
Ping Wikipedia: https://en.wikipedia.org/wiki/Ping_(networking_utility)
ToS (Type of Service) in IP header for ICMP is 0. Protocol in IP header for ICMP is 1.
Args:
sock: The same socket used to send the ping.
icmp_id: ICMP packet id. Sent packet id should be identical with received packet id.
seq: ICMP packet sequence. Sent packet sequence should be identical with received packet sequence.
timeout: Timeout in seconds.
Returns:
The delay in seconds.
Raises:
Timeout: If no matching reply is received within the timeout period.
TimeToLiveExpired: If the Time-To-Live in the IP header is not large enough to reach the destination.
TimeExceeded: If time is exceeded but the Time-To-Live has not expired.
"""
ip_header_slice = slice(0, struct.calcsize(IP_HEADER_FORMAT)) # [0:20]
icmp_header_slice = slice(ip_header_slice.stop, ip_header_slice.stop + struct.calcsize(ICMP_HEADER_FORMAT)) # [20:28]
ip_header_keys = ('version', 'tos', 'len', 'id', 'flags', 'ttl', 'protocol', 'checksum', 'src_addr', 'dest_addr')
icmp_header_keys = ('type', 'code', 'checksum', 'id', 'seq')
while True:
selected = select.select([sock], [], [], timeout)
if selected[0] == []: # Timeout
raise errors.Timeout(timeout)
time_recv = default_timer()
recv_data, addr = sock.recvfrom(1024)
ip_header_raw, icmp_header_raw, icmp_payload_raw = recv_data[ip_header_slice], recv_data[icmp_header_slice], recv_data[icmp_header_slice.stop:]
ip_header = dict(zip(ip_header_keys, struct.unpack(IP_HEADER_FORMAT, ip_header_raw)))
_debug("IP HEADER:", ip_header)
icmp_header = dict(zip(icmp_header_keys, struct.unpack(ICMP_HEADER_FORMAT, icmp_header_raw)))
_debug("ICMP HEADER:", icmp_header)
if icmp_header['type'] == IcmpType.TIME_EXCEEDED: # TIME_EXCEEDED has no icmp_id and icmp_seq. Usually they are 0.
if icmp_header['code'] == IcmpTimeExceededCode.TTL_EXPIRED:
raise errors.TimeToLiveExpired() # Some routers do not report TTL expiry; in that case the ping simply times out.
raise errors.TimeExceeded()
if icmp_header['id'] == icmp_id and icmp_header['seq'] == seq: # ECHO_REPLY should match the request's icmp_id and seq.
if icmp_header['type'] == IcmpType.ECHO_REQUEST: # filters out the ECHO_REQUEST itself.
_debug("ECHO_REQUEST filtered out.")
continue
if icmp_header['type'] == IcmpType.ECHO_REPLY:
time_sent = struct.unpack(ICMP_TIME_FORMAT, icmp_payload_raw[0:struct.calcsize(ICMP_TIME_FORMAT)])[0]
return time_recv - time_sent | [
"def",
"receive_one_ping",
"(",
"sock",
":",
"socket",
",",
"icmp_id",
":",
"int",
",",
"seq",
":",
"int",
",",
"timeout",
":",
"int",
")",
"->",
"float",
"or",
"None",
":",
"ip_header_slice",
"=",
"slice",
"(",
"0",
",",
"struct",
".",
"calcsize",
"(",
"IP_HEADER_FORMAT",
")",
")",
"# [0:20]",
"icmp_header_slice",
"=",
"slice",
"(",
"ip_header_slice",
".",
"stop",
",",
"ip_header_slice",
".",
"stop",
"+",
"struct",
".",
"calcsize",
"(",
"ICMP_HEADER_FORMAT",
")",
")",
"# [20:28]",
"ip_header_keys",
"=",
"(",
"'version'",
",",
"'tos'",
",",
"'len'",
",",
"'id'",
",",
"'flags'",
",",
"'ttl'",
",",
"'protocol'",
",",
"'checksum'",
",",
"'src_addr'",
",",
"'dest_addr'",
")",
"icmp_header_keys",
"=",
"(",
"'type'",
",",
"'code'",
",",
"'checksum'",
",",
"'id'",
",",
"'seq'",
")",
"while",
"True",
":",
"selected",
"=",
"select",
".",
"select",
"(",
"[",
"sock",
"]",
",",
"[",
"]",
",",
"[",
"]",
",",
"timeout",
")",
"if",
"selected",
"[",
"0",
"]",
"==",
"[",
"]",
":",
"# Timeout",
"raise",
"errors",
".",
"Timeout",
"(",
"timeout",
")",
"time_recv",
"=",
"default_timer",
"(",
")",
"recv_data",
",",
"addr",
"=",
"sock",
".",
"recvfrom",
"(",
"1024",
")",
"ip_header_raw",
",",
"icmp_header_raw",
",",
"icmp_payload_raw",
"=",
"recv_data",
"[",
"ip_header_slice",
"]",
",",
"recv_data",
"[",
"icmp_header_slice",
"]",
",",
"recv_data",
"[",
"icmp_header_slice",
".",
"stop",
":",
"]",
"ip_header",
"=",
"dict",
"(",
"zip",
"(",
"ip_header_keys",
",",
"struct",
".",
"unpack",
"(",
"IP_HEADER_FORMAT",
",",
"ip_header_raw",
")",
")",
")",
"_debug",
"(",
"\"IP HEADER:\"",
",",
"ip_header",
")",
"icmp_header",
"=",
"dict",
"(",
"zip",
"(",
"icmp_header_keys",
",",
"struct",
".",
"unpack",
"(",
"ICMP_HEADER_FORMAT",
",",
"icmp_header_raw",
")",
")",
")",
"_debug",
"(",
"\"ICMP HEADER:\"",
",",
"icmp_header",
")",
"if",
"icmp_header",
"[",
"'type'",
"]",
"==",
"IcmpType",
".",
"TIME_EXCEEDED",
":",
"# TIME_EXCEEDED has no icmp_id and icmp_seq. Usually they are 0.",
"if",
"icmp_header",
"[",
"'code'",
"]",
"==",
"IcmpTimeExceededCode",
".",
"TTL_EXPIRED",
":",
"raise",
"errors",
".",
"TimeToLiveExpired",
"(",
")",
"# Some router does not report TTL expired and then timeout shows.",
"raise",
"errors",
".",
"TimeExceeded",
"(",
")",
"if",
"icmp_header",
"[",
"'id'",
"]",
"==",
"icmp_id",
"and",
"icmp_header",
"[",
"'seq'",
"]",
"==",
"seq",
":",
"# ECHO_REPLY should match the",
"if",
"icmp_header",
"[",
"'type'",
"]",
"==",
"IcmpType",
".",
"ECHO_REQUEST",
":",
"# filters out the ECHO_REQUEST itself.",
"_debug",
"(",
"\"ECHO_REQUEST filtered out.\"",
")",
"continue",
"if",
"icmp_header",
"[",
"'type'",
"]",
"==",
"IcmpType",
".",
"ECHO_REPLY",
":",
"time_sent",
"=",
"struct",
".",
"unpack",
"(",
"ICMP_TIME_FORMAT",
",",
"icmp_payload_raw",
"[",
"0",
":",
"struct",
".",
"calcsize",
"(",
"ICMP_TIME_FORMAT",
")",
"]",
")",
"[",
"0",
"]",
"return",
"time_recv",
"-",
"time_sent"
] | Receives the ping from the socket.
IP Header (bits): version (8), type of service (8), length (16), id (16), flags (16), time to live (8), protocol (8), checksum (16), source ip (32), destination ip (32).
ICMP Packet (bytes): IP Header (20), ICMP Header (8), ICMP Payload (*).
Ping Wikipedia: https://en.wikipedia.org/wiki/Ping_(networking_utility)
ToS (Type of Service) in IP header for ICMP is 0. Protocol in IP header for ICMP is 1.
Args:
sock: The same socket used to send the ping.
icmp_id: ICMP packet id. Sent packet id should be identical with received packet id.
seq: ICMP packet sequence. Sent packet sequence should be identical with received packet sequence.
timeout: Timeout in seconds.
Returns:
The delay in seconds.
Raises:
Timeout: If no matching reply is received within the timeout period.
TimeToLiveExpired: If the Time-To-Live in the IP header is not large enough to reach the destination.
TimeExceeded: If time is exceeded but the Time-To-Live has not expired. | [
"Receives",
"the",
"ping",
"from",
"the",
"socket",
"."
] | fc9e8a4b828965a800036dfbd019e97114ad80b3 | https://github.com/kyan001/ping3/blob/fc9e8a4b828965a800036dfbd019e97114ad80b3/ping3.py#L103-L149 |
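The IP_HEADER_FORMAT and ICMP_HEADER_FORMAT constants used above are defined elsewhere in ping3.py and are not part of this record. The format strings below are assumptions chosen to be consistent with the key tuples and the [0:20] / [20:28] slices in the code:

import struct

IP_HEADER_FORMAT = "!BBHHHBBHII"  # assumed: version, tos, len, id, flags, ttl, protocol, checksum, src_addr, dest_addr
ICMP_HEADER_FORMAT = "!BBHHH"     # assumed: type, code, checksum, id, seq
assert struct.calcsize(IP_HEADER_FORMAT) == 20   # matches the [0:20] slice
assert struct.calcsize(ICMP_HEADER_FORMAT) == 8  # matches the [20:28] slice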
5,656 | kyan001/ping3 | ping3.py | ping | def ping(dest_addr: str, timeout: int = 4, unit: str = "s", src_addr: str = None, ttl: int = 64, seq: int = 0, size: int = 56) -> float or None:
"""
Send one ping to destination address with the given timeout.
Args:
dest_addr: The destination address, can be an IP address or a domain name. Ex. "192.168.1.1"/"example.com"
timeout: Timeout in seconds. Default is 4s, same as Windows CMD. (default 4)
unit: The unit of returned value. "s" for seconds, "ms" for milliseconds. (default "s")
src_addr: The IP address to ping from. This is for multi-interface clients. Ex. "192.168.1.20". (default None)
ttl: The Time-To-Live of the outgoing packet. Default is 64, same as in Linux and macOS. (default 64)
seq: ICMP packet sequence, usually increases from 0 in the same process. (default 0)
size: The ICMP packet payload size in bytes. Default is 56, same as in macOS. (default 56)
Returns:
The delay in seconds/milliseconds or None on timeout.
Raises:
PingError: Any PingError will raise again if `ping3.EXCEPTIONS` is True.
"""
with socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP) as sock:
sock.setsockopt(socket.SOL_IP, socket.IP_TTL, ttl)
if src_addr:
sock.bind((src_addr, 0))
icmp_id = threading.current_thread().ident % 0xFFFF
try:
send_one_ping(sock=sock, dest_addr=dest_addr, icmp_id=icmp_id, seq=seq, size=size)
delay = receive_one_ping(sock=sock, icmp_id=icmp_id, seq=seq, timeout=timeout) # in seconds
except errors.PingError as e:
_debug(e)
if EXCEPTIONS:
raise e
return None
if delay is None:
return None
if unit == "ms":
delay *= 1000 # in milliseconds
return delay | python | def ping(dest_addr: str, timeout: int = 4, unit: str = "s", src_addr: str = None, ttl: int = 64, seq: int = 0, size: int = 56) -> float or None:
"""
Send one ping to destination address with the given timeout.
Args:
dest_addr: The destination address, can be an IP address or a domain name. Ex. "192.168.1.1"/"example.com"
timeout: Timeout in seconds. Default is 4s, same as Windows CMD. (default 4)
unit: The unit of returned value. "s" for seconds, "ms" for milliseconds. (default "s")
src_addr: The IP address to ping from. This is for multi-interface clients. Ex. "192.168.1.20". (default None)
ttl: The Time-To-Live of the outgoing packet. Default is 64, same as in Linux and macOS. (default 64)
seq: ICMP packet sequence, usually increases from 0 in the same process. (default 0)
size: The ICMP packet payload size in bytes. Default is 56, same as in macOS. (default 56)
Returns:
The delay in seconds/milliseconds or None on timeout.
Raises:
PingError: Any PingError will raise again if `ping3.EXCEPTIONS` is True.
"""
with socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP) as sock:
sock.setsockopt(socket.SOL_IP, socket.IP_TTL, ttl)
if src_addr:
sock.bind((src_addr, 0))
icmp_id = threading.current_thread().ident % 0xFFFF
try:
send_one_ping(sock=sock, dest_addr=dest_addr, icmp_id=icmp_id, seq=seq, size=size)
delay = receive_one_ping(sock=sock, icmp_id=icmp_id, seq=seq, timeout=timeout) # in seconds
except errors.PingError as e:
_debug(e)
if EXCEPTIONS:
raise e
return None
if delay is None:
return None
if unit == "ms":
delay *= 1000 # in milliseconds
return delay | [
"def",
"ping",
"(",
"dest_addr",
":",
"str",
",",
"timeout",
":",
"int",
"=",
"4",
",",
"unit",
":",
"str",
"=",
"\"s\"",
",",
"src_addr",
":",
"str",
"=",
"None",
",",
"ttl",
":",
"int",
"=",
"64",
",",
"seq",
":",
"int",
"=",
"0",
",",
"size",
":",
"int",
"=",
"56",
")",
"->",
"float",
"or",
"None",
":",
"with",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_RAW",
",",
"socket",
".",
"IPPROTO_ICMP",
")",
"as",
"sock",
":",
"sock",
".",
"setsockopt",
"(",
"socket",
".",
"SOL_IP",
",",
"socket",
".",
"IP_TTL",
",",
"ttl",
")",
"if",
"src_addr",
":",
"sock",
".",
"bind",
"(",
"(",
"src_addr",
",",
"0",
")",
")",
"icmp_id",
"=",
"threading",
".",
"current_thread",
"(",
")",
".",
"ident",
"%",
"0xFFFF",
"try",
":",
"send_one_ping",
"(",
"sock",
"=",
"sock",
",",
"dest_addr",
"=",
"dest_addr",
",",
"icmp_id",
"=",
"icmp_id",
",",
"seq",
"=",
"seq",
",",
"size",
"=",
"size",
")",
"delay",
"=",
"receive_one_ping",
"(",
"sock",
"=",
"sock",
",",
"icmp_id",
"=",
"icmp_id",
",",
"seq",
"=",
"seq",
",",
"timeout",
"=",
"timeout",
")",
"# in seconds",
"except",
"errors",
".",
"PingError",
"as",
"e",
":",
"_debug",
"(",
"e",
")",
"if",
"EXCEPTIONS",
":",
"raise",
"e",
"return",
"None",
"if",
"delay",
"is",
"None",
":",
"return",
"None",
"if",
"unit",
"==",
"\"ms\"",
":",
"delay",
"*=",
"1000",
"# in milliseconds",
"return",
"delay"
] | Send one ping to destination address with the given timeout.
Args:
dest_addr: The destination address, can be an IP address or a domain name. Ex. "192.168.1.1"/"example.com"
timeout: Timeout in seconds. Default is 4s, same as Windows CMD. (default 4)
unit: The unit of returned value. "s" for seconds, "ms" for milliseconds. (default "s")
src_addr: The IP address to ping from. This is for multi-interface clients. Ex. "192.168.1.20". (default None)
ttl: The Time-To-Live of the outgoing packet. Default is 64, same as in Linux and macOS. (default 64)
seq: ICMP packet sequence, usually increases from 0 in the same process. (default 0)
size: The ICMP packet payload size in bytes. Default is 56, same as in macOS. (default 56)
Returns:
The delay in seconds/milliseconds or None on timeout.
Raises:
PingError: Any PingError will raise again if `ping3.EXCEPTIONS` is True. | [
"Send",
"one",
"ping",
"to",
"destination",
"address",
"with",
"the",
"given",
"timeout",
"."
] | fc9e8a4b828965a800036dfbd019e97114ad80b3 | https://github.com/kyan001/ping3/blob/fc9e8a4b828965a800036dfbd019e97114ad80b3/ping3.py#L152-L188 |
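A minimal usage sketch for this function; raw ICMP sockets generally require root/administrator privileges, and the destination is a placeholder:

from ping3 import ping

delay = ping("example.com", timeout=2, unit="ms")  # float on success, None on timeout
if delay is None:
    print("Request timed out")
else:
    print("Round trip took {:.1f} ms".format(delay))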
5,657 | kyan001/ping3 | ping3.py | verbose_ping | def verbose_ping(dest_addr: str, count: int = 4, *args, **kwargs):
"""
Send pings to destination address with the given timeout and display the result.
Args:
dest_addr: The destination address. Ex. "192.168.1.1"/"example.com"
count: How many pings should be sent. Default is 4, same as Windows CMD. (default 4)
*args and **kwargs: All other arguments available in ping() except `seq`.
Returns:
None. Formatted ping results are printed to stdout.
"""
timeout = kwargs.get("timeout")
src = kwargs.get("src_addr")
unit = kwargs.setdefault("unit", "ms")
for i in range(count):
output_text = "ping '{}'".format(dest_addr)
output_text += " from '{}'".format(src) if src else ""
output_text += " ... "
print(output_text, end="")
delay = ping(dest_addr, seq=i, *args, **kwargs)
if delay is None:
print("Timeout > {}s".format(timeout) if timeout else "Timeout")
else:
print("{value}{unit}".format(value=int(delay), unit=unit)) | python | def verbose_ping(dest_addr: str, count: int = 4, *args, **kwargs):
"""
Send pings to destination address with the given timeout and display the result.
Args:
dest_addr: The destination address. Ex. "192.168.1.1"/"example.com"
count: How many pings should be sent. Default is 4, same as Windows CMD. (default 4)
*args and **kwargs: All other arguments available in ping() except `seq`.
Returns:
None. Formatted ping results are printed to stdout.
"""
timeout = kwargs.get("timeout")
src = kwargs.get("src_addr")
unit = kwargs.setdefault("unit", "ms")
for i in range(count):
output_text = "ping '{}'".format(dest_addr)
output_text += " from '{}'".format(src) if src else ""
output_text += " ... "
print(output_text, end="")
delay = ping(dest_addr, seq=i, *args, **kwargs)
if delay is None:
print("Timeout > {}s".format(timeout) if timeout else "Timeout")
else:
print("{value}{unit}".format(value=int(delay), unit=unit)) | [
"def",
"verbose_ping",
"(",
"dest_addr",
":",
"str",
",",
"count",
":",
"int",
"=",
"4",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"timeout",
"=",
"kwargs",
".",
"get",
"(",
"\"timeout\"",
")",
"src",
"=",
"kwargs",
".",
"get",
"(",
"\"src\"",
")",
"unit",
"=",
"kwargs",
".",
"setdefault",
"(",
"\"unit\"",
",",
"\"ms\"",
")",
"for",
"i",
"in",
"range",
"(",
"count",
")",
":",
"output_text",
"=",
"\"ping '{}'\"",
".",
"format",
"(",
"dest_addr",
")",
"output_text",
"+=",
"\" from '{}'\"",
".",
"format",
"(",
"src",
")",
"if",
"src",
"else",
"\"\"",
"output_text",
"+=",
"\" ... \"",
"print",
"(",
"output_text",
",",
"end",
"=",
"\"\"",
")",
"delay",
"=",
"ping",
"(",
"dest_addr",
",",
"seq",
"=",
"i",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"delay",
"is",
"None",
":",
"print",
"(",
"\"Timeout > {}s\"",
".",
"format",
"(",
"timeout",
")",
"if",
"timeout",
"else",
"\"Timeout\"",
")",
"else",
":",
"print",
"(",
"\"{value}{unit}\"",
".",
"format",
"(",
"value",
"=",
"int",
"(",
"delay",
")",
",",
"unit",
"=",
"unit",
")",
")"
] | Send pings to destination address with the given timeout and display the result.
Args:
dest_addr: The destination address. Ex. "192.168.1.1"/"example.com"
count: How many pings should be sent. Default is 4, same as Windows CMD. (default 4)
*args and **kwargs: All other arguments available in ping() except `seq`.
Returns:
None. Formatted ping results are printed to stdout. | [
"Send",
"pings",
"to",
"destination",
"address",
"with",
"the",
"given",
"timeout",
"and",
"display",
"the",
"result",
"."
] | fc9e8a4b828965a800036dfbd019e97114ad80b3 | https://github.com/kyan001/ping3/blob/fc9e8a4b828965a800036dfbd019e97114ad80b3/ping3.py#L191-L215 |
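A usage sketch with a placeholder destination; each iteration prints one line such as "ping 'example.com' ... 23ms":

from ping3 import verbose_ping

verbose_ping("example.com", count=3, timeout=2)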
5,658 | barrust/pyspellchecker | spellchecker/spellchecker.py | SpellChecker.distance | def distance(self, val):
""" set the distance parameter """
tmp = 2
try:
int(val)
if val > 0 and val <= 2:
tmp = val
except (ValueError, TypeError):
pass
self._distance = tmp | python | def distance(self, val):
""" set the distance parameter """
tmp = 2
try:
int(val)
if val > 0 and val <= 2:
tmp = val
except (ValueError, TypeError):
pass
self._distance = tmp | [
"def",
"distance",
"(",
"self",
",",
"val",
")",
":",
"tmp",
"=",
"2",
"try",
":",
"int",
"(",
"val",
")",
"if",
"val",
">",
"0",
"and",
"val",
"<=",
"2",
":",
"tmp",
"=",
"val",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
":",
"pass",
"self",
".",
"_distance",
"=",
"tmp"
] | set the distance parameter | [
"set",
"the",
"distance",
"parameter"
] | fa96024c0cdeba99e10e11060d5fd7aba796b271 | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L79-L88 |
5,659 | barrust/pyspellchecker | spellchecker/spellchecker.py | SpellChecker.export | def export(self, filepath, encoding="utf-8", gzipped=True):
""" Export the word frequency list for import in the future
Args:
filepath (str): The filepath to the exported dictionary
encoding (str): The encoding of the resulting output
gzipped (bool): Whether to gzip the dictionary or not """
data = json.dumps(self.word_frequency.dictionary, sort_keys=True)
write_file(filepath, encoding, gzipped, data) | python | def export(self, filepath, encoding="utf-8", gzipped=True):
""" Export the word frequency list for import in the future
Args:
filepath (str): The filepath to the exported dictionary
encoding (str): The encoding of the resulting output
gzipped (bool): Whether to gzip the dictionary or not """
data = json.dumps(self.word_frequency.dictionary, sort_keys=True)
write_file(filepath, encoding, gzipped, data) | [
"def",
"export",
"(",
"self",
",",
"filepath",
",",
"encoding",
"=",
"\"utf-8\"",
",",
"gzipped",
"=",
"True",
")",
":",
"data",
"=",
"json",
".",
"dumps",
"(",
"self",
".",
"word_frequency",
".",
"dictionary",
",",
"sort_keys",
"=",
"True",
")",
"write_file",
"(",
"filepath",
",",
"encoding",
",",
"gzipped",
",",
"data",
")"
] | Export the word frequency list for import in the future
Args:
filepath (str): The filepath to the exported dictionary
encoding (str): The encoding of the resulting output
gzipped (bool): Whether to gzip the dictionary or not | [
"Export",
"the",
"word",
"frequency",
"list",
"for",
"import",
"in",
"the",
"future"
] | fa96024c0cdeba99e10e11060d5fd7aba796b271 | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L100-L108 |
5,660 | barrust/pyspellchecker | spellchecker/spellchecker.py | SpellChecker.word_probability | def word_probability(self, word, total_words=None):
""" Calculate the probability of the `word` being the desired, correct
word
Args:
word (str): The word for which the word probability is \
calculated
total_words (int): The total number of words to use in the \
calculation; use the default for using the whole word \
frequency
Returns:
float: The probability that the word is the correct word """
if total_words is None:
total_words = self._word_frequency.total_words
return self._word_frequency.dictionary[word] / total_words | python | def word_probability(self, word, total_words=None):
""" Calculate the probability of the `word` being the desired, correct
word
Args:
word (str): The word for which the word probability is \
calculated
total_words (int): The total number of words to use in the \
calculation; use the default for using the whole word \
frequency
Returns:
float: The probability that the word is the correct word """
if total_words is None:
total_words = self._word_frequency.total_words
return self._word_frequency.dictionary[word] / total_words | [
"def",
"word_probability",
"(",
"self",
",",
"word",
",",
"total_words",
"=",
"None",
")",
":",
"if",
"total_words",
"is",
"None",
":",
"total_words",
"=",
"self",
".",
"_word_frequency",
".",
"total_words",
"return",
"self",
".",
"_word_frequency",
".",
"dictionary",
"[",
"word",
"]",
"/",
"total_words"
] | Calculate the probability of the `word` being the desired, correct
word
Args:
word (str): The word for which the word probability is \
calculated
total_words (int): The total number of words to use in the \
calculation; use the default for using the whole word \
frequency
Returns:
float: The probability that the word is the correct word | [
"Calculate",
"the",
"probability",
"of",
"the",
"word",
"being",
"the",
"desired",
"correct",
"word"
] | fa96024c0cdeba99e10e11060d5fd7aba796b271 | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L110-L124 |
5,661 | barrust/pyspellchecker | spellchecker/spellchecker.py | SpellChecker.correction | def correction(self, word):
""" The most probable correct spelling for the word
Args:
word (str): The word to correct
Returns:
str: The most likely candidate """
return max(self.candidates(word), key=self.word_probability) | python | def correction(self, word):
""" The most probable correct spelling for the word
Args:
word (str): The word to correct
Returns:
str: The most likely candidate """
return max(self.candidates(word), key=self.word_probability) | [
"def",
"correction",
"(",
"self",
",",
"word",
")",
":",
"return",
"max",
"(",
"self",
".",
"candidates",
"(",
"word",
")",
",",
"key",
"=",
"self",
".",
"word_probability",
")"
] | The most probable correct spelling for the word
Args:
word (str): The word to correct
Returns:
str: The most likely candidate | [
"The",
"most",
"probable",
"correct",
"spelling",
"for",
"the",
"word"
] | fa96024c0cdeba99e10e11060d5fd7aba796b271 | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L126-L133 |
5,662 | barrust/pyspellchecker | spellchecker/spellchecker.py | SpellChecker.candidates | def candidates(self, word):
""" Generate possible spelling corrections for the provided word up to
an edit distance of two; distance-two candidates are generated only when needed
Args:
word (str): The word for which to calculate candidate spellings
Returns:
set: The set of words that are possible candidates """
if self.known([word]): # short-cut if word is correct already
return {word}
# get edit distance 1...
res = [x for x in self.edit_distance_1(word)]
tmp = self.known(res)
if tmp:
return tmp
# if still not found, use the edit distance 1 to calc edit distance 2
if self._distance == 2:
tmp = self.known([x for x in self.__edit_distance_alt(res)])
if tmp:
return tmp
return {word} | python | def candidates(self, word):
""" Generate possible spelling corrections for the provided word up to
an edit distance of two; distance-two candidates are generated only when needed
Args:
word (str): The word for which to calculate candidate spellings
Returns:
set: The set of words that are possible candidates """
if self.known([word]): # short-cut if word is correct already
return {word}
# get edit distance 1...
res = [x for x in self.edit_distance_1(word)]
tmp = self.known(res)
if tmp:
return tmp
# if still not found, use the edit distance 1 to calc edit distance 2
if self._distance == 2:
tmp = self.known([x for x in self.__edit_distance_alt(res)])
if tmp:
return tmp
return {word} | [
"def",
"candidates",
"(",
"self",
",",
"word",
")",
":",
"if",
"self",
".",
"known",
"(",
"[",
"word",
"]",
")",
":",
"# short-cut if word is correct already",
"return",
"{",
"word",
"}",
"# get edit distance 1...",
"res",
"=",
"[",
"x",
"for",
"x",
"in",
"self",
".",
"edit_distance_1",
"(",
"word",
")",
"]",
"tmp",
"=",
"self",
".",
"known",
"(",
"res",
")",
"if",
"tmp",
":",
"return",
"tmp",
"# if still not found, use the edit distance 1 to calc edit distance 2",
"if",
"self",
".",
"_distance",
"==",
"2",
":",
"tmp",
"=",
"self",
".",
"known",
"(",
"[",
"x",
"for",
"x",
"in",
"self",
".",
"__edit_distance_alt",
"(",
"res",
")",
"]",
")",
"if",
"tmp",
":",
"return",
"tmp",
"return",
"{",
"word",
"}"
] | Generate possible spelling corrections for the provided word up to
an edit distance of two; distance-two candidates are generated only when needed
Args:
word (str): The word for which to calculate candidate spellings
Returns:
set: The set of words that are possible candidates | [
"Generate",
"possible",
"spelling",
"corrections",
"for",
"the",
"provided",
"word",
"up",
"to",
"an",
"edit",
"distance",
"of",
"two",
"if",
"and",
"only",
"when",
"needed"
] | fa96024c0cdeba99e10e11060d5fd7aba796b271 | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L135-L155 |
5,663 | barrust/pyspellchecker | spellchecker/spellchecker.py | SpellChecker.known | def known(self, words):
""" The subset of `words` that appear in the dictionary of words
Args:
words (list): List of words to determine which are in the \
corpus
Returns:
set: The set of those words from the input that are in the \
corpus """
tmp = [w.lower() for w in words]
return set(
w
for w in tmp
if w in self._word_frequency.dictionary
or not self._check_if_should_check(w)
) | python | def known(self, words):
""" The subset of `words` that appear in the dictionary of words
Args:
words (list): List of words to determine which are in the \
corpus
Returns:
set: The set of those words from the input that are in the \
corpus """
tmp = [w.lower() for w in words]
return set(
w
for w in tmp
if w in self._word_frequency.dictionary
or not self._check_if_should_check(w)
) | [
"def",
"known",
"(",
"self",
",",
"words",
")",
":",
"tmp",
"=",
"[",
"w",
".",
"lower",
"(",
")",
"for",
"w",
"in",
"words",
"]",
"return",
"set",
"(",
"w",
"for",
"w",
"in",
"tmp",
"if",
"w",
"in",
"self",
".",
"_word_frequency",
".",
"dictionary",
"or",
"not",
"self",
".",
"_check_if_should_check",
"(",
"w",
")",
")"
] | The subset of `words` that appear in the dictionary of words
Args:
words (list): List of words to determine which are in the \
corpus
Returns:
set: The set of those words from the input that are in the \
corpus | [
"The",
"subset",
"of",
"words",
"that",
"appear",
"in",
"the",
"dictionary",
"of",
"words"
] | fa96024c0cdeba99e10e11060d5fd7aba796b271 | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L157-L172 |
5,664 | barrust/pyspellchecker | spellchecker/spellchecker.py | SpellChecker.edit_distance_1 | def edit_distance_1(self, word):
""" Compute all strings that are one edit away from `word` using only
the letters in the corpus
Args:
word (str): The word for which to calculate the edit distance
Returns:
set: The set of strings that are edit distance one from the \
provided word """
word = word.lower()
if self._check_if_should_check(word) is False:
return {word}
letters = self._word_frequency.letters
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [L + R[1:] for L, R in splits if R]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]
replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
inserts = [L + c + R for L, R in splits for c in letters]
return set(deletes + transposes + replaces + inserts) | python | def edit_distance_1(self, word):
""" Compute all strings that are one edit away from `word` using only
the letters in the corpus
Args:
word (str): The word for which to calculate the edit distance
Returns:
set: The set of strings that are edit distance one from the \
provided word """
word = word.lower()
if self._check_if_should_check(word) is False:
return {word}
letters = self._word_frequency.letters
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [L + R[1:] for L, R in splits if R]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]
replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
inserts = [L + c + R for L, R in splits for c in letters]
return set(deletes + transposes + replaces + inserts) | [
"def",
"edit_distance_1",
"(",
"self",
",",
"word",
")",
":",
"word",
"=",
"word",
".",
"lower",
"(",
")",
"if",
"self",
".",
"_check_if_should_check",
"(",
"word",
")",
"is",
"False",
":",
"return",
"{",
"word",
"}",
"letters",
"=",
"self",
".",
"_word_frequency",
".",
"letters",
"splits",
"=",
"[",
"(",
"word",
"[",
":",
"i",
"]",
",",
"word",
"[",
"i",
":",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"word",
")",
"+",
"1",
")",
"]",
"deletes",
"=",
"[",
"L",
"+",
"R",
"[",
"1",
":",
"]",
"for",
"L",
",",
"R",
"in",
"splits",
"if",
"R",
"]",
"transposes",
"=",
"[",
"L",
"+",
"R",
"[",
"1",
"]",
"+",
"R",
"[",
"0",
"]",
"+",
"R",
"[",
"2",
":",
"]",
"for",
"L",
",",
"R",
"in",
"splits",
"if",
"len",
"(",
"R",
")",
">",
"1",
"]",
"replaces",
"=",
"[",
"L",
"+",
"c",
"+",
"R",
"[",
"1",
":",
"]",
"for",
"L",
",",
"R",
"in",
"splits",
"if",
"R",
"for",
"c",
"in",
"letters",
"]",
"inserts",
"=",
"[",
"L",
"+",
"c",
"+",
"R",
"for",
"L",
",",
"R",
"in",
"splits",
"for",
"c",
"in",
"letters",
"]",
"return",
"set",
"(",
"deletes",
"+",
"transposes",
"+",
"replaces",
"+",
"inserts",
")"
] | Compute all strings that are one edit away from `word` using only
the letters in the corpus
Args:
word (str): The word for which to calculate the edit distance
Returns:
set: The set of strings that are edit distance one from the \
provided word | [
"Compute",
"all",
"strings",
"that",
"are",
"one",
"edit",
"away",
"from",
"word",
"using",
"only",
"the",
"letters",
"in",
"the",
"corpus"
] | fa96024c0cdeba99e10e11060d5fd7aba796b271 | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L186-L204 |
5,665 | barrust/pyspellchecker | spellchecker/spellchecker.py | SpellChecker.edit_distance_2 | def edit_distance_2(self, word):
""" Compute all strings that are two edits away from `word` using only
the letters in the corpus
Args:
word (str): The word for which to calculate the edit distance
Returns:
list: The list of strings that are edit distance two from the \
provided word """
word = word.lower()
return [
e2 for e1 in self.edit_distance_1(word) for e2 in self.edit_distance_1(e1)
] | python | def edit_distance_2(self, word):
""" Compute all strings that are two edits away from `word` using only
the letters in the corpus
Args:
word (str): The word for which to calculate the edit distance
Returns:
list: The list of strings that are edit distance two from the \
provided word """
word = word.lower()
return [
e2 for e1 in self.edit_distance_1(word) for e2 in self.edit_distance_1(e1)
] | [
"def",
"edit_distance_2",
"(",
"self",
",",
"word",
")",
":",
"word",
"=",
"word",
".",
"lower",
"(",
")",
"return",
"[",
"e2",
"for",
"e1",
"in",
"self",
".",
"edit_distance_1",
"(",
"word",
")",
"for",
"e2",
"in",
"self",
".",
"edit_distance_1",
"(",
"e1",
")",
"]"
] | Compute all strings that are two edits away from `word` using only
the letters in the corpus
Args:
word (str): The word for which to calculate the edit distance
Returns:
list: The list of strings that are edit distance two from the \
provided word | [
"Compute",
"all",
"strings",
"that",
"are",
"two",
"edits",
"away",
"from",
"word",
"using",
"only",
"the",
"letters",
"in",
"the",
"corpus"
] | fa96024c0cdeba99e10e11060d5fd7aba796b271 | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L206-L218 |
5,666 | barrust/pyspellchecker | spellchecker/spellchecker.py | SpellChecker.__edit_distance_alt | def __edit_distance_alt(self, words):
""" Compute all strings that are 1 edits away from all the words using
only the letters in the corpus
Args:
words (list): The words for which to calculate the edit distance
Returns:
list: The list of strings that are one edit away from the \
provided words """
words = [x.lower() for x in words]
return [e2 for e1 in words for e2 in self.edit_distance_1(e1)] | python | def __edit_distance_alt(self, words):
""" Compute all strings that are 1 edits away from all the words using
only the letters in the corpus
Args:
words (list): The words for which to calculate the edit distance
Returns:
list: The list of strings that are one edit away from the \
provided words """
words = [x.lower() for x in words]
return [e2 for e1 in words for e2 in self.edit_distance_1(e1)] | [
"def",
"__edit_distance_alt",
"(",
"self",
",",
"words",
")",
":",
"words",
"=",
"[",
"x",
".",
"lower",
"(",
")",
"for",
"x",
"in",
"words",
"]",
"return",
"[",
"e2",
"for",
"e1",
"in",
"words",
"for",
"e2",
"in",
"self",
".",
"edit_distance_1",
"(",
"e1",
")",
"]"
] | Compute all strings that are one edit away from all the words using
only the letters in the corpus
Args:
words (list): The words for which to calculate the edit distance
Returns:
list: The list of strings that are one edit away from the \
provided words | [
"Compute",
"all",
"strings",
"that",
"are",
"1",
"edits",
"away",
"from",
"all",
"the",
"words",
"using",
"only",
"the",
"letters",
"in",
"the",
"corpus"
] | fa96024c0cdeba99e10e11060d5fd7aba796b271 | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L220-L230 |
5,667 | barrust/pyspellchecker | spellchecker/spellchecker.py | WordFrequency.pop | def pop(self, key, default=None):
""" Remove the key and return the associated value or default if not
found
Args:
key (str): The key to remove
default (obj): The value to return if key is not present """
return self._dictionary.pop(key.lower(), default) | python | def pop(self, key, default=None):
""" Remove the key and return the associated value or default if not
found
Args:
key (str): The key to remove
default (obj): The value to return if key is not present """
return self._dictionary.pop(key.lower(), default) | [
"def",
"pop",
"(",
"self",
",",
"key",
",",
"default",
"=",
"None",
")",
":",
"return",
"self",
".",
"_dictionary",
".",
"pop",
"(",
"key",
".",
"lower",
"(",
")",
",",
"default",
")"
] | Remove the key and return the associated value or default if not
found
Args:
key (str): The key to remove
default (obj): The value to return if key is not present | [
"Remove",
"the",
"key",
"and",
"return",
"the",
"associated",
"value",
"or",
"default",
"if",
"not",
"found"
] | fa96024c0cdeba99e10e11060d5fd7aba796b271 | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L275-L282 |
5,668 | barrust/pyspellchecker | spellchecker/spellchecker.py | WordFrequency.items | def items(self):
""" Iterator over the words in the dictionary
Yields:
str: The next word in the dictionary
int: The number of instances in the dictionary
Note:
This is the same as `dict.items()` """
for word in self._dictionary.keys():
yield word, self._dictionary[word] | python | def items(self):
""" Iterator over the words in the dictionary
Yields:
str: The next word in the dictionary
int: The number of instances in the dictionary
Note:
This is the same as `dict.items()` """
for word in self._dictionary.keys():
yield word, self._dictionary[word] | [
"def",
"items",
"(",
"self",
")",
":",
"for",
"word",
"in",
"self",
".",
"_dictionary",
".",
"keys",
"(",
")",
":",
"yield",
"word",
",",
"self",
".",
"_dictionary",
"[",
"word",
"]"
] | Iterator over the words in the dictionary
Yields:
str: The next word in the dictionary
int: The number of instances in the dictionary
Note:
This is the same as `dict.items()` | [
"Iterator",
"over",
"the",
"words",
"in",
"the",
"dictionary"
] | fa96024c0cdeba99e10e11060d5fd7aba796b271 | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L350-L359 |
5,669 | barrust/pyspellchecker | spellchecker/spellchecker.py | WordFrequency.load_dictionary | def load_dictionary(self, filename, encoding="utf-8"):
""" Load in a pre-built word frequency list
Args:
filename (str): The filepath to the json (optionally gzipped) \
file to be loaded
encoding (str): The encoding of the dictionary """
with load_file(filename, encoding) as data:
self._dictionary.update(json.loads(data.lower(), encoding=encoding))
self._update_dictionary() | python | def load_dictionary(self, filename, encoding="utf-8"):
""" Load in a pre-built word frequency list
Args:
filename (str): The filepath to the json (optionally gzipped) \
file to be loaded
encoding (str): The encoding of the dictionary """
with load_file(filename, encoding) as data:
self._dictionary.update(json.loads(data.lower(), encoding=encoding))
self._update_dictionary() | [
"def",
"load_dictionary",
"(",
"self",
",",
"filename",
",",
"encoding",
"=",
"\"utf-8\"",
")",
":",
"with",
"load_file",
"(",
"filename",
",",
"encoding",
")",
"as",
"data",
":",
"self",
".",
"_dictionary",
".",
"update",
"(",
"json",
".",
"loads",
"(",
"data",
".",
"lower",
"(",
")",
",",
"encoding",
"=",
"encoding",
")",
")",
"self",
".",
"_update_dictionary",
"(",
")"
] | Load in a pre-built word frequency list
Args:
filename (str): The filepath to the json (optionally gzipped) \
file to be loaded
encoding (str): The encoding of the dictionary | [
"Load",
"in",
"a",
"pre",
"-",
"built",
"word",
"frequency",
"list"
] | fa96024c0cdeba99e10e11060d5fd7aba796b271 | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L361-L370 |
5,670 | barrust/pyspellchecker | spellchecker/spellchecker.py | WordFrequency.load_text_file | def load_text_file(self, filename, encoding="utf-8", tokenizer=None):
""" Load in a text file from which to generate a word frequency list
Args:
filename (str): The filepath to the text file to be loaded
encoding (str): The encoding of the text file
tokenizer (function): The function to use to tokenize a string
"""
with load_file(filename, encoding=encoding) as data:
self.load_text(data, tokenizer) | python | def load_text_file(self, filename, encoding="utf-8", tokenizer=None):
""" Load in a text file from which to generate a word frequency list
Args:
filename (str): The filepath to the text file to be loaded
encoding (str): The encoding of the text file
tokenizer (function): The function to use to tokenize a string
"""
with load_file(filename, encoding=encoding) as data:
self.load_text(data, tokenizer) | [
"def",
"load_text_file",
"(",
"self",
",",
"filename",
",",
"encoding",
"=",
"\"utf-8\"",
",",
"tokenizer",
"=",
"None",
")",
":",
"with",
"load_file",
"(",
"filename",
",",
"encoding",
"=",
"encoding",
")",
"as",
"data",
":",
"self",
".",
"load_text",
"(",
"data",
",",
"tokenizer",
")"
] | Load in a text file from which to generate a word frequency list
Args:
filename (str): The filepath to the text file to be loaded
encoding (str): The encoding of the text file
tokenizer (function): The function to use to tokenize a string | [
"Load",
"in",
"a",
"text",
"file",
"from",
"which",
"to",
"generate",
"a",
"word",
"frequency",
"list"
] | fa96024c0cdeba99e10e11060d5fd7aba796b271 | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L372-L381 |
5,671 | barrust/pyspellchecker | spellchecker/spellchecker.py | WordFrequency.load_text | def load_text(self, text, tokenizer=None):
""" Load text from which to generate a word frequency list
Args:
text (str): The text to be loaded
tokenizer (function): The function to use to tokenize a string
"""
if tokenizer:
words = [x.lower() for x in tokenizer(text)]
else:
words = self.tokenize(text)
self._dictionary.update(words)
self._update_dictionary() | python | def load_text(self, text, tokenizer=None):
""" Load text from which to generate a word frequency list
Args:
text (str): The text to be loaded
tokenizer (function): The function to use to tokenize a string
"""
if tokenizer:
words = [x.lower() for x in tokenizer(text)]
else:
words = self.tokenize(text)
self._dictionary.update(words)
self._update_dictionary() | [
"def",
"load_text",
"(",
"self",
",",
"text",
",",
"tokenizer",
"=",
"None",
")",
":",
"if",
"tokenizer",
":",
"words",
"=",
"[",
"x",
".",
"lower",
"(",
")",
"for",
"x",
"in",
"tokenizer",
"(",
"text",
")",
"]",
"else",
":",
"words",
"=",
"self",
".",
"tokenize",
"(",
"text",
")",
"self",
".",
"_dictionary",
".",
"update",
"(",
"words",
")",
"self",
".",
"_update_dictionary",
"(",
")"
] | Load text from which to generate a word frequency list
Args:
text (str): The text to be loaded
tokenizer (function): The function to use to tokenize a string | [
"Load",
"text",
"from",
"which",
"to",
"generate",
"a",
"word",
"frequency",
"list"
] | fa96024c0cdeba99e10e11060d5fd7aba796b271 | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L383-L396 |
5,672 | barrust/pyspellchecker | spellchecker/spellchecker.py | WordFrequency.load_words | def load_words(self, words):
""" Load a list of words from which to generate a word frequency list
Args:
words (list): The list of words to be loaded """
self._dictionary.update([word.lower() for word in words])
self._update_dictionary() | python | def load_words(self, words):
""" Load a list of words from which to generate a word frequency list
Args:
words (list): The list of words to be loaded """
self._dictionary.update([word.lower() for word in words])
self._update_dictionary() | [
"def",
"load_words",
"(",
"self",
",",
"words",
")",
":",
"self",
".",
"_dictionary",
".",
"update",
"(",
"[",
"word",
".",
"lower",
"(",
")",
"for",
"word",
"in",
"words",
"]",
")",
"self",
".",
"_update_dictionary",
"(",
")"
] | Load a list of words from which to generate a word frequency list
Args:
words (list): The list of words to be loaded | [
"Load",
"a",
"list",
"of",
"words",
"from",
"which",
"to",
"generate",
"a",
"word",
"frequency",
"list"
] | fa96024c0cdeba99e10e11060d5fd7aba796b271 | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L398-L404 |
5,673 | barrust/pyspellchecker | spellchecker/spellchecker.py | WordFrequency.remove_words | def remove_words(self, words):
""" Remove a list of words from the word frequency list
Args:
words (list): The list of words to remove """
for word in words:
self._dictionary.pop(word.lower())
self._update_dictionary() | python | def remove_words(self, words):
""" Remove a list of words from the word frequency list
Args:
words (list): The list of words to remove """
for word in words:
self._dictionary.pop(word.lower())
self._update_dictionary() | [
"def",
"remove_words",
"(",
"self",
",",
"words",
")",
":",
"for",
"word",
"in",
"words",
":",
"self",
".",
"_dictionary",
".",
"pop",
"(",
"word",
".",
"lower",
"(",
")",
")",
"self",
".",
"_update_dictionary",
"(",
")"
] | Remove a list of words from the word frequency list
Args:
words (list): The list of words to remove | [
"Remove",
"a",
"list",
"of",
"words",
"from",
"the",
"word",
"frequency",
"list"
] | fa96024c0cdeba99e10e11060d5fd7aba796b271 | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L413-L420 |
5,674 | barrust/pyspellchecker | spellchecker/spellchecker.py | WordFrequency.remove | def remove(self, word):
""" Remove a word from the word frequency list
Args:
word (str): The word to remove """
self._dictionary.pop(word.lower())
self._update_dictionary() | python | def remove(self, word):
""" Remove a word from the word frequency list
Args:
word (str): The word to remove """
self._dictionary.pop(word.lower())
self._update_dictionary() | [
"def",
"remove",
"(",
"self",
",",
"word",
")",
":",
"self",
".",
"_dictionary",
".",
"pop",
"(",
"word",
".",
"lower",
"(",
")",
")",
"self",
".",
"_update_dictionary",
"(",
")"
] | Remove a word from the word frequency list
Args:
word (str): The word to remove | [
"Remove",
"a",
"word",
"from",
"the",
"word",
"frequency",
"list"
] | fa96024c0cdeba99e10e11060d5fd7aba796b271 | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L422-L428 |
5,675 | barrust/pyspellchecker | spellchecker/spellchecker.py | WordFrequency.remove_by_threshold | def remove_by_threshold(self, threshold=5):
""" Remove all words at, or below, the provided threshold
Args:
threshold (int): The threshold at which a word is to be \
removed """
keys = [x for x in self._dictionary.keys()]
for key in keys:
if self._dictionary[key] <= threshold:
self._dictionary.pop(key)
self._update_dictionary() | python | def remove_by_threshold(self, threshold=5):
""" Remove all words at, or below, the provided threshold
Args:
threshold (int): The threshold at which a word is to be \
removed """
keys = [x for x in self._dictionary.keys()]
for key in keys:
if self._dictionary[key] <= threshold:
self._dictionary.pop(key)
self._update_dictionary() | [
"def",
"remove_by_threshold",
"(",
"self",
",",
"threshold",
"=",
"5",
")",
":",
"keys",
"=",
"[",
"x",
"for",
"x",
"in",
"self",
".",
"_dictionary",
".",
"keys",
"(",
")",
"]",
"for",
"key",
"in",
"keys",
":",
"if",
"self",
".",
"_dictionary",
"[",
"key",
"]",
"<=",
"threshold",
":",
"self",
".",
"_dictionary",
".",
"pop",
"(",
"key",
")",
"self",
".",
"_update_dictionary",
"(",
")"
] | Remove all words at, or below, the provided threshold
Args:
threshold (int): The threshold at which a word is to be \
removed | [
"Remove",
"all",
"words",
"at",
"or",
"below",
"the",
"provided",
"threshold"
] | fa96024c0cdeba99e10e11060d5fd7aba796b271 | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L430-L440 |
5,676 | barrust/pyspellchecker | spellchecker/spellchecker.py | WordFrequency._update_dictionary | def _update_dictionary(self):
""" Update the word frequency object """
self._total_words = sum(self._dictionary.values())
self._unique_words = len(self._dictionary.keys())
self._letters = set()
for key in self._dictionary:
self._letters.update(key) | python | def _update_dictionary(self):
""" Update the word frequency object """
self._total_words = sum(self._dictionary.values())
self._unique_words = len(self._dictionary.keys())
self._letters = set()
for key in self._dictionary:
self._letters.update(key) | [
"def",
"_update_dictionary",
"(",
"self",
")",
":",
"self",
".",
"_total_words",
"=",
"sum",
"(",
"self",
".",
"_dictionary",
".",
"values",
"(",
")",
")",
"self",
".",
"_unique_words",
"=",
"len",
"(",
"self",
".",
"_dictionary",
".",
"keys",
"(",
")",
")",
"self",
".",
"_letters",
"=",
"set",
"(",
")",
"for",
"key",
"in",
"self",
".",
"_dictionary",
":",
"self",
".",
"_letters",
".",
"update",
"(",
"key",
")"
] | Update the word frequency object | [
"Update",
"the",
"word",
"frequency",
"object"
] | fa96024c0cdeba99e10e11060d5fd7aba796b271 | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L442-L448 |
5,677 | barrust/pyspellchecker | spellchecker/utils.py | load_file | def load_file(filename, encoding):
""" Context manager to handle opening a gzip or text file correctly and
reading all the data
Args:
filename (str): The filename to open
encoding (str): The file encoding to use
Yields:
str: The string data from the file read
"""
try:
with gzip.open(filename, mode="rt") as fobj:
yield fobj.read()
except (OSError, IOError):
with OPEN(filename, mode="r", encoding=encoding) as fobj:
yield fobj.read() | python | def load_file(filename, encoding):
""" Context manager to handle opening a gzip or text file correctly and
reading all the data
Args:
filename (str): The filename to open
encoding (str): The file encoding to use
Yields:
str: The string data from the file read
"""
try:
with gzip.open(filename, mode="rt") as fobj:
yield fobj.read()
except (OSError, IOError):
with OPEN(filename, mode="r", encoding=encoding) as fobj:
yield fobj.read() | [
"def",
"load_file",
"(",
"filename",
",",
"encoding",
")",
":",
"try",
":",
"with",
"gzip",
".",
"open",
"(",
"filename",
",",
"mode",
"=",
"\"rt\"",
")",
"as",
"fobj",
":",
"yield",
"fobj",
".",
"read",
"(",
")",
"except",
"(",
"OSError",
",",
"IOError",
")",
":",
"with",
"OPEN",
"(",
"filename",
",",
"mode",
"=",
"\"r\"",
",",
"encoding",
"=",
"encoding",
")",
"as",
"fobj",
":",
"yield",
"fobj",
".",
"read",
"(",
")"
] | Context manager to handle opening a gzip or text file correctly and
reading all the data
Args:
filename (str): The filename to open
encoding (str): The file encoding to use
Yields:
str: The string data from the file read | [
"Context",
"manager",
"to",
"handle",
"opening",
"a",
"gzip",
"or",
"text",
"file",
"correctly",
"and",
"reading",
"all",
"the",
"data"
] | fa96024c0cdeba99e10e11060d5fd7aba796b271 | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/utils.py#L16-L31 |
5,678 | barrust/pyspellchecker | spellchecker/utils.py | write_file | def write_file(filepath, encoding, gzipped, data):
""" Write the data to file either as a gzip file or text based on the
gzipped parameter
Args:
filepath (str): The filename to open
encoding (str): The file encoding to use
gzipped (bool): Whether the file should be gzipped or not
data (str): The data to be written out
"""
if gzipped:
with gzip.open(filepath, "wt") as fobj:
fobj.write(data)
else:
with OPEN(filepath, "w", encoding=encoding) as fobj:
if sys.version_info < (3, 0):
data = data.decode(encoding)
fobj.write(data) | python | def write_file(filepath, encoding, gzipped, data):
""" Write the data to file either as a gzip file or text based on the
gzipped parameter
Args:
filepath (str): The filename to open
encoding (str): The file encoding to use
gzipped (bool): Whether the file should be gzipped or not
data (str): The data to be written out
"""
if gzipped:
with gzip.open(filepath, "wt") as fobj:
fobj.write(data)
else:
with OPEN(filepath, "w", encoding=encoding) as fobj:
if sys.version_info < (3, 0):
data = data.decode(encoding)
fobj.write(data) | [
"def",
"write_file",
"(",
"filepath",
",",
"encoding",
",",
"gzipped",
",",
"data",
")",
":",
"if",
"gzipped",
":",
"with",
"gzip",
".",
"open",
"(",
"filepath",
",",
"\"wt\"",
")",
"as",
"fobj",
":",
"fobj",
".",
"write",
"(",
"data",
")",
"else",
":",
"with",
"OPEN",
"(",
"filepath",
",",
"\"w\"",
",",
"encoding",
"=",
"encoding",
")",
"as",
"fobj",
":",
"if",
"sys",
".",
"version_info",
"<",
"(",
"3",
",",
"0",
")",
":",
"data",
"=",
"data",
".",
"decode",
"(",
"encoding",
")",
"fobj",
".",
"write",
"(",
"data",
")"
] | Write the data to file either as a gzip file or text based on the
gzipped parameter
Args:
filepath (str): The filename to open
encoding (str): The file encoding to use
gzipped (bool): Whether the file should be gzipped or not
data (str): The data to be written out | [
"Write",
"the",
"data",
"to",
"file",
"either",
"as",
"a",
"gzip",
"file",
"or",
"text",
"based",
"on",
"the",
"gzipped",
"parameter"
] | fa96024c0cdeba99e10e11060d5fd7aba796b271 | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/utils.py#L34-L51 |
5,679 | merantix/picasso | picasso/examples/keras/model.py | KerasMNISTModel.preprocess | def preprocess(self, raw_inputs):
"""Convert images into the format required by our model.
Our model requires that inputs be grayscale (mode 'L'), be resized to
`MNIST_DIM`, and be represented as float32 numpy arrays in range
[0, 1].
Args:
raw_inputs (list of Images): a list of PIL Image objects
Returns:
array (float32): num images * height * width * num channels
"""
image_arrays = []
for raw_im in raw_inputs:
im = raw_im.convert('L')
im = im.resize(MNIST_DIM, Image.ANTIALIAS)
arr = np.array(im)
image_arrays.append(arr)
inputs = np.array(image_arrays)
return inputs.reshape(len(inputs),
MNIST_DIM[0],
MNIST_DIM[1], 1).astype('float32') / 255 | python | def preprocess(self, raw_inputs):
"""Convert images into the format required by our model.
Our model requires that inputs be grayscale (mode 'L'), be resized to
`MNIST_DIM`, and be represented as float32 numpy arrays in range
[0, 1].
Args:
raw_inputs (list of Images): a list of PIL Image objects
Returns:
array (float32): num images * height * width * num channels
"""
image_arrays = []
for raw_im in raw_inputs:
im = raw_im.convert('L')
im = im.resize(MNIST_DIM, Image.ANTIALIAS)
arr = np.array(im)
image_arrays.append(arr)
inputs = np.array(image_arrays)
return inputs.reshape(len(inputs),
MNIST_DIM[0],
MNIST_DIM[1], 1).astype('float32') / 255 | [
"def",
"preprocess",
"(",
"self",
",",
"raw_inputs",
")",
":",
"image_arrays",
"=",
"[",
"]",
"for",
"raw_im",
"in",
"raw_inputs",
":",
"im",
"=",
"raw_im",
".",
"convert",
"(",
"'L'",
")",
"im",
"=",
"im",
".",
"resize",
"(",
"MNIST_DIM",
",",
"Image",
".",
"ANTIALIAS",
")",
"arr",
"=",
"np",
".",
"array",
"(",
"im",
")",
"image_arrays",
".",
"append",
"(",
"arr",
")",
"inputs",
"=",
"np",
".",
"array",
"(",
"image_arrays",
")",
"return",
"inputs",
".",
"reshape",
"(",
"len",
"(",
"inputs",
")",
",",
"MNIST_DIM",
"[",
"0",
"]",
",",
"MNIST_DIM",
"[",
"1",
"]",
",",
"1",
")",
".",
"astype",
"(",
"'float32'",
")",
"/",
"255"
] | Convert images into the format required by our model.
Our model requires that inputs be grayscale (mode 'L'), be resized to
`MNIST_DIM`, and be represented as float32 numpy arrays in range
[0, 1].
Args:
raw_inputs (list of Images): a list of PIL Image objects
Returns:
array (float32): num images * height * width * num channels | [
"Convert",
"images",
"into",
"the",
"format",
"required",
"by",
"our",
"model",
"."
] | d276b9b7408dd1032fe0ccb84ea9b6604a32915e | https://github.com/merantix/picasso/blob/d276b9b7408dd1032fe0ccb84ea9b6604a32915e/picasso/examples/keras/model.py#L23-L47 |
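A standalone sketch of the same transform. MNIST_DIM is assumed to be (28, 28), and Image.LANCZOS is used as the modern Pillow name for the ANTIALIAS filter seen above:

import numpy as np
from PIL import Image

MNIST_DIM = (28, 28)  # assumption based on the MNIST context
raw = [Image.new("RGB", (100, 80)), Image.new("L", (28, 28))]
arrays = [np.array(im.convert("L").resize(MNIST_DIM, Image.LANCZOS)) for im in raw]
batch = np.array(arrays).reshape(len(arrays), 28, 28, 1).astype("float32") / 255
print(batch.shape)  # (2, 28, 28, 1), float32 values in [0, 1]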
5,680 | merantix/picasso | picasso/interfaces/rest.py | initialize_new_session | def initialize_new_session():
"""Check session and initialize if necessary
Before every request, check the user session. If no session exists, add
one and provide temporary locations for images
"""
if 'image_uid_counter' in session and 'image_list' in session:
logger.debug('images are already being tracked')
else:
# reset image list counter for the session
session['image_uid_counter'] = 0
session['image_list'] = []
if 'img_input_dir' in session and 'img_output_dir' in session:
logger.debug('temporary image directories already exist')
else:
# make image upload directory
session['img_input_dir'] = mkdtemp()
session['img_output_dir'] = mkdtemp() | python | def initialize_new_session():
"""Check session and initialize if necessary
Before every request, check the user session. If no session exists, add
one and provide temporary locations for images
"""
if 'image_uid_counter' in session and 'image_list' in session:
logger.debug('images are already being tracked')
else:
# reset image list counter for the session
session['image_uid_counter'] = 0
session['image_list'] = []
if 'img_input_dir' in session and 'img_output_dir' in session:
logger.debug('temporary image directories already exist')
else:
# make image upload directory
session['img_input_dir'] = mkdtemp()
session['img_output_dir'] = mkdtemp() | [
"def",
"initialize_new_session",
"(",
")",
":",
"if",
"'image_uid_counter'",
"in",
"session",
"and",
"'image_list'",
"in",
"session",
":",
"logger",
".",
"debug",
"(",
"'images are already being tracked'",
")",
"else",
":",
"# reset image list counter for the session",
"session",
"[",
"'image_uid_counter'",
"]",
"=",
"0",
"session",
"[",
"'image_list'",
"]",
"=",
"[",
"]",
"if",
"'img_input_dir'",
"in",
"session",
"and",
"'img_output_dir'",
"in",
"session",
":",
"logger",
".",
"debug",
"(",
"'temporary image directories already exist'",
")",
"else",
":",
"# make image upload directory",
"session",
"[",
"'img_input_dir'",
"]",
"=",
"mkdtemp",
"(",
")",
"session",
"[",
"'img_output_dir'",
"]",
"=",
"mkdtemp",
"(",
")"
] | Check session and initialize if necessary
Before every request, check the user session. If no session exists, add
one and provide temporary locations for images | [
"Check",
"session",
"and",
"initialize",
"if",
"necessary"
] | d276b9b7408dd1032fe0ccb84ea9b6604a32915e | https://github.com/merantix/picasso/blob/d276b9b7408dd1032fe0ccb84ea9b6604a32915e/picasso/interfaces/rest.py#L42-L60 |
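A minimal sketch of how a hook like this is typically registered in Flask; the decorator wiring here is an assumption, not a claim about picasso's actual setup:

from flask import Flask, session

app = Flask(__name__)
app.secret_key = "dev-only"  # sessions require a secret key

@app.before_request
def _init_session():
    # Same idea as above: create the tracking keys only when missing.
    session.setdefault("image_uid_counter", 0)
    session.setdefault("image_list", [])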
5,681 | merantix/picasso | picasso/interfaces/rest.py | images | def images():
"""Upload images via REST interface
Check if file upload was successful and sanitize user input.
TODO: return file URL instead of filename
"""
if request.method == 'POST':
file_upload = request.files['file']
if file_upload:
image = dict()
image['filename'] = secure_filename(file_upload.filename)
full_path = os.path.join(session['img_input_dir'],
image['filename'])
file_upload.save(full_path)
image['uid'] = session['image_uid_counter']
session['image_uid_counter'] += 1
current_app.logger.debug('File %d is saved as %s',
image['uid'],
image['filename'])
session['image_list'].append(image)
return jsonify(ok="true", file=image['filename'], uid=image['uid'])
return jsonify(ok="false")
if request.method == 'GET':
return jsonify(images=session['image_list']) | python | def images():
"""Upload images via REST interface
Check if file upload was successful and sanitize user input.
TODO: return file URL instead of filename
"""
if request.method == 'POST':
file_upload = request.files['file']
if file_upload:
image = dict()
image['filename'] = secure_filename(file_upload.filename)
full_path = os.path.join(session['img_input_dir'],
image['filename'])
file_upload.save(full_path)
image['uid'] = session['image_uid_counter']
session['image_uid_counter'] += 1
current_app.logger.debug('File %d is saved as %s',
image['uid'],
image['filename'])
session['image_list'].append(image)
return jsonify(ok="true", file=image['filename'], uid=image['uid'])
return jsonify(ok="false")
if request.method == 'GET':
return jsonify(images=session['image_list']) | [
"def",
"images",
"(",
")",
":",
"if",
"request",
".",
"method",
"==",
"'POST'",
":",
"file_upload",
"=",
"request",
".",
"files",
"[",
"'file'",
"]",
"if",
"file_upload",
":",
"image",
"=",
"dict",
"(",
")",
"image",
"[",
"'filename'",
"]",
"=",
"secure_filename",
"(",
"file_upload",
".",
"filename",
")",
"full_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"session",
"[",
"'img_input_dir'",
"]",
",",
"image",
"[",
"'filename'",
"]",
")",
"file_upload",
".",
"save",
"(",
"full_path",
")",
"image",
"[",
"'uid'",
"]",
"=",
"session",
"[",
"'image_uid_counter'",
"]",
"session",
"[",
"'image_uid_counter'",
"]",
"+=",
"1",
"current_app",
".",
"logger",
".",
"debug",
"(",
"'File %d is saved as %s'",
",",
"image",
"[",
"'uid'",
"]",
",",
"image",
"[",
"'filename'",
"]",
")",
"session",
"[",
"'image_list'",
"]",
".",
"append",
"(",
"image",
")",
"return",
"jsonify",
"(",
"ok",
"=",
"\"true\"",
",",
"file",
"=",
"image",
"[",
"'filename'",
"]",
",",
"uid",
"=",
"image",
"[",
"'uid'",
"]",
")",
"return",
"jsonify",
"(",
"ok",
"=",
"\"false\"",
")",
"if",
"request",
".",
"method",
"==",
"'GET'",
":",
"return",
"jsonify",
"(",
"images",
"=",
"session",
"[",
"'image_list'",
"]",
")"
] | Upload images via REST interface
Check if file upload was successful and sanitize user input.
TODO: return file URL instead of filename | [
"Upload",
"images",
"via",
"REST",
"interface"
] | d276b9b7408dd1032fe0ccb84ea9b6604a32915e | https://github.com/merantix/picasso/blob/d276b9b7408dd1032fe0ccb84ea9b6604a32915e/picasso/interfaces/rest.py#L84-L109 |
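A hypothetical client call against this endpoint; the host, port, and /api/images route are assumptions:

import requests

with open("digit.png", "rb") as fh:
    resp = requests.post("http://localhost:5000/api/images", files={"file": fh})
print(resp.json())  # e.g. {"ok": "true", "file": "digit.png", "uid": 0}
print(requests.get("http://localhost:5000/api/images").json())  # {"images": [...]}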
5,682 | merantix/picasso | picasso/interfaces/rest.py | visualizers | def visualizers():
"""Get a list of available visualizers
Responds with a JSON list of available visualizers
"""
list_of_visualizers = []
for visualizer in get_visualizations():
list_of_visualizers.append({'name': visualizer})
return jsonify(visualizers=list_of_visualizers) | python | def visualizers():
"""Get a list of available visualizers
Responds with a JSON list of available visualizers
"""
list_of_visualizers = []
for visualizer in get_visualizations():
list_of_visualizers.append({'name': visualizer})
return jsonify(visualizers=list_of_visualizers) | [
"def",
"visualizers",
"(",
")",
":",
"list_of_visualizers",
"=",
"[",
"]",
"for",
"visualizer",
"in",
"get_visualizations",
"(",
")",
":",
"list_of_visualizers",
".",
"append",
"(",
"{",
"'name'",
":",
"visualizer",
"}",
")",
"return",
"jsonify",
"(",
"visualizers",
"=",
"list_of_visualizers",
")"
] | Get a list of available visualizers
Responds with a JSON list of available visualizers | [
"Get",
"a",
"list",
"of",
"available",
"visualizers"
] | d276b9b7408dd1032fe0ccb84ea9b6604a32915e | https://github.com/merantix/picasso/blob/d276b9b7408dd1032fe0ccb84ea9b6604a32915e/picasso/interfaces/rest.py#L113-L122 |
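The matching client-side call, with the route path again assumed:

import requests

resp = requests.get("http://localhost:5000/api/visualizers")
print(resp.json())  # e.g. {"visualizers": [{"name": "..."}, ...]}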
5,683 | merantix/picasso | picasso/interfaces/rest.py | visualize | def visualize():
"""Trigger a visualization via the REST API
Takes a single image and generates the visualization data, returning the
output exactly as given by the target visualization.
"""
session['settings'] = {}
image_uid = request.args.get('image')
vis_name = request.args.get('visualizer')
vis = get_visualizations()[vis_name]
if vis.ALLOWED_SETTINGS:
for key in vis.ALLOWED_SETTINGS.keys():
if request.args.get(key) is not None:
session['settings'][key] = request.args.get(key)
else:
session['settings'][key] = vis.ALLOWED_SETTINGS[key][0]
else:
logger.debug('Selected Visualizer {0} has no settings.'.format(vis_name))
inputs = []
for image in session['image_list']:
if image['uid'] == int(image_uid):
full_path = os.path.join(session['img_input_dir'],
image['filename'])
entry = dict()
entry['filename'] = image['filename']
entry['data'] = Image.open(full_path)
inputs.append(entry)
vis.update_settings(session['settings'])
output = vis.make_visualization(
inputs, output_dir=session['img_output_dir'])
return jsonify(output[0]) | python | def visualize():
"""Trigger a visualization via the REST API
Takes a single image and generates the visualization data, returning the
output exactly as given by the target visualization.
"""
session['settings'] = {}
image_uid = request.args.get('image')
vis_name = request.args.get('visualizer')
vis = get_visualizations()[vis_name]
if vis.ALLOWED_SETTINGS:
for key in vis.ALLOWED_SETTINGS.keys():
if request.args.get(key) is not None:
session['settings'][key] = request.args.get(key)
else:
session['settings'][key] = vis.ALLOWED_SETTINGS[key][0]
else:
logger.debug('Selected Visualizer {0} has no settings.'.format(vis_name))
inputs = []
for image in session['image_list']:
if image['uid'] == int(image_uid):
full_path = os.path.join(session['img_input_dir'],
image['filename'])
entry = dict()
entry['filename'] = image['filename']
entry['data'] = Image.open(full_path)
inputs.append(entry)
vis.update_settings(session['settings'])
output = vis.make_visualization(
inputs, output_dir=session['img_output_dir'])
return jsonify(output[0]) | [
"def",
"visualize",
"(",
")",
":",
"session",
"[",
"'settings'",
"]",
"=",
"{",
"}",
"image_uid",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'image'",
")",
"vis_name",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'visualizer'",
")",
"vis",
"=",
"get_visualizations",
"(",
")",
"[",
"vis_name",
"]",
"if",
"vis",
".",
"ALLOWED_SETTINGS",
":",
"for",
"key",
"in",
"vis",
".",
"ALLOWED_SETTINGS",
".",
"keys",
"(",
")",
":",
"if",
"request",
".",
"args",
".",
"get",
"(",
"key",
")",
"is",
"not",
"None",
":",
"session",
"[",
"'settings'",
"]",
"[",
"key",
"]",
"=",
"request",
".",
"args",
".",
"get",
"(",
"key",
")",
"else",
":",
"session",
"[",
"'settings'",
"]",
"[",
"key",
"]",
"=",
"vis",
".",
"ALLOWED_SETTINGS",
"[",
"key",
"]",
"[",
"0",
"]",
"else",
":",
"logger",
".",
"debug",
"(",
"'Selected Visualizer {0} has no settings.'",
".",
"format",
"(",
"vis_name",
")",
")",
"inputs",
"=",
"[",
"]",
"for",
"image",
"in",
"session",
"[",
"'image_list'",
"]",
":",
"if",
"image",
"[",
"'uid'",
"]",
"==",
"int",
"(",
"image_uid",
")",
":",
"full_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"session",
"[",
"'img_input_dir'",
"]",
",",
"image",
"[",
"'filename'",
"]",
")",
"entry",
"=",
"dict",
"(",
")",
"entry",
"[",
"'filename'",
"]",
"=",
"image",
"[",
"'filename'",
"]",
"entry",
"[",
"'data'",
"]",
"=",
"Image",
".",
"open",
"(",
"full_path",
")",
"inputs",
".",
"append",
"(",
"entry",
")",
"vis",
".",
"update_settings",
"(",
"session",
"[",
"'settings'",
"]",
")",
"output",
"=",
"vis",
".",
"make_visualization",
"(",
"inputs",
",",
"output_dir",
"=",
"session",
"[",
"'img_output_dir'",
"]",
")",
"return",
"jsonify",
"(",
"output",
"[",
"0",
"]",
")"
] | Trigger a visualization via the REST API
Takes a single image and generates the visualization data, returning the
output exactly as given by the target visualization. | [
"Trigger",
"a",
"visualization",
"via",
"the",
"REST",
"API"
] | d276b9b7408dd1032fe0ccb84ea9b6604a32915e | https://github.com/merantix/picasso/blob/d276b9b7408dd1032fe0ccb84ea9b6604a32915e/picasso/interfaces/rest.py#L133-L166 |
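A hypothetical client request mirroring the query parameters read above; the visualizer name and route are examples only:

import requests

params = {"image": 0, "visualizer": "ClassProbabilities"}  # example values
resp = requests.get("http://localhost:5000/api/visualize", params=params)
print(resp.json())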
5,684 | merantix/picasso | picasso/interfaces/rest.py | reset | def reset():
"""Delete the session and clear temporary directories
"""
shutil.rmtree(session['img_input_dir'])
shutil.rmtree(session['img_output_dir'])
session.clear()
return jsonify(ok='true') | python | def reset():
"""Delete the session and clear temporary directories
"""
shutil.rmtree(session['img_input_dir'])
shutil.rmtree(session['img_output_dir'])
session.clear()
return jsonify(ok='true') | [
"def",
"reset",
"(",
")",
":",
"shutil",
".",
"rmtree",
"(",
"session",
"[",
"'img_input_dir'",
"]",
")",
"shutil",
".",
"rmtree",
"(",
"session",
"[",
"'img_output_dir'",
"]",
")",
"session",
".",
"clear",
"(",
")",
"return",
"jsonify",
"(",
"ok",
"=",
"'true'",
")"
] | Delete the session and clear temporary directories | [
"Delete",
"the",
"session",
"and",
"clear",
"temporary",
"directories"
] | d276b9b7408dd1032fe0ccb84ea9b6604a32915e | https://github.com/merantix/picasso/blob/d276b9b7408dd1032fe0ccb84ea9b6604a32915e/picasso/interfaces/rest.py#L170-L177 |
5,685 | merantix/picasso | picasso/visualizations/base.py | BaseVisualization.update_settings | def update_settings(self, settings):
"""Update the settings
If a derived class has an ALLOWED_SETTINGS dict, we check here that
incoming settings from the web app are allowed, and set the child
properties as appropriate.
"""
def error_string(setting, setting_val):
return ('{val} is not an acceptable value for '
'parameter {param} for visualization'
'{vis}.').format(val=setting_val,
param=setting,
vis=self.__class__.__name__)
for setting in settings:
if settings[setting] in self.ALLOWED_SETTINGS[setting]:
# if the setting is allowed, set the attribute but remove
# invalid variable characters
#
# see:
#
# https://stackoverflow.com/questions/3303312/how-do-i-convert-a-string-to-a-valid-variable-name-in-python
                setattr(self, '_' + re.sub(r'\W|^(?=\d)', '_', setting).lower(),
settings[setting])
else:
raise ValueError(error_string(settings[setting], setting)) | python | def update_settings(self, settings):
"""Update the settings
If a derived class has an ALLOWED_SETTINGS dict, we check here that
incoming settings from the web app are allowed, and set the child
properties as appropriate.
"""
def error_string(setting, setting_val):
return ('{val} is not an acceptable value for '
'parameter {param} for visualization'
'{vis}.').format(val=setting_val,
param=setting,
vis=self.__class__.__name__)
for setting in settings:
if settings[setting] in self.ALLOWED_SETTINGS[setting]:
# if the setting is allowed, set the attribute but remove
# invalid variable characters
#
# see:
#
# https://stackoverflow.com/questions/3303312/how-do-i-convert-a-string-to-a-valid-variable-name-in-python
                setattr(self, '_' + re.sub(r'\W|^(?=\d)', '_', setting).lower(),
settings[setting])
else:
raise ValueError(error_string(settings[setting], setting)) | [
"def",
"update_settings",
"(",
"self",
",",
"settings",
")",
":",
"def",
"error_string",
"(",
"setting",
",",
"setting_val",
")",
":",
"return",
"(",
"'{val} is not an acceptable value for '",
"'parameter {param} for visualization'",
"'{vis}.'",
")",
".",
"format",
"(",
"val",
"=",
"setting_val",
",",
"param",
"=",
"setting",
",",
"vis",
"=",
"self",
".",
"__class__",
".",
"__name__",
")",
"for",
"setting",
"in",
"settings",
":",
"if",
"settings",
"[",
"setting",
"]",
"in",
"self",
".",
"ALLOWED_SETTINGS",
"[",
"setting",
"]",
":",
"# if the setting is allowed, set the attribute but remove",
"# invalid variable characters",
"#",
"# see:",
"#",
"# https://stackoverflow.com/questions/3303312/how-do-i-convert-a-string-to-a-valid-variable-name-in-python",
"setattr",
"(",
"self",
",",
"'_'",
"+",
"re",
".",
"sub",
"(",
"'\\W|^(?=\\d)'",
",",
"'_'",
",",
"setting",
")",
".",
"lower",
"(",
")",
",",
"settings",
"[",
"setting",
"]",
")",
"else",
":",
"raise",
"ValueError",
"(",
"error_string",
"(",
"settings",
"[",
"setting",
"]",
",",
"setting",
")",
")"
] | Update the settings
If a derived class has an ALLOWED_SETTINGS dict, we check here that
incoming settings from the web app are allowed, and set the child
properties as appropriate. | [
"Update",
"the",
"settings"
] | d276b9b7408dd1032fe0ccb84ea9b6604a32915e | https://github.com/merantix/picasso/blob/d276b9b7408dd1032fe0ccb84ea9b6604a32915e/picasso/visualizations/base.py#L60-L87 |
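The settings contract in isolation, with a stand-in class so the snippet is self-contained (picasso's real BaseVisualization carries more state than this):

import re

class DemoVis:
    ALLOWED_SETTINGS = {"Color Map": ["viridis", "gray"]}  # example setting

    def update_settings(self, settings):
        for setting, value in settings.items():
            if value in self.ALLOWED_SETTINGS[setting]:
                # Sanitize the setting name into a valid attribute name.
                attr = "_" + re.sub(r"\W|^(?=\d)", "_", setting).lower()
                setattr(self, attr, value)
            else:
                raise ValueError("invalid value %r for %r" % (value, setting))

vis = DemoVis()
vis.update_settings({"Color Map": "gray"})
print(vis._color_map)  # "gray"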
5,686 | merantix/picasso | picasso/models/base.py | load_model | def load_model(model_cls_path, model_cls_name, model_load_args):
"""Get an instance of the described model.
Args:
model_cls_path: Path to the module in which the model class
is defined.
model_cls_name: Name of the model class.
model_load_args: Dictionary of args to pass to the `load` method
of the model instance.
Returns:
An instance of :class:`.models.model.BaseModel` or subclass
"""
spec = importlib.util.spec_from_file_location('active_model',
model_cls_path)
model_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(model_module)
model_cls = getattr(model_module, model_cls_name)
model = model_cls()
if not isinstance(model, BaseModel):
warnings.warn("Loaded model '%s' at '%s' is not an instance of %r"
% (model_cls_name, model_cls_path, BaseModel))
model.load(**model_load_args)
return model | python | def load_model(model_cls_path, model_cls_name, model_load_args):
"""Get an instance of the described model.
Args:
model_cls_path: Path to the module in which the model class
is defined.
model_cls_name: Name of the model class.
model_load_args: Dictionary of args to pass to the `load` method
of the model instance.
Returns:
An instance of :class:`.models.model.BaseModel` or subclass
"""
spec = importlib.util.spec_from_file_location('active_model',
model_cls_path)
model_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(model_module)
model_cls = getattr(model_module, model_cls_name)
model = model_cls()
if not isinstance(model, BaseModel):
warnings.warn("Loaded model '%s' at '%s' is not an instance of %r"
% (model_cls_name, model_cls_path, BaseModel))
model.load(**model_load_args)
return model | [
"def",
"load_model",
"(",
"model_cls_path",
",",
"model_cls_name",
",",
"model_load_args",
")",
":",
"spec",
"=",
"importlib",
".",
"util",
".",
"spec_from_file_location",
"(",
"'active_model'",
",",
"model_cls_path",
")",
"model_module",
"=",
"importlib",
".",
"util",
".",
"module_from_spec",
"(",
"spec",
")",
"spec",
".",
"loader",
".",
"exec_module",
"(",
"model_module",
")",
"model_cls",
"=",
"getattr",
"(",
"model_module",
",",
"model_cls_name",
")",
"model",
"=",
"model_cls",
"(",
")",
"if",
"not",
"isinstance",
"(",
"model",
",",
"BaseModel",
")",
":",
"warnings",
".",
"warn",
"(",
"\"Loaded model '%s' at '%s' is not an instance of %r\"",
"%",
"(",
"model_cls_name",
",",
"model_cls_path",
",",
"BaseModel",
")",
")",
"model",
".",
"load",
"(",
"*",
"*",
"model_load_args",
")",
"return",
"model"
] | Get an instance of the described model.
Args:
model_cls_path: Path to the module in which the model class
is defined.
model_cls_name: Name of the model class.
model_load_args: Dictionary of args to pass to the `load` method
of the model instance.
Returns:
An instance of :class:`.models.model.BaseModel` or subclass | [
"Get",
"an",
"instance",
"of",
"the",
"described",
"model",
"."
] | d276b9b7408dd1032fe0ccb84ea9b6604a32915e | https://github.com/merantix/picasso/blob/d276b9b7408dd1032fe0ccb84ea9b6604a32915e/picasso/models/base.py#L18-L42 |
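A hedged usage sketch; the file path, class name, and load arguments are placeholders, and the import path follows this row's picasso/models/base.py location:

from picasso.models.base import load_model

model = load_model(
    model_cls_path="/path/to/my_model.py",   # module defining the class
    model_cls_name="MyModel",                # should subclass BaseModel
    model_load_args={"data_dir": "/path/to/checkpoints"},
)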
5,687 | merantix/picasso | picasso/models/base.py | BaseModel.decode_prob | def decode_prob(self, class_probabilities):
"""Given predicted class probabilites for a set of examples, annotate
each logit with a class name.
By default, we name each class using its index in the logits array.
Args:
class_probabilities (array): Class probabilities as output by
`self.predict`, i.e., a numpy array of shape (num_examples,
num_classes).
Returns:
Annotated class probabilities for each input example, as a list of
dicts where each dict is formatted as:
{
'index': class_index,
'name': class_name,
'prob': class_probability
}
"""
results = []
for row in class_probabilities:
entries = []
for i, prob in enumerate(row):
entries.append({'index': i,
'name': str(i),
'prob': prob})
entries = sorted(entries,
key=itemgetter('prob'),
reverse=True)[:self.top_probs]
for entry in entries:
entry['prob'] = '{:.3f}'.format(entry['prob'])
results.append(entries)
return results | python | def decode_prob(self, class_probabilities):
"""Given predicted class probabilites for a set of examples, annotate
each logit with a class name.
By default, we name each class using its index in the logits array.
Args:
class_probabilities (array): Class probabilities as output by
`self.predict`, i.e., a numpy array of shape (num_examples,
num_classes).
Returns:
Annotated class probabilities for each input example, as a list of
dicts where each dict is formatted as:
{
'index': class_index,
'name': class_name,
'prob': class_probability
}
"""
results = []
for row in class_probabilities:
entries = []
for i, prob in enumerate(row):
entries.append({'index': i,
'name': str(i),
'prob': prob})
entries = sorted(entries,
key=itemgetter('prob'),
reverse=True)[:self.top_probs]
for entry in entries:
entry['prob'] = '{:.3f}'.format(entry['prob'])
results.append(entries)
return results | [
"def",
"decode_prob",
"(",
"self",
",",
"class_probabilities",
")",
":",
"results",
"=",
"[",
"]",
"for",
"row",
"in",
"class_probabilities",
":",
"entries",
"=",
"[",
"]",
"for",
"i",
",",
"prob",
"in",
"enumerate",
"(",
"row",
")",
":",
"entries",
".",
"append",
"(",
"{",
"'index'",
":",
"i",
",",
"'name'",
":",
"str",
"(",
"i",
")",
",",
"'prob'",
":",
"prob",
"}",
")",
"entries",
"=",
"sorted",
"(",
"entries",
",",
"key",
"=",
"itemgetter",
"(",
"'prob'",
")",
",",
"reverse",
"=",
"True",
")",
"[",
":",
"self",
".",
"top_probs",
"]",
"for",
"entry",
"in",
"entries",
":",
"entry",
"[",
"'prob'",
"]",
"=",
"'{:.3f}'",
".",
"format",
"(",
"entry",
"[",
"'prob'",
"]",
")",
"results",
".",
"append",
"(",
"entries",
")",
"return",
"results"
] | Given predicted class probabilities for a set of examples, annotate
each logit with a class name.
By default, we name each class using its index in the logits array.
Args:
class_probabilities (array): Class probabilities as output by
`self.predict`, i.e., a numpy array of shape (num_examples,
num_classes).
Returns:
Annotated class probabilities for each input example, as a list of
dicts where each dict is formatted as:
{
'index': class_index,
'name': class_name,
'prob': class_probability
} | [
"Given",
"predicted",
"class",
"probabilites",
"for",
"a",
"set",
"of",
"examples",
"annotate",
"each",
"logit",
"with",
"a",
"class",
"name",
"."
] | d276b9b7408dd1032fe0ccb84ea9b6604a32915e | https://github.com/merantix/picasso/blob/d276b9b7408dd1032fe0ccb84ea9b6604a32915e/picasso/models/base.py#L174-L210 |
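The annotation logic reduced to one row of probabilities, so the output format is concrete (top_probs is assumed to be 2 here):

from operator import itemgetter

row = [0.1, 0.7, 0.2]
entries = [{"index": i, "name": str(i), "prob": p} for i, p in enumerate(row)]
entries = sorted(entries, key=itemgetter("prob"), reverse=True)[:2]
for e in entries:
    e["prob"] = "{:.3f}".format(e["prob"])
print(entries)
# [{'index': 1, 'name': '1', 'prob': '0.700'}, {'index': 2, 'name': '2', 'prob': '0.200'}]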
5,688 | merantix/picasso | picasso/utils.py | _get_visualization_classes | def _get_visualization_classes():
"""Import visualizations classes dynamically
"""
visualization_attr = vars(import_module('picasso.visualizations'))
visualization_submodules = [
visualization_attr[x]
for x in visualization_attr
if isinstance(visualization_attr[x], ModuleType)]
visualization_classes = []
for submodule in visualization_submodules:
attrs = vars(submodule)
for attr_name in attrs:
attr = attrs[attr_name]
if (inspect.isclass(attr)
and issubclass(attr, BaseVisualization)
and attr is not BaseVisualization):
visualization_classes.append(attr)
return visualization_classes | python | def _get_visualization_classes():
"""Import visualizations classes dynamically
"""
visualization_attr = vars(import_module('picasso.visualizations'))
visualization_submodules = [
visualization_attr[x]
for x in visualization_attr
if isinstance(visualization_attr[x], ModuleType)]
visualization_classes = []
for submodule in visualization_submodules:
attrs = vars(submodule)
for attr_name in attrs:
attr = attrs[attr_name]
if (inspect.isclass(attr)
and issubclass(attr, BaseVisualization)
and attr is not BaseVisualization):
visualization_classes.append(attr)
return visualization_classes | [
"def",
"_get_visualization_classes",
"(",
")",
":",
"visualization_attr",
"=",
"vars",
"(",
"import_module",
"(",
"'picasso.visualizations'",
")",
")",
"visualization_submodules",
"=",
"[",
"visualization_attr",
"[",
"x",
"]",
"for",
"x",
"in",
"visualization_attr",
"if",
"isinstance",
"(",
"visualization_attr",
"[",
"x",
"]",
",",
"ModuleType",
")",
"]",
"visualization_classes",
"=",
"[",
"]",
"for",
"submodule",
"in",
"visualization_submodules",
":",
"attrs",
"=",
"vars",
"(",
"submodule",
")",
"for",
"attr_name",
"in",
"attrs",
":",
"attr",
"=",
"attrs",
"[",
"attr_name",
"]",
"if",
"(",
"inspect",
".",
"isclass",
"(",
"attr",
")",
"and",
"issubclass",
"(",
"attr",
",",
"BaseVisualization",
")",
"and",
"attr",
"is",
"not",
"BaseVisualization",
")",
":",
"visualization_classes",
".",
"append",
"(",
"attr",
")",
"return",
"visualization_classes"
] | Import visualizations classes dynamically | [
"Import",
"visualizations",
"classes",
"dynamically"
] | d276b9b7408dd1032fe0ccb84ea9b6604a32915e | https://github.com/merantix/picasso/blob/d276b9b7408dd1032fe0ccb84ea9b6604a32915e/picasso/utils.py#L32-L49 |
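The same discovery pattern in general form — scan a package's already-imported submodules for concrete subclasses of a base class; the names here are generic stand-ins:

import inspect
from importlib import import_module
from types import ModuleType

def find_subclasses(package_name, base_cls):
    attrs = vars(import_module(package_name))
    submodules = [v for v in attrs.values() if isinstance(v, ModuleType)]
    found = []
    for mod in submodules:
        for obj in vars(mod).values():
            if (inspect.isclass(obj) and issubclass(obj, base_cls)
                    and obj is not base_cls):
                found.append(obj)
    return found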
5,689 | merantix/picasso | picasso/utils.py | get_model | def get_model():
"""Get the NN model that's being analyzed from the request context. Put
the model in the request context if it is not yet there.
Returns:
instance of :class:`.models.model.Model` or derived
class
"""
if not hasattr(g, 'model'):
g.model = load_model(current_app.config['MODEL_CLS_PATH'],
current_app.config['MODEL_CLS_NAME'],
current_app.config['MODEL_LOAD_ARGS'])
return g.model | python | def get_model():
"""Get the NN model that's being analyzed from the request context. Put
the model in the request context if it is not yet there.
Returns:
instance of :class:`.models.model.Model` or derived
class
"""
if not hasattr(g, 'model'):
g.model = load_model(current_app.config['MODEL_CLS_PATH'],
current_app.config['MODEL_CLS_NAME'],
current_app.config['MODEL_LOAD_ARGS'])
return g.model | [
"def",
"get_model",
"(",
")",
":",
"if",
"not",
"hasattr",
"(",
"g",
",",
"'model'",
")",
":",
"g",
".",
"model",
"=",
"load_model",
"(",
"current_app",
".",
"config",
"[",
"'MODEL_CLS_PATH'",
"]",
",",
"current_app",
".",
"config",
"[",
"'MODEL_CLS_NAME'",
"]",
",",
"current_app",
".",
"config",
"[",
"'MODEL_LOAD_ARGS'",
"]",
")",
"return",
"g",
".",
"model"
] | Get the NN model that's being analyzed from the request context. Put
the model in the request context if it is not yet there.
Returns:
instance of :class:`.models.model.Model` or derived
class | [
"Get",
"the",
"NN",
"model",
"that",
"s",
"being",
"analyzed",
"from",
"the",
"request",
"context",
".",
"Put",
"the",
"model",
"in",
"the",
"request",
"context",
"if",
"it",
"is",
"not",
"yet",
"there",
"."
] | d276b9b7408dd1032fe0ccb84ea9b6604a32915e | https://github.com/merantix/picasso/blob/d276b9b7408dd1032fe0ccb84ea9b6604a32915e/picasso/utils.py#L52-L64 |
5,690 | merantix/picasso | picasso/utils.py | get_visualizations | def get_visualizations():
"""Get the available visualizations from the request context. Put the
visualizations in the request context if they are not yet there.
Returns:
:obj:`list` of instances of :class:`.BaseVisualization` or
derived class
"""
if not hasattr(g, 'visualizations'):
g.visualizations = {}
for VisClass in _get_visualization_classes():
vis = VisClass(get_model())
g.visualizations[vis.__class__.__name__] = vis
return g.visualizations | python | def get_visualizations():
"""Get the available visualizations from the request context. Put the
visualizations in the request context if they are not yet there.
Returns:
:obj:`list` of instances of :class:`.BaseVisualization` or
derived class
"""
if not hasattr(g, 'visualizations'):
g.visualizations = {}
for VisClass in _get_visualization_classes():
vis = VisClass(get_model())
g.visualizations[vis.__class__.__name__] = vis
return g.visualizations | [
"def",
"get_visualizations",
"(",
")",
":",
"if",
"not",
"hasattr",
"(",
"g",
",",
"'visualizations'",
")",
":",
"g",
".",
"visualizations",
"=",
"{",
"}",
"for",
"VisClass",
"in",
"_get_visualization_classes",
"(",
")",
":",
"vis",
"=",
"VisClass",
"(",
"get_model",
"(",
")",
")",
"g",
".",
"visualizations",
"[",
"vis",
".",
"__class__",
".",
"__name__",
"]",
"=",
"vis",
"return",
"g",
".",
"visualizations"
] | Get the available visualizations from the request context. Put the
visualizations in the request context if they are not yet there.
Returns:
:obj:`list` of instances of :class:`.BaseVisualization` or
derived class | [
"Get",
"the",
"available",
"visualizations",
"from",
"the",
"request",
"context",
".",
"Put",
"the",
"visualizations",
"in",
"the",
"request",
"context",
"if",
"they",
"are",
"not",
"yet",
"there",
"."
] | d276b9b7408dd1032fe0ccb84ea9b6604a32915e | https://github.com/merantix/picasso/blob/d276b9b7408dd1032fe0ccb84ea9b6604a32915e/picasso/utils.py#L67-L81 |
5,691 | merantix/picasso | picasso/utils.py | get_app_state | def get_app_state():
"""Get current status of application in context
Returns:
:obj:`dict` of application status
"""
if not hasattr(g, 'app_state'):
model = get_model()
g.app_state = {
'app_title': APP_TITLE,
'model_name': type(model).__name__,
'latest_ckpt_name': model.latest_ckpt_name,
'latest_ckpt_time': model.latest_ckpt_time
}
return g.app_state | python | def get_app_state():
"""Get current status of application in context
Returns:
:obj:`dict` of application status
"""
if not hasattr(g, 'app_state'):
model = get_model()
g.app_state = {
'app_title': APP_TITLE,
'model_name': type(model).__name__,
'latest_ckpt_name': model.latest_ckpt_name,
'latest_ckpt_time': model.latest_ckpt_time
}
return g.app_state | [
"def",
"get_app_state",
"(",
")",
":",
"if",
"not",
"hasattr",
"(",
"g",
",",
"'app_state'",
")",
":",
"model",
"=",
"get_model",
"(",
")",
"g",
".",
"app_state",
"=",
"{",
"'app_title'",
":",
"APP_TITLE",
",",
"'model_name'",
":",
"type",
"(",
"model",
")",
".",
"__name__",
",",
"'latest_ckpt_name'",
":",
"model",
".",
"latest_ckpt_name",
",",
"'latest_ckpt_time'",
":",
"model",
".",
"latest_ckpt_time",
"}",
"return",
"g",
".",
"app_state"
] | Get current status of application in context
Returns:
:obj:`dict` of application status | [
"Get",
"current",
"status",
"of",
"application",
"in",
"context"
] | d276b9b7408dd1032fe0ccb84ea9b6604a32915e | https://github.com/merantix/picasso/blob/d276b9b7408dd1032fe0ccb84ea9b6604a32915e/picasso/utils.py#L84-L99 |
5,692 | arraylabs/pymyq | pymyq/api.py | login | async def login(
username: str, password: str, brand: str,
websession: ClientSession = None) -> API:
"""Log in to the API."""
api = API(brand, websession)
await api.authenticate(username, password)
return api | python | async def login(
username: str, password: str, brand: str,
websession: ClientSession = None) -> API:
"""Log in to the API."""
api = API(brand, websession)
await api.authenticate(username, password)
return api | [
"async",
"def",
"login",
"(",
"username",
":",
"str",
",",
"password",
":",
"str",
",",
"brand",
":",
"str",
",",
"websession",
":",
"ClientSession",
"=",
"None",
")",
"->",
"API",
":",
"api",
"=",
"API",
"(",
"brand",
",",
"websession",
")",
"await",
"api",
".",
"authenticate",
"(",
"username",
",",
"password",
")",
"return",
"api"
] | Log in to the API. | [
"Log",
"in",
"to",
"the",
"API",
"."
] | 413ae01ca23568f7b5f698a87e872f456072356b | https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/api.py#L286-L292 |
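A hedged usage sketch for the coroutine above; the credentials and brand string are placeholders, and the import path follows this row's pymyq/api.py location:

import asyncio
from pymyq.api import login

async def main():
    api = await login("user@example.com", "password", "chamberlain")  # example brand
    print(api)

asyncio.run(main())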
5,693 | arraylabs/pymyq | pymyq/api.py | API._create_websession | def _create_websession(self):
"""Create a web session."""
from socket import AF_INET
from aiohttp import ClientTimeout, TCPConnector
_LOGGER.debug('Creating web session')
conn = TCPConnector(
family=AF_INET,
limit_per_host=5,
enable_cleanup_closed=True,
)
# Create session object.
session_timeout = ClientTimeout(connect=10)
self._websession = ClientSession(connector=conn,
timeout=session_timeout)
self._supplied_websession = False | python | def _create_websession(self):
"""Create a web session."""
from socket import AF_INET
from aiohttp import ClientTimeout, TCPConnector
_LOGGER.debug('Creating web session')
conn = TCPConnector(
family=AF_INET,
limit_per_host=5,
enable_cleanup_closed=True,
)
# Create session object.
session_timeout = ClientTimeout(connect=10)
self._websession = ClientSession(connector=conn,
timeout=session_timeout)
self._supplied_websession = False | [
"def",
"_create_websession",
"(",
"self",
")",
":",
"from",
"socket",
"import",
"AF_INET",
"from",
"aiohttp",
"import",
"ClientTimeout",
",",
"TCPConnector",
"_LOGGER",
".",
"debug",
"(",
"'Creating web session'",
")",
"conn",
"=",
"TCPConnector",
"(",
"family",
"=",
"AF_INET",
",",
"limit_per_host",
"=",
"5",
",",
"enable_cleanup_closed",
"=",
"True",
",",
")",
"# Create session object.",
"session_timeout",
"=",
"ClientTimeout",
"(",
"connect",
"=",
"10",
")",
"self",
".",
"_websession",
"=",
"ClientSession",
"(",
"connector",
"=",
"conn",
",",
"timeout",
"=",
"session_timeout",
")",
"self",
".",
"_supplied_websession",
"=",
"False"
] | Create a web session. | [
"Create",
"a",
"web",
"session",
"."
] | 413ae01ca23568f7b5f698a87e872f456072356b | https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/api.py#L73-L89 |
5,694 | arraylabs/pymyq | pymyq/api.py | API.close_websession | async def close_websession(self):
"""Close web session if not already closed and created by us."""
# We do not close the web session if it was provided.
if self._supplied_websession or self._websession is None:
return
_LOGGER.debug('Closing connections')
# Need to set _websession to none first to prevent any other task
# from closing it as well.
temp_websession = self._websession
self._websession = None
await temp_websession.close()
await asyncio.sleep(0)
_LOGGER.debug('Connections closed') | python | async def close_websession(self):
"""Close web session if not already closed and created by us."""
# We do not close the web session if it was provided.
if self._supplied_websession or self._websession is None:
return
_LOGGER.debug('Closing connections')
# Need to set _websession to none first to prevent any other task
# from closing it as well.
temp_websession = self._websession
self._websession = None
await temp_websession.close()
await asyncio.sleep(0)
_LOGGER.debug('Connections closed') | [
"async",
"def",
"close_websession",
"(",
"self",
")",
":",
"# We do not close the web session if it was provided.",
"if",
"self",
".",
"_supplied_websession",
"or",
"self",
".",
"_websession",
"is",
"None",
":",
"return",
"_LOGGER",
".",
"debug",
"(",
"'Closing connections'",
")",
"# Need to set _websession to none first to prevent any other task",
"# from closing it as well.",
"temp_websession",
"=",
"self",
".",
"_websession",
"self",
".",
"_websession",
"=",
"None",
"await",
"temp_websession",
".",
"close",
"(",
")",
"await",
"asyncio",
".",
"sleep",
"(",
"0",
")",
"_LOGGER",
".",
"debug",
"(",
"'Connections closed'",
")"
] | Close the web session if we created it and it is not already closed. | [
"Close",
"web",
"session",
"if",
"not",
"already",
"closed",
"and",
"created",
"by",
"us",
"."
] | 413ae01ca23568f7b5f698a87e872f456072356b | https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/api.py#L91-L104 |
5,695 | arraylabs/pymyq | pymyq/api.py | API.authenticate | async def authenticate(self, username: str, password: str) -> None:
"""Authenticate against the API."""
self._credentials = {
'username': username,
'password': password,
}
await self._get_security_token() | python | async def authenticate(self, username: str, password: str) -> None:
"""Authenticate against the API."""
self._credentials = {
'username': username,
'password': password,
}
await self._get_security_token() | [
"async",
"def",
"authenticate",
"(",
"self",
",",
"username",
":",
"str",
",",
"password",
":",
"str",
")",
"->",
"None",
":",
"self",
".",
"_credentials",
"=",
"{",
"'username'",
":",
"username",
",",
"'password'",
":",
"password",
",",
"}",
"await",
"self",
".",
"_get_security_token",
"(",
")"
] | Authenticate against the API. | [
"Authenticate",
"against",
"the",
"API",
"."
] | 413ae01ca23568f7b5f698a87e872f456072356b | https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/api.py#L219-L226 |
5,696 | arraylabs/pymyq | pymyq/api.py | API._get_security_token | async def _get_security_token(self) -> None:
"""Request a security token."""
_LOGGER.debug('Requesting security token.')
if self._credentials is None:
return
# Make sure only 1 request can be sent at a time.
async with self._security_token_lock:
# Confirm there is still no security token.
if self._security_token is None:
login_resp = await self._request(
'post',
LOGIN_ENDPOINT,
json=self._credentials,
login_request=True,
)
return_code = int(login_resp.get('ReturnCode', 1))
if return_code != 0:
if return_code == 203:
# Invalid username or password.
_LOGGER.debug('Invalid username or password')
self._credentials = None
raise MyQError(login_resp['ErrorMessage'])
self._security_token = login_resp['SecurityToken'] | python | async def _get_security_token(self) -> None:
"""Request a security token."""
_LOGGER.debug('Requesting security token.')
if self._credentials is None:
return
# Make sure only 1 request can be sent at a time.
async with self._security_token_lock:
# Confirm there is still no security token.
if self._security_token is None:
login_resp = await self._request(
'post',
LOGIN_ENDPOINT,
json=self._credentials,
login_request=True,
)
return_code = int(login_resp.get('ReturnCode', 1))
if return_code != 0:
if return_code == 203:
# Invalid username or password.
_LOGGER.debug('Invalid username or password')
self._credentials = None
raise MyQError(login_resp['ErrorMessage'])
self._security_token = login_resp['SecurityToken'] | [
"async",
"def",
"_get_security_token",
"(",
"self",
")",
"->",
"None",
":",
"_LOGGER",
".",
"debug",
"(",
"'Requesting security token.'",
")",
"if",
"self",
".",
"_credentials",
"is",
"None",
":",
"return",
"# Make sure only 1 request can be sent at a time.",
"async",
"with",
"self",
".",
"_security_token_lock",
":",
"# Confirm there is still no security token.",
"if",
"self",
".",
"_security_token",
"is",
"None",
":",
"login_resp",
"=",
"await",
"self",
".",
"_request",
"(",
"'post'",
",",
"LOGIN_ENDPOINT",
",",
"json",
"=",
"self",
".",
"_credentials",
",",
"login_request",
"=",
"True",
",",
")",
"return_code",
"=",
"int",
"(",
"login_resp",
".",
"get",
"(",
"'ReturnCode'",
",",
"1",
")",
")",
"if",
"return_code",
"!=",
"0",
":",
"if",
"return_code",
"==",
"203",
":",
"# Invalid username or password.",
"_LOGGER",
".",
"debug",
"(",
"'Invalid username or password'",
")",
"self",
".",
"_credentials",
"=",
"None",
"raise",
"MyQError",
"(",
"login_resp",
"[",
"'ErrorMessage'",
"]",
")",
"self",
".",
"_security_token",
"=",
"login_resp",
"[",
"'SecurityToken'",
"]"
] | Request a security token. | [
"Request",
"a",
"security",
"token",
"."
] | 413ae01ca23568f7b5f698a87e872f456072356b | https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/api.py#L228-L253 |
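The double-check-under-lock pattern used above, in isolation; the HTTP login call is replaced by a stub so the snippet runs standalone:

import asyncio

class TokenHolder:
    def __init__(self):
        self._token = None
        self._lock = asyncio.Lock()

    async def _fetch(self):
        await asyncio.sleep(0)        # stand-in for the real login request
        return "security-token"

    async def get_token(self):
        async with self._lock:        # one refresh in flight at a time
            if self._token is None:   # re-check after acquiring the lock
                self._token = await self._fetch()
        return self._token

print(asyncio.run(TokenHolder().get_token()))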
5,697 | arraylabs/pymyq | pymyq/api.py | API.get_devices | async def get_devices(self, covers_only: bool = True) -> list:
"""Get a list of all devices associated with the account."""
from .device import MyQDevice
_LOGGER.debug('Retrieving list of devices')
devices_resp = await self._request('get', DEVICE_LIST_ENDPOINT)
# print(json.dumps(devices_resp, indent=4))
device_list = []
if devices_resp is None:
return device_list
for device in devices_resp['Devices']:
if not covers_only or \
device['MyQDeviceTypeName'] in SUPPORTED_DEVICE_TYPE_NAMES:
self._devices.append({
'device_id': device['MyQDeviceId'],
'device_info': device
})
myq_device = MyQDevice(
self._devices[-1], self._brand, self)
device_list.append(myq_device)
# Store current device states.
self._store_device_states(devices_resp.get('Devices', []))
_LOGGER.debug('List of devices retrieved')
return device_list | python | async def get_devices(self, covers_only: bool = True) -> list:
"""Get a list of all devices associated with the account."""
from .device import MyQDevice
_LOGGER.debug('Retrieving list of devices')
devices_resp = await self._request('get', DEVICE_LIST_ENDPOINT)
# print(json.dumps(devices_resp, indent=4))
device_list = []
if devices_resp is None:
return device_list
for device in devices_resp['Devices']:
if not covers_only or \
device['MyQDeviceTypeName'] in SUPPORTED_DEVICE_TYPE_NAMES:
self._devices.append({
'device_id': device['MyQDeviceId'],
'device_info': device
})
myq_device = MyQDevice(
self._devices[-1], self._brand, self)
device_list.append(myq_device)
# Store current device states.
self._store_device_states(devices_resp.get('Devices', []))
_LOGGER.debug('List of devices retrieved')
return device_list | [
"async",
"def",
"get_devices",
"(",
"self",
",",
"covers_only",
":",
"bool",
"=",
"True",
")",
"->",
"list",
":",
"from",
".",
"device",
"import",
"MyQDevice",
"_LOGGER",
".",
"debug",
"(",
"'Retrieving list of devices'",
")",
"devices_resp",
"=",
"await",
"self",
".",
"_request",
"(",
"'get'",
",",
"DEVICE_LIST_ENDPOINT",
")",
"# print(json.dumps(devices_resp, indent=4))",
"device_list",
"=",
"[",
"]",
"if",
"devices_resp",
"is",
"None",
":",
"return",
"device_list",
"for",
"device",
"in",
"devices_resp",
"[",
"'Devices'",
"]",
":",
"if",
"not",
"covers_only",
"or",
"device",
"[",
"'MyQDeviceTypeName'",
"]",
"in",
"SUPPORTED_DEVICE_TYPE_NAMES",
":",
"self",
".",
"_devices",
".",
"append",
"(",
"{",
"'device_id'",
":",
"device",
"[",
"'MyQDeviceId'",
"]",
",",
"'device_info'",
":",
"device",
"}",
")",
"myq_device",
"=",
"MyQDevice",
"(",
"self",
".",
"_devices",
"[",
"-",
"1",
"]",
",",
"self",
".",
"_brand",
",",
"self",
")",
"device_list",
".",
"append",
"(",
"myq_device",
")",
"# Store current device states.",
"self",
".",
"_store_device_states",
"(",
"devices_resp",
".",
"get",
"(",
"'Devices'",
",",
"[",
"]",
")",
")",
"_LOGGER",
".",
"debug",
"(",
"'List of devices retrieved'",
")",
"return",
"device_list"
] | Get a list of all devices associated with the account. | [
"Get",
"a",
"list",
"of",
"all",
"devices",
"associated",
"with",
"the",
"account",
"."
] | 413ae01ca23568f7b5f698a87e872f456072356b | https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/api.py#L255-L283 |
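An end-to-end sketch combining login() and get_devices(); every literal value is a placeholder, and .name / .available come from the MyQDevice rows below:

import asyncio
from pymyq.api import login

async def main():
    api = await login("user@example.com", "password", "chamberlain")
    for device in await api.get_devices(covers_only=True):
        print(device.name, device.available)
    await api.close_websession()

asyncio.run(main())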
5,698 | arraylabs/pymyq | pymyq/device.py | MyQDevice.name | def name(self) -> str:
"""Return the device name."""
return next(
attr['Value'] for attr in self._device_json.get('Attributes', [])
if attr.get('AttributeDisplayName') == 'desc') | python | def name(self) -> str:
"""Return the device name."""
return next(
attr['Value'] for attr in self._device_json.get('Attributes', [])
if attr.get('AttributeDisplayName') == 'desc') | [
"def",
"name",
"(",
"self",
")",
"->",
"str",
":",
"return",
"next",
"(",
"attr",
"[",
"'Value'",
"]",
"for",
"attr",
"in",
"self",
".",
"_device_json",
".",
"get",
"(",
"'Attributes'",
",",
"[",
"]",
")",
"if",
"attr",
".",
"get",
"(",
"'AttributeDisplayName'",
")",
"==",
"'desc'",
")"
] | Return the device name. | [
"Return",
"the",
"device",
"name",
"."
] | 413ae01ca23568f7b5f698a87e872f456072356b | https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/device.py#L64-L68 |
5,699 | arraylabs/pymyq | pymyq/device.py | MyQDevice.available | def available(self) -> bool:
"""Return if device is online or not."""
# Both ability to retrieve state from MyQ cloud AND device itself has
# to be online.
is_available = self.api.online and \
next(
attr['Value'] for attr in
self._device_json.get('Attributes', [])
if attr.get('AttributeDisplayName') == 'online') == "True"
return is_available | python | def available(self) -> bool:
"""Return if device is online or not."""
# Both ability to retrieve state from MyQ cloud AND device itself has
# to be online.
is_available = self.api.online and \
next(
attr['Value'] for attr in
self._device_json.get('Attributes', [])
if attr.get('AttributeDisplayName') == 'online') == "True"
return is_available | [
"def",
"available",
"(",
"self",
")",
"->",
"bool",
":",
"# Both ability to retrieve state from MyQ cloud AND device itself has",
"# to be online.",
"is_available",
"=",
"self",
".",
"api",
".",
"online",
"and",
"next",
"(",
"attr",
"[",
"'Value'",
"]",
"for",
"attr",
"in",
"self",
".",
"_device_json",
".",
"get",
"(",
"'Attributes'",
",",
"[",
"]",
")",
"if",
"attr",
".",
"get",
"(",
"'AttributeDisplayName'",
")",
"==",
"'online'",
")",
"==",
"\"True\"",
"return",
"is_available"
] | Return if device is online or not. | [
"Return",
"if",
"device",
"is",
"online",
"or",
"not",
"."
] | 413ae01ca23568f7b5f698a87e872f456072356b | https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/device.py#L71-L81 |