repo (string, 7-54 chars) | path (string, 4-192 chars) | url (string, 87-284 chars) | code (string, 78-104k chars) | code_tokens (list) | docstring (string, 1-46.9k chars) | docstring_tokens (list) | language (1 class) | partition (3 classes)
---|---|---|---|---|---|---|---|---|
projectatomic/atomic-reactor
|
atomic_reactor/core.py
|
https://github.com/projectatomic/atomic-reactor/blob/fd31c01b964097210bf169960d051e5f04019a80/atomic_reactor/core.py#L491-L520
|
def get_image_info_by_image_name(self, image, exact_tag=True):
"""
using `docker images`, provide information about an image
:param image: ImageName, name of image
:param exact_tag: bool, if false then return info for all images of the
given name regardless of what their tag is
:return: list of dicts
"""
logger.info("getting info about provided image specified by name '%s'", image)
logger.debug("image_name = '%s'", image)
# returns list of
# {u'Created': 1414577076,
# u'Id': u'3ab9a7ed8a169ab89b09fb3e12a14a390d3c662703b65b4541c0c7bde0ee97eb',
# u'ParentId': u'a79ad4dac406fcf85b9c7315fe08de5b620c1f7a12f45c8185c843f4b4a49c4e',
# u'RepoTags': [u'buildroot-fedora:latest'],
# u'Size': 0,
# u'VirtualSize': 856564160}
images = self.d.images(name=image.to_str(tag=False))
if exact_tag:
# tag is specified, we are looking for the exact image
for found_image in images:
if image.to_str(explicit_tag=True) in found_image['RepoTags']:
logger.debug("image '%s' found", image)
return [found_image]
images = [] # image not found
logger.debug("%d matching images found", len(images))
return images
|
[
"def",
"get_image_info_by_image_name",
"(",
"self",
",",
"image",
",",
"exact_tag",
"=",
"True",
")",
":",
"logger",
".",
"info",
"(",
"\"getting info about provided image specified by name '%s'\"",
",",
"image",
")",
"logger",
".",
"debug",
"(",
"\"image_name = '%s'\"",
",",
"image",
")",
"# returns list of",
"# {u'Created': 1414577076,",
"# u'Id': u'3ab9a7ed8a169ab89b09fb3e12a14a390d3c662703b65b4541c0c7bde0ee97eb',",
"# u'ParentId': u'a79ad4dac406fcf85b9c7315fe08de5b620c1f7a12f45c8185c843f4b4a49c4e',",
"# u'RepoTags': [u'buildroot-fedora:latest'],",
"# u'Size': 0,",
"# u'VirtualSize': 856564160}",
"images",
"=",
"self",
".",
"d",
".",
"images",
"(",
"name",
"=",
"image",
".",
"to_str",
"(",
"tag",
"=",
"False",
")",
")",
"if",
"exact_tag",
":",
"# tag is specified, we are looking for the exact image",
"for",
"found_image",
"in",
"images",
":",
"if",
"image",
".",
"to_str",
"(",
"explicit_tag",
"=",
"True",
")",
"in",
"found_image",
"[",
"'RepoTags'",
"]",
":",
"logger",
".",
"debug",
"(",
"\"image '%s' found\"",
",",
"image",
")",
"return",
"[",
"found_image",
"]",
"images",
"=",
"[",
"]",
"# image not found",
"logger",
".",
"debug",
"(",
"\"%d matching images found\"",
",",
"len",
"(",
"images",
")",
")",
"return",
"images"
] |
using `docker images`, provide information about an image
:param image: ImageName, name of image
:param exact_tag: bool, if false then return info for all images of the
given name regardless of what their tag is
:return: list of dicts
|
[
"using",
"docker",
"images",
"provide",
"information",
"about",
"an",
"image"
] |
python
|
train
|
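A hedged aside on the record above: `self.d.images(...)` returns a list of dicts shaped like the inline comment, and the exact-tag branch simply scans `RepoTags`. The sketch below reproduces that filtering over plain dicts with no Docker client; the image IDs and tags are invented, purely for illustration.

# Stand-in for the dicts returned by `docker images` (shape copied from the
# comment in the function above); the IDs and tags are invented.
images = [
    {"Id": "3ab9a7ed8a16...", "RepoTags": ["buildroot-fedora:latest"], "Size": 0},
    {"Id": "a79ad4dac406...", "RepoTags": ["buildroot-fedora:v1.0"], "Size": 0},
]

def find_exact(images, name_with_tag):
    """Return a one-element list for an exact name:tag match, else []."""
    for found_image in images:
        if name_with_tag in found_image.get("RepoTags", []):
            return [found_image]
    return []  # image not found

print(find_exact(images, "buildroot-fedora:latest"))   # one match
print(find_exact(images, "buildroot-fedora:missing"))  # []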
RudolfCardinal/pythonlib
|
cardinal_pythonlib/convert.py
|
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/convert.py#L90-L98
|
def convert_attrs_to_bool(obj: Any,
attrs: Iterable[str],
default: bool = None) -> None:
"""
Applies :func:`convert_to_bool` to the specified attributes of an object,
modifying it in place.
"""
for a in attrs:
setattr(obj, a, convert_to_bool(getattr(obj, a), default=default))
|
[
"def",
"convert_attrs_to_bool",
"(",
"obj",
":",
"Any",
",",
"attrs",
":",
"Iterable",
"[",
"str",
"]",
",",
"default",
":",
"bool",
"=",
"None",
")",
"->",
"None",
":",
"for",
"a",
"in",
"attrs",
":",
"setattr",
"(",
"obj",
",",
"a",
",",
"convert_to_bool",
"(",
"getattr",
"(",
"obj",
",",
"a",
")",
",",
"default",
"=",
"default",
")",
")"
] |
Applies :func:`convert_to_bool` to the specified attributes of an object,
modifying it in place.
|
[
"Applies",
":",
"func",
":",
"convert_to_bool",
"to",
"the",
"specified",
"attributes",
"of",
"an",
"object",
"modifying",
"it",
"in",
"place",
"."
] |
python
|
train
|
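The record above depends on a `convert_to_bool` helper that is not part of this snippet. A minimal usage sketch with an assumed, simplified stand-in for that helper (not the actual cardinal_pythonlib implementation):

from typing import Any, Iterable

def convert_to_bool(x: Any, default: bool = None) -> bool:
    # Simplified stand-in: treat common truthy strings as True.
    if x is None or x == "":
        return default
    return str(x).strip().lower() in ("1", "true", "t", "yes", "y")

def convert_attrs_to_bool(obj: Any, attrs: Iterable[str], default: bool = None) -> None:
    for a in attrs:
        setattr(obj, a, convert_to_bool(getattr(obj, a), default=default))

class Record:
    def __init__(self):
        self.active = "yes"
        self.deleted = "0"

r = Record()
convert_attrs_to_bool(r, ["active", "deleted"])
print(r.active, r.deleted)  # True False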
gwastro/pycbc
|
pycbc/filter/matchedfilter.py
|
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/filter/matchedfilter.py#L1101-L1120
|
def sigma(htilde, psd = None, low_frequency_cutoff=None,
high_frequency_cutoff=None):
""" Return the sigma of the waveform. See sigmasq for more details.
Parameters
----------
htilde : TimeSeries or FrequencySeries
The input vector containing a waveform.
psd : {None, FrequencySeries}, optional
The psd used to weight the accumulated power.
low_frequency_cutoff : {None, float}, optional
The frequency to begin considering waveform power.
high_frequency_cutoff : {None, float}, optional
The frequency to stop considering waveform power.
Returns
-------
sigmasq: float
"""
return sqrt(sigmasq(htilde, psd, low_frequency_cutoff, high_frequency_cutoff))
|
[
"def",
"sigma",
"(",
"htilde",
",",
"psd",
"=",
"None",
",",
"low_frequency_cutoff",
"=",
"None",
",",
"high_frequency_cutoff",
"=",
"None",
")",
":",
"return",
"sqrt",
"(",
"sigmasq",
"(",
"htilde",
",",
"psd",
",",
"low_frequency_cutoff",
",",
"high_frequency_cutoff",
")",
")"
] |
Return the sigma of the waveform. See sigmasq for more details.
Parameters
----------
htilde : TimeSeries or FrequencySeries
The input vector containing a waveform.
psd : {None, FrequencySeries}, optional
The psd used to weight the accumulated power.
low_frequency_cutoff : {None, float}, optional
The frequency to begin considering waveform power.
high_frequency_cutoff : {None, float}, optional
The frequency to stop considering waveform power.
Returns
-------
sigmasq: float
|
[
"Return",
"the",
"sigma",
"of",
"the",
"waveform",
".",
"See",
"sigmasq",
"for",
"more",
"details",
"."
] |
python
|
train
|
sirfoga/pyhal
|
hal/streams/pretty_table.py
|
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/streams/pretty_table.py#L111-L122
|
def get_blank_row(self, filler="-", splitter="+"):
"""Gets blank row
:param filler: Fill empty columns with this char
:param splitter: Separate columns with this char
:return: Pretty formatted blank row (with no meaningful data in it)
"""
return self.get_pretty_row(
["" for _ in self.widths], # blanks
filler, # fill with this
splitter, # split columns with this
)
|
[
"def",
"get_blank_row",
"(",
"self",
",",
"filler",
"=",
"\"-\"",
",",
"splitter",
"=",
"\"+\"",
")",
":",
"return",
"self",
".",
"get_pretty_row",
"(",
"[",
"\"\"",
"for",
"_",
"in",
"self",
".",
"widths",
"]",
",",
"# blanks",
"filler",
",",
"# fill with this",
"splitter",
",",
"# split columns with this",
")"
] |
Gets blank row
:param filler: Fill empty columns with this char
:param splitter: Separate columns with this char
:return: Pretty formatted blank row (with no meaningful data in it)
|
[
"Gets",
"blank",
"row"
] |
python
|
train
|
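`get_pretty_row` is not included in the record above, so the sketch below assumes a plausible behaviour for it (pad each cell to its column width with `filler`, join with `splitter`) purely to show what the blank row ends up looking like; the column widths are invented.

widths = [9, 4, 6]  # example column widths

def get_pretty_row(cells, filler, splitter):
    # Assumed behaviour: pad every cell to its column width, then join.
    padded = [str(c).ljust(w, filler) for c, w in zip(cells, widths)]
    return splitter + splitter.join(padded) + splitter

def get_blank_row(filler="-", splitter="+"):
    return get_pretty_row(["" for _ in widths], filler, splitter)

print(get_blank_row())                                  # +---------+----+------+
print(get_pretty_row(["name", "age", "city"], " ", "|"))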
astropy/photutils
|
photutils/psf/epsf.py
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/epsf.py#L862-L916
|
def _interpolate_missing_data(data, mask, method='cubic'):
"""
Interpolate missing data as identified by the ``mask`` keyword.
Parameters
----------
data : 2D `~numpy.ndarray`
An array containing the 2D image.
mask : 2D bool `~numpy.ndarray`
A 2D boolean mask array with the same shape as the input
``data``, where a `True` value indicates the corresponding
element of ``data`` is masked. The masked data points are
those that will be interpolated.
method : {'cubic', 'nearest'}, optional
The method used to interpolate the missing data:
* ``'cubic'``: Masked data are interpolated using 2D cubic
splines. This is the default.
* ``'nearest'``: Masked data are interpolated using
nearest-neighbor interpolation.
Returns
-------
data_interp : 2D `~numpy.ndarray`
The interpolated 2D image.
"""
from scipy import interpolate
data_interp = np.array(data, copy=True)
if len(data_interp.shape) != 2:
raise ValueError('data must be a 2D array.')
if mask.shape != data.shape:
raise ValueError('mask and data must have the same shape.')
y, x = np.indices(data_interp.shape)
xy = np.dstack((x[~mask].ravel(), y[~mask].ravel()))[0]
z = data_interp[~mask].ravel()
if method == 'nearest':
interpol = interpolate.NearestNDInterpolator(xy, z)
elif method == 'cubic':
interpol = interpolate.CloughTocher2DInterpolator(xy, z)
else:
raise ValueError('Unsupported interpolation method.')
xy_missing = np.dstack((x[mask].ravel(), y[mask].ravel()))[0]
data_interp[mask] = interpol(xy_missing)
return data_interp
|
[
"def",
"_interpolate_missing_data",
"(",
"data",
",",
"mask",
",",
"method",
"=",
"'cubic'",
")",
":",
"from",
"scipy",
"import",
"interpolate",
"data_interp",
"=",
"np",
".",
"array",
"(",
"data",
",",
"copy",
"=",
"True",
")",
"if",
"len",
"(",
"data_interp",
".",
"shape",
")",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"'data must be a 2D array.'",
")",
"if",
"mask",
".",
"shape",
"!=",
"data",
".",
"shape",
":",
"raise",
"ValueError",
"(",
"'mask and data must have the same shape.'",
")",
"y",
",",
"x",
"=",
"np",
".",
"indices",
"(",
"data_interp",
".",
"shape",
")",
"xy",
"=",
"np",
".",
"dstack",
"(",
"(",
"x",
"[",
"~",
"mask",
"]",
".",
"ravel",
"(",
")",
",",
"y",
"[",
"~",
"mask",
"]",
".",
"ravel",
"(",
")",
")",
")",
"[",
"0",
"]",
"z",
"=",
"data_interp",
"[",
"~",
"mask",
"]",
".",
"ravel",
"(",
")",
"if",
"method",
"==",
"'nearest'",
":",
"interpol",
"=",
"interpolate",
".",
"NearestNDInterpolator",
"(",
"xy",
",",
"z",
")",
"elif",
"method",
"==",
"'cubic'",
":",
"interpol",
"=",
"interpolate",
".",
"CloughTocher2DInterpolator",
"(",
"xy",
",",
"z",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Unsupported interpolation method.'",
")",
"xy_missing",
"=",
"np",
".",
"dstack",
"(",
"(",
"x",
"[",
"mask",
"]",
".",
"ravel",
"(",
")",
",",
"y",
"[",
"mask",
"]",
".",
"ravel",
"(",
")",
")",
")",
"[",
"0",
"]",
"data_interp",
"[",
"mask",
"]",
"=",
"interpol",
"(",
"xy_missing",
")",
"return",
"data_interp"
] |
Interpolate missing data as identified by the ``mask`` keyword.
Parameters
----------
data : 2D `~numpy.ndarray`
An array containing the 2D image.
mask : 2D bool `~numpy.ndarray`
A 2D boolean mask array with the same shape as the input
``data``, where a `True` value indicates the corresponding
element of ``data`` is masked. The masked data points are
those that will be interpolated.
method : {'cubic', 'nearest'}, optional
The method used to interpolate the missing data:
* ``'cubic'``: Masked data are interpolated using 2D cubic
splines. This is the default.
* ``'nearest'``: Masked data are interpolated using
nearest-neighbor interpolation.
Returns
-------
data_interp : 2D `~numpy.ndarray`
The interpolated 2D image.
|
[
"Interpolate",
"missing",
"data",
"as",
"identified",
"by",
"the",
"mask",
"keyword",
"."
] |
python
|
train
|
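A self-contained usage sketch of the same SciPy interpolators on a toy 5x5 image with two masked pixels; the data is synthetic and this is illustrative, not the photutils test suite.

import numpy as np
from scipy import interpolate

data = np.arange(25, dtype=float).reshape(5, 5)   # smooth ramp 0..24
mask = np.zeros_like(data, dtype=bool)
mask[2, 2] = mask[1, 3] = True                    # pretend these pixels are bad

y, x = np.indices(data.shape)
xy_good = np.column_stack((x[~mask], y[~mask]))
z_good = data[~mask]

interpol = interpolate.CloughTocher2DInterpolator(xy_good, z_good)
xy_bad = np.column_stack((x[mask], y[mask]))

filled = data.copy()
filled[mask] = interpol(xy_bad)
print(filled[2, 2], filled[1, 3])   # close to the original 12.0 and 8.0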
BerkeleyAutomation/autolab_core
|
autolab_core/points.py
|
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L1201-L1212
|
def remove_zero_points(self):
"""Remove all elements where the norms and points are zero.
Note
----
This returns nothing and updates the NormalCloud in-place.
"""
points_of_interest = np.where((np.linalg.norm(self.point_cloud.data, axis=0) != 0.0) &
(np.linalg.norm(self.normal_cloud.data, axis=0) != 0.0) &
(np.isfinite(self.normal_cloud.data[0,:])))[0]
self.point_cloud._data = self.point_cloud.data[:, points_of_interest]
self.normal_cloud._data = self.normal_cloud.data[:, points_of_interest]
|
[
"def",
"remove_zero_points",
"(",
"self",
")",
":",
"points_of_interest",
"=",
"np",
".",
"where",
"(",
"(",
"np",
".",
"linalg",
".",
"norm",
"(",
"self",
".",
"point_cloud",
".",
"data",
",",
"axis",
"=",
"0",
")",
"!=",
"0.0",
")",
"&",
"(",
"np",
".",
"linalg",
".",
"norm",
"(",
"self",
".",
"normal_cloud",
".",
"data",
",",
"axis",
"=",
"0",
")",
"!=",
"0.0",
")",
"&",
"(",
"np",
".",
"isfinite",
"(",
"self",
".",
"normal_cloud",
".",
"data",
"[",
"0",
",",
":",
"]",
")",
")",
")",
"[",
"0",
"]",
"self",
".",
"point_cloud",
".",
"_data",
"=",
"self",
".",
"point_cloud",
".",
"data",
"[",
":",
",",
"points_of_interest",
"]",
"self",
".",
"normal_cloud",
".",
"_data",
"=",
"self",
".",
"normal_cloud",
".",
"data",
"[",
":",
",",
"points_of_interest",
"]"
] |
Remove all elements where the norms and points are zero.
Note
----
This returns nothing and updates the NormalCloud in-place.
|
[
"Remove",
"all",
"elements",
"where",
"the",
"norms",
"and",
"points",
"are",
"zero",
"."
] |
python
|
train
|
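The filtering above is plain NumPy on 3xN arrays; the sketch below reproduces the same column selection on bare arrays, which may be easier to follow than the in-place NormalCloud update (the data values are made up).

import numpy as np

points = np.array([[1.0, 0.0, 2.0],
                   [0.0, 0.0, 1.0],
                   [0.0, 0.0, 3.0]])        # 3 x N points (one column per point)
normals = np.array([[0.0, 0.0, np.nan],
                    [1.0, 0.0, 0.0],
                    [0.0, 0.0, 1.0]])       # 3 x N normals

keep = np.where((np.linalg.norm(points, axis=0) != 0.0) &
                (np.linalg.norm(normals, axis=0) != 0.0) &
                (np.isfinite(normals[0, :])))[0]

print(keep)              # only column 0 survives
print(points[:, keep])
print(normals[:, keep])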
why2pac/dp-tornado
|
dp_tornado/helper/serialization/json.py
|
https://github.com/why2pac/dp-tornado/blob/a5948f5693f6ee2d9bab31f611fedc074e1caa96/dp_tornado/helper/serialization/json.py#L17-L23
|
def parse(self, text, encoding='utf8', raise_exception=False):
"""Alias of helper.string.serialization.json.parse"""
return self.helper.string.serialization.json.parse(
text=text,
encoding=encoding,
raise_exception=raise_exception)
|
[
"def",
"parse",
"(",
"self",
",",
"text",
",",
"encoding",
"=",
"'utf8'",
",",
"raise_exception",
"=",
"False",
")",
":",
"return",
"self",
".",
"helper",
".",
"string",
".",
"serialization",
".",
"json",
".",
"parse",
"(",
"text",
"=",
"text",
",",
"encoding",
"=",
"encoding",
",",
"raise_exception",
"=",
"raise_exception",
")"
] |
Alias of helper.string.serialization.json.parse
|
[
"Alias",
"of",
"helper",
".",
"string",
".",
"serialization",
".",
"json",
".",
"parse"
] |
python
|
train
|
RJT1990/pyflux
|
pyflux/var/var.py
|
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/var/var.py#L409-L433
|
def neg_loglik(self,beta):
""" Creates the negative log-likelihood of the model
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
Returns
----------
The negative log-likelihood of the model
"""
mu, Y = self._model(beta)
if self.use_ols_covariance is False:
cm = self.custom_covariance(beta)
else:
cm = self.ols_covariance()
diff = Y.T - mu.T
ll1 = -(mu.T.shape[0]*mu.T.shape[1]/2.0)*np.log(2.0*np.pi) - (mu.T.shape[0]/2.0)*np.linalg.slogdet(cm)[1]
inverse = np.linalg.pinv(cm)
return var_likelihood(ll1, mu.T.shape[0], diff, inverse)
|
[
"def",
"neg_loglik",
"(",
"self",
",",
"beta",
")",
":",
"mu",
",",
"Y",
"=",
"self",
".",
"_model",
"(",
"beta",
")",
"if",
"self",
".",
"use_ols_covariance",
"is",
"False",
":",
"cm",
"=",
"self",
".",
"custom_covariance",
"(",
"beta",
")",
"else",
":",
"cm",
"=",
"self",
".",
"ols_covariance",
"(",
")",
"diff",
"=",
"Y",
".",
"T",
"-",
"mu",
".",
"T",
"ll1",
"=",
"-",
"(",
"mu",
".",
"T",
".",
"shape",
"[",
"0",
"]",
"*",
"mu",
".",
"T",
".",
"shape",
"[",
"1",
"]",
"/",
"2.0",
")",
"*",
"np",
".",
"log",
"(",
"2.0",
"*",
"np",
".",
"pi",
")",
"-",
"(",
"mu",
".",
"T",
".",
"shape",
"[",
"0",
"]",
"/",
"2.0",
")",
"*",
"np",
".",
"linalg",
".",
"slogdet",
"(",
"cm",
")",
"[",
"1",
"]",
"inverse",
"=",
"np",
".",
"linalg",
".",
"pinv",
"(",
"cm",
")",
"return",
"var_likelihood",
"(",
"ll1",
",",
"mu",
".",
"T",
".",
"shape",
"[",
"0",
"]",
",",
"diff",
",",
"inverse",
")"
] |
Creates the negative log-likelihood of the model
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
Returns
----------
The negative log-likelihood of the model
|
[
"Creates",
"the",
"negative",
"log",
"-",
"likelihood",
"of",
"the",
"model"
] |
python
|
train
|
theislab/scanpy
|
scanpy/preprocessing/_recipes.py
|
https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/preprocessing/_recipes.py#L62-L119
|
def recipe_zheng17(adata, n_top_genes=1000, log=True, plot=False, copy=False):
"""Normalization and filtering as of [Zheng17]_.
Reproduces the preprocessing of [Zheng17]_ - the Cell Ranger R Kit of 10x
Genomics.
Expects non-logarithmized data. If using logarithmized data, pass `log=False`.
The recipe runs the following steps
.. code:: python
sc.pp.filter_genes(adata, min_counts=1) # only consider genes with more than 1 count
sc.pp.normalize_per_cell( # normalize with total UMI count per cell
adata, key_n_counts='n_counts_all')
filter_result = sc.pp.filter_genes_dispersion( # select highly-variable genes
adata.X, flavor='cell_ranger', n_top_genes=n_top_genes, log=False)
adata = adata[:, filter_result.gene_subset] # subset the genes
sc.pp.normalize_per_cell(adata) # renormalize after filtering
if log: sc.pp.log1p(adata) # log transform: adata.X = log(adata.X + 1)
sc.pp.scale(adata) # scale to unit variance and shift to zero mean
Parameters
----------
adata : :class:`~anndata.AnnData`
Annotated data matrix.
n_top_genes : `int`, optional (default: 1000)
Number of genes to keep.
log : `bool`, optional (default: `True`)
Take logarithm.
plot : `bool`, optional (default: `False`)
Show a plot of the gene dispersion vs. mean relation.
copy : `bool`, optional (default: `False`)
Return a copy of `adata` instead of updating it.
Returns
-------
Returns or updates `adata` depending on `copy`.
"""
logg.info('running recipe zheng17', reset=True)
if copy: adata = adata.copy()
pp.filter_genes(adata, min_counts=1) # only consider genes with more than 1 count
pp.normalize_per_cell(adata, # normalize with total UMI count per cell
key_n_counts='n_counts_all')
filter_result = filter_genes_dispersion(
adata.X, flavor='cell_ranger', n_top_genes=n_top_genes, log=False)
if plot:
from ..plotting import _preprocessing as ppp # should not import at the top of the file
ppp.filter_genes_dispersion(filter_result, log=True)
# actually filter the genes, the following is the inplace version of
# adata = adata[:, filter_result.gene_subset]
adata._inplace_subset_var(filter_result.gene_subset) # filter genes
pp.normalize_per_cell(adata) # renormalize after filtering
if log: pp.log1p(adata) # log transform: X = log(X + 1)
pp.scale(adata)
logg.info(' finished', time=True)
return adata if copy else None
|
[
"def",
"recipe_zheng17",
"(",
"adata",
",",
"n_top_genes",
"=",
"1000",
",",
"log",
"=",
"True",
",",
"plot",
"=",
"False",
",",
"copy",
"=",
"False",
")",
":",
"logg",
".",
"info",
"(",
"'running recipe zheng17'",
",",
"reset",
"=",
"True",
")",
"if",
"copy",
":",
"adata",
"=",
"adata",
".",
"copy",
"(",
")",
"pp",
".",
"filter_genes",
"(",
"adata",
",",
"min_counts",
"=",
"1",
")",
"# only consider genes with more than 1 count",
"pp",
".",
"normalize_per_cell",
"(",
"adata",
",",
"# normalize with total UMI count per cell",
"key_n_counts",
"=",
"'n_counts_all'",
")",
"filter_result",
"=",
"filter_genes_dispersion",
"(",
"adata",
".",
"X",
",",
"flavor",
"=",
"'cell_ranger'",
",",
"n_top_genes",
"=",
"n_top_genes",
",",
"log",
"=",
"False",
")",
"if",
"plot",
":",
"from",
".",
".",
"plotting",
"import",
"_preprocessing",
"as",
"ppp",
"# should not import at the top of the file",
"ppp",
".",
"filter_genes_dispersion",
"(",
"filter_result",
",",
"log",
"=",
"True",
")",
"# actually filter the genes, the following is the inplace version of",
"# adata = adata[:, filter_result.gene_subset]",
"adata",
".",
"_inplace_subset_var",
"(",
"filter_result",
".",
"gene_subset",
")",
"# filter genes",
"pp",
".",
"normalize_per_cell",
"(",
"adata",
")",
"# renormalize after filtering",
"if",
"log",
":",
"pp",
".",
"log1p",
"(",
"adata",
")",
"# log transform: X = log(X + 1)",
"pp",
".",
"scale",
"(",
"adata",
")",
"logg",
".",
"info",
"(",
"' finished'",
",",
"time",
"=",
"True",
")",
"return",
"adata",
"if",
"copy",
"else",
"None"
] |
Normalization and filtering as of [Zheng17]_.
Reproduces the preprocessing of [Zheng17]_ - the Cell Ranger R Kit of 10x
Genomics.
Expects non-logarithmized data. If using logarithmized data, pass `log=False`.
The recipe runs the following steps
.. code:: python
sc.pp.filter_genes(adata, min_counts=1) # only consider genes with more than 1 count
sc.pp.normalize_per_cell( # normalize with total UMI count per cell
adata, key_n_counts='n_counts_all')
filter_result = sc.pp.filter_genes_dispersion( # select highly-variable genes
adata.X, flavor='cell_ranger', n_top_genes=n_top_genes, log=False)
adata = adata[:, filter_result.gene_subset] # subset the genes
sc.pp.normalize_per_cell(adata) # renormalize after filtering
if log: sc.pp.log1p(adata) # log transform: adata.X = log(adata.X + 1)
sc.pp.scale(adata) # scale to unit variance and shift to zero mean
Parameters
----------
adata : :class:`~anndata.AnnData`
Annotated data matrix.
n_top_genes : `int`, optional (default: 1000)
Number of genes to keep.
log : `bool`, optional (default: `True`)
Take logarithm.
plot : `bool`, optional (default: `False`)
Show a plot of the gene dispersion vs. mean relation.
copy : `bool`, optional (default: `False`)
Return a copy of `adata` instead of updating it.
Returns
-------
Returns or updates `adata` depending on `copy`.
|
[
"Normalization",
"and",
"filtering",
"as",
"of",
"[",
"Zheng17",
"]",
"_",
"."
] |
python
|
train
|
erdc/RAPIDpy
|
RAPIDpy/inflow/CreateInflowFileFromGriddedRunoff.py
|
https://github.com/erdc/RAPIDpy/blob/50e14e130554b254a00ff23b226cd7e4c6cfe91a/RAPIDpy/inflow/CreateInflowFileFromGriddedRunoff.py#L52-L86
|
def read_in_weight_table(self, in_weight_table):
"""
Read in weight table
"""
print("Reading the weight table...")
with open_csv(in_weight_table, "r") as csvfile:
reader = csv.reader(csvfile)
header_row = next(reader)
# check number of columns in the weight table
if len(header_row) < len(self.header_wt):
raise Exception(self.error_messages[4])
# check header
if header_row[1:len(self.header_wt)] != self.header_wt[1:]:
raise Exception(self.error_messages[5])
self.dict_list = \
np.loadtxt(
in_weight_table,
delimiter=",",
usecols=(0, 1, 2, 3, 4),
skiprows=1,
dtype={
'names': (self.header_wt[0],
self.header_wt[1],
self.header_wt[2],
self.header_wt[3],
self.header_wt[4]),
'formats': ('i8', 'f8', 'i8', 'i8', 'i8')
},
)
self.count = self.dict_list.shape[0]
self.size_stream_id = \
len(np.unique(np.array(self.dict_list[self.header_wt[0]],
dtype=np.int32)))
|
[
"def",
"read_in_weight_table",
"(",
"self",
",",
"in_weight_table",
")",
":",
"print",
"(",
"\"Reading the weight table...\"",
")",
"with",
"open_csv",
"(",
"in_weight_table",
",",
"\"r\"",
")",
"as",
"csvfile",
":",
"reader",
"=",
"csv",
".",
"reader",
"(",
"csvfile",
")",
"header_row",
"=",
"next",
"(",
"reader",
")",
"# check number of columns in the weight table\r",
"if",
"len",
"(",
"header_row",
")",
"<",
"len",
"(",
"self",
".",
"header_wt",
")",
":",
"raise",
"Exception",
"(",
"self",
".",
"error_messages",
"[",
"4",
"]",
")",
"# check header\r",
"if",
"header_row",
"[",
"1",
":",
"len",
"(",
"self",
".",
"header_wt",
")",
"]",
"!=",
"self",
".",
"header_wt",
"[",
"1",
":",
"]",
":",
"raise",
"Exception",
"(",
"self",
".",
"error_messages",
"[",
"5",
"]",
")",
"self",
".",
"dict_list",
"=",
"np",
".",
"loadtxt",
"(",
"in_weight_table",
",",
"delimiter",
"=",
"\",\"",
",",
"usecols",
"=",
"(",
"0",
",",
"1",
",",
"2",
",",
"3",
",",
"4",
")",
",",
"skiprows",
"=",
"1",
",",
"dtype",
"=",
"{",
"'names'",
":",
"(",
"self",
".",
"header_wt",
"[",
"0",
"]",
",",
"self",
".",
"header_wt",
"[",
"1",
"]",
",",
"self",
".",
"header_wt",
"[",
"2",
"]",
",",
"self",
".",
"header_wt",
"[",
"3",
"]",
",",
"self",
".",
"header_wt",
"[",
"4",
"]",
")",
",",
"'formats'",
":",
"(",
"'i8'",
",",
"'f8'",
",",
"'i8'",
",",
"'i8'",
",",
"'i8'",
")",
"}",
",",
")",
"self",
".",
"count",
"=",
"self",
".",
"dict_list",
".",
"shape",
"[",
"0",
"]",
"self",
".",
"size_stream_id",
"=",
"len",
"(",
"np",
".",
"unique",
"(",
"np",
".",
"array",
"(",
"self",
".",
"dict_list",
"[",
"self",
".",
"header_wt",
"[",
"0",
"]",
"]",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
")",
")"
] |
Read in weight table
|
[
"Read",
"in",
"weight",
"table"
] |
python
|
train
|
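The np.loadtxt call above reads the weight table into a structured array keyed by the header names. A self-contained miniature of the same pattern on an in-memory CSV; the column names and values here are invented, not the real RAPIDpy weight-table header.

import io
import numpy as np

csv_text = """streamID,area_sqm,lon_index,lat_index,npoints
101,12.5,3,7,1
101,7.5,4,7,1
202,20.0,5,8,1
"""

names = ('streamID', 'area_sqm', 'lon_index', 'lat_index', 'npoints')
table = np.loadtxt(io.StringIO(csv_text),
                   delimiter=",",
                   usecols=(0, 1, 2, 3, 4),
                   skiprows=1,
                   dtype={'names': names,
                          'formats': ('i8', 'f8', 'i8', 'i8', 'i8')})

print(table.shape[0])                                                # 3 rows
print(len(np.unique(np.array(table['streamID'], dtype=np.int32))))   # 2 unique stream ids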
inasafe/inasafe
|
safe/plugin.py
|
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/plugin.py#L170-L185
|
def _create_dock_toggle_action(self):
"""Create action for plugin dockable window (show/hide)."""
# pylint: disable=W0201
icon = resources_path('img', 'icons', 'icon.svg')
self.action_dock = QAction(
QIcon(icon),
self.tr('Toggle InaSAFE Dock'), self.iface.mainWindow())
self.action_dock.setObjectName('InaSAFEDockToggle')
self.action_dock.setStatusTip(self.tr(
'Show/hide InaSAFE dock widget'))
self.action_dock.setWhatsThis(self.tr(
'Show/hide InaSAFE dock widget'))
self.action_dock.setCheckable(True)
self.action_dock.setChecked(True)
self.action_dock.triggered.connect(self.toggle_dock_visibility)
self.add_action(self.action_dock)
|
[
"def",
"_create_dock_toggle_action",
"(",
"self",
")",
":",
"# pylint: disable=W0201",
"icon",
"=",
"resources_path",
"(",
"'img'",
",",
"'icons'",
",",
"'icon.svg'",
")",
"self",
".",
"action_dock",
"=",
"QAction",
"(",
"QIcon",
"(",
"icon",
")",
",",
"self",
".",
"tr",
"(",
"'Toggle InaSAFE Dock'",
")",
",",
"self",
".",
"iface",
".",
"mainWindow",
"(",
")",
")",
"self",
".",
"action_dock",
".",
"setObjectName",
"(",
"'InaSAFEDockToggle'",
")",
"self",
".",
"action_dock",
".",
"setStatusTip",
"(",
"self",
".",
"tr",
"(",
"'Show/hide InaSAFE dock widget'",
")",
")",
"self",
".",
"action_dock",
".",
"setWhatsThis",
"(",
"self",
".",
"tr",
"(",
"'Show/hide InaSAFE dock widget'",
")",
")",
"self",
".",
"action_dock",
".",
"setCheckable",
"(",
"True",
")",
"self",
".",
"action_dock",
".",
"setChecked",
"(",
"True",
")",
"self",
".",
"action_dock",
".",
"triggered",
".",
"connect",
"(",
"self",
".",
"toggle_dock_visibility",
")",
"self",
".",
"add_action",
"(",
"self",
".",
"action_dock",
")"
] |
Create action for plugin dockable window (show/hide).
|
[
"Create",
"action",
"for",
"plugin",
"dockable",
"window",
"(",
"show",
"/",
"hide",
")",
"."
] |
python
|
train
|
DataDog/integrations-core
|
sqlserver/datadog_checks/sqlserver/sqlserver.py
|
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/sqlserver/datadog_checks/sqlserver/sqlserver.py#L597-L611
|
def close_db_connections(self, instance, db_key, db_name=None):
"""
We close the db connections explicitly b/c when we don't they keep
locks on the db. This presents as issues such as the SQL Server Agent
being unable to stop.
"""
conn_key = self._conn_key(instance, db_key, db_name)
if conn_key not in self.connections:
return
try:
self.connections[conn_key]['conn'].close()
del self.connections[conn_key]
except Exception as e:
self.log.warning("Could not close adodbapi db connection\n{0}".format(e))
|
[
"def",
"close_db_connections",
"(",
"self",
",",
"instance",
",",
"db_key",
",",
"db_name",
"=",
"None",
")",
":",
"conn_key",
"=",
"self",
".",
"_conn_key",
"(",
"instance",
",",
"db_key",
",",
"db_name",
")",
"if",
"conn_key",
"not",
"in",
"self",
".",
"connections",
":",
"return",
"try",
":",
"self",
".",
"connections",
"[",
"conn_key",
"]",
"[",
"'conn'",
"]",
".",
"close",
"(",
")",
"del",
"self",
".",
"connections",
"[",
"conn_key",
"]",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"log",
".",
"warning",
"(",
"\"Could not close adodbapi db connection\\n{0}\"",
".",
"format",
"(",
"e",
")",
")"
] |
We close the db connections explicitly b/c when we don't they keep
locks on the db. This presents as issues such as the SQL Server Agent
being unable to stop.
|
[
"We",
"close",
"the",
"db",
"connections",
"explicitly",
"b",
"/",
"c",
"when",
"we",
"don",
"t",
"they",
"keep",
"locks",
"on",
"the",
"db",
".",
"This",
"presents",
"as",
"issues",
"such",
"as",
"the",
"SQL",
"Server",
"Agent",
"being",
"unable",
"to",
"stop",
"."
] |
python
|
train
|
dpkp/kafka-python
|
kafka/consumer/fetcher.py
|
https://github.com/dpkp/kafka-python/blob/f6a8a38937688ea2cc5dc13d3d1039493be5c9b5/kafka/consumer/fetcher.py#L218-L245
|
def _reset_offset(self, partition):
"""Reset offsets for the given partition using the offset reset strategy.
Arguments:
partition (TopicPartition): the partition whose offset needs to be reset
Raises:
NoOffsetForPartitionError: if no offset reset strategy is defined
"""
timestamp = self._subscriptions.assignment[partition].reset_strategy
if timestamp is OffsetResetStrategy.EARLIEST:
strategy = 'earliest'
elif timestamp is OffsetResetStrategy.LATEST:
strategy = 'latest'
else:
raise NoOffsetForPartitionError(partition)
log.debug("Resetting offset for partition %s to %s offset.",
partition, strategy)
offsets = self._retrieve_offsets({partition: timestamp})
if partition not in offsets:
raise NoOffsetForPartitionError(partition)
offset = offsets[partition][0]
# we might lose the assignment while fetching the offset,
# so check it is still active
if self._subscriptions.is_assigned(partition):
self._subscriptions.seek(partition, offset)
|
[
"def",
"_reset_offset",
"(",
"self",
",",
"partition",
")",
":",
"timestamp",
"=",
"self",
".",
"_subscriptions",
".",
"assignment",
"[",
"partition",
"]",
".",
"reset_strategy",
"if",
"timestamp",
"is",
"OffsetResetStrategy",
".",
"EARLIEST",
":",
"strategy",
"=",
"'earliest'",
"elif",
"timestamp",
"is",
"OffsetResetStrategy",
".",
"LATEST",
":",
"strategy",
"=",
"'latest'",
"else",
":",
"raise",
"NoOffsetForPartitionError",
"(",
"partition",
")",
"log",
".",
"debug",
"(",
"\"Resetting offset for partition %s to %s offset.\"",
",",
"partition",
",",
"strategy",
")",
"offsets",
"=",
"self",
".",
"_retrieve_offsets",
"(",
"{",
"partition",
":",
"timestamp",
"}",
")",
"if",
"partition",
"not",
"in",
"offsets",
":",
"raise",
"NoOffsetForPartitionError",
"(",
"partition",
")",
"offset",
"=",
"offsets",
"[",
"partition",
"]",
"[",
"0",
"]",
"# we might lose the assignment while fetching the offset,",
"# so check it is still active",
"if",
"self",
".",
"_subscriptions",
".",
"is_assigned",
"(",
"partition",
")",
":",
"self",
".",
"_subscriptions",
".",
"seek",
"(",
"partition",
",",
"offset",
")"
] |
Reset offsets for the given partition using the offset reset strategy.
Arguments:
partition (TopicPartition): the partition whose offset needs to be reset
Raises:
NoOffsetForPartitionError: if no offset reset strategy is defined
|
[
"Reset",
"offsets",
"for",
"the",
"given",
"partition",
"using",
"the",
"offset",
"reset",
"strategy",
"."
] |
python
|
train
|
botstory/botstory
|
botstory/ast/story_context/reducers.py
|
https://github.com/botstory/botstory/blob/9c5b2fc7f7a14dbd467d70f60d5ba855ef89dac3/botstory/ast/story_context/reducers.py#L161-L194
|
def scope_out(ctx):
"""
drop last stack item if:
- we have reached the end of the stack
- and don't wait for any input
:param ctx:
:return:
"""
logger.debug('# scope_out')
logger.debug(ctx)
# we reach the end of story line
# so we could collapse previous scope and related stack item
if ctx.is_tail_of_story() and ctx.could_scope_out():
logger.debug('# [<] return')
ctx = ctx.clone()
ctx.message['session']['stack'] = ctx.message['session']['stack'][:-1]
if not ctx.is_empty_stack() and \
(ctx.is_scope_level_part() or \
ctx.is_breaking_a_loop()):
# isinstance(ctx.get_current_story_part(), loop.StoriesLoopNode) and \
# isinstance(ctx.waiting_for, callable.EndOfStory) or \
ctx.message = modify_stack_in_message(ctx.message,
lambda stack: stack[:-1] + [{
'data': stack[-1]['data'],
'step': stack[-1]['step'] + 1,
'topic': stack[-1]['topic'],
}])
if ctx.is_breaking_a_loop() and not ctx.is_scope_level():
ctx.waiting_for = None
logger.debug(ctx)
return ctx
|
[
"def",
"scope_out",
"(",
"ctx",
")",
":",
"logger",
".",
"debug",
"(",
"'# scope_out'",
")",
"logger",
".",
"debug",
"(",
"ctx",
")",
"# we reach the end of story line",
"# so we could collapse previous scope and related stack item",
"if",
"ctx",
".",
"is_tail_of_story",
"(",
")",
"and",
"ctx",
".",
"could_scope_out",
"(",
")",
":",
"logger",
".",
"debug",
"(",
"'# [<] return'",
")",
"ctx",
"=",
"ctx",
".",
"clone",
"(",
")",
"ctx",
".",
"message",
"[",
"'session'",
"]",
"[",
"'stack'",
"]",
"=",
"ctx",
".",
"message",
"[",
"'session'",
"]",
"[",
"'stack'",
"]",
"[",
":",
"-",
"1",
"]",
"if",
"not",
"ctx",
".",
"is_empty_stack",
"(",
")",
"and",
"(",
"ctx",
".",
"is_scope_level_part",
"(",
")",
"or",
"ctx",
".",
"is_breaking_a_loop",
"(",
")",
")",
":",
"# isinstance(ctx.get_current_story_part(), loop.StoriesLoopNode) and \\",
"# isinstance(ctx.waiting_for, callable.EndOfStory) or \\",
"ctx",
".",
"message",
"=",
"modify_stack_in_message",
"(",
"ctx",
".",
"message",
",",
"lambda",
"stack",
":",
"stack",
"[",
":",
"-",
"1",
"]",
"+",
"[",
"{",
"'data'",
":",
"stack",
"[",
"-",
"1",
"]",
"[",
"'data'",
"]",
",",
"'step'",
":",
"stack",
"[",
"-",
"1",
"]",
"[",
"'step'",
"]",
"+",
"1",
",",
"'topic'",
":",
"stack",
"[",
"-",
"1",
"]",
"[",
"'topic'",
"]",
",",
"}",
"]",
")",
"if",
"ctx",
".",
"is_breaking_a_loop",
"(",
")",
"and",
"not",
"ctx",
".",
"is_scope_level",
"(",
")",
":",
"ctx",
".",
"waiting_for",
"=",
"None",
"logger",
".",
"debug",
"(",
"ctx",
")",
"return",
"ctx"
] |
drop last stack item if:
- we have reached the end of the stack
- and don't wait for any input
:param ctx:
:return:
|
[
"drop",
"last",
"stack",
"item",
"if",
":",
"-",
"we",
"have",
"reach",
"the",
"end",
"of",
"stack",
"-",
"and",
"don",
"t",
"wait",
"any",
"input"
] |
python
|
train
|
mkouhei/tonicdnscli
|
src/tonicdnscli/command.py
|
https://github.com/mkouhei/tonicdnscli/blob/df2d6fb2104ae4d49fa89d1bba2f3ccd2fed388c/src/tonicdnscli/command.py#L121-L136
|
def show(args):
"""Convert and print JSON.
Argument:
args: arguments object
"""
domain = check_infile(args.infile)
action = True
try:
print(json.dumps(set_json(domain, action, filename=args.infile),
sort_keys=True, indent=2))
except UnicodeDecodeError as e:
sys.stderr.write("ERROR: \"%s\" is invalid format file.\n"
% args.infile)
exit(1)
|
[
"def",
"show",
"(",
"args",
")",
":",
"domain",
"=",
"check_infile",
"(",
"args",
".",
"infile",
")",
"action",
"=",
"True",
"try",
":",
"print",
"(",
"json",
".",
"dumps",
"(",
"set_json",
"(",
"domain",
",",
"action",
",",
"filename",
"=",
"args",
".",
"infile",
")",
",",
"sort_keys",
"=",
"True",
",",
"indent",
"=",
"2",
")",
")",
"except",
"UnicodeDecodeError",
"as",
"e",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"ERROR: \\\"%s\\\" is invalid format file.\\n\"",
"%",
"args",
".",
"infile",
")",
"exit",
"(",
"1",
")"
] |
Convert and print JSON.
Argument:
args: arguments object
|
[
"Convert",
"and",
"print",
"JSON",
"."
] |
python
|
train
|
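The only JSON-specific work in the record above is `json.dumps(..., sort_keys=True, indent=2)`; a minimal illustration with a made-up record, since `set_json` and `check_infile` are not shown here:

import json

record = {"name": "example.org", "records": [{"type": "A", "content": "192.0.2.1"}]}
print(json.dumps(record, sort_keys=True, indent=2))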
neuropsychology/NeuroKit.py
|
examples/UnderDev/eeg/eeg_microstates.py
|
https://github.com/neuropsychology/NeuroKit.py/blob/c9589348fbbde0fa7e986048c48f38e6b488adfe/examples/UnderDev/eeg/eeg_microstates.py#L489-L547
|
def eeg_microstates_plot(method, path="", extension=".png", show_sensors_position=False, show_sensors_name=False, plot=True, save=True, dpi=150, contours=0, colorbar=False, separate=False):
"""
Plot the microstates.
"""
# Generate and store figures
figures = []
names = []
# Check if microstates metrics available
try:
microstates = method["microstates_good_fit"]
except KeyError:
microstates = method["microstates"]
# Create individual plot for each microstate
for microstate in set(microstates):
if microstate != "Bad":
values = np.mean(method["data"][np.where(microstates == microstate)], axis=0)
values = np.array(values, ndmin=2).T
evoked = mne.EvokedArray(values, method["raw.info_example"], 0)
fig = evoked.plot_topomap(times=0, title=microstate, size=6, contours=contours, time_format="", show=plot, colorbar=colorbar, show_names=show_sensors_name, sensors=show_sensors_position)
figures.append(fig)
# Save separate figures
name = path + "microstate_%s_%s%s%s_%s%i_%s%s" %(microstate, method["data_scale"], method["data_normalize"], method["data_smoothing"], method["feature_reduction_method"], method["n_features"], method["clustering_method"], extension)
fig.savefig(name, dpi=dpi)
names.append(name)
# Save Combined plot
if save is True:
# Combine all plots
image_template = PIL.Image.open(names[0])
X, Y = image_template.size
image_template.close()
combined = PIL.Image.new('RGB', (int(X*len(set(microstates))/2), int( Y*len(set(microstates))/2)))
fig = 0
for x in np.arange(0, len(set(microstates))/2*int(X), int(X)):
for y in np.arange(0, len(set(microstates))/2*int(Y), int(Y)):
try:
newfig = PIL.Image.open(names[fig])
combined.paste(newfig, (int(x), int(y)))
newfig.close()
except:
pass
fig += 1
#combined.show()
combined_name = path + "microstates_%s%s%s_%s%i_%s%s" %(method["data_scale"], method["data_normalize"], method["data_smoothing"], method["feature_reduction_method"], method["n_features"], method["clustering_method"], extension)
combined.save(combined_name)
# Delete separate plots if needed
if separate is False or save is False:
for name in names:
os.remove(name)
return(figures)
|
[
"def",
"eeg_microstates_plot",
"(",
"method",
",",
"path",
"=",
"\"\"",
",",
"extension",
"=",
"\".png\"",
",",
"show_sensors_position",
"=",
"False",
",",
"show_sensors_name",
"=",
"False",
",",
"plot",
"=",
"True",
",",
"save",
"=",
"True",
",",
"dpi",
"=",
"150",
",",
"contours",
"=",
"0",
",",
"colorbar",
"=",
"False",
",",
"separate",
"=",
"False",
")",
":",
"# Generate and store figures",
"figures",
"=",
"[",
"]",
"names",
"=",
"[",
"]",
"# Check if microstates metrics available",
"try",
":",
"microstates",
"=",
"method",
"[",
"\"microstates_good_fit\"",
"]",
"except",
"KeyError",
":",
"microstates",
"=",
"method",
"[",
"\"microstates\"",
"]",
"# Create individual plot for each microstate",
"for",
"microstate",
"in",
"set",
"(",
"microstates",
")",
":",
"if",
"microstate",
"!=",
"\"Bad\"",
":",
"values",
"=",
"np",
".",
"mean",
"(",
"method",
"[",
"\"data\"",
"]",
"[",
"np",
".",
"where",
"(",
"microstates",
"==",
"microstate",
")",
"]",
",",
"axis",
"=",
"0",
")",
"values",
"=",
"np",
".",
"array",
"(",
"values",
",",
"ndmin",
"=",
"2",
")",
".",
"T",
"evoked",
"=",
"mne",
".",
"EvokedArray",
"(",
"values",
",",
"method",
"[",
"\"raw.info_example\"",
"]",
",",
"0",
")",
"fig",
"=",
"evoked",
".",
"plot_topomap",
"(",
"times",
"=",
"0",
",",
"title",
"=",
"microstate",
",",
"size",
"=",
"6",
",",
"contours",
"=",
"contours",
",",
"time_format",
"=",
"\"\"",
",",
"show",
"=",
"plot",
",",
"colorbar",
"=",
"colorbar",
",",
"show_names",
"=",
"show_sensors_name",
",",
"sensors",
"=",
"show_sensors_position",
")",
"figures",
".",
"append",
"(",
"fig",
")",
"# Save separate figures",
"name",
"=",
"path",
"+",
"\"microstate_%s_%s%s%s_%s%i_%s%s\"",
"%",
"(",
"microstate",
",",
"method",
"[",
"\"data_scale\"",
"]",
",",
"method",
"[",
"\"data_normalize\"",
"]",
",",
"method",
"[",
"\"data_smoothing\"",
"]",
",",
"method",
"[",
"\"feature_reduction_method\"",
"]",
",",
"method",
"[",
"\"n_features\"",
"]",
",",
"method",
"[",
"\"clustering_method\"",
"]",
",",
"extension",
")",
"fig",
".",
"savefig",
"(",
"name",
",",
"dpi",
"=",
"dpi",
")",
"names",
".",
"append",
"(",
"name",
")",
"# Save Combined plot",
"if",
"save",
"is",
"True",
":",
"# Combine all plots",
"image_template",
"=",
"PIL",
".",
"Image",
".",
"open",
"(",
"names",
"[",
"0",
"]",
")",
"X",
",",
"Y",
"=",
"image_template",
".",
"size",
"image_template",
".",
"close",
"(",
")",
"combined",
"=",
"PIL",
".",
"Image",
".",
"new",
"(",
"'RGB'",
",",
"(",
"int",
"(",
"X",
"*",
"len",
"(",
"set",
"(",
"microstates",
")",
")",
"/",
"2",
")",
",",
"int",
"(",
"Y",
"*",
"len",
"(",
"set",
"(",
"microstates",
")",
")",
"/",
"2",
")",
")",
")",
"fig",
"=",
"0",
"for",
"x",
"in",
"np",
".",
"arange",
"(",
"0",
",",
"len",
"(",
"set",
"(",
"microstates",
")",
")",
"/",
"2",
"*",
"int",
"(",
"X",
")",
",",
"int",
"(",
"X",
")",
")",
":",
"for",
"y",
"in",
"np",
".",
"arange",
"(",
"0",
",",
"len",
"(",
"set",
"(",
"microstates",
")",
")",
"/",
"2",
"*",
"int",
"(",
"Y",
")",
",",
"int",
"(",
"Y",
")",
")",
":",
"try",
":",
"newfig",
"=",
"PIL",
".",
"Image",
".",
"open",
"(",
"names",
"[",
"fig",
"]",
")",
"combined",
".",
"paste",
"(",
"newfig",
",",
"(",
"int",
"(",
"x",
")",
",",
"int",
"(",
"y",
")",
")",
")",
"newfig",
".",
"close",
"(",
")",
"except",
":",
"pass",
"fig",
"+=",
"1",
"#combined.show()",
"combined_name",
"=",
"path",
"+",
"\"microstates_%s%s%s_%s%i_%s%s\"",
"%",
"(",
"method",
"[",
"\"data_scale\"",
"]",
",",
"method",
"[",
"\"data_normalize\"",
"]",
",",
"method",
"[",
"\"data_smoothing\"",
"]",
",",
"method",
"[",
"\"feature_reduction_method\"",
"]",
",",
"method",
"[",
"\"n_features\"",
"]",
",",
"method",
"[",
"\"clustering_method\"",
"]",
",",
"extension",
")",
"combined",
".",
"save",
"(",
"combined_name",
")",
"# Detete separate plots in needed",
"if",
"separate",
"is",
"False",
"or",
"save",
"is",
"False",
":",
"for",
"name",
"in",
"names",
":",
"os",
".",
"remove",
"(",
"name",
")",
"return",
"(",
"figures",
")"
] |
Plot the microstates.
|
[
"Plot",
"the",
"microstates",
"."
] |
python
|
train
|
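The combined-figure step above pastes individual topomap images into one grid with PIL; the sketch below reproduces that paste pattern using small solid-colour tiles instead of saved matplotlib figures (the file name and tile sizes are invented).

from PIL import Image

tiles = [Image.new("RGB", (100, 80), c) for c in ("red", "green", "blue", "white")]
X, Y = tiles[0].size

combined = Image.new("RGB", (2 * X, 2 * Y))
i = 0
for x in range(0, 2 * X, X):
    for y in range(0, 2 * Y, Y):
        combined.paste(tiles[i], (x, y))
        i += 1

combined.save("microstates_grid_example.png")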
SetBased/py-stratum
|
pystratum/Constants.py
|
https://github.com/SetBased/py-stratum/blob/7c5ffaa2fdd03f865832a5190b5897ff2c0e3155/pystratum/Constants.py#L131-L142
|
def _read_configuration_file(self, config_filename):
"""
Reads parameters from the configuration file.
:param str config_filename: The name of the configuration file.
"""
config = configparser.ConfigParser()
config.read(config_filename)
self._constants_filename = config.get('constants', 'columns')
self._prefix = config.get('constants', 'prefix')
self._class_name = config.get('constants', 'class')
|
[
"def",
"_read_configuration_file",
"(",
"self",
",",
"config_filename",
")",
":",
"config",
"=",
"configparser",
".",
"ConfigParser",
"(",
")",
"config",
".",
"read",
"(",
"config_filename",
")",
"self",
".",
"_constants_filename",
"=",
"config",
".",
"get",
"(",
"'constants'",
",",
"'columns'",
")",
"self",
".",
"_prefix",
"=",
"config",
".",
"get",
"(",
"'constants'",
",",
"'prefix'",
")",
"self",
".",
"_class_name",
"=",
"config",
".",
"get",
"(",
"'constants'",
",",
"'class'",
")"
] |
Reads parameters from the configuration file.
:param str config_filename: The name of the configuration file.
|
[
"Reads",
"parameters",
"from",
"the",
"configuration",
"file",
"."
] |
python
|
train
|
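A self-contained illustration of the configparser reads above, using `read_string` with an inline config instead of a file; the option values are invented, only the section and option names mirror the method.

import configparser

config_text = """
[constants]
columns = constants.py
prefix = ABC_
class = Constants
"""

config = configparser.ConfigParser()
config.read_string(config_text)

constants_filename = config.get('constants', 'columns')
prefix = config.get('constants', 'prefix')
class_name = config.get('constants', 'class')
print(constants_filename, prefix, class_name)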
e7dal/bubble3
|
behave4cmd0/textutil.py
|
https://github.com/e7dal/bubble3/blob/59c735281a95b44f6263a25f4d6ce24fca520082/behave4cmd0/textutil.py#L174-L188
|
def text_normalize(text):
"""
Whitespace normalization:
- Strip empty lines
- Strip leading whitespace in a line
- Strip trailing whitespace in a line
- Normalize line endings
"""
# if not isinstance(text, str):
if isinstance(text, bytes):
# -- MAYBE: command.output => bytes, encoded stream output.
text = codecs.decode(text)
lines = [ line.strip() for line in text.splitlines() if line.strip() ]
return "\n".join(lines)
|
[
"def",
"text_normalize",
"(",
"text",
")",
":",
"# if not isinstance(text, str):",
"if",
"isinstance",
"(",
"text",
",",
"bytes",
")",
":",
"# -- MAYBE: command.ouput => bytes, encoded stream output.",
"text",
"=",
"codecs",
".",
"decode",
"(",
"text",
")",
"lines",
"=",
"[",
"line",
".",
"strip",
"(",
")",
"for",
"line",
"in",
"text",
".",
"splitlines",
"(",
")",
"if",
"line",
".",
"strip",
"(",
")",
"]",
"return",
"\"\\n\"",
".",
"join",
"(",
"lines",
")"
] |
Whitespace normalization:
- Strip empty lines
- Strip leading whitespace in a line
- Strip trailing whitespace in a line
- Normalize line endings
|
[
"Whitespace",
"normalization",
":"
] |
python
|
train
|
senaite/senaite.core
|
bika/lims/browser/__init__.py
|
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/__init__.py#L86-L123
|
def ulocalized_time(time, long_format=None, time_only=None, context=None,
request=None):
"""
This function gets a string as time or a DateTime object and returns a
string with the time formatted
:param time: The time to process
:type time: str/DateTime
:param long_format: If True, return time in long format
:type long_format: boolean/null
:param time_only: If True, only returns time.
:type time_only: boolean/null
:param context: The current context
:type context: ATContentType
:param request: The current request
:type request: HTTPRequest object
:returns: The formatted date as string
:rtype: string
"""
# if time is a string, we'll try pass it through strptime with the various
# formats defined.
time = get_date(context, time)
if not time or not isinstance(time, DateTime):
return ''
# no printing times if they were not specified in inputs
if time.second() + time.minute() + time.hour() == 0:
long_format = False
try:
time_str = _ut(time, long_format, time_only, context, 'senaite.core', request)
except ValueError:
err_msg = traceback.format_exc() + '\n'
logger.warn(
err_msg + '\n' +
"Error converting '{}' time to string in {}."
.format(time, context))
time_str = ''
return time_str
|
[
"def",
"ulocalized_time",
"(",
"time",
",",
"long_format",
"=",
"None",
",",
"time_only",
"=",
"None",
",",
"context",
"=",
"None",
",",
"request",
"=",
"None",
")",
":",
"# if time is a string, we'll try pass it through strptime with the various",
"# formats defined.",
"time",
"=",
"get_date",
"(",
"context",
",",
"time",
")",
"if",
"not",
"time",
"or",
"not",
"isinstance",
"(",
"time",
",",
"DateTime",
")",
":",
"return",
"''",
"# no printing times if they were not specified in inputs",
"if",
"time",
".",
"second",
"(",
")",
"+",
"time",
".",
"minute",
"(",
")",
"+",
"time",
".",
"hour",
"(",
")",
"==",
"0",
":",
"long_format",
"=",
"False",
"try",
":",
"time_str",
"=",
"_ut",
"(",
"time",
",",
"long_format",
",",
"time_only",
",",
"context",
",",
"'senaite.core'",
",",
"request",
")",
"except",
"ValueError",
":",
"err_msg",
"=",
"traceback",
".",
"format_exc",
"(",
")",
"+",
"'\\n'",
"logger",
".",
"warn",
"(",
"err_msg",
"+",
"'\\n'",
"+",
"\"Error converting '{}' time to string in {}.\"",
".",
"format",
"(",
"time",
",",
"context",
")",
")",
"time_str",
"=",
"''",
"return",
"time_str"
] |
This function gets a string as time or a DateTime object and returns a
string with the time formatted
:param time: The time to process
:type time: str/DateTime
:param long_format: If True, return time in long format
:type long_format: boolean/null
:param time_only: If True, only returns time.
:type time_only: boolean/null
:param context: The current context
:type context: ATContentType
:param request: The current request
:type request: HTTPRequest object
:returns: The formatted date as string
:rtype: string
|
[
"This",
"function",
"gets",
"ans",
"string",
"as",
"time",
"or",
"a",
"DateTime",
"objects",
"and",
"returns",
"a",
"string",
"with",
"the",
"time",
"formatted"
] |
python
|
train
|
josiahcarlson/rom
|
rom/util.py
|
https://github.com/josiahcarlson/rom/blob/8b5607a856341df85df33422accc30ba9294dbdb/rom/util.py#L658-L670
|
def refresh_all(self, *objects, **kwargs):
'''
This method is an alternate API for refreshing all entities tracked
by the session. You can call::
session.refresh_all()
session.refresh_all(force=True)
And all entities known by the session will be reloaded from Redis.
To force reloading for modified entities, you can pass ``force=True``.
'''
self.refresh(*self.known.values(), force=kwargs.get('force'))
|
[
"def",
"refresh_all",
"(",
"self",
",",
"*",
"objects",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"refresh",
"(",
"*",
"self",
".",
"known",
".",
"values",
"(",
")",
",",
"force",
"=",
"kwargs",
".",
"get",
"(",
"'force'",
")",
")"
] |
This method is an alternate API for refreshing all entities tracked
by the session. You can call::
session.refresh_all()
session.refresh_all(force=True)
And all entities known by the session will be reloaded from Redis.
To force reloading for modified entities, you can pass ``force=True``.
|
[
"This",
"method",
"is",
"an",
"alternate",
"API",
"for",
"refreshing",
"all",
"entities",
"tracked",
"by",
"the",
"session",
".",
"You",
"can",
"call",
"::"
] |
python
|
test
|
assamite/creamas
|
creamas/core/simulation.py
|
https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/core/simulation.py#L220-L229
|
def async_step(self):
"""Progress simulation by running all agents once asynchronously.
"""
assert len(self._agents_to_act) == 0
self._init_step()
t = time.time()
aiomas.run(until=self.env.trigger_all())
self._agents_to_act = []
self._step_processing_time = time.time() - t
self._finalize_step()
|
[
"def",
"async_step",
"(",
"self",
")",
":",
"assert",
"len",
"(",
"self",
".",
"_agents_to_act",
")",
"==",
"0",
"self",
".",
"_init_step",
"(",
")",
"t",
"=",
"time",
".",
"time",
"(",
")",
"aiomas",
".",
"run",
"(",
"until",
"=",
"self",
".",
"env",
".",
"trigger_all",
"(",
")",
")",
"self",
".",
"_agents_to_act",
"=",
"[",
"]",
"self",
".",
"_step_processing_time",
"=",
"time",
".",
"time",
"(",
")",
"-",
"t",
"self",
".",
"_finalize_step",
"(",
")"
] |
Progress simulation by running all agents once asynchronously.
|
[
"Progress",
"simulation",
"by",
"running",
"all",
"agents",
"once",
"asynchronously",
"."
] |
python
|
train
|
quantmind/pulsar
|
examples/snippets/remote.py
|
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/examples/snippets/remote.py#L8-L19
|
def remote_call(request, cls, method, args, kw):
'''Command for executing remote calls on a remote object
'''
actor = request.actor
name = 'remote_%s' % cls.__name__
if not hasattr(actor, name):
object = cls(actor)
setattr(actor, name, object)
else:
object = getattr(actor, name)
method_name = '%s%s' % (PREFIX, method)
return getattr(object, method_name)(request, *args, **kw)
|
[
"def",
"remote_call",
"(",
"request",
",",
"cls",
",",
"method",
",",
"args",
",",
"kw",
")",
":",
"actor",
"=",
"request",
".",
"actor",
"name",
"=",
"'remote_%s'",
"%",
"cls",
".",
"__name__",
"if",
"not",
"hasattr",
"(",
"actor",
",",
"name",
")",
":",
"object",
"=",
"cls",
"(",
"actor",
")",
"setattr",
"(",
"actor",
",",
"name",
",",
"object",
")",
"else",
":",
"object",
"=",
"getattr",
"(",
"actor",
",",
"name",
")",
"method_name",
"=",
"'%s%s'",
"%",
"(",
"PREFIX",
",",
"method",
")",
"return",
"getattr",
"(",
"object",
",",
"method_name",
")",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")"
] |
Command for executing remote calls on a remote object
|
[
"Command",
"for",
"executing",
"remote",
"calls",
"on",
"a",
"remote",
"object"
] |
python
|
train
|
SheffieldML/GPy
|
GPy/kern/src/todo/symmetric.py
|
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/kern/src/todo/symmetric.py#L30-L41
|
def K(self,X,X2,target):
"""Compute the covariance matrix between X and X2."""
AX = np.dot(X,self.transform)
if X2 is None:
X2 = X
AX2 = AX
else:
AX2 = np.dot(X2, self.transform)
self.k.K(X,X2,target)
self.k.K(AX,X2,target)
self.k.K(X,AX2,target)
self.k.K(AX,AX2,target)
|
[
"def",
"K",
"(",
"self",
",",
"X",
",",
"X2",
",",
"target",
")",
":",
"AX",
"=",
"np",
".",
"dot",
"(",
"X",
",",
"self",
".",
"transform",
")",
"if",
"X2",
"is",
"None",
":",
"X2",
"=",
"X",
"AX2",
"=",
"AX",
"else",
":",
"AX2",
"=",
"np",
".",
"dot",
"(",
"X2",
",",
"self",
".",
"transform",
")",
"self",
".",
"k",
".",
"K",
"(",
"X",
",",
"X2",
",",
"target",
")",
"self",
".",
"k",
".",
"K",
"(",
"AX",
",",
"X2",
",",
"target",
")",
"self",
".",
"k",
".",
"K",
"(",
"X",
",",
"AX2",
",",
"target",
")",
"self",
".",
"k",
".",
"K",
"(",
"AX",
",",
"AX2",
",",
"target",
")"
] |
Compute the covariance matrix between X and X2.
|
[
"Compute",
"the",
"covariance",
"matrix",
"between",
"X",
"and",
"X2",
"."
] |
python
|
train
|
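The kernel above accumulates k(X, X2) + k(AX, X2) + k(X, AX2) + k(AX, AX2), i.e. it symmetrises a base kernel under the linear map A (`self.transform`). A NumPy sketch with an RBF base kernel and a reflection for A, checking that the summed covariance is unchanged when A is applied to an input; the base kernel and A are assumptions for illustration, not GPy's internals.

import numpy as np

def rbf(X, X2, lengthscale=1.0):
    d2 = ((X[:, None, :] - X2[None, :, :]) ** 2).sum(-1)
    return np.exp(-0.5 * d2 / lengthscale ** 2)

A = np.array([[-1.0, 0.0],
              [0.0, 1.0]])                 # reflection about the y-axis, A @ A = I

def k_sym(X, X2):
    AX, AX2 = X @ A, X2 @ A
    return rbf(X, X2) + rbf(AX, X2) + rbf(X, AX2) + rbf(AX, AX2)

X = np.array([[1.0, 2.0], [0.5, -1.0]])
X2 = np.array([[0.3, 0.7]])

print(np.allclose(k_sym(X, X2), k_sym(X @ A, X2)))   # True: invariant under A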
Archived-Object/ligament
|
ligament/buildcontext.py
|
https://github.com/Archived-Object/ligament/blob/ff3d78130522676a20dc64086dc8a27b197cc20f/ligament/buildcontext.py#L126-L137
|
def resolve_dependency_graph(self, target):
""" resolves the build order for interdependent build targets
Assumes no cyclic dependencies
"""
targets = self.deep_dependendants(target)
# print "deep dependants:", targets
return sorted(targets,
cmp=lambda a, b:
1 if b in self.deep_dependendants(a) else
-1 if a in self.deep_dependendants(b) else
0)
|
[
"def",
"resolve_dependency_graph",
"(",
"self",
",",
"target",
")",
":",
"targets",
"=",
"self",
".",
"deep_dependendants",
"(",
"target",
")",
"# print \"deep dependants:\", targets",
"return",
"sorted",
"(",
"targets",
",",
"cmp",
"=",
"lambda",
"a",
",",
"b",
":",
"1",
"if",
"b",
"in",
"self",
".",
"deep_dependendants",
"(",
"a",
")",
"else",
"-",
"1",
"if",
"a",
"in",
"self",
".",
"deep_dependendants",
"(",
"b",
")",
"else",
"0",
")"
] |
resolves the build order for interdependent build targets
Assumes no cyclic dependencies
|
[
"resolves",
"the",
"build",
"order",
"for",
"interdependent",
"build",
"targets"
] |
python
|
train
|
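The `sorted(..., cmp=...)` call above is Python 2 only; on Python 3 the same comparator goes through `functools.cmp_to_key`. A sketch with a hypothetical transitive-dependency map standing in for `deep_dependendants`:

from functools import cmp_to_key

deps = {"bundle": {"js", "css"}, "js": set(), "css": set()}   # invented example graph

def deep_dependendants(target):
    return deps[target]

targets = ["bundle", "css", "js"]
ordered = sorted(
    targets,
    key=cmp_to_key(lambda a, b:
                   1 if b in deep_dependendants(a) else
                   -1 if a in deep_dependendants(b) else
                   0))
print(ordered)   # dependencies before dependents, e.g. ['css', 'js', 'bundle']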
bububa/pyTOP
|
pyTOP/packages/requests/models.py
|
https://github.com/bububa/pyTOP/blob/1e48009bcfe886be392628244b370e6374e1f2b2/pyTOP/packages/requests/models.py#L260-L282
|
def _encode_params(data):
"""Encode parameters in a piece of data.
If the data supplied is a dictionary, encodes each parameter in it, and
returns a list of tuples containing the encoded parameters, and a urlencoded
version of that.
Otherwise, assumes the data is already encoded appropriately, and
returns it twice.
"""
if hasattr(data, '__iter__'):
data = dict(data)
if hasattr(data, 'items'):
result = []
for k, vs in data.items():
for v in isinstance(vs, list) and vs or [vs]:
result.append((k.encode('utf-8') if isinstance(k, unicode) else k,
v.encode('utf-8') if isinstance(v, unicode) else v))
return result, urllib.urlencode(result, doseq=True)
else:
return data, data
|
[
"def",
"_encode_params",
"(",
"data",
")",
":",
"if",
"hasattr",
"(",
"data",
",",
"'__iter__'",
")",
":",
"data",
"=",
"dict",
"(",
"data",
")",
"if",
"hasattr",
"(",
"data",
",",
"'items'",
")",
":",
"result",
"=",
"[",
"]",
"for",
"k",
",",
"vs",
"in",
"data",
".",
"items",
"(",
")",
":",
"for",
"v",
"in",
"isinstance",
"(",
"vs",
",",
"list",
")",
"and",
"vs",
"or",
"[",
"vs",
"]",
":",
"result",
".",
"append",
"(",
"(",
"k",
".",
"encode",
"(",
"'utf-8'",
")",
"if",
"isinstance",
"(",
"k",
",",
"unicode",
")",
"else",
"k",
",",
"v",
".",
"encode",
"(",
"'utf-8'",
")",
"if",
"isinstance",
"(",
"v",
",",
"unicode",
")",
"else",
"v",
")",
")",
"return",
"result",
",",
"urllib",
".",
"urlencode",
"(",
"result",
",",
"doseq",
"=",
"True",
")",
"else",
":",
"return",
"data",
",",
"data"
] |
Encode parameters in a piece of data.
If the data supplied is a dictionary, encodes each parameter in it, and
returns a list of tuples containing the encoded parameters, and a urlencoded
version of that.
Otherwise, assumes the data is already encoded appropriately, and
returns it twice.
|
[
"Encode",
"parameters",
"in",
"a",
"piece",
"of",
"data",
"."
] |
python
|
train
|
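The record above is Python 2 (`urllib.urlencode`, `unicode`); on Python 3 the equivalent multi-value encoding lives in `urllib.parse`. A minimal sketch of the same list-expansion plus `doseq=True` behaviour (the sample data is invented):

from urllib.parse import urlencode

data = {"q": "kittens", "tag": ["cute", "small"]}

# Expand list values into repeated (key, value) pairs, as the snippet above does.
pairs = [(k, v) for k, vs in data.items()
         for v in (vs if isinstance(vs, list) else [vs])]
print(pairs)
print(urlencode(pairs, doseq=True))   # q=kittens&tag=cute&tag=small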
ga4gh/ga4gh-common
|
ga4gh/common/utils.py
|
https://github.com/ga4gh/ga4gh-common/blob/ea1b562dce5bf088ac4577b838cfac7745f08346/ga4gh/common/utils.py#L182-L197
|
def getFilePathsWithExtensionsInDirectory(dirTree, patterns, sort=True):
"""
Returns all file paths that match any one of patterns in a
file tree with its root at dirTree. Sorts the paths by default.
"""
filePaths = []
for root, dirs, files in os.walk(dirTree):
for filePath in files:
for pattern in patterns:
if fnmatch.fnmatch(filePath, pattern):
fullPath = os.path.join(root, filePath)
filePaths.append(fullPath)
break
if sort:
filePaths.sort()
return filePaths
|
[
"def",
"getFilePathsWithExtensionsInDirectory",
"(",
"dirTree",
",",
"patterns",
",",
"sort",
"=",
"True",
")",
":",
"filePaths",
"=",
"[",
"]",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"dirTree",
")",
":",
"for",
"filePath",
"in",
"files",
":",
"for",
"pattern",
"in",
"patterns",
":",
"if",
"fnmatch",
".",
"fnmatch",
"(",
"filePath",
",",
"pattern",
")",
":",
"fullPath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"filePath",
")",
"filePaths",
".",
"append",
"(",
"fullPath",
")",
"break",
"if",
"sort",
":",
"filePaths",
".",
"sort",
"(",
")",
"return",
"filePaths"
] |
Returns all file paths that match any one of patterns in a
file tree with its root at dirTree. Sorts the paths by default.
|
[
"Returns",
"all",
"file",
"paths",
"that",
"match",
"any",
"one",
"of",
"patterns",
"in",
"a",
"file",
"tree",
"with",
"its",
"root",
"at",
"dirTree",
".",
"Sorts",
"the",
"paths",
"by",
"default",
"."
] |
python
|
train
|
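The walk-and-match above can also be written with pathlib; a short equivalent sketch (`Path.rglob` takes one pattern at a time, so the patterns are looped, duplicates from overlapping patterns are removed via a set, and the result is sorted by default to match the function above).

from pathlib import Path

def get_file_paths(dir_tree, patterns, sort=True):
    paths = {str(p) for pattern in patterns
             for p in Path(dir_tree).rglob(pattern) if p.is_file()}
    return sorted(paths) if sort else list(paths)

# Example: all Python and text files under the current directory.
print(get_file_paths(".", ["*.py", "*.txt"]))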
CxAalto/gtfspy
|
gtfspy/import_loaders/stop_times_loader.py
|
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/import_loaders/stop_times_loader.py#L97-L184
|
def calculate_trip_shape_breakpoints(conn):
"""Pre-compute the shape points corresponding to each trip's stop.
Depends: shapes"""
from gtfspy import shapes
cur = conn.cursor()
breakpoints_cache = {}
# Counters for problems - don't print every problem.
count_bad_shape_ordering = 0
count_bad_shape_fit = 0
count_no_shape_fit = 0
trip_Is = [x[0] for x in
cur.execute('SELECT DISTINCT trip_I FROM stop_times').fetchall()]
for trip_I in trip_Is:
# Get the shape points
row = cur.execute('''SELECT shape_id
FROM trips WHERE trip_I=?''', (trip_I,)).fetchone()
if row is None:
continue
shape_id = row[0]
if shape_id is None or shape_id == '':
continue
# Get the stop points
cur.execute('''SELECT seq, lat, lon, stop_id
FROM stop_times LEFT JOIN stops USING (stop_I)
WHERE trip_I=?
ORDER BY seq''',
(trip_I,))
#print '%20s, %s'%(run_code, datetime.fromtimestamp(run_sch_starttime))
stop_points = [dict(seq=row[0],
lat=row[1],
lon=row[2],
stop_I=row[3])
for row in cur if row[1] and row[2]]
# Calculate a cache key for this sequence.
# If both shape_id, and all stop_Is are same, then we can re-use existing breakpoints:
cache_key = (shape_id, tuple(x['stop_I'] for x in stop_points))
if cache_key in breakpoints_cache:
breakpoints = breakpoints_cache[cache_key]
else:
# Must re-calculate breakpoints:
shape_points = shapes.get_shape_points(cur, shape_id)
breakpoints, badness \
= shapes.find_segments(stop_points, shape_points)
if breakpoints != sorted(breakpoints):
# route_name, route_id, route_I, trip_id, trip_I = \
# cur.execute('''SELECT name, route_id, route_I, trip_id, trip_I
# FROM trips LEFT JOIN routes USING (route_I)
# WHERE trip_I=? LIMIT 1''', (trip_I,)).fetchone()
# print "Ignoring: Route with bad shape ordering:", route_name, route_id, route_I, trip_id, trip_I
count_bad_shape_ordering += 1
# select * from stop_times where trip_I=NNNN order by shape_break;
breakpoints_cache[cache_key] = None
continue # Do not set shape_break for this trip.
# Add it to cache
breakpoints_cache[cache_key] = breakpoints
if badness > 30 * len(breakpoints):
#print "bad shape fit: %s (%s, %s, %s)" % (badness, trip_I, shape_id, len(breakpoints))
count_bad_shape_fit += 1
if breakpoints is None:
continue
if len(breakpoints) == 0:
# No valid route could be identified.
#print "Ignoring: No shape identified for trip_I=%s, shape_id=%s" % (trip_I, shape_id)
count_no_shape_fit += 1
continue
# breakpoints is the corresponding points for each stop
assert len(breakpoints) == len(stop_points)
cur.executemany('UPDATE stop_times SET shape_break=? '
'WHERE trip_I=? AND seq=? ',
((int(bkpt), int(trip_I), int(stpt['seq']))
for bkpt, stpt in zip(breakpoints, stop_points)))
if count_bad_shape_fit > 0:
print(" Shape trip breakpoints: %s bad fits" % count_bad_shape_fit)
if count_bad_shape_ordering > 0:
print(" Shape trip breakpoints: %s bad shape orderings" % count_bad_shape_ordering)
if count_no_shape_fit > 0:
print(" Shape trip breakpoints: %s no shape fits" % count_no_shape_fit)
conn.commit()
|
[
"def",
"calculate_trip_shape_breakpoints",
"(",
"conn",
")",
":",
"from",
"gtfspy",
"import",
"shapes",
"cur",
"=",
"conn",
".",
"cursor",
"(",
")",
"breakpoints_cache",
"=",
"{",
"}",
"# Counters for problems - don't print every problem.",
"count_bad_shape_ordering",
"=",
"0",
"count_bad_shape_fit",
"=",
"0",
"count_no_shape_fit",
"=",
"0",
"trip_Is",
"=",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"cur",
".",
"execute",
"(",
"'SELECT DISTINCT trip_I FROM stop_times'",
")",
".",
"fetchall",
"(",
")",
"]",
"for",
"trip_I",
"in",
"trip_Is",
":",
"# Get the shape points",
"row",
"=",
"cur",
".",
"execute",
"(",
"'''SELECT shape_id\n FROM trips WHERE trip_I=?'''",
",",
"(",
"trip_I",
",",
")",
")",
".",
"fetchone",
"(",
")",
"if",
"row",
"is",
"None",
":",
"continue",
"shape_id",
"=",
"row",
"[",
"0",
"]",
"if",
"shape_id",
"is",
"None",
"or",
"shape_id",
"==",
"''",
":",
"continue",
"# Get the stop points",
"cur",
".",
"execute",
"(",
"'''SELECT seq, lat, lon, stop_id\n FROM stop_times LEFT JOIN stops USING (stop_I)\n WHERE trip_I=?\n ORDER BY seq'''",
",",
"(",
"trip_I",
",",
")",
")",
"#print '%20s, %s'%(run_code, datetime.fromtimestamp(run_sch_starttime))",
"stop_points",
"=",
"[",
"dict",
"(",
"seq",
"=",
"row",
"[",
"0",
"]",
",",
"lat",
"=",
"row",
"[",
"1",
"]",
",",
"lon",
"=",
"row",
"[",
"2",
"]",
",",
"stop_I",
"=",
"row",
"[",
"3",
"]",
")",
"for",
"row",
"in",
"cur",
"if",
"row",
"[",
"1",
"]",
"and",
"row",
"[",
"2",
"]",
"]",
"# Calculate a cache key for this sequence.",
"# If both shape_id, and all stop_Is are same, then we can re-use existing breakpoints:",
"cache_key",
"=",
"(",
"shape_id",
",",
"tuple",
"(",
"x",
"[",
"'stop_I'",
"]",
"for",
"x",
"in",
"stop_points",
")",
")",
"if",
"cache_key",
"in",
"breakpoints_cache",
":",
"breakpoints",
"=",
"breakpoints_cache",
"[",
"cache_key",
"]",
"else",
":",
"# Must re-calculate breakpoints:",
"shape_points",
"=",
"shapes",
".",
"get_shape_points",
"(",
"cur",
",",
"shape_id",
")",
"breakpoints",
",",
"badness",
"=",
"shapes",
".",
"find_segments",
"(",
"stop_points",
",",
"shape_points",
")",
"if",
"breakpoints",
"!=",
"sorted",
"(",
"breakpoints",
")",
":",
"# route_name, route_id, route_I, trip_id, trip_I = \\",
"# cur.execute('''SELECT name, route_id, route_I, trip_id, trip_I",
"# FROM trips LEFT JOIN routes USING (route_I)",
"# WHERE trip_I=? LIMIT 1''', (trip_I,)).fetchone()",
"# print \"Ignoring: Route with bad shape ordering:\", route_name, route_id, route_I, trip_id, trip_I",
"count_bad_shape_ordering",
"+=",
"1",
"# select * from stop_times where trip_I=NNNN order by shape_break;",
"breakpoints_cache",
"[",
"cache_key",
"]",
"=",
"None",
"continue",
"# Do not set shape_break for this trip.",
"# Add it to cache",
"breakpoints_cache",
"[",
"cache_key",
"]",
"=",
"breakpoints",
"if",
"badness",
">",
"30",
"*",
"len",
"(",
"breakpoints",
")",
":",
"#print \"bad shape fit: %s (%s, %s, %s)\" % (badness, trip_I, shape_id, len(breakpoints))",
"count_bad_shape_fit",
"+=",
"1",
"if",
"breakpoints",
"is",
"None",
":",
"continue",
"if",
"len",
"(",
"breakpoints",
")",
"==",
"0",
":",
"# No valid route could be identified.",
"#print \"Ignoring: No shape identified for trip_I=%s, shape_id=%s\" % (trip_I, shape_id)",
"count_no_shape_fit",
"+=",
"1",
"continue",
"# breakpoints is the corresponding points for each stop",
"assert",
"len",
"(",
"breakpoints",
")",
"==",
"len",
"(",
"stop_points",
")",
"cur",
".",
"executemany",
"(",
"'UPDATE stop_times SET shape_break=? '",
"'WHERE trip_I=? AND seq=? '",
",",
"(",
"(",
"int",
"(",
"bkpt",
")",
",",
"int",
"(",
"trip_I",
")",
",",
"int",
"(",
"stpt",
"[",
"'seq'",
"]",
")",
")",
"for",
"bkpt",
",",
"stpt",
"in",
"zip",
"(",
"breakpoints",
",",
"stop_points",
")",
")",
")",
"if",
"count_bad_shape_fit",
">",
"0",
":",
"print",
"(",
"\" Shape trip breakpoints: %s bad fits\"",
"%",
"count_bad_shape_fit",
")",
"if",
"count_bad_shape_ordering",
">",
"0",
":",
"print",
"(",
"\" Shape trip breakpoints: %s bad shape orderings\"",
"%",
"count_bad_shape_ordering",
")",
"if",
"count_no_shape_fit",
">",
"0",
":",
"print",
"(",
"\" Shape trip breakpoints: %s no shape fits\"",
"%",
"count_no_shape_fit",
")",
"conn",
".",
"commit",
"(",
")"
] |
Pre-compute the shape points corresponding to each trip's stop.
Depends: shapes
|
[
"Pre",
"-",
"compute",
"the",
"shape",
"points",
"corresponding",
"to",
"each",
"trip",
"s",
"stop",
"."
] |
python
|
valid
|
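A hedged usage sketch for calculate_trip_shape_breakpoints above: it expects a sqlite3 connection to a database gtfspy has already populated (stop_times, trips, stops and shapes tables), updates stop_times.shape_break in place and commits on the connection. The file name below is a placeholder.

import sqlite3

from gtfspy.import_loaders.stop_times_loader import calculate_trip_shape_breakpoints

# "week.sqlite" stands in for a database produced by a gtfspy GTFS import.
conn = sqlite3.connect("week.sqlite")
calculate_trip_shape_breakpoints(conn)   # fills stop_times.shape_break; commits internally
conn.close()
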
senaite/senaite.core
|
bika/lims/content/analysisservice.py
|
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/content/analysisservice.py#L557-L566
|
def getAvailableInstruments(self):
""" Returns the instruments available for this service.
If the service has the getInstrumentEntryOfResults(), returns
the instruments capable to perform this service. Otherwise,
returns an empty list.
"""
instruments = self.getInstruments() \
if self.getInstrumentEntryOfResults() is True \
else None
return instruments if instruments else []
|
[
"def",
"getAvailableInstruments",
"(",
"self",
")",
":",
"instruments",
"=",
"self",
".",
"getInstruments",
"(",
")",
"if",
"self",
".",
"getInstrumentEntryOfResults",
"(",
")",
"is",
"True",
"else",
"None",
"return",
"instruments",
"if",
"instruments",
"else",
"[",
"]"
] |
Returns the instruments available for this service.
If the service has the getInstrumentEntryOfResults(), returns
the instruments capable to perform this service. Otherwise,
returns an empty list.
|
[
"Returns",
"the",
"instruments",
"available",
"for",
"this",
"service",
".",
"If",
"the",
"service",
"has",
"the",
"getInstrumentEntryOfResults",
"()",
"returns",
"the",
"instruments",
"capable",
"to",
"perform",
"this",
"service",
".",
"Otherwhise",
"returns",
"an",
"empty",
"list",
"."
] |
python
|
train
|
brocade/pynos
|
pynos/versions/base/interface.py
|
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/base/interface.py#L4079-L4138
|
def create_ve(self, **kwargs):
"""
Add Ve Interface .
Args:
ve_name: Ve name with which the Ve interface needs to be
created.
enable (bool): If vrf forwarding should be enabled
or disabled. Default: ``True``.
get (bool) : Get config instead of editing config. (True, False)
rbridge_id (str): rbridge-id for device.
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
ValueError: if `ve_name` is invalid.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.create_ve(
... ve_name='100',
... rbridge_id='1')
... output = dev.interface.create_ve(
... get=True,
... ve_name='100',
... rbridge_id='1')
... output = dev.interface.create_ve(
... get=True,
... rbridge_id='1')
... output = dev.interface.create_ve(
... enable=False,
... ve_name='101',
... rbridge_id='1')
... # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
KeyError
"""
ve_name = kwargs.pop('ve_name', '')
rbridge_id = kwargs.pop('rbridge_id', '1')
enable = kwargs.pop('enable', True)
get = kwargs.pop('get', False)
callback = kwargs.pop('callback', self._callback)
ve_args = dict(name=ve_name, rbridge_id=rbridge_id)
if get:
enable = None
method_class = self._rbridge
method_name = 'rbridge_id_interface_ve_name'
create_ve = getattr(method_class, method_name)
config = create_ve(**ve_args)
if get:
return callback(config, handler='get_config')
if not enable:
config.find('.//*ve').set('operation', 'delete')
return callback(config)
|
[
"def",
"create_ve",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"ve_name",
"=",
"kwargs",
".",
"pop",
"(",
"'ve_name'",
",",
"''",
")",
"rbridge_id",
"=",
"kwargs",
".",
"pop",
"(",
"'rbridge_id'",
",",
"'1'",
")",
"enable",
"=",
"kwargs",
".",
"pop",
"(",
"'enable'",
",",
"True",
")",
"get",
"=",
"kwargs",
".",
"pop",
"(",
"'get'",
",",
"False",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"ve_args",
"=",
"dict",
"(",
"name",
"=",
"ve_name",
",",
"rbridge_id",
"=",
"rbridge_id",
")",
"if",
"get",
":",
"enable",
"=",
"None",
"method_class",
"=",
"self",
".",
"_rbridge",
"method_name",
"=",
"'rbridge_id_interface_ve_name'",
"create_ve",
"=",
"getattr",
"(",
"method_class",
",",
"method_name",
")",
"config",
"=",
"create_ve",
"(",
"*",
"*",
"ve_args",
")",
"if",
"get",
":",
"return",
"callback",
"(",
"config",
",",
"handler",
"=",
"'get_config'",
")",
"if",
"not",
"enable",
":",
"config",
".",
"find",
"(",
"'.//*ve'",
")",
".",
"set",
"(",
"'operation'",
",",
"'delete'",
")",
"return",
"callback",
"(",
"config",
")"
] |
Add Ve Interface .
Args:
ve_name: Ve name with which the Ve interface needs to be
created.
enable (bool): If vrf forwarding should be enabled
or disabled. Default: ``True``.
get (bool) : Get config instead of editing config. (True, False)
rbridge_id (str): rbridge-id for device.
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
ValueError: if `ve_name` is invalid.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.create_ve(
... ve_name='100',
... rbridge_id='1')
... output = dev.interface.create_ve(
... get=True,
... ve_name='100',
... rbridge_id='1')
... output = dev.interface.create_ve(
... get=True,
... rbridge_id='1')
... output = dev.interface.create_ve(
... enable=False,
... ve_name='101',
... rbridge_id='1')
... # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
KeyError
|
[
"Add",
"Ve",
"Interface",
".",
"Args",
":",
"ve_name",
":",
"Ve",
"name",
"with",
"which",
"the",
"Ve",
"interface",
"needs",
"to",
"be",
"created",
".",
"enable",
"(",
"bool",
")",
":",
"If",
"vrf",
"fowarding",
"should",
"be",
"enabled",
"or",
"disabled",
".",
"Default",
":",
"True",
".",
"get",
"(",
"bool",
")",
":",
"Get",
"config",
"instead",
"of",
"editing",
"config",
".",
"(",
"True",
"False",
")",
"rbridge_id",
"(",
"str",
")",
":",
"rbridge",
"-",
"id",
"for",
"device",
".",
"callback",
"(",
"function",
")",
":",
"A",
"function",
"executed",
"upon",
"completion",
"of",
"the",
"method",
".",
"The",
"only",
"parameter",
"passed",
"to",
"callback",
"will",
"be",
"the",
"ElementTree",
"config",
".",
"Returns",
":",
"Return",
"value",
"of",
"callback",
".",
"Raises",
":",
"ValueError",
":",
"if",
"ve_name",
"is",
"invalid",
".",
"Examples",
":",
">>>",
"import",
"pynos",
".",
"device",
">>>",
"switches",
"=",
"[",
"10",
".",
"24",
".",
"39",
".",
"211",
"10",
".",
"24",
".",
"39",
".",
"203",
"]",
">>>",
"auth",
"=",
"(",
"admin",
"password",
")",
">>>",
"for",
"switch",
"in",
"switches",
":",
"...",
"conn",
"=",
"(",
"switch",
"22",
")",
"...",
"with",
"pynos",
".",
"device",
".",
"Device",
"(",
"conn",
"=",
"conn",
"auth",
"=",
"auth",
")",
"as",
"dev",
":",
"...",
"output",
"=",
"dev",
".",
"interface",
".",
"create_ve",
"(",
"...",
"ve_name",
"=",
"100",
"...",
"rbridge_id",
"=",
"1",
")",
"...",
"output",
"=",
"dev",
".",
"interface",
".",
"create_ve",
"(",
"...",
"get",
"=",
"True",
"...",
"ve_name",
"=",
"100",
"...",
"rbridge_id",
"=",
"1",
")",
"...",
"output",
"=",
"dev",
".",
"interface",
".",
"create_ve",
"(",
"...",
"get",
"=",
"True",
"...",
"rbridge_id",
"=",
"1",
")",
"...",
"output",
"=",
"dev",
".",
"interface",
".",
"create_ve",
"(",
"...",
"enable",
"=",
"False",
"...",
"ve_name",
"=",
"101",
"...",
"rbridge_id",
"=",
"1",
")",
"...",
"#",
"doctest",
":",
"+",
"IGNORE_EXCEPTION_DETAIL",
"Traceback",
"(",
"most",
"recent",
"call",
"last",
")",
":",
"KeyError"
] |
python
|
train
|
ArchiveTeam/wpull
|
wpull/protocol/abstract/client.py
|
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/protocol/abstract/client.py#L61-L70
|
def recycle(self):
'''Clean up and return connections back to the pool.
Connections should be kept alive if supported.
'''
for connection in self._connections:
self._connection_pool.no_wait_release(connection)
self._connections.clear()
|
[
"def",
"recycle",
"(",
"self",
")",
":",
"for",
"connection",
"in",
"self",
".",
"_connections",
":",
"self",
".",
"_connection_pool",
".",
"no_wait_release",
"(",
"connection",
")",
"self",
".",
"_connections",
".",
"clear",
"(",
")"
] |
Clean up and return connections back to the pool.
Connections should be kept alive if supported.
|
[
"Clean",
"up",
"and",
"return",
"connections",
"back",
"to",
"the",
"pool",
"."
] |
python
|
train
|
Opentrons/opentrons
|
api/src/opentrons/protocol_api/contexts.py
|
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/protocol_api/contexts.py#L1716-L1720
|
def close(self):
""" Closes the lid"""
self._geometry.lid_status = self._module.close()
self._ctx.deck.recalculate_high_z()
return self._geometry.lid_status
|
[
"def",
"close",
"(",
"self",
")",
":",
"self",
".",
"_geometry",
".",
"lid_status",
"=",
"self",
".",
"_module",
".",
"close",
"(",
")",
"self",
".",
"_ctx",
".",
"deck",
".",
"recalculate_high_z",
"(",
")",
"return",
"self",
".",
"_geometry",
".",
"lid_status"
] |
Closes the lid
|
[
"Closes",
"the",
"lid"
] |
python
|
train
|
KrzyHonk/bpmn-python
|
bpmn_python/graph/classes/root_element/process_type.py
|
https://github.com/KrzyHonk/bpmn-python/blob/6e5e28e3d656dbf5bd3d85d78fe8e3f2fb462629/bpmn_python/graph/classes/root_element/process_type.py#L34-L44
|
def set_process_type(self, value):
"""
Setter for 'process_type' field.
:param value - a new value of 'process_type' field.
"""
if value is None or not isinstance(value, str):
raise TypeError("ProcessType must be set to a String")
elif value not in Process.__process_type_list:
raise ValueError("ProcessType must be one of specified values: 'None', 'Public', 'Private'")
else:
self.__process_type = value
|
[
"def",
"set_process_type",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"is",
"None",
"or",
"not",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"\"ProcessType must be set to a String\"",
")",
"elif",
"value",
"not",
"in",
"Process",
".",
"__process_type_list",
":",
"raise",
"ValueError",
"(",
"\"ProcessType must be one of specified values: 'None', 'Public', 'Private'\"",
")",
"else",
":",
"self",
".",
"__process_type",
"=",
"value"
] |
Setter for 'process_type' field.
:param value - a new value of 'process_type' field.
|
[
"Setter",
"for",
"process_type",
"field",
".",
":",
"param",
"value",
"-",
"a",
"new",
"value",
"of",
"process_type",
"field",
"."
] |
python
|
train
|
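The setter above guards a field with a TypeError for non-strings and a ValueError for values outside the allowed set. A standalone sketch of that guard pattern; the class and the allowed-values tuple here are illustrative, not the bpmn_python internals.

class Process:
    # Hypothetical allowed values, mirroring the 'None'/'Public'/'Private' set above.
    _allowed = ("None", "Public", "Private")

    def set_process_type(self, value):
        if value is None or not isinstance(value, str):
            raise TypeError("ProcessType must be set to a String")
        if value not in self._allowed:
            raise ValueError("ProcessType must be one of: " + ", ".join(self._allowed))
        self._process_type = value

p = Process()
p.set_process_type("Public")      # accepted
try:
    p.set_process_type("Hidden")  # rejected
except ValueError as exc:
    print(exc)
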
spyder-ide/spyder
|
spyder/app/mainwindow.py
|
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/app/mainwindow.py#L2180-L2204
|
def update_search_menu(self):
"""Update search menu"""
# Disabling all actions except the last one
# (which is Find in files) to begin with
for child in self.search_menu.actions()[:-1]:
child.setEnabled(False)
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
# !!! Below this line, widget is expected to be a QPlainTextEdit
# instance
console, not_readonly, readwrite_editor = textedit_properties
# Find actions only trigger an effect in the Editor
if not console:
for action in self.search_menu.actions():
try:
action.setEnabled(True)
except RuntimeError:
pass
# Disable the replace action for read-only files
self.search_menu_actions[3].setEnabled(readwrite_editor)
|
[
"def",
"update_search_menu",
"(",
"self",
")",
":",
"# Disabling all actions except the last one\r",
"# (which is Find in files) to begin with\r",
"for",
"child",
"in",
"self",
".",
"search_menu",
".",
"actions",
"(",
")",
"[",
":",
"-",
"1",
"]",
":",
"child",
".",
"setEnabled",
"(",
"False",
")",
"widget",
",",
"textedit_properties",
"=",
"self",
".",
"get_focus_widget_properties",
"(",
")",
"if",
"textedit_properties",
"is",
"None",
":",
"# widget is not an editor/console\r",
"return",
"# !!! Below this line, widget is expected to be a QPlainTextEdit\r",
"# instance\r",
"console",
",",
"not_readonly",
",",
"readwrite_editor",
"=",
"textedit_properties",
"# Find actions only trigger an effect in the Editor\r",
"if",
"not",
"console",
":",
"for",
"action",
"in",
"self",
".",
"search_menu",
".",
"actions",
"(",
")",
":",
"try",
":",
"action",
".",
"setEnabled",
"(",
"True",
")",
"except",
"RuntimeError",
":",
"pass",
"# Disable the replace action for read-only files\r",
"self",
".",
"search_menu_actions",
"[",
"3",
"]",
".",
"setEnabled",
"(",
"readwrite_editor",
")"
] |
Update search menu
|
[
"Update",
"search",
"menu"
] |
python
|
train
|
Jammy2211/PyAutoLens
|
autolens/lens/plotters/lens_fit_plotters.py
|
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/lens/plotters/lens_fit_plotters.py#L45-L158
|
def plot_fit_subplot_lens_plane_only(
fit, should_plot_mask=True, extract_array_from_mask=False, zoom_around_mask=False, positions=None,
should_plot_image_plane_pix=False,
units='arcsec', figsize=None, aspect='square',
cmap='jet', norm='linear', norm_min=None, norm_max=None, linthresh=0.05, linscale=0.01,
cb_ticksize=10, cb_fraction=0.047, cb_pad=0.01, cb_tick_values=None, cb_tick_labels=None,
titlesize=10, xlabelsize=10, ylabelsize=10, xyticksize=10,
mask_pointsize=10, position_pointsize=10, grid_pointsize=1,
output_path=None, output_filename='lens_fit', output_format='show'):
"""Plot the model datas_ of an analysis, using the *Fitter* class object.
The visualization and output type can be fully customized.
Parameters
-----------
fit : autolens.lens.fitting.Fitter
Class containing fit between the model datas_ and observed lens datas_ (including residual_map, chi_squared_map etc.)
output_path : str
The path where the datas_ is output if the output_type is a file format (e.g. png, fits)
output_filename : str
The name of the file that is output, if the output_type is a file format (e.g. png, fits)
output_format : str
How the datas_ is output. File formats (e.g. png, fits) output the datas_ to harddisk. 'show' displays the datas_ \
in the python interpreter window.
"""
rows, columns, figsize_tool = plotter_util.get_subplot_rows_columns_figsize(number_subplots=6)
mask = lens_plotter_util.get_mask(fit=fit, should_plot_mask=should_plot_mask)
if figsize is None:
figsize = figsize_tool
plt.figure(figsize=figsize)
plt.subplot(rows, columns, 1)
kpc_per_arcsec = fit.tracer.image_plane.kpc_per_arcsec
image_plane_pix_grid = lens_plotter_util.get_image_plane_pix_grid(should_plot_image_plane_pix, fit)
lens_plotter_util.plot_image(
fit=fit, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask,
positions=positions, image_plane_pix_grid=image_plane_pix_grid, as_subplot=True,
units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,
cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,
cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad,
cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,
titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize,
grid_pointsize=grid_pointsize, position_pointsize=position_pointsize, mask_pointsize=mask_pointsize,
output_path=output_path, output_filename='', output_format=output_format)
plt.subplot(rows, columns, 2)
lens_plotter_util.plot_noise_map(
fit=fit, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask,
positions=positions, as_subplot=True,
units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,
cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,
cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad,
cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,
titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize,
position_pointsize=position_pointsize, mask_pointsize=mask_pointsize,
output_path=output_path, output_filename='', output_format=output_format)
plt.subplot(rows, columns, 3)
lens_plotter_util.plot_signal_to_noise_map(
fit=fit, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask,
positions=positions, as_subplot=True,
units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,
cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,
cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad,
cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,
titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize,
position_pointsize=position_pointsize, mask_pointsize=mask_pointsize,
output_path=output_path, output_filename='', output_format=output_format)
plt.subplot(rows, columns, 4)
lens_plotter_util.plot_model_data(
fit=fit, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, as_subplot=True,
units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,
cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,
cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad,
cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,
titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize,
output_path=output_path, output_filename='', output_format=output_format)
plt.subplot(rows, columns, 5)
lens_plotter_util.plot_residual_map(
fit=fit, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, as_subplot=True,
units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,
cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,
cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad,
cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,
titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize,
output_path=output_path, output_filename='', output_format=output_format)
plt.subplot(rows, columns, 6)
lens_plotter_util.plot_chi_squared_map(
fit=fit, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, as_subplot=True,
units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,
cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,
cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad,
cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,
titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize,
output_path=output_path, output_filename='', output_format=output_format)
plotter_util.output_subplot_array(output_path=output_path, output_filename=output_filename,
output_format=output_format)
plt.close()
|
[
"def",
"plot_fit_subplot_lens_plane_only",
"(",
"fit",
",",
"should_plot_mask",
"=",
"True",
",",
"extract_array_from_mask",
"=",
"False",
",",
"zoom_around_mask",
"=",
"False",
",",
"positions",
"=",
"None",
",",
"should_plot_image_plane_pix",
"=",
"False",
",",
"units",
"=",
"'arcsec'",
",",
"figsize",
"=",
"None",
",",
"aspect",
"=",
"'square'",
",",
"cmap",
"=",
"'jet'",
",",
"norm",
"=",
"'linear'",
",",
"norm_min",
"=",
"None",
",",
"norm_max",
"=",
"None",
",",
"linthresh",
"=",
"0.05",
",",
"linscale",
"=",
"0.01",
",",
"cb_ticksize",
"=",
"10",
",",
"cb_fraction",
"=",
"0.047",
",",
"cb_pad",
"=",
"0.01",
",",
"cb_tick_values",
"=",
"None",
",",
"cb_tick_labels",
"=",
"None",
",",
"titlesize",
"=",
"10",
",",
"xlabelsize",
"=",
"10",
",",
"ylabelsize",
"=",
"10",
",",
"xyticksize",
"=",
"10",
",",
"mask_pointsize",
"=",
"10",
",",
"position_pointsize",
"=",
"10",
",",
"grid_pointsize",
"=",
"1",
",",
"output_path",
"=",
"None",
",",
"output_filename",
"=",
"'lens_fit'",
",",
"output_format",
"=",
"'show'",
")",
":",
"rows",
",",
"columns",
",",
"figsize_tool",
"=",
"plotter_util",
".",
"get_subplot_rows_columns_figsize",
"(",
"number_subplots",
"=",
"6",
")",
"mask",
"=",
"lens_plotter_util",
".",
"get_mask",
"(",
"fit",
"=",
"fit",
",",
"should_plot_mask",
"=",
"should_plot_mask",
")",
"if",
"figsize",
"is",
"None",
":",
"figsize",
"=",
"figsize_tool",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"figsize",
")",
"plt",
".",
"subplot",
"(",
"rows",
",",
"columns",
",",
"1",
")",
"kpc_per_arcsec",
"=",
"fit",
".",
"tracer",
".",
"image_plane",
".",
"kpc_per_arcsec",
"image_plane_pix_grid",
"=",
"lens_plotter_util",
".",
"get_image_plane_pix_grid",
"(",
"should_plot_image_plane_pix",
",",
"fit",
")",
"lens_plotter_util",
".",
"plot_image",
"(",
"fit",
"=",
"fit",
",",
"mask",
"=",
"mask",
",",
"extract_array_from_mask",
"=",
"extract_array_from_mask",
",",
"zoom_around_mask",
"=",
"zoom_around_mask",
",",
"positions",
"=",
"positions",
",",
"image_plane_pix_grid",
"=",
"image_plane_pix_grid",
",",
"as_subplot",
"=",
"True",
",",
"units",
"=",
"units",
",",
"kpc_per_arcsec",
"=",
"kpc_per_arcsec",
",",
"figsize",
"=",
"figsize",
",",
"aspect",
"=",
"aspect",
",",
"cmap",
"=",
"cmap",
",",
"norm",
"=",
"norm",
",",
"norm_min",
"=",
"norm_min",
",",
"norm_max",
"=",
"norm_max",
",",
"linthresh",
"=",
"linthresh",
",",
"linscale",
"=",
"linscale",
",",
"cb_ticksize",
"=",
"cb_ticksize",
",",
"cb_fraction",
"=",
"cb_fraction",
",",
"cb_pad",
"=",
"cb_pad",
",",
"cb_tick_values",
"=",
"cb_tick_values",
",",
"cb_tick_labels",
"=",
"cb_tick_labels",
",",
"titlesize",
"=",
"titlesize",
",",
"xlabelsize",
"=",
"xlabelsize",
",",
"ylabelsize",
"=",
"ylabelsize",
",",
"xyticksize",
"=",
"xyticksize",
",",
"grid_pointsize",
"=",
"grid_pointsize",
",",
"position_pointsize",
"=",
"position_pointsize",
",",
"mask_pointsize",
"=",
"mask_pointsize",
",",
"output_path",
"=",
"output_path",
",",
"output_filename",
"=",
"''",
",",
"output_format",
"=",
"output_format",
")",
"plt",
".",
"subplot",
"(",
"rows",
",",
"columns",
",",
"2",
")",
"lens_plotter_util",
".",
"plot_noise_map",
"(",
"fit",
"=",
"fit",
",",
"mask",
"=",
"mask",
",",
"extract_array_from_mask",
"=",
"extract_array_from_mask",
",",
"zoom_around_mask",
"=",
"zoom_around_mask",
",",
"positions",
"=",
"positions",
",",
"as_subplot",
"=",
"True",
",",
"units",
"=",
"units",
",",
"kpc_per_arcsec",
"=",
"kpc_per_arcsec",
",",
"figsize",
"=",
"figsize",
",",
"aspect",
"=",
"aspect",
",",
"cmap",
"=",
"cmap",
",",
"norm",
"=",
"norm",
",",
"norm_min",
"=",
"norm_min",
",",
"norm_max",
"=",
"norm_max",
",",
"linthresh",
"=",
"linthresh",
",",
"linscale",
"=",
"linscale",
",",
"cb_ticksize",
"=",
"cb_ticksize",
",",
"cb_fraction",
"=",
"cb_fraction",
",",
"cb_pad",
"=",
"cb_pad",
",",
"cb_tick_values",
"=",
"cb_tick_values",
",",
"cb_tick_labels",
"=",
"cb_tick_labels",
",",
"titlesize",
"=",
"titlesize",
",",
"xlabelsize",
"=",
"xlabelsize",
",",
"ylabelsize",
"=",
"ylabelsize",
",",
"xyticksize",
"=",
"xyticksize",
",",
"position_pointsize",
"=",
"position_pointsize",
",",
"mask_pointsize",
"=",
"mask_pointsize",
",",
"output_path",
"=",
"output_path",
",",
"output_filename",
"=",
"''",
",",
"output_format",
"=",
"output_format",
")",
"plt",
".",
"subplot",
"(",
"rows",
",",
"columns",
",",
"3",
")",
"lens_plotter_util",
".",
"plot_signal_to_noise_map",
"(",
"fit",
"=",
"fit",
",",
"mask",
"=",
"mask",
",",
"extract_array_from_mask",
"=",
"extract_array_from_mask",
",",
"zoom_around_mask",
"=",
"zoom_around_mask",
",",
"positions",
"=",
"positions",
",",
"as_subplot",
"=",
"True",
",",
"units",
"=",
"units",
",",
"kpc_per_arcsec",
"=",
"kpc_per_arcsec",
",",
"figsize",
"=",
"figsize",
",",
"aspect",
"=",
"aspect",
",",
"cmap",
"=",
"cmap",
",",
"norm",
"=",
"norm",
",",
"norm_min",
"=",
"norm_min",
",",
"norm_max",
"=",
"norm_max",
",",
"linthresh",
"=",
"linthresh",
",",
"linscale",
"=",
"linscale",
",",
"cb_ticksize",
"=",
"cb_ticksize",
",",
"cb_fraction",
"=",
"cb_fraction",
",",
"cb_pad",
"=",
"cb_pad",
",",
"cb_tick_values",
"=",
"cb_tick_values",
",",
"cb_tick_labels",
"=",
"cb_tick_labels",
",",
"titlesize",
"=",
"titlesize",
",",
"xlabelsize",
"=",
"xlabelsize",
",",
"ylabelsize",
"=",
"ylabelsize",
",",
"xyticksize",
"=",
"xyticksize",
",",
"position_pointsize",
"=",
"position_pointsize",
",",
"mask_pointsize",
"=",
"mask_pointsize",
",",
"output_path",
"=",
"output_path",
",",
"output_filename",
"=",
"''",
",",
"output_format",
"=",
"output_format",
")",
"plt",
".",
"subplot",
"(",
"rows",
",",
"columns",
",",
"4",
")",
"lens_plotter_util",
".",
"plot_model_data",
"(",
"fit",
"=",
"fit",
",",
"mask",
"=",
"mask",
",",
"extract_array_from_mask",
"=",
"extract_array_from_mask",
",",
"zoom_around_mask",
"=",
"zoom_around_mask",
",",
"as_subplot",
"=",
"True",
",",
"units",
"=",
"units",
",",
"kpc_per_arcsec",
"=",
"kpc_per_arcsec",
",",
"figsize",
"=",
"figsize",
",",
"aspect",
"=",
"aspect",
",",
"cmap",
"=",
"cmap",
",",
"norm",
"=",
"norm",
",",
"norm_min",
"=",
"norm_min",
",",
"norm_max",
"=",
"norm_max",
",",
"linthresh",
"=",
"linthresh",
",",
"linscale",
"=",
"linscale",
",",
"cb_ticksize",
"=",
"cb_ticksize",
",",
"cb_fraction",
"=",
"cb_fraction",
",",
"cb_pad",
"=",
"cb_pad",
",",
"cb_tick_values",
"=",
"cb_tick_values",
",",
"cb_tick_labels",
"=",
"cb_tick_labels",
",",
"titlesize",
"=",
"titlesize",
",",
"xlabelsize",
"=",
"xlabelsize",
",",
"ylabelsize",
"=",
"ylabelsize",
",",
"xyticksize",
"=",
"xyticksize",
",",
"output_path",
"=",
"output_path",
",",
"output_filename",
"=",
"''",
",",
"output_format",
"=",
"output_format",
")",
"plt",
".",
"subplot",
"(",
"rows",
",",
"columns",
",",
"5",
")",
"lens_plotter_util",
".",
"plot_residual_map",
"(",
"fit",
"=",
"fit",
",",
"mask",
"=",
"mask",
",",
"extract_array_from_mask",
"=",
"extract_array_from_mask",
",",
"zoom_around_mask",
"=",
"zoom_around_mask",
",",
"as_subplot",
"=",
"True",
",",
"units",
"=",
"units",
",",
"kpc_per_arcsec",
"=",
"kpc_per_arcsec",
",",
"figsize",
"=",
"figsize",
",",
"aspect",
"=",
"aspect",
",",
"cmap",
"=",
"cmap",
",",
"norm",
"=",
"norm",
",",
"norm_min",
"=",
"norm_min",
",",
"norm_max",
"=",
"norm_max",
",",
"linthresh",
"=",
"linthresh",
",",
"linscale",
"=",
"linscale",
",",
"cb_ticksize",
"=",
"cb_ticksize",
",",
"cb_fraction",
"=",
"cb_fraction",
",",
"cb_pad",
"=",
"cb_pad",
",",
"cb_tick_values",
"=",
"cb_tick_values",
",",
"cb_tick_labels",
"=",
"cb_tick_labels",
",",
"titlesize",
"=",
"titlesize",
",",
"xlabelsize",
"=",
"xlabelsize",
",",
"ylabelsize",
"=",
"ylabelsize",
",",
"xyticksize",
"=",
"xyticksize",
",",
"output_path",
"=",
"output_path",
",",
"output_filename",
"=",
"''",
",",
"output_format",
"=",
"output_format",
")",
"plt",
".",
"subplot",
"(",
"rows",
",",
"columns",
",",
"6",
")",
"lens_plotter_util",
".",
"plot_chi_squared_map",
"(",
"fit",
"=",
"fit",
",",
"mask",
"=",
"mask",
",",
"extract_array_from_mask",
"=",
"extract_array_from_mask",
",",
"zoom_around_mask",
"=",
"zoom_around_mask",
",",
"as_subplot",
"=",
"True",
",",
"units",
"=",
"units",
",",
"kpc_per_arcsec",
"=",
"kpc_per_arcsec",
",",
"figsize",
"=",
"figsize",
",",
"aspect",
"=",
"aspect",
",",
"cmap",
"=",
"cmap",
",",
"norm",
"=",
"norm",
",",
"norm_min",
"=",
"norm_min",
",",
"norm_max",
"=",
"norm_max",
",",
"linthresh",
"=",
"linthresh",
",",
"linscale",
"=",
"linscale",
",",
"cb_ticksize",
"=",
"cb_ticksize",
",",
"cb_fraction",
"=",
"cb_fraction",
",",
"cb_pad",
"=",
"cb_pad",
",",
"cb_tick_values",
"=",
"cb_tick_values",
",",
"cb_tick_labels",
"=",
"cb_tick_labels",
",",
"titlesize",
"=",
"titlesize",
",",
"xlabelsize",
"=",
"xlabelsize",
",",
"ylabelsize",
"=",
"ylabelsize",
",",
"xyticksize",
"=",
"xyticksize",
",",
"output_path",
"=",
"output_path",
",",
"output_filename",
"=",
"''",
",",
"output_format",
"=",
"output_format",
")",
"plotter_util",
".",
"output_subplot_array",
"(",
"output_path",
"=",
"output_path",
",",
"output_filename",
"=",
"output_filename",
",",
"output_format",
"=",
"output_format",
")",
"plt",
".",
"close",
"(",
")"
] |
Plot the model datas_ of an analysis, using the *Fitter* class object.
The visualization and output type can be fully customized.
Parameters
-----------
fit : autolens.lens.fitting.Fitter
Class containing fit between the model datas_ and observed lens datas_ (including residual_map, chi_squared_map etc.)
output_path : str
The path where the datas_ is output if the output_type is a file format (e.g. png, fits)
output_filename : str
The name of the file that is output, if the output_type is a file format (e.g. png, fits)
output_format : str
How the datas_ is output. File formats (e.g. png, fits) output the datas_ to harddisk. 'show' displays the datas_ \
in the python interpreter window.
|
[
"Plot",
"the",
"model",
"datas_",
"of",
"an",
"analysis",
"using",
"the",
"*",
"Fitter",
"*",
"class",
"object",
"."
] |
python
|
valid
|
apache/spark
|
python/pyspark/rdd.py
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L922-L955
|
def aggregate(self, zeroValue, seqOp, combOp):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given combine functions and a neutral "zero
value."
The function C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
The first function (seqOp) can return a different result type, U, than
the type of this RDD. Thus, we need one operation for merging a T into
an U and one operation for merging two U
>>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
>>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
>>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
(10, 4)
>>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
(0, 0)
"""
seqOp = fail_on_stopiteration(seqOp)
combOp = fail_on_stopiteration(combOp)
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(combOp, vals, zeroValue)
|
[
"def",
"aggregate",
"(",
"self",
",",
"zeroValue",
",",
"seqOp",
",",
"combOp",
")",
":",
"seqOp",
"=",
"fail_on_stopiteration",
"(",
"seqOp",
")",
"combOp",
"=",
"fail_on_stopiteration",
"(",
"combOp",
")",
"def",
"func",
"(",
"iterator",
")",
":",
"acc",
"=",
"zeroValue",
"for",
"obj",
"in",
"iterator",
":",
"acc",
"=",
"seqOp",
"(",
"acc",
",",
"obj",
")",
"yield",
"acc",
"# collecting result of mapPartitions here ensures that the copy of",
"# zeroValue provided to each partition is unique from the one provided",
"# to the final reduce call",
"vals",
"=",
"self",
".",
"mapPartitions",
"(",
"func",
")",
".",
"collect",
"(",
")",
"return",
"reduce",
"(",
"combOp",
",",
"vals",
",",
"zeroValue",
")"
] |
Aggregate the elements of each partition, and then the results for all
the partitions, using a given combine functions and a neutral "zero
value."
The function C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
The first function (seqOp) can return a different result type, U, than
the type of this RDD. Thus, we need one operation for merging a T into
an U and one operation for merging two U
>>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
>>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
>>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
(10, 4)
>>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
(0, 0)
|
[
"Aggregate",
"the",
"elements",
"of",
"each",
"partition",
"and",
"then",
"the",
"results",
"for",
"all",
"the",
"partitions",
"using",
"a",
"given",
"combine",
"functions",
"and",
"a",
"neutral",
"zero",
"value",
"."
] |
python
|
train
|
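The aggregate record above already carries doctests; the zeroValue / seqOp / combOp contract can also be played out in plain Python over hand-made "partitions", reproducing the (10, 4) result without a SparkContext:

from functools import reduce

seqOp = lambda acc, x: (acc[0] + x, acc[1] + 1)    # fold one element into (sum, count)
combOp = lambda a, b: (a[0] + b[0], a[1] + b[1])   # merge two partial (sum, count) pairs

partitions = [[1, 2], [3, 4]]                      # stand-in for the RDD's partitions
partials = [reduce(seqOp, part, (0, 0)) for part in partitions]
print(reduce(combOp, partials, (0, 0)))            # (10, 4), as in the doctest
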
danielhrisca/asammdf
|
asammdf/blocks/mdf_v4.py
|
https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/mdf_v4.py#L4915-L5162
|
def get_can_signal(
self, name, database=None, db=None, ignore_invalidation_bits=False
):
""" get CAN message signal. You can specify an external CAN database (
*database* argument) or canmatrix database object that has already been
loaded from a file (*db* argument).
The signal name can be specified in the following ways
* ``CAN<ID>.<MESSAGE_NAME>.<SIGNAL_NAME>`` - the `ID` value starts from 1
and must match the ID found in the measurement (the source CAN bus ID)
Example: CAN1.Wheels.FL_WheelSpeed
* ``CAN<ID>.CAN_DataFrame_<MESSAGE_ID>.<SIGNAL_NAME>`` - the `ID` value
starts from 1 and the `MESSAGE_ID` is the decimal message ID as found
in the database. Example: CAN1.CAN_DataFrame_218.FL_WheelSpeed
* ``<MESSAGE_NAME>.SIGNAL_NAME`` - in this case the first occurrence of
the message name and signal are returned (the same message could be
found on multiple CAN buses; for example on CAN1 and CAN3)
Example: Wheels.FL_WheelSpeed
* ``CAN_DataFrame_<MESSAGE_ID>.<SIGNAL_NAME>`` - in this case the first
occurrence of the message name and signal are returned (the same
message could be found on multiple CAN buses; for example on CAN1 and
CAN3). Example: CAN_DataFrame_218.FL_WheelSpeed
* ``<SIGNAL_NAME>`` - in this case the first occurrence of the signal
name is returned (the same signal name could be found in multiple
messages and on multiple CAN buses). Example: FL_WheelSpeed
Parameters
----------
name : str
signal name
database : str
path of external CAN database file (.dbc or .arxml); default *None*
db : canmatrix.database
canmatrix CAN database object; default *None*
ignore_invalidation_bits : bool
option to ignore invalidation bits
Returns
-------
sig : Signal
Signal object with the physical values
"""
if database is None and db is None:
return self.get(name)
if db is None:
if not database.lower().endswith(("dbc", "arxml")):
message = f'Expected .dbc or .arxml file as CAN channel attachment but got "{database}"'
logger.exception(message)
raise MdfException(message)
else:
import_type = "dbc" if database.lower().endswith("dbc") else "arxml"
with open(database, "rb") as db:
db_string = db.read()
md5_sum = md5(db_string).digest()
if md5_sum in self._external_dbc_cache:
db = self._external_dbc_cache[md5_sum]
else:
try:
db_string = db_string.decode("utf-8")
db = self._external_dbc_cache[md5_sum] = loads(
db_string, importType=import_type, key="db"
)["db"]
except UnicodeDecodeError:
try:
from cchardet import detect
encoding = detect(db_string)["encoding"]
db_string = db_string.decode(encoding)
db = self._dbc_cache[md5_sum] = loads(
db_string,
importType=import_type,
key="db",
encoding=encoding,
)["db"]
except ImportError:
message = (
"Unicode exception occured while processing the database "
f'attachment "{database}" and "cChardet" package is '
'not installed. Mdf version 4 expects "utf-8" '
"strings and this package may detect if a different"
" encoding was used"
)
logger.warning(message)
name_ = name.split(".")
if len(name_) == 3:
can_id_str, message_id_str, signal = name_
can_id = v4c.CAN_ID_PATTERN.search(can_id_str)
if can_id is None:
raise MdfException(
f'CAN id "{can_id_str}" of signal name "{name}" is not recognised by this library'
)
else:
can_id = f'CAN{can_id.group("id")}'
message_id = v4c.CAN_DATA_FRAME_PATTERN.search(message_id_str)
if message_id is None:
message_id = message_id_str
else:
message_id = int(message_id)
elif len(name_) == 2:
message_id_str, signal = name_
can_id = None
message_id = v4c.CAN_DATA_FRAME_PATTERN.search(message_id_str)
if message_id is None:
message_id = message_id_str
else:
message_id = int(message_id.group('id'))
else:
can_id = message_id = None
signal = name
if isinstance(message_id, str):
message = db.frame_by_name(message_id)
else:
message = db.frame_by_id(message_id)
for sig in message.signals:
if sig.name == signal:
signal = sig
break
else:
raise MdfException(
f'Signal "{signal}" not found in message "{message.name}" of "{database}"'
)
if can_id is None:
for _can_id, messages in self.can_logging_db.items():
message_id = message.id
if message_id > 0x80000000:
message_id -= 0x80000000
if message_id in messages:
index = messages[message.id]
break
else:
raise MdfException(
f'Message "{message.name}" (ID={hex(message.id)}) not found in the measurement'
)
else:
if can_id in self.can_logging_db:
if message.is_j1939:
test_ids = [
canmatrix.ArbitrationId(id_, extended=True).pgn
for id_ in self.can_logging_db[can_id]
]
id_ = message.arbitration_id.pgn
else:
id_ = message.arbitration_id.id
test_ids = self.can_logging_db[can_id]
if id_ in test_ids:
if message.is_j1939:
for id__, idx in self.can_logging_db[can_id].items():
if canmatrix.ArbitrationId(id__, extended=True).pgn == id_:
index = idx
break
else:
index = self.can_logging_db[can_id][message.arbitration_id.id]
else:
raise MdfException(
f'Message "{message.name}" (ID={hex(message.arbitration_id.id)}) not found in the measurement'
)
else:
raise MdfException(
f'No logging from "{can_id}" was found in the measurement'
)
can_ids = self.get(
"CAN_DataFrame.ID",
group=index,
ignore_invalidation_bits=ignore_invalidation_bits,
)
payload = self.get(
"CAN_DataFrame.DataBytes",
group=index,
samples_only=True,
ignore_invalidation_bits=ignore_invalidation_bits,
)[0]
if message.is_j1939:
ps = (can_ids.samples >> 8) & 0xFF
pf = (can_ids.samples >> 16) & 0xFF
_pgn = pf << 8
_pgn = where(
pf >= 240,
_pgn + ps,
_pgn,
)
idx = nonzero(_pgn == message.arbitration_id.pgn)[0]
else:
idx = nonzero(can_ids.samples == message.arbitration_id.id)[0]
vals = payload[idx]
t = can_ids.timestamps[idx].copy()
if can_ids.invalidation_bits is not None:
invalidation_bits = can_ids.invalidation_bits[idx]
else:
invalidation_bits = None
vals = extract_can_signal(signal, vals)
comment = signal.comment or ""
if ignore_invalidation_bits:
return Signal(
samples=vals,
timestamps=t,
name=name,
unit=signal.unit or "",
comment=comment,
invalidation_bits=invalidation_bits,
)
else:
if invalidation_bits is not None:
vals = vals[nonzero(~invalidation_bits)[0]]
t = t[nonzero(~invalidation_bits)[0]]
return Signal(
samples=vals,
timestamps=t,
name=name,
unit=signal.unit or "",
comment=comment,
)
|
[
"def",
"get_can_signal",
"(",
"self",
",",
"name",
",",
"database",
"=",
"None",
",",
"db",
"=",
"None",
",",
"ignore_invalidation_bits",
"=",
"False",
")",
":",
"if",
"database",
"is",
"None",
"and",
"db",
"is",
"None",
":",
"return",
"self",
".",
"get",
"(",
"name",
")",
"if",
"db",
"is",
"None",
":",
"if",
"not",
"database",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"(",
"\"dbc\"",
",",
"\"arxml\"",
")",
")",
":",
"message",
"=",
"f'Expected .dbc or .arxml file as CAN channel attachment but got \"{database}\"'",
"logger",
".",
"exception",
"(",
"message",
")",
"raise",
"MdfException",
"(",
"message",
")",
"else",
":",
"import_type",
"=",
"\"dbc\"",
"if",
"database",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"\"dbc\"",
")",
"else",
"\"arxml\"",
"with",
"open",
"(",
"database",
",",
"\"rb\"",
")",
"as",
"db",
":",
"db_string",
"=",
"db",
".",
"read",
"(",
")",
"md5_sum",
"=",
"md5",
"(",
"db_string",
")",
".",
"digest",
"(",
")",
"if",
"md5_sum",
"in",
"self",
".",
"_external_dbc_cache",
":",
"db",
"=",
"self",
".",
"_external_dbc_cache",
"[",
"md5_sum",
"]",
"else",
":",
"try",
":",
"db_string",
"=",
"db_string",
".",
"decode",
"(",
"\"utf-8\"",
")",
"db",
"=",
"self",
".",
"_external_dbc_cache",
"[",
"md5_sum",
"]",
"=",
"loads",
"(",
"db_string",
",",
"importType",
"=",
"import_type",
",",
"key",
"=",
"\"db\"",
")",
"[",
"\"db\"",
"]",
"except",
"UnicodeDecodeError",
":",
"try",
":",
"from",
"cchardet",
"import",
"detect",
"encoding",
"=",
"detect",
"(",
"db_string",
")",
"[",
"\"encoding\"",
"]",
"db_string",
"=",
"db_string",
".",
"decode",
"(",
"encoding",
")",
"db",
"=",
"self",
".",
"_dbc_cache",
"[",
"md5_sum",
"]",
"=",
"loads",
"(",
"db_string",
",",
"importType",
"=",
"import_type",
",",
"key",
"=",
"\"db\"",
",",
"encoding",
"=",
"encoding",
",",
")",
"[",
"\"db\"",
"]",
"except",
"ImportError",
":",
"message",
"=",
"(",
"\"Unicode exception occured while processing the database \"",
"f'attachment \"{database}\" and \"cChardet\" package is '",
"'not installed. Mdf version 4 expects \"utf-8\" '",
"\"strings and this package may detect if a different\"",
"\" encoding was used\"",
")",
"logger",
".",
"warning",
"(",
"message",
")",
"name_",
"=",
"name",
".",
"split",
"(",
"\".\"",
")",
"if",
"len",
"(",
"name_",
")",
"==",
"3",
":",
"can_id_str",
",",
"message_id_str",
",",
"signal",
"=",
"name_",
"can_id",
"=",
"v4c",
".",
"CAN_ID_PATTERN",
".",
"search",
"(",
"can_id_str",
")",
"if",
"can_id",
"is",
"None",
":",
"raise",
"MdfException",
"(",
"f'CAN id \"{can_id_str}\" of signal name \"{name}\" is not recognised by this library'",
")",
"else",
":",
"can_id",
"=",
"f'CAN{can_id.group(\"id\")}'",
"message_id",
"=",
"v4c",
".",
"CAN_DATA_FRAME_PATTERN",
".",
"search",
"(",
"message_id_str",
")",
"if",
"message_id",
"is",
"None",
":",
"message_id",
"=",
"message_id_str",
"else",
":",
"message_id",
"=",
"int",
"(",
"message_id",
")",
"elif",
"len",
"(",
"name_",
")",
"==",
"2",
":",
"message_id_str",
",",
"signal",
"=",
"name_",
"can_id",
"=",
"None",
"message_id",
"=",
"v4c",
".",
"CAN_DATA_FRAME_PATTERN",
".",
"search",
"(",
"message_id_str",
")",
"if",
"message_id",
"is",
"None",
":",
"message_id",
"=",
"message_id_str",
"else",
":",
"message_id",
"=",
"int",
"(",
"message_id",
".",
"group",
"(",
"'id'",
")",
")",
"else",
":",
"can_id",
"=",
"message_id",
"=",
"None",
"signal",
"=",
"name",
"if",
"isinstance",
"(",
"message_id",
",",
"str",
")",
":",
"message",
"=",
"db",
".",
"frame_by_name",
"(",
"message_id",
")",
"else",
":",
"message",
"=",
"db",
".",
"frame_by_id",
"(",
"message_id",
")",
"for",
"sig",
"in",
"message",
".",
"signals",
":",
"if",
"sig",
".",
"name",
"==",
"signal",
":",
"signal",
"=",
"sig",
"break",
"else",
":",
"raise",
"MdfException",
"(",
"f'Signal \"{signal}\" not found in message \"{message.name}\" of \"{database}\"'",
")",
"if",
"can_id",
"is",
"None",
":",
"for",
"_can_id",
",",
"messages",
"in",
"self",
".",
"can_logging_db",
".",
"items",
"(",
")",
":",
"message_id",
"=",
"message",
".",
"id",
"if",
"message_id",
">",
"0x80000000",
":",
"message_id",
"-=",
"0x80000000",
"if",
"message_id",
"in",
"messages",
":",
"index",
"=",
"messages",
"[",
"message",
".",
"id",
"]",
"break",
"else",
":",
"raise",
"MdfException",
"(",
"f'Message \"{message.name}\" (ID={hex(message.id)}) not found in the measurement'",
")",
"else",
":",
"if",
"can_id",
"in",
"self",
".",
"can_logging_db",
":",
"if",
"message",
".",
"is_j1939",
":",
"test_ids",
"=",
"[",
"canmatrix",
".",
"ArbitrationId",
"(",
"id_",
",",
"extended",
"=",
"True",
")",
".",
"pgn",
"for",
"id_",
"in",
"self",
".",
"can_logging_db",
"[",
"can_id",
"]",
"]",
"id_",
"=",
"message",
".",
"arbitration_id",
".",
"pgn",
"else",
":",
"id_",
"=",
"message",
".",
"arbitration_id",
".",
"id",
"test_ids",
"=",
"self",
".",
"can_logging_db",
"[",
"can_id",
"]",
"if",
"id_",
"in",
"test_ids",
":",
"if",
"message",
".",
"is_j1939",
":",
"for",
"id__",
",",
"idx",
"in",
"self",
".",
"can_logging_db",
"[",
"can_id",
"]",
".",
"items",
"(",
")",
":",
"if",
"canmatrix",
".",
"ArbitrationId",
"(",
"id__",
",",
"extended",
"=",
"True",
")",
".",
"pgn",
"==",
"id_",
":",
"index",
"=",
"idx",
"break",
"else",
":",
"index",
"=",
"self",
".",
"can_logging_db",
"[",
"can_id",
"]",
"[",
"message",
".",
"arbitration_id",
".",
"id",
"]",
"else",
":",
"raise",
"MdfException",
"(",
"f'Message \"{message.name}\" (ID={hex(message.arbitration_id.id)}) not found in the measurement'",
")",
"else",
":",
"raise",
"MdfException",
"(",
"f'No logging from \"{can_id}\" was found in the measurement'",
")",
"can_ids",
"=",
"self",
".",
"get",
"(",
"\"CAN_DataFrame.ID\"",
",",
"group",
"=",
"index",
",",
"ignore_invalidation_bits",
"=",
"ignore_invalidation_bits",
",",
")",
"payload",
"=",
"self",
".",
"get",
"(",
"\"CAN_DataFrame.DataBytes\"",
",",
"group",
"=",
"index",
",",
"samples_only",
"=",
"True",
",",
"ignore_invalidation_bits",
"=",
"ignore_invalidation_bits",
",",
")",
"[",
"0",
"]",
"if",
"message",
".",
"is_j1939",
":",
"ps",
"=",
"(",
"can_ids",
".",
"samples",
">>",
"8",
")",
"&",
"0xFF",
"pf",
"=",
"(",
"can_ids",
".",
"samples",
">>",
"16",
")",
"&",
"0xFF",
"_pgn",
"=",
"pf",
"<<",
"8",
"_pgn",
"=",
"where",
"(",
"pf",
">=",
"240",
",",
"_pgn",
"+",
"ps",
",",
"_pgn",
",",
")",
"idx",
"=",
"nonzero",
"(",
"_pgn",
"==",
"message",
".",
"arbitration_id",
".",
"pgn",
")",
"[",
"0",
"]",
"else",
":",
"idx",
"=",
"nonzero",
"(",
"can_ids",
".",
"samples",
"==",
"message",
".",
"arbitration_id",
".",
"id",
")",
"[",
"0",
"]",
"vals",
"=",
"payload",
"[",
"idx",
"]",
"t",
"=",
"can_ids",
".",
"timestamps",
"[",
"idx",
"]",
".",
"copy",
"(",
")",
"if",
"can_ids",
".",
"invalidation_bits",
"is",
"not",
"None",
":",
"invalidation_bits",
"=",
"can_ids",
".",
"invalidation_bits",
"[",
"idx",
"]",
"else",
":",
"invalidation_bits",
"=",
"None",
"vals",
"=",
"extract_can_signal",
"(",
"signal",
",",
"vals",
")",
"comment",
"=",
"signal",
".",
"comment",
"or",
"\"\"",
"if",
"ignore_invalidation_bits",
":",
"return",
"Signal",
"(",
"samples",
"=",
"vals",
",",
"timestamps",
"=",
"t",
",",
"name",
"=",
"name",
",",
"unit",
"=",
"signal",
".",
"unit",
"or",
"\"\"",
",",
"comment",
"=",
"comment",
",",
"invalidation_bits",
"=",
"invalidation_bits",
",",
")",
"else",
":",
"if",
"invalidation_bits",
"is",
"not",
"None",
":",
"vals",
"=",
"vals",
"[",
"nonzero",
"(",
"~",
"invalidation_bits",
")",
"[",
"0",
"]",
"]",
"t",
"=",
"t",
"[",
"nonzero",
"(",
"~",
"invalidation_bits",
")",
"[",
"0",
"]",
"]",
"return",
"Signal",
"(",
"samples",
"=",
"vals",
",",
"timestamps",
"=",
"t",
",",
"name",
"=",
"name",
",",
"unit",
"=",
"signal",
".",
"unit",
"or",
"\"\"",
",",
"comment",
"=",
"comment",
",",
")"
] |
get CAN message signal. You can specify an external CAN database (
*database* argument) or canmatrix database object that has already been
loaded from a file (*db* argument).
The signal name can be specified in the following ways
* ``CAN<ID>.<MESSAGE_NAME>.<SIGNAL_NAME>`` - the `ID` value starts from 1
and must match the ID found in the measurement (the source CAN bus ID)
Example: CAN1.Wheels.FL_WheelSpeed
* ``CAN<ID>.CAN_DataFrame_<MESSAGE_ID>.<SIGNAL_NAME>`` - the `ID` value
starts from 1 and the `MESSAGE_ID` is the decimal message ID as found
in the database. Example: CAN1.CAN_DataFrame_218.FL_WheelSpeed
* ``<MESSAGE_NAME>.SIGNAL_NAME`` - in this case the first occurrence of
the message name and signal are returned (the same message could be
found on multiple CAN buses; for example on CAN1 and CAN3)
Example: Wheels.FL_WheelSpeed
* ``CAN_DataFrame_<MESSAGE_ID>.<SIGNAL_NAME>`` - in this case the first
occurrence of the message name and signal are returned (the same
message could be found on multiple CAN buses; for example on CAN1 and
CAN3). Example: CAN_DataFrame_218.FL_WheelSpeed
* ``<SIGNAL_NAME>`` - in this case the first occurrence of the signal
name is returned (the same signal name could be found in multiple
messages and on multiple CAN buses). Example: FL_WheelSpeed
Parameters
----------
name : str
signal name
database : str
path of external CAN database file (.dbc or .arxml); default *None*
db : canmatrix.database
canmatrix CAN database object; default *None*
ignore_invalidation_bits : bool
option to ignore invalidation bits
Returns
-------
sig : Signal
Signal object with the physical values
|
[
"get",
"CAN",
"message",
"signal",
".",
"You",
"can",
"specify",
"an",
"external",
"CAN",
"database",
"(",
"*",
"database",
"*",
"argument",
")",
"or",
"canmatrix",
"databse",
"object",
"that",
"has",
"already",
"been",
"loaded",
"from",
"a",
"file",
"(",
"*",
"db",
"*",
"argument",
")",
"."
] |
python
|
train
|
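One compact piece of reasoning in get_can_signal above is the J1939 branch that derives a PGN from the raw identifier (a PF byte of 240 or more folds the PS byte into the PGN). A small numeric sketch of that arithmetic, with made-up CAN IDs:

def pgn_from_can_id(can_id):
    ps = (can_id >> 8) & 0xFF     # PDU-specific byte
    pf = (can_id >> 16) & 0xFF    # PDU-format byte
    pgn = pf << 8
    # PDU2 frames (PF >= 240) include the PS byte in the PGN; PDU1 frames do not.
    return pgn + ps if pf >= 240 else pgn

print(hex(pgn_from_can_id(0x18FEF100)))   # 0xfef1 (PDU2)
print(hex(pgn_from_can_id(0x18EF0102)))   # 0xef00 (PDU1)
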
bodylabs/lace
|
lace/meshviewer.py
|
https://github.com/bodylabs/lace/blob/b68f4a60a4cac66c0607ffbae38ef9d07d37f459/lace/meshviewer.py#L123-L140
|
def MeshViewers(
shape=(1, 1), titlebar="Mesh Viewers", keepalive=False,
window_width=1280, window_height=960
):
"""Allows subplot-style inspection of primitives in multiple subwindows.
Args:
shape: a tuple indicating the number of vertical and horizontal windows requested
Returns: a list of lists of MeshViewer objects: one per window requested.
"""
if not test_for_opengl():
return Dummy()
mv = MeshViewerLocal(
shape=shape, titlebar=titlebar, uid=None, keepalive=keepalive,
window_width=window_width, window_height=window_height
)
return mv.get_subwindows()
|
[
"def",
"MeshViewers",
"(",
"shape",
"=",
"(",
"1",
",",
"1",
")",
",",
"titlebar",
"=",
"\"Mesh Viewers\"",
",",
"keepalive",
"=",
"False",
",",
"window_width",
"=",
"1280",
",",
"window_height",
"=",
"960",
")",
":",
"if",
"not",
"test_for_opengl",
"(",
")",
":",
"return",
"Dummy",
"(",
")",
"mv",
"=",
"MeshViewerLocal",
"(",
"shape",
"=",
"shape",
",",
"titlebar",
"=",
"titlebar",
",",
"uid",
"=",
"None",
",",
"keepalive",
"=",
"keepalive",
",",
"window_width",
"=",
"window_width",
",",
"window_height",
"=",
"window_height",
")",
"return",
"mv",
".",
"get_subwindows",
"(",
")"
] |
Allows subplot-style inspection of primitives in multiple subwindows.
Args:
shape: a tuple indicating the number of vertical and horizontal windows requested
Returns: a list of lists of MeshViewer objects: one per window requested.
|
[
"Allows",
"subplot",
"-",
"style",
"inspection",
"of",
"primitives",
"in",
"multiple",
"subwindows",
"."
] |
python
|
train
|
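A hedged usage sketch for MeshViewers above: it needs lace and a working OpenGL display (without one the function returns a Dummy object instead), and per the docstring the return value is a list of lists of viewers, one per requested subwindow.

from lace.meshviewer import MeshViewers

views = MeshViewers(shape=(2, 2), titlebar="inspection grid")
top_left = views[0][0]   # a list of lists of MeshViewer objects, one per subwindow
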
blockstack/blockstack-files
|
blockstack_file/blockstack_file.py
|
https://github.com/blockstack/blockstack-files/blob/8d88cc48bdf8ed57f17d4bba860e972bde321921/blockstack_file/blockstack_file.py#L497-L560
|
def file_put( blockchain_id, hostname, recipient_blockchain_ids, data_name, input_path, passphrase=None, config_path=CONFIG_PATH, wallet_keys=None ):
"""
Send a file to the given recipient, encrypted and signed with the
given blockchain ID.
Allow each recipient to receive the data on each of their hosts.
Return {'status': True} on success, and upload to cloud storage
Return {'error': ...} on error
"""
fd, output_path = tempfile.mkstemp( prefix="blockstack-file-" )
os.fchmod( fd, 0600 )
os.close(fd)
config_dir = os.path.dirname(config_path)
client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
all_recipients = []
# make available to all other hosts for this blockchain_id
my_hosts = file_list_hosts( blockchain_id, wallet_keys=wallet_keys, config_path=config_path )
if 'error' in my_hosts:
log.error("Failed to list hosts: %s" % my_hosts['error'])
os.unlink(output_path)
return {'error': 'Failed to look up sender keys'}
if hostname in my_hosts:
my_hosts.remove(hostname)
all_recipients += [(blockchain_id, host) for host in my_hosts['hosts']]
# make available to all hosts for each recipient
for recipient_blockchain_id in recipient_blockchain_ids:
their_hosts = file_list_hosts( recipient_blockchain_id, wallet_keys=wallet_keys, config_path=config_path )
if 'error' in their_hosts:
log.error("Failed to list hosts for %s: %s" % (recipient_blockchain_id, their_hosts['error']))
os.unlink(output_path)
return {'error': 'Failed to look up recipient keys'}
all_recipients += [(recipient_blockchain_id, host) for host in their_hosts['hosts']]
# encrypt
res = file_encrypt( blockchain_id, hostname, all_recipients, input_path, output_path, passphrase=passphrase, config_path=config_path, wallet_keys=wallet_keys )
if 'error' in res:
log.error("Failed to encrypt: %s" % res['error'])
os.unlink(output_path)
return {'error': 'Failed to encrypt'}
# load up
with open(output_path, "r") as f:
ciphertext = f.read()
message = {'ciphertext': ciphertext, 'sender_key_id': res['sender_key_id']}
# put to mutable storage
fq_data_name = file_fq_data_name( data_name )
proxy = blockstack_client.get_default_proxy( config_path=client_config_path )
res = blockstack_client.data_put( blockstack_client.make_mutable_data_url( blockchain_id, fq_data_name, None ), message, wallet_keys=wallet_keys, proxy=proxy )
if 'error' in res:
log.error("Failed to put data: %s" % res['error'])
os.unlink(output_path)
return {'error': 'Failed to replicate data'}
os.unlink(output_path)
return {'status': True}
|
[
"def",
"file_put",
"(",
"blockchain_id",
",",
"hostname",
",",
"recipient_blockchain_ids",
",",
"data_name",
",",
"input_path",
",",
"passphrase",
"=",
"None",
",",
"config_path",
"=",
"CONFIG_PATH",
",",
"wallet_keys",
"=",
"None",
")",
":",
"fd",
",",
"output_path",
"=",
"tempfile",
".",
"mkstemp",
"(",
"prefix",
"=",
"\"blockstack-file-\"",
")",
"os",
".",
"fchmod",
"(",
"fd",
",",
"0600",
")",
"os",
".",
"close",
"(",
"fd",
")",
"config_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"config_path",
")",
"client_config_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"config_dir",
",",
"blockstack_client",
".",
"CONFIG_FILENAME",
")",
"all_recipients",
"=",
"[",
"]",
"# make available to all other hosts for this blockchain_id",
"my_hosts",
"=",
"file_list_hosts",
"(",
"blockchain_id",
",",
"wallet_keys",
"=",
"wallet_keys",
",",
"config_path",
"=",
"config_path",
")",
"if",
"'error'",
"in",
"my_hosts",
":",
"log",
".",
"error",
"(",
"\"Failed to list hosts: %s\"",
"%",
"my_hosts",
"[",
"'error'",
"]",
")",
"os",
".",
"unlink",
"(",
"output_path",
")",
"return",
"{",
"'error'",
":",
"'Failed to look up sender keys'",
"}",
"if",
"hostname",
"in",
"my_hosts",
":",
"my_hosts",
".",
"remove",
"(",
"hostname",
")",
"all_recipients",
"+=",
"[",
"(",
"blockchain_id",
",",
"host",
")",
"for",
"host",
"in",
"my_hosts",
"[",
"'hosts'",
"]",
"]",
"# make available to all hosts for each recipient ",
"for",
"recipient_blockchain_id",
"in",
"recipient_blockchain_ids",
":",
"their_hosts",
"=",
"file_list_hosts",
"(",
"recipient_blockchain_id",
",",
"wallet_keys",
"=",
"wallet_keys",
",",
"config_path",
"=",
"config_path",
")",
"if",
"'error'",
"in",
"their_hosts",
":",
"log",
".",
"error",
"(",
"\"Failed to list hosts for %s: %s\"",
"%",
"(",
"recipient_blockchain_id",
",",
"their_hosts",
"[",
"'error'",
"]",
")",
")",
"os",
".",
"unlink",
"(",
"output_path",
")",
"return",
"{",
"'error'",
":",
"'Failed to look up recipient keys'",
"}",
"all_recipients",
"+=",
"[",
"(",
"recipient_blockchain_id",
",",
"host",
")",
"for",
"host",
"in",
"their_hosts",
"[",
"'hosts'",
"]",
"]",
"# encrypt",
"res",
"=",
"file_encrypt",
"(",
"blockchain_id",
",",
"hostname",
",",
"all_recipients",
",",
"input_path",
",",
"output_path",
",",
"passphrase",
"=",
"passphrase",
",",
"config_path",
"=",
"config_path",
",",
"wallet_keys",
"=",
"wallet_keys",
")",
"if",
"'error'",
"in",
"res",
":",
"log",
".",
"error",
"(",
"\"Failed to encrypt: %s\"",
"%",
"res",
"[",
"'error'",
"]",
")",
"os",
".",
"unlink",
"(",
"output_path",
")",
"return",
"{",
"'error'",
":",
"'Failed to encrypt'",
"}",
"# load up ",
"with",
"open",
"(",
"output_path",
",",
"\"r\"",
")",
"as",
"f",
":",
"ciphertext",
"=",
"f",
".",
"read",
"(",
")",
"message",
"=",
"{",
"'ciphertext'",
":",
"ciphertext",
",",
"'sender_key_id'",
":",
"res",
"[",
"'sender_key_id'",
"]",
"}",
"# put to mutable storage ",
"fq_data_name",
"=",
"file_fq_data_name",
"(",
"data_name",
")",
"proxy",
"=",
"blockstack_client",
".",
"get_default_proxy",
"(",
"config_path",
"=",
"client_config_path",
")",
"res",
"=",
"blockstack_client",
".",
"data_put",
"(",
"blockstack_client",
".",
"make_mutable_data_url",
"(",
"blockchain_id",
",",
"fq_data_name",
",",
"None",
")",
",",
"message",
",",
"wallet_keys",
"=",
"wallet_keys",
",",
"proxy",
"=",
"proxy",
")",
"if",
"'error'",
"in",
"res",
":",
"log",
".",
"error",
"(",
"\"Failed to put data: %s\"",
"%",
"res",
"[",
"'error'",
"]",
")",
"os",
".",
"unlink",
"(",
"output_path",
")",
"return",
"{",
"'error'",
":",
"'Failed to replicate data'",
"}",
"os",
".",
"unlink",
"(",
"output_path",
")",
"return",
"{",
"'status'",
":",
"True",
"}"
] |
Send a file to the given recipient, encrypted and signed with the
given blockchain ID.
Allow each recipient to receive the data on each of their hosts.
Return {'status': True} on success, and upload to cloud storage
Return {'error': ...} on error
|
[
"Send",
"a",
"file",
"to",
"the",
"given",
"recipient",
"encrypted",
"and",
"signed",
"with",
"the",
"given",
"blockchain",
"ID",
".",
"Allow",
"each",
"recipient",
"to",
"receive",
"the",
"data",
"on",
"each",
"of",
"their",
"hosts",
".",
"Return",
"{",
"status",
":",
"True",
"}",
"on",
"success",
"and",
"upload",
"to",
"cloud",
"storage",
"Return",
"{",
"error",
":",
"...",
"}",
"on",
"error"
] |
python
|
train
|
rigetti/quantumflow
|
quantumflow/states.py
|
https://github.com/rigetti/quantumflow/blob/13a66cabbe8aabf6e023cc675f4a4ebe6ccda8fb/quantumflow/states.py#L131-L147
|
def expectation(self, diag_hermitian: bk.TensorLike,
trials: int = None) -> bk.BKTensor:
"""Return the expectation of a measurement. Since we can only measure
our computer in the computational basis, we only require the diagonal
of the Hermitian in that basis.
If the number of trials is specified, we sample the given number of
times. Else we return the exact expectation (as if we'd performed an
infinite number of trials. )
"""
if trials is None:
probs = self.probabilities()
else:
probs = bk.real(bk.astensorproduct(self.sample(trials) / trials))
diag_hermitian = bk.astensorproduct(diag_hermitian)
return bk.sum(bk.real(diag_hermitian) * probs)
|
[
"def",
"expectation",
"(",
"self",
",",
"diag_hermitian",
":",
"bk",
".",
"TensorLike",
",",
"trials",
":",
"int",
"=",
"None",
")",
"->",
"bk",
".",
"BKTensor",
":",
"if",
"trials",
"is",
"None",
":",
"probs",
"=",
"self",
".",
"probabilities",
"(",
")",
"else",
":",
"probs",
"=",
"bk",
".",
"real",
"(",
"bk",
".",
"astensorproduct",
"(",
"self",
".",
"sample",
"(",
"trials",
")",
"/",
"trials",
")",
")",
"diag_hermitian",
"=",
"bk",
".",
"astensorproduct",
"(",
"diag_hermitian",
")",
"return",
"bk",
".",
"sum",
"(",
"bk",
".",
"real",
"(",
"diag_hermitian",
")",
"*",
"probs",
")"
] |
Return the expectation of a measurement. Since we can only measure
our computer in the computational basis, we only require the diagonal
of the Hermitian in that basis.
If the number of trials is specified, we sample the given number of
times. Else we return the exact expectation (as if we'd performed an
infinite number of trials. )
|
[
"Return",
"the",
"expectation",
"of",
"a",
"measurement",
".",
"Since",
"we",
"can",
"only",
"measure",
"our",
"computer",
"in",
"the",
"computational",
"basis",
"we",
"only",
"require",
"the",
"diagonal",
"of",
"the",
"Hermitian",
"in",
"that",
"basis",
"."
] |
python
|
train
|
brocade/pynos
|
pynos/versions/ver_6/ver_6_0_1/yang/brocade_ip_policy.py
|
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_ip_policy.py#L1121-L1139
|
def hide_routemap_holder_route_map_content_set_origin_origin_igp(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
route_map = ET.SubElement(hide_routemap_holder, "route-map")
name_key = ET.SubElement(route_map, "name")
name_key.text = kwargs.pop('name')
action_rm_key = ET.SubElement(route_map, "action-rm")
action_rm_key.text = kwargs.pop('action_rm')
instance_key = ET.SubElement(route_map, "instance")
instance_key.text = kwargs.pop('instance')
content = ET.SubElement(route_map, "content")
set = ET.SubElement(content, "set")
origin = ET.SubElement(set, "origin")
origin_igp = ET.SubElement(origin, "origin-igp")
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
[
"def",
"hide_routemap_holder_route_map_content_set_origin_origin_igp",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"hide_routemap_holder",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"hide-routemap-holder\"",
",",
"xmlns",
"=",
"\"urn:brocade.com:mgmt:brocade-ip-policy\"",
")",
"route_map",
"=",
"ET",
".",
"SubElement",
"(",
"hide_routemap_holder",
",",
"\"route-map\"",
")",
"name_key",
"=",
"ET",
".",
"SubElement",
"(",
"route_map",
",",
"\"name\"",
")",
"name_key",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'name'",
")",
"action_rm_key",
"=",
"ET",
".",
"SubElement",
"(",
"route_map",
",",
"\"action-rm\"",
")",
"action_rm_key",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'action_rm'",
")",
"instance_key",
"=",
"ET",
".",
"SubElement",
"(",
"route_map",
",",
"\"instance\"",
")",
"instance_key",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'instance'",
")",
"content",
"=",
"ET",
".",
"SubElement",
"(",
"route_map",
",",
"\"content\"",
")",
"set",
"=",
"ET",
".",
"SubElement",
"(",
"content",
",",
"\"set\"",
")",
"origin",
"=",
"ET",
".",
"SubElement",
"(",
"set",
",",
"\"origin\"",
")",
"origin_igp",
"=",
"ET",
".",
"SubElement",
"(",
"origin",
",",
"\"origin-igp\"",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] |
Auto Generated Code
|
[
"Auto",
"Generated",
"Code"
] |
python
|
train
|
domainaware/parsedmarc
|
parsedmarc/__init__.py
|
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/__init__.py#L519-L633
|
def parse_forensic_report(feedback_report, sample, msg_date,
nameservers=None, dns_timeout=2.0,
strip_attachment_payloads=False,
parallel=False):
"""
Converts a DMARC forensic report and sample to a ``OrderedDict``
Args:
feedback_report (str): A message's feedback report as a string
sample (str): The RFC 822 headers or RFC 822 message sample
msg_date (str): The message's date header
nameservers (list): A list of one or more nameservers to use
(Cloudflare's public DNS resolvers by default)
dns_timeout (float): Sets the DNS timeout in seconds
strip_attachment_payloads (bool): Remove attachment payloads from
forensic report results
parallel (bool): Parallel processing
Returns:
OrderedDict: A parsed report and sample
"""
delivery_results = ["delivered", "spam", "policy", "reject", "other"]
try:
parsed_report = OrderedDict()
report_values = feedback_report_regex.findall(feedback_report)
for report_value in report_values:
key = report_value[0].lower().replace("-", "_")
parsed_report[key] = report_value[1]
if "arrival_date" not in parsed_report:
if msg_date is None:
raise InvalidForensicReport(
"Forensic sample is not a valid email")
parsed_report["arrival_date"] = msg_date.isoformat()
if "version" not in parsed_report:
parsed_report["version"] = 1
if "user_agent" not in parsed_report:
parsed_report["user_agent"] = None
if "delivery_result" not in parsed_report:
parsed_report["delivery_result"] = None
else:
for delivery_result in delivery_results:
if delivery_result in parsed_report["delivery_result"].lower():
parsed_report["delivery_result"] = delivery_result
break
if parsed_report["delivery_result"] not in delivery_results:
parsed_report["delivery_result"] = "other"
arrival_utc = human_timestamp_to_datetime(
parsed_report["arrival_date"], to_utc=True)
arrival_utc = arrival_utc.strftime("%Y-%m-%d %H:%M:%S")
parsed_report["arrival_date_utc"] = arrival_utc
ip_address = parsed_report["source_ip"]
parsed_report_source = get_ip_address_info(ip_address,
nameservers=nameservers,
timeout=dns_timeout,
parallel=parallel)
parsed_report["source"] = parsed_report_source
del parsed_report["source_ip"]
if "identity_alignment" not in parsed_report:
parsed_report["authentication_mechanisms"] = []
elif parsed_report["identity_alignment"] == "none":
parsed_report["authentication_mechanisms"] = []
del parsed_report["identity_alignment"]
else:
auth_mechanisms = parsed_report["identity_alignment"]
auth_mechanisms = auth_mechanisms.split(",")
parsed_report["authentication_mechanisms"] = auth_mechanisms
del parsed_report["identity_alignment"]
if "auth_failure" not in parsed_report:
parsed_report["auth_failure"] = "dmarc"
auth_failure = parsed_report["auth_failure"].split(",")
parsed_report["auth_failure"] = auth_failure
optional_fields = ["original_envelope_id", "dkim_domain",
"original_mail_from", "original_rcpt_to"]
for optional_field in optional_fields:
if optional_field not in parsed_report:
parsed_report[optional_field] = None
parsed_sample = parse_email(
sample,
strip_attachment_payloads=strip_attachment_payloads)
if "reported_domain" not in parsed_report:
parsed_report["reported_domain"] = parsed_sample["from"]["domain"]
sample_headers_only = False
number_of_attachments = len(parsed_sample["attachments"])
if number_of_attachments < 1 and parsed_sample["body"] is None:
sample_headers_only = True
if sample_headers_only and parsed_sample["has_defects"]:
del parsed_sample["defects"]
del parsed_sample["defects_categories"]
del parsed_sample["has_defects"]
parsed_report["sample_headers_only"] = sample_headers_only
parsed_report["sample"] = sample
parsed_report["parsed_sample"] = parsed_sample
return parsed_report
except KeyError as error:
raise InvalidForensicReport("Missing value: {0}".format(
error.__str__()))
except Exception as error:
raise InvalidForensicReport(
"Unexpected error: {0}".format(error.__str__()))
|
[
"def",
"parse_forensic_report",
"(",
"feedback_report",
",",
"sample",
",",
"msg_date",
",",
"nameservers",
"=",
"None",
",",
"dns_timeout",
"=",
"2.0",
",",
"strip_attachment_payloads",
"=",
"False",
",",
"parallel",
"=",
"False",
")",
":",
"delivery_results",
"=",
"[",
"\"delivered\"",
",",
"\"spam\"",
",",
"\"policy\"",
",",
"\"reject\"",
",",
"\"other\"",
"]",
"try",
":",
"parsed_report",
"=",
"OrderedDict",
"(",
")",
"report_values",
"=",
"feedback_report_regex",
".",
"findall",
"(",
"feedback_report",
")",
"for",
"report_value",
"in",
"report_values",
":",
"key",
"=",
"report_value",
"[",
"0",
"]",
".",
"lower",
"(",
")",
".",
"replace",
"(",
"\"-\"",
",",
"\"_\"",
")",
"parsed_report",
"[",
"key",
"]",
"=",
"report_value",
"[",
"1",
"]",
"if",
"\"arrival_date\"",
"not",
"in",
"parsed_report",
":",
"if",
"msg_date",
"is",
"None",
":",
"raise",
"InvalidForensicReport",
"(",
"\"Forensic sample is not a valid email\"",
")",
"parsed_report",
"[",
"\"arrival_date\"",
"]",
"=",
"msg_date",
".",
"isoformat",
"(",
")",
"if",
"\"version\"",
"not",
"in",
"parsed_report",
":",
"parsed_report",
"[",
"\"version\"",
"]",
"=",
"1",
"if",
"\"user_agent\"",
"not",
"in",
"parsed_report",
":",
"parsed_report",
"[",
"\"user_agent\"",
"]",
"=",
"None",
"if",
"\"delivery_result\"",
"not",
"in",
"parsed_report",
":",
"parsed_report",
"[",
"\"delivery_result\"",
"]",
"=",
"None",
"else",
":",
"for",
"delivery_result",
"in",
"delivery_results",
":",
"if",
"delivery_result",
"in",
"parsed_report",
"[",
"\"delivery_result\"",
"]",
".",
"lower",
"(",
")",
":",
"parsed_report",
"[",
"\"delivery_result\"",
"]",
"=",
"delivery_result",
"break",
"if",
"parsed_report",
"[",
"\"delivery_result\"",
"]",
"not",
"in",
"delivery_results",
":",
"parsed_report",
"[",
"\"delivery_result\"",
"]",
"=",
"\"other\"",
"arrival_utc",
"=",
"human_timestamp_to_datetime",
"(",
"parsed_report",
"[",
"\"arrival_date\"",
"]",
",",
"to_utc",
"=",
"True",
")",
"arrival_utc",
"=",
"arrival_utc",
".",
"strftime",
"(",
"\"%Y-%m-%d %H:%M:%S\"",
")",
"parsed_report",
"[",
"\"arrival_date_utc\"",
"]",
"=",
"arrival_utc",
"ip_address",
"=",
"parsed_report",
"[",
"\"source_ip\"",
"]",
"parsed_report_source",
"=",
"get_ip_address_info",
"(",
"ip_address",
",",
"nameservers",
"=",
"nameservers",
",",
"timeout",
"=",
"dns_timeout",
",",
"parallel",
"=",
"parallel",
")",
"parsed_report",
"[",
"\"source\"",
"]",
"=",
"parsed_report_source",
"del",
"parsed_report",
"[",
"\"source_ip\"",
"]",
"if",
"\"identity_alignment\"",
"not",
"in",
"parsed_report",
":",
"parsed_report",
"[",
"\"authentication_mechanisms\"",
"]",
"=",
"[",
"]",
"elif",
"parsed_report",
"[",
"\"identity_alignment\"",
"]",
"==",
"\"none\"",
":",
"parsed_report",
"[",
"\"authentication_mechanisms\"",
"]",
"=",
"[",
"]",
"del",
"parsed_report",
"[",
"\"identity_alignment\"",
"]",
"else",
":",
"auth_mechanisms",
"=",
"parsed_report",
"[",
"\"identity_alignment\"",
"]",
"auth_mechanisms",
"=",
"auth_mechanisms",
".",
"split",
"(",
"\",\"",
")",
"parsed_report",
"[",
"\"authentication_mechanisms\"",
"]",
"=",
"auth_mechanisms",
"del",
"parsed_report",
"[",
"\"identity_alignment\"",
"]",
"if",
"\"auth_failure\"",
"not",
"in",
"parsed_report",
":",
"parsed_report",
"[",
"\"auth_failure\"",
"]",
"=",
"\"dmarc\"",
"auth_failure",
"=",
"parsed_report",
"[",
"\"auth_failure\"",
"]",
".",
"split",
"(",
"\",\"",
")",
"parsed_report",
"[",
"\"auth_failure\"",
"]",
"=",
"auth_failure",
"optional_fields",
"=",
"[",
"\"original_envelope_id\"",
",",
"\"dkim_domain\"",
",",
"\"original_mail_from\"",
",",
"\"original_rcpt_to\"",
"]",
"for",
"optional_field",
"in",
"optional_fields",
":",
"if",
"optional_field",
"not",
"in",
"parsed_report",
":",
"parsed_report",
"[",
"optional_field",
"]",
"=",
"None",
"parsed_sample",
"=",
"parse_email",
"(",
"sample",
",",
"strip_attachment_payloads",
"=",
"strip_attachment_payloads",
")",
"if",
"\"reported_domain\"",
"not",
"in",
"parsed_report",
":",
"parsed_report",
"[",
"\"reported_domain\"",
"]",
"=",
"parsed_sample",
"[",
"\"from\"",
"]",
"[",
"\"domain\"",
"]",
"sample_headers_only",
"=",
"False",
"number_of_attachments",
"=",
"len",
"(",
"parsed_sample",
"[",
"\"attachments\"",
"]",
")",
"if",
"number_of_attachments",
"<",
"1",
"and",
"parsed_sample",
"[",
"\"body\"",
"]",
"is",
"None",
":",
"sample_headers_only",
"=",
"True",
"if",
"sample_headers_only",
"and",
"parsed_sample",
"[",
"\"has_defects\"",
"]",
":",
"del",
"parsed_sample",
"[",
"\"defects\"",
"]",
"del",
"parsed_sample",
"[",
"\"defects_categories\"",
"]",
"del",
"parsed_sample",
"[",
"\"has_defects\"",
"]",
"parsed_report",
"[",
"\"sample_headers_only\"",
"]",
"=",
"sample_headers_only",
"parsed_report",
"[",
"\"sample\"",
"]",
"=",
"sample",
"parsed_report",
"[",
"\"parsed_sample\"",
"]",
"=",
"parsed_sample",
"return",
"parsed_report",
"except",
"KeyError",
"as",
"error",
":",
"raise",
"InvalidForensicReport",
"(",
"\"Missing value: {0}\"",
".",
"format",
"(",
"error",
".",
"__str__",
"(",
")",
")",
")",
"except",
"Exception",
"as",
"error",
":",
"raise",
"InvalidForensicReport",
"(",
"\"Unexpected error: {0}\"",
".",
"format",
"(",
"error",
".",
"__str__",
"(",
")",
")",
")"
] |
Converts a DMARC forensic report and sample to a ``OrderedDict``
Args:
feedback_report (str): A message's feedback report as a string
sample (str): The RFC 822 headers or RFC 822 message sample
msg_date (str): The message's date header
nameservers (list): A list of one or more nameservers to use
(Cloudflare's public DNS resolvers by default)
dns_timeout (float): Sets the DNS timeout in seconds
strip_attachment_payloads (bool): Remove attachment payloads from
forensic report results
parallel (bool): Parallel processing
Returns:
OrderedDict: A parsed report and sample
|
[
"Converts",
"a",
"DMARC",
"forensic",
"report",
"and",
"sample",
"to",
"a",
"OrderedDict"
] |
python
|
test
|
brentp/cruzdb
|
cruzdb/intersecter.py
|
https://github.com/brentp/cruzdb/blob/9068d46e25952f4a929dde0242beb31fa4c7e89a/cruzdb/intersecter.py#L194-L215
|
def right(self, f, n=1):
"""return the nearest n features strictly to the right of a Feature f.
Overlapping features are not considered as to the right.
f: a Feature object
n: the number of features to return
"""
intervals = self.intervals[f.chrom]
ilen = len(intervals)
iright = binsearch_right_end(intervals, f.end, 0, ilen)
results = []
while iright < ilen:
i = len(results)
if i > n:
if distance(f, results[i - 1]) != distance(f, results[i - 2]):
return results[:i - 1]
other = intervals[iright]
iright += 1
if distance(other, f) == 0: continue
results.append(other)
return results
|
[
"def",
"right",
"(",
"self",
",",
"f",
",",
"n",
"=",
"1",
")",
":",
"intervals",
"=",
"self",
".",
"intervals",
"[",
"f",
".",
"chrom",
"]",
"ilen",
"=",
"len",
"(",
"intervals",
")",
"iright",
"=",
"binsearch_right_end",
"(",
"intervals",
",",
"f",
".",
"end",
",",
"0",
",",
"ilen",
")",
"results",
"=",
"[",
"]",
"while",
"iright",
"<",
"ilen",
":",
"i",
"=",
"len",
"(",
"results",
")",
"if",
"i",
">",
"n",
":",
"if",
"distance",
"(",
"f",
",",
"results",
"[",
"i",
"-",
"1",
"]",
")",
"!=",
"distance",
"(",
"f",
",",
"results",
"[",
"i",
"-",
"2",
"]",
")",
":",
"return",
"results",
"[",
":",
"i",
"-",
"1",
"]",
"other",
"=",
"intervals",
"[",
"iright",
"]",
"iright",
"+=",
"1",
"if",
"distance",
"(",
"other",
",",
"f",
")",
"==",
"0",
":",
"continue",
"results",
".",
"append",
"(",
"other",
")",
"return",
"results"
] |
return the nearest n features strictly to the right of a Feature f.
Overlapping features are not considered as to the right.
f: a Feature object
n: the number of features to return
|
[
"return",
"the",
"nearest",
"n",
"features",
"strictly",
"to",
"the",
"right",
"of",
"a",
"Feature",
"f",
".",
"Overlapping",
"features",
"are",
"not",
"considered",
"as",
"to",
"the",
"right",
"."
] |
python
|
train
|
smarie/python-valid8
|
valid8/entry_points_annotations.py
|
https://github.com/smarie/python-valid8/blob/5e15d1de11602933c5114eb9f73277ad91d97800/valid8/entry_points_annotations.py#L714-L769
|
def decorate_with_validation(func,
arg_name, # type: str
*validation_func, # type: ValidationFuncs
**kwargs):
# type: (...) -> Callable
"""
This method is the inner method used in `@validate_io`, `@validate_arg` and `@validate_out`.
It can be used if you with to perform decoration manually without a decorator.
:param func:
:param arg_name: the name of the argument to validate or _OUT_KEY for output validation
:param validation_func: the validation function or
list of validation functions to use. A validation function may be a callable, a tuple(callable, help_msg_str),
a tuple(callable, failure_type), or a list of several such elements. Nested lists are supported and indicate an
implicit `and_` (such as the main list). Tuples indicate an implicit `_failure_raiser`.
[mini_lambda](https://smarie.github.io/python-mini-lambda/) expressions can be used instead of callables, they
will be transformed to functions automatically.
:param error_type: a subclass of ValidationError to raise in case of validation failure. By default a
ValidationError will be raised with the provided help_msg
:param help_msg: an optional help message to be used in the raised error in case of validation failure.
:param none_policy: describes how None values should be handled. See `NoneArgPolicy` for the various possibilities.
Default is `NoneArgPolicy.ACCEPT_IF_OPTIONAl_ELSE_REJECT`.
:param kw_context_args: optional contextual information to store in the exception, and that may be also used
to format the help message
:return: the decorated function, that will perform input validation (using `_assert_input_is_valid`) before
executing the function's code everytime it is executed.
"""
error_type, help_msg, none_policy, _constructor_of_cls_ = pop_kwargs(kwargs, [('error_type', None),
('help_msg', None),
('none_policy', None),
('_constructor_of_cls_', None)],
allow_others=True)
# the rest of keyword arguments is used as context.
kw_context_args = kwargs
none_policy = none_policy or NoneArgPolicy.SKIP_IF_NONABLE_ELSE_VALIDATE
# retrieve target function signature
func_sig = signature(func)
# create the new validator
if _constructor_of_cls_ is None:
# standard method: input validator
new_validator = _create_function_validator(func, func_sig, arg_name, *validation_func,
none_policy=none_policy, error_type=error_type,
help_msg=help_msg, **kw_context_args)
else:
# class constructor: field validator
new_validator = _create_function_validator(func, func_sig, arg_name, *validation_func,
none_policy=none_policy, error_type=error_type,
help_msg=help_msg, validated_class=_constructor_of_cls_,
validated_class_field_name=arg_name,
**kw_context_args)
# decorate or update decorator with this new validator
return decorate_with_validators(func, func_signature=func_sig, **{arg_name: new_validator})
|
[
"def",
"decorate_with_validation",
"(",
"func",
",",
"arg_name",
",",
"# type: str",
"*",
"validation_func",
",",
"# type: ValidationFuncs",
"*",
"*",
"kwargs",
")",
":",
"# type: (...) -> Callable",
"error_type",
",",
"help_msg",
",",
"none_policy",
",",
"_constructor_of_cls_",
"=",
"pop_kwargs",
"(",
"kwargs",
",",
"[",
"(",
"'error_type'",
",",
"None",
")",
",",
"(",
"'help_msg'",
",",
"None",
")",
",",
"(",
"'none_policy'",
",",
"None",
")",
",",
"(",
"'_constructor_of_cls_'",
",",
"None",
")",
"]",
",",
"allow_others",
"=",
"True",
")",
"# the rest of keyword arguments is used as context.",
"kw_context_args",
"=",
"kwargs",
"none_policy",
"=",
"none_policy",
"or",
"NoneArgPolicy",
".",
"SKIP_IF_NONABLE_ELSE_VALIDATE",
"# retrieve target function signature",
"func_sig",
"=",
"signature",
"(",
"func",
")",
"# create the new validator",
"if",
"_constructor_of_cls_",
"is",
"None",
":",
"# standard method: input validator",
"new_validator",
"=",
"_create_function_validator",
"(",
"func",
",",
"func_sig",
",",
"arg_name",
",",
"*",
"validation_func",
",",
"none_policy",
"=",
"none_policy",
",",
"error_type",
"=",
"error_type",
",",
"help_msg",
"=",
"help_msg",
",",
"*",
"*",
"kw_context_args",
")",
"else",
":",
"# class constructor: field validator",
"new_validator",
"=",
"_create_function_validator",
"(",
"func",
",",
"func_sig",
",",
"arg_name",
",",
"*",
"validation_func",
",",
"none_policy",
"=",
"none_policy",
",",
"error_type",
"=",
"error_type",
",",
"help_msg",
"=",
"help_msg",
",",
"validated_class",
"=",
"_constructor_of_cls_",
",",
"validated_class_field_name",
"=",
"arg_name",
",",
"*",
"*",
"kw_context_args",
")",
"# decorate or update decorator with this new validator",
"return",
"decorate_with_validators",
"(",
"func",
",",
"func_signature",
"=",
"func_sig",
",",
"*",
"*",
"{",
"arg_name",
":",
"new_validator",
"}",
")"
] |
This method is the inner method used in `@validate_io`, `@validate_arg` and `@validate_out`.
It can be used if you with to perform decoration manually without a decorator.
:param func:
:param arg_name: the name of the argument to validate or _OUT_KEY for output validation
:param validation_func: the validation function or
list of validation functions to use. A validation function may be a callable, a tuple(callable, help_msg_str),
a tuple(callable, failure_type), or a list of several such elements. Nested lists are supported and indicate an
implicit `and_` (such as the main list). Tuples indicate an implicit `_failure_raiser`.
[mini_lambda](https://smarie.github.io/python-mini-lambda/) expressions can be used instead of callables, they
will be transformed to functions automatically.
:param error_type: a subclass of ValidationError to raise in case of validation failure. By default a
ValidationError will be raised with the provided help_msg
:param help_msg: an optional help message to be used in the raised error in case of validation failure.
:param none_policy: describes how None values should be handled. See `NoneArgPolicy` for the various possibilities.
Default is `NoneArgPolicy.ACCEPT_IF_OPTIONAl_ELSE_REJECT`.
:param kw_context_args: optional contextual information to store in the exception, and that may be also used
to format the help message
:return: the decorated function, that will perform input validation (using `_assert_input_is_valid`) before
executing the function's code everytime it is executed.
|
[
"This",
"method",
"is",
"the",
"inner",
"method",
"used",
"in",
"@validate_io",
"@validate_arg",
"and",
"@validate_out",
".",
"It",
"can",
"be",
"used",
"if",
"you",
"with",
"to",
"perform",
"decoration",
"manually",
"without",
"a",
"decorator",
"."
] |
python
|
train
|
mathandy/svgpathtools
|
svgpathtools/path.py
|
https://github.com/mathandy/svgpathtools/blob/fd7348a1dfd88b65ea61da02325c6605aedf8c4f/svgpathtools/path.py#L577-L582
|
def ilength(self, s, s_tol=ILENGTH_S_TOL, maxits=ILENGTH_MAXITS,
error=ILENGTH_ERROR, min_depth=ILENGTH_MIN_DEPTH):
"""Returns a float, t, such that self.length(0, t) is approximately s.
See the inv_arclength() docstring for more details."""
return inv_arclength(self, s, s_tol=s_tol, maxits=maxits, error=error,
min_depth=min_depth)
|
[
"def",
"ilength",
"(",
"self",
",",
"s",
",",
"s_tol",
"=",
"ILENGTH_S_TOL",
",",
"maxits",
"=",
"ILENGTH_MAXITS",
",",
"error",
"=",
"ILENGTH_ERROR",
",",
"min_depth",
"=",
"ILENGTH_MIN_DEPTH",
")",
":",
"return",
"inv_arclength",
"(",
"self",
",",
"s",
",",
"s_tol",
"=",
"s_tol",
",",
"maxits",
"=",
"maxits",
",",
"error",
"=",
"error",
",",
"min_depth",
"=",
"min_depth",
")"
] |
Returns a float, t, such that self.length(0, t) is approximately s.
See the inv_arclength() docstring for more details.
|
[
"Returns",
"a",
"float",
"t",
"such",
"that",
"self",
".",
"length",
"(",
"0",
"t",
")",
"is",
"approximately",
"s",
".",
"See",
"the",
"inv_arclength",
"()",
"docstring",
"for",
"more",
"details",
"."
] |
python
|
train
|
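Editor's note: ``ilength`` is the numerical inverse of ``length``. A small sketch, assuming svgpathtools is installed and that segment classes such as ``Line`` expose ``length()`` and ``ilength()`` as well as ``Path``; a straight segment makes the relationship easy to check by hand.

# Minimal sketch: ilength() inverts length() numerically.
from svgpathtools import Line

seg = Line(0 + 0j, 3 + 4j)   # straight segment of length 5
total = seg.length()         # 5.0
t = seg.ilength(2.5)         # parameter t such that seg.length(0, t) ~= 2.5
print(total, t)              # expect roughly 5.0 and 0.5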
cloud-custodian/cloud-custodian
|
tools/c7n_gcp/c7n_gcp/client.py
|
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/tools/c7n_gcp/c7n_gcp/client.py#L394-L418
|
def execute_paged_query(self, verb, verb_arguments):
"""Executes query (ex. list) via a dedicated http object.
Args:
verb (str): Method to execute on the component (ex. get, list).
verb_arguments (dict): key-value pairs to be passed to _BuildRequest.
Yields:
dict: Service Response.
Raises:
PaginationNotSupportedError: When an API does not support paging.
"""
if not self.supports_pagination(verb=verb):
raise PaginationNotSupported('{} does not support pagination')
request = self._build_request(verb, verb_arguments)
number_of_pages_processed = 0
while request is not None:
response = self._execute(request)
number_of_pages_processed += 1
log.debug('Executing paged request #%s', number_of_pages_processed)
request = self._build_next_request(verb, request, response)
yield response
|
[
"def",
"execute_paged_query",
"(",
"self",
",",
"verb",
",",
"verb_arguments",
")",
":",
"if",
"not",
"self",
".",
"supports_pagination",
"(",
"verb",
"=",
"verb",
")",
":",
"raise",
"PaginationNotSupported",
"(",
"'{} does not support pagination'",
")",
"request",
"=",
"self",
".",
"_build_request",
"(",
"verb",
",",
"verb_arguments",
")",
"number_of_pages_processed",
"=",
"0",
"while",
"request",
"is",
"not",
"None",
":",
"response",
"=",
"self",
".",
"_execute",
"(",
"request",
")",
"number_of_pages_processed",
"+=",
"1",
"log",
".",
"debug",
"(",
"'Executing paged request #%s'",
",",
"number_of_pages_processed",
")",
"request",
"=",
"self",
".",
"_build_next_request",
"(",
"verb",
",",
"request",
",",
"response",
")",
"yield",
"response"
] |
Executes query (ex. list) via a dedicated http object.
Args:
verb (str): Method to execute on the component (ex. get, list).
verb_arguments (dict): key-value pairs to be passed to _BuildRequest.
Yields:
dict: Service Response.
Raises:
PaginationNotSupportedError: When an API does not support paging.
|
[
"Executes",
"query",
"(",
"ex",
".",
"list",
")",
"via",
"a",
"dedicated",
"http",
"object",
"."
] |
python
|
train
|
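Editor's note: the loop above is a generic "execute request, build next request, yield page" pagination generator. A self-contained sketch of the same control flow with stub objects (this is not the c7n_gcp client API, just the pattern):

# Stand-alone sketch of the pagination pattern: keep executing and
# yielding pages until there is no "next" request to build.
def fake_execute(request):
    page = request["page"]
    return {"items": page, "next": page + 1 if page < 3 else None}

def fake_build_next(request, response):
    return None if response["next"] is None else {"page": response["next"]}

def paged(first_request):
    request = first_request
    while request is not None:
        response = fake_execute(request)
        request = fake_build_next(request, response)
        yield response

print([page["items"] for page in paged({"page": 1})])  # [1, 2, 3]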
tensorflow/mesh
|
mesh_tensorflow/beam_search.py
|
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/beam_search.py#L577-L642
|
def greedy_decode(logits_fn,
initial_ids,
temperature=0.0,
initial_states=None,
eos_id=EOS_ID,
forced_ids=None,
use_tpu=True):
"""Greedy decoding.
Args:
logits_fn: Interface to the model, to provide logits.
Shoud take:
step_num - mtf Scalar
ids - mtf Tensor with shape [..., length]
states - list of mtf.Tensor
Should return:
logits - [batch, vocab_size]
new_states - list of mtf.Tensor
initial_ids: mtf.Tensor with shape [..., length], containing zeros.
temperature: a float between 0.0 (argmax) and 1.0 (random)
initial_states: list of mtf.Tensor
eos_id: ID for end of sentence.
forced_ids: optional mtf.Tensor with shape [..., length]
use_tpu: a boolean
Returns:
Tensor with shape [..., length]
"""
length_dim = initial_ids.shape.dims[-1]
mesh = initial_ids.mesh
num_steps = mtf.constant(mesh, length_dim.size, dtype=tf.int32)
def cond_fn(step_num, prev_ids, *unused_states):
"""Should we run another loop iteration."""
overflow = mtf.equal(step_num, num_steps)
has_eos = mtf.reduce_any(
mtf.equal(prev_ids, eos_id), reduced_dim=length_dim)
all_has_eos = mtf.reduce_all(has_eos)
return mtf.logical_not(mtf.logical_or(overflow, all_has_eos))
def body_fn(step_num, ids, *states):
"""Body function for greedy decoding.
Args:
step_num: a mtf.Tensor
ids: a mtf.Tensor
*states: additional mtf.Tensors
Returns:
new_step_num, new_ids, *new_states
"""
logits, new_states = logits_fn(step_num, ids, states)
vocab_dim = logits.shape.dims[-1]
new_ids = mtf.sample_with_temperature(
logits, vocab_dim, temperature)
if forced_ids is not None:
# force the new ids to equal the partial targets where specified
# (positions where partial_targets contain nonzero values)
forced = mtf.gather(forced_ids, step_num, length_dim)
new_ids = forced + new_ids * mtf.to_int32(mtf.equal(forced, 0))
ids += new_ids * mtf.one_hot(step_num, length_dim, dtype=tf.int32)
new_step_num = step_num + 1
return [new_step_num, ids] + new_states
initial_step_num = mtf.constant(mesh, 0, dtype=tf.int32)
while_loop_inputs = [initial_step_num, initial_ids] + initial_states
final_step_num, mtf_samples = mtf.while_loop(
cond_fn, body_fn, while_loop_inputs,
num_loop_vars=None if use_tpu else 2)[:2]
mtf_samples = mtf.Print(mtf_samples, [final_step_num], "output_length")
return mtf_samples
|
[
"def",
"greedy_decode",
"(",
"logits_fn",
",",
"initial_ids",
",",
"temperature",
"=",
"0.0",
",",
"initial_states",
"=",
"None",
",",
"eos_id",
"=",
"EOS_ID",
",",
"forced_ids",
"=",
"None",
",",
"use_tpu",
"=",
"True",
")",
":",
"length_dim",
"=",
"initial_ids",
".",
"shape",
".",
"dims",
"[",
"-",
"1",
"]",
"mesh",
"=",
"initial_ids",
".",
"mesh",
"num_steps",
"=",
"mtf",
".",
"constant",
"(",
"mesh",
",",
"length_dim",
".",
"size",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"def",
"cond_fn",
"(",
"step_num",
",",
"prev_ids",
",",
"*",
"unused_states",
")",
":",
"\"\"\"Should we run another loop iteration.\"\"\"",
"overflow",
"=",
"mtf",
".",
"equal",
"(",
"step_num",
",",
"num_steps",
")",
"has_eos",
"=",
"mtf",
".",
"reduce_any",
"(",
"mtf",
".",
"equal",
"(",
"prev_ids",
",",
"eos_id",
")",
",",
"reduced_dim",
"=",
"length_dim",
")",
"all_has_eos",
"=",
"mtf",
".",
"reduce_all",
"(",
"has_eos",
")",
"return",
"mtf",
".",
"logical_not",
"(",
"mtf",
".",
"logical_or",
"(",
"overflow",
",",
"all_has_eos",
")",
")",
"def",
"body_fn",
"(",
"step_num",
",",
"ids",
",",
"*",
"states",
")",
":",
"\"\"\"Body function for greedy decoding.\n\n Args:\n step_num: a mtf.Tensor\n ids: a mtf.Tensor\n *states: additional mtf.Tensors\n Returns:\n new_step_num, new_ids, *new_states\n \"\"\"",
"logits",
",",
"new_states",
"=",
"logits_fn",
"(",
"step_num",
",",
"ids",
",",
"states",
")",
"vocab_dim",
"=",
"logits",
".",
"shape",
".",
"dims",
"[",
"-",
"1",
"]",
"new_ids",
"=",
"mtf",
".",
"sample_with_temperature",
"(",
"logits",
",",
"vocab_dim",
",",
"temperature",
")",
"if",
"forced_ids",
"is",
"not",
"None",
":",
"# force the new ids to equal the partial targets where specified",
"# (positions where partial_targets contain nonzero values)",
"forced",
"=",
"mtf",
".",
"gather",
"(",
"forced_ids",
",",
"step_num",
",",
"length_dim",
")",
"new_ids",
"=",
"forced",
"+",
"new_ids",
"*",
"mtf",
".",
"to_int32",
"(",
"mtf",
".",
"equal",
"(",
"forced",
",",
"0",
")",
")",
"ids",
"+=",
"new_ids",
"*",
"mtf",
".",
"one_hot",
"(",
"step_num",
",",
"length_dim",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"new_step_num",
"=",
"step_num",
"+",
"1",
"return",
"[",
"new_step_num",
",",
"ids",
"]",
"+",
"new_states",
"initial_step_num",
"=",
"mtf",
".",
"constant",
"(",
"mesh",
",",
"0",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"while_loop_inputs",
"=",
"[",
"initial_step_num",
",",
"initial_ids",
"]",
"+",
"initial_states",
"final_step_num",
",",
"mtf_samples",
"=",
"mtf",
".",
"while_loop",
"(",
"cond_fn",
",",
"body_fn",
",",
"while_loop_inputs",
",",
"num_loop_vars",
"=",
"None",
"if",
"use_tpu",
"else",
"2",
")",
"[",
":",
"2",
"]",
"mtf_samples",
"=",
"mtf",
".",
"Print",
"(",
"mtf_samples",
",",
"[",
"final_step_num",
"]",
",",
"\"output_length\"",
")",
"return",
"mtf_samples"
] |
Greedy decoding.
Args:
logits_fn: Interface to the model, to provide logits.
Shoud take:
step_num - mtf Scalar
ids - mtf Tensor with shape [..., length]
states - list of mtf.Tensor
Should return:
logits - [batch, vocab_size]
new_states - list of mtf.Tensor
initial_ids: mtf.Tensor with shape [..., length], containing zeros.
temperature: a float between 0.0 (argmax) and 1.0 (random)
initial_states: list of mtf.Tensor
eos_id: ID for end of sentence.
forced_ids: optional mtf.Tensor with shape [..., length]
use_tpu: a boolean
Returns:
Tensor with shape [..., length]
|
[
"Greedy",
"decoding",
"."
] |
python
|
train
|
google/grumpy
|
third_party/stdlib/json/decoder.py
|
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/json/decoder.py#L362-L371
|
def decode(self, s, _w=WHITESPACE.match):
"""Return the Python representation of ``s`` (a ``str`` or ``unicode``
instance containing a JSON document)
"""
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
end = _w(s, end).end()
if end != len(s):
raise ValueError(errmsg("Extra data", s, end, len(s)))
return obj
|
[
"def",
"decode",
"(",
"self",
",",
"s",
",",
"_w",
"=",
"WHITESPACE",
".",
"match",
")",
":",
"obj",
",",
"end",
"=",
"self",
".",
"raw_decode",
"(",
"s",
",",
"idx",
"=",
"_w",
"(",
"s",
",",
"0",
")",
".",
"end",
"(",
")",
")",
"end",
"=",
"_w",
"(",
"s",
",",
"end",
")",
".",
"end",
"(",
")",
"if",
"end",
"!=",
"len",
"(",
"s",
")",
":",
"raise",
"ValueError",
"(",
"errmsg",
"(",
"\"Extra data\"",
",",
"s",
",",
"end",
",",
"len",
"(",
"s",
")",
")",
")",
"return",
"obj"
] |
Return the Python representation of ``s`` (a ``str`` or ``unicode``
instance containing a JSON document)
|
[
"Return",
"the",
"Python",
"representation",
"of",
"s",
"(",
"a",
"str",
"or",
"unicode",
"instance",
"containing",
"a",
"JSON",
"document",
")"
] |
python
|
valid
|
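Editor's note: the ``decode`` wrapper above rejects trailing data after a valid JSON document. The standard-library decoder behaves the same way, so the effect is easy to try directly:

# A valid document decodes; trailing garbage raises "Extra data".
import json

decoder = json.JSONDecoder()
print(decoder.decode('{"a": 1}'))         # {'a': 1}

try:
    decoder.decode('{"a": 1} trailing')
except ValueError as exc:                  # json.JSONDecodeError subclasses ValueError
    print(exc)                             # Extra data: line 1 column 10 (char 9)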
SheffieldML/GPy
|
GPy/util/univariate_Gaussian.py
|
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/util/univariate_Gaussian.py#L14-L24
|
def inv_std_norm_cdf(x):
"""
Inverse cumulative standard Gaussian distribution
Based on Winitzki, S. (2008)
"""
z = 2*x -1
ln1z2 = np.log(1-z**2)
a = 8*(np.pi -3)/(3*np.pi*(4-np.pi))
b = 2/(np.pi * a) + ln1z2/2
inv_erf = np.sign(z) * np.sqrt( np.sqrt(b**2 - ln1z2/a) - b )
return np.sqrt(2) * inv_erf
|
[
"def",
"inv_std_norm_cdf",
"(",
"x",
")",
":",
"z",
"=",
"2",
"*",
"x",
"-",
"1",
"ln1z2",
"=",
"np",
".",
"log",
"(",
"1",
"-",
"z",
"**",
"2",
")",
"a",
"=",
"8",
"*",
"(",
"np",
".",
"pi",
"-",
"3",
")",
"/",
"(",
"3",
"*",
"np",
".",
"pi",
"*",
"(",
"4",
"-",
"np",
".",
"pi",
")",
")",
"b",
"=",
"2",
"/",
"(",
"np",
".",
"pi",
"*",
"a",
")",
"+",
"ln1z2",
"/",
"2",
"inv_erf",
"=",
"np",
".",
"sign",
"(",
"z",
")",
"*",
"np",
".",
"sqrt",
"(",
"np",
".",
"sqrt",
"(",
"b",
"**",
"2",
"-",
"ln1z2",
"/",
"a",
")",
"-",
"b",
")",
"return",
"np",
".",
"sqrt",
"(",
"2",
")",
"*",
"inv_erf"
] |
Inverse cumulative standard Gaussian distribution
Based on Winitzki, S. (2008)
|
[
"Inverse",
"cumulative",
"standard",
"Gaussian",
"distribution",
"Based",
"on",
"Winitzki",
"S",
".",
"(",
"2008",
")"
] |
python
|
train
|
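Editor's note: the record above implements Winitzki's closed-form approximation to the inverse standard-normal CDF. A self-contained restatement of the same formula, sanity-checked against the familiar 97.5% quantile:

# Stand-alone restatement of the Winitzki (2008) approximation used above.
import numpy as np

def approx_inv_std_norm_cdf(x):
    z = 2 * x - 1
    ln1z2 = np.log(1 - z ** 2)
    a = 8 * (np.pi - 3) / (3 * np.pi * (4 - np.pi))
    b = 2 / (np.pi * a) + ln1z2 / 2
    inv_erf = np.sign(z) * np.sqrt(np.sqrt(b ** 2 - ln1z2 / a) - b)
    return np.sqrt(2) * inv_erf

print(approx_inv_std_norm_cdf(0.975))   # roughly 1.96 (approximation error ~1e-3)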
OpenGov/carpenter
|
carpenter/carpenter.py
|
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/carpenter.py#L87-L97
|
def stitch_block_rows(block_list):
'''
Stitches blocks together into a single block rowwise. These blocks are 2D tables usually
generated from tableproc. The final block will be of dimensions (sum(num_rows), max(num_cols)).
'''
stitched = list(itertools.chain(*block_list))
max_length = max(len(row) for row in stitched)
for row in stitched:
if len(row) < max_length:
row += [None] * (max_length - len(row))
return stitched
|
[
"def",
"stitch_block_rows",
"(",
"block_list",
")",
":",
"stitched",
"=",
"list",
"(",
"itertools",
".",
"chain",
"(",
"*",
"block_list",
")",
")",
"max_length",
"=",
"max",
"(",
"len",
"(",
"row",
")",
"for",
"row",
"in",
"stitched",
")",
"for",
"row",
"in",
"stitched",
":",
"if",
"len",
"(",
"row",
")",
"<",
"max_length",
":",
"row",
"+=",
"[",
"None",
"]",
"*",
"(",
"max_length",
"-",
"len",
"(",
"row",
")",
")",
"return",
"stitched"
] |
Stitches blocks together into a single block rowwise. These blocks are 2D tables usually
generated from tableproc. The final block will be of dimensions (sum(num_rows), max(num_cols)).
|
[
"Stitches",
"blocks",
"together",
"into",
"a",
"single",
"block",
"rowwise",
".",
"These",
"blocks",
"are",
"2D",
"tables",
"usually",
"generated",
"from",
"tableproc",
".",
"The",
"final",
"block",
"will",
"be",
"of",
"dimensions",
"(",
"sum",
"(",
"num_rows",
")",
"max",
"(",
"num_cols",
"))",
"."
] |
python
|
train
|
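Editor's note: a tiny self-contained demo of the row-wise stitch-and-pad behaviour; the helper is restated here (copied from the record) so the snippet runs on its own.

# Blocks are concatenated row-wise and short rows are right-padded with None.
import itertools

def stitch_block_rows(block_list):
    stitched = list(itertools.chain(*block_list))
    max_length = max(len(row) for row in stitched)
    for row in stitched:
        if len(row) < max_length:
            row += [None] * (max_length - len(row))
    return stitched

blocks = [[[1, 2, 3]], [[4], [5, 6]]]
print(stitch_block_rows(blocks))
# [[1, 2, 3], [4, None, None], [5, 6, None]]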
blazelibs/blazeutils
|
blazeutils/functional.py
|
https://github.com/blazelibs/blazeutils/blob/c94476325146007553cbddeeb9ef83394756babf/blazeutils/functional.py#L114-L122
|
def unzip(iterable):
"""Unzip/transpose an iterable of tuples into a tuple of lists.
WARNING: When given an empty iterable, this returns an empty list instead of a tuple. If you
need a consistent interface then do something like this:
left, right = unzip(two_columned_list) or ([], [])
"""
return list(map(list, list(six.moves.zip(*iterable))))
|
[
"def",
"unzip",
"(",
"iterable",
")",
":",
"return",
"list",
"(",
"map",
"(",
"list",
",",
"list",
"(",
"six",
".",
"moves",
".",
"zip",
"(",
"*",
"iterable",
")",
")",
")",
")"
] |
Unzip/transpose an iterable of tuples into a tuple of lists.
WARNING: When given an empty iterable, this returns an empty list instead of a tuple. If you
need a consistent interface then do something like this:
left, right = unzip(two_columned_list) or ([], [])
|
[
"Unzip",
"/",
"transpose",
"an",
"iterable",
"of",
"tuples",
"into",
"a",
"tuple",
"of",
"lists",
"."
] |
python
|
train
|
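Editor's note: the transpose is essentially ``zip(*iterable)`` wrapped in lists, and the warning about empty input is easy to reproduce without blazeutils:

# The transpose behaviour (and the empty-input caveat) of unzip(),
# shown with plain zip(*...) so the snippet has no dependencies.
def unzip(iterable):
    return list(map(list, zip(*iterable)))

pairs = [("a", 1), ("b", 2), ("c", 3)]
print(unzip(pairs))          # [['a', 'b', 'c'], [1, 2, 3]]

print(unzip([]))             # [] -- not a 2-tuple, hence the documented workaround:
left, right = unzip([]) or ([], [])
print(left, right)           # [] []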
bkad/python-stylus
|
stylus/__init__.py
|
https://github.com/bkad/python-stylus/blob/3d79145fecd56e6af9fb38d55886c65ce2cac82e/stylus/__init__.py#L31-L38
|
def use(self, plugin, arguments={}):
"""Add plugin to use during compilation.
plugin: Plugin to include.
arguments: Dictionary of arguments to pass to the import.
"""
self.plugins[plugin] = dict(arguments)
return self.plugins
|
[
"def",
"use",
"(",
"self",
",",
"plugin",
",",
"arguments",
"=",
"{",
"}",
")",
":",
"self",
".",
"plugins",
"[",
"plugin",
"]",
"=",
"dict",
"(",
"arguments",
")",
"return",
"self",
".",
"plugins"
] |
Add plugin to use during compilation.
plugin: Plugin to include.
arguments: Dictionary of arguments to pass to the import.
|
[
"Add",
"plugin",
"to",
"use",
"during",
"compilation",
"."
] |
python
|
train
|
Azure/azure-cli-extensions
|
src/interactive/azext_interactive/azclishell/gather_commands.py
|
https://github.com/Azure/azure-cli-extensions/blob/3d4854205b0f0d882f688cfa12383d14506c2e35/src/interactive/azext_interactive/azclishell/gather_commands.py#L35-L68
|
def add_new_lines(long_phrase, line_min=None, tolerance=TOLERANCE):
""" not everything fits on the screen, based on the size, add newlines """
if line_min is None:
line_min = math.floor(int(_get_window_columns()) / 2 - 15)
if long_phrase is None:
return long_phrase
line_min = int(line_min)
nl_loc = []
skip = False
index = 0
if len(long_phrase) > line_min:
for _ in range(int(math.floor(len(long_phrase) / line_min))):
previous = index
index += line_min
if skip:
index += 1
skip = False
while index < len(long_phrase) and \
not long_phrase[index].isspace() and \
index < tolerance + previous + line_min:
index += 1
if index < len(long_phrase):
if long_phrase[index].isspace():
index += 1
skip = True
nl_loc.append(index)
counter = 0
for loc in nl_loc:
long_phrase = long_phrase[:loc + counter] + '\n' + long_phrase[loc + counter:]
counter += 1
return long_phrase + "\n"
|
[
"def",
"add_new_lines",
"(",
"long_phrase",
",",
"line_min",
"=",
"None",
",",
"tolerance",
"=",
"TOLERANCE",
")",
":",
"if",
"line_min",
"is",
"None",
":",
"line_min",
"=",
"math",
".",
"floor",
"(",
"int",
"(",
"_get_window_columns",
"(",
")",
")",
"/",
"2",
"-",
"15",
")",
"if",
"long_phrase",
"is",
"None",
":",
"return",
"long_phrase",
"line_min",
"=",
"int",
"(",
"line_min",
")",
"nl_loc",
"=",
"[",
"]",
"skip",
"=",
"False",
"index",
"=",
"0",
"if",
"len",
"(",
"long_phrase",
")",
">",
"line_min",
":",
"for",
"_",
"in",
"range",
"(",
"int",
"(",
"math",
".",
"floor",
"(",
"len",
"(",
"long_phrase",
")",
"/",
"line_min",
")",
")",
")",
":",
"previous",
"=",
"index",
"index",
"+=",
"line_min",
"if",
"skip",
":",
"index",
"+=",
"1",
"skip",
"=",
"False",
"while",
"index",
"<",
"len",
"(",
"long_phrase",
")",
"and",
"not",
"long_phrase",
"[",
"index",
"]",
".",
"isspace",
"(",
")",
"and",
"index",
"<",
"tolerance",
"+",
"previous",
"+",
"line_min",
":",
"index",
"+=",
"1",
"if",
"index",
"<",
"len",
"(",
"long_phrase",
")",
":",
"if",
"long_phrase",
"[",
"index",
"]",
".",
"isspace",
"(",
")",
":",
"index",
"+=",
"1",
"skip",
"=",
"True",
"nl_loc",
".",
"append",
"(",
"index",
")",
"counter",
"=",
"0",
"for",
"loc",
"in",
"nl_loc",
":",
"long_phrase",
"=",
"long_phrase",
"[",
":",
"loc",
"+",
"counter",
"]",
"+",
"'\\n'",
"+",
"long_phrase",
"[",
"loc",
"+",
"counter",
":",
"]",
"counter",
"+=",
"1",
"return",
"long_phrase",
"+",
"\"\\n\""
] |
not everything fits on the screen, based on the size, add newlines
|
[
"not",
"everything",
"fits",
"on",
"the",
"screen",
"based",
"on",
"the",
"size",
"add",
"newlines"
] |
python
|
train
|
mrcagney/gtfstk
|
gtfstk/helpers.py
|
https://github.com/mrcagney/gtfstk/blob/c91494e6fefc02523889655a0dc92d1c0eee8d03/gtfstk/helpers.py#L187-L209
|
def get_convert_dist(
dist_units_in: str, dist_units_out: str
) -> Callable[[float], float]:
"""
Return a function of the form
distance in the units ``dist_units_in`` ->
distance in the units ``dist_units_out``
Only supports distance units in :const:`constants.DIST_UNITS`.
"""
di, do = dist_units_in, dist_units_out
DU = cs.DIST_UNITS
if not (di in DU and do in DU):
raise ValueError(f"Distance units must lie in {DU}")
d = {
"ft": {"ft": 1, "m": 0.3048, "mi": 1 / 5280, "km": 0.0003048},
"m": {"ft": 1 / 0.3048, "m": 1, "mi": 1 / 1609.344, "km": 1 / 1000},
"mi": {"ft": 5280, "m": 1609.344, "mi": 1, "km": 1.609344},
"km": {"ft": 1 / 0.0003048, "m": 1000, "mi": 1 / 1.609344, "km": 1},
}
return lambda x: d[di][do] * x
|
[
"def",
"get_convert_dist",
"(",
"dist_units_in",
":",
"str",
",",
"dist_units_out",
":",
"str",
")",
"->",
"Callable",
"[",
"[",
"float",
"]",
",",
"float",
"]",
":",
"di",
",",
"do",
"=",
"dist_units_in",
",",
"dist_units_out",
"DU",
"=",
"cs",
".",
"DIST_UNITS",
"if",
"not",
"(",
"di",
"in",
"DU",
"and",
"do",
"in",
"DU",
")",
":",
"raise",
"ValueError",
"(",
"f\"Distance units must lie in {DU}\"",
")",
"d",
"=",
"{",
"\"ft\"",
":",
"{",
"\"ft\"",
":",
"1",
",",
"\"m\"",
":",
"0.3048",
",",
"\"mi\"",
":",
"1",
"/",
"5280",
",",
"\"km\"",
":",
"0.0003048",
"}",
",",
"\"m\"",
":",
"{",
"\"ft\"",
":",
"1",
"/",
"0.3048",
",",
"\"m\"",
":",
"1",
",",
"\"mi\"",
":",
"1",
"/",
"1609.344",
",",
"\"km\"",
":",
"1",
"/",
"1000",
"}",
",",
"\"mi\"",
":",
"{",
"\"ft\"",
":",
"5280",
",",
"\"m\"",
":",
"1609.344",
",",
"\"mi\"",
":",
"1",
",",
"\"km\"",
":",
"1.609344",
"}",
",",
"\"km\"",
":",
"{",
"\"ft\"",
":",
"1",
"/",
"0.0003048",
",",
"\"m\"",
":",
"1000",
",",
"\"mi\"",
":",
"1",
"/",
"1.609344",
",",
"\"km\"",
":",
"1",
"}",
",",
"}",
"return",
"lambda",
"x",
":",
"d",
"[",
"di",
"]",
"[",
"do",
"]",
"*",
"x"
] |
Return a function of the form
distance in the units ``dist_units_in`` ->
distance in the units ``dist_units_out``
Only supports distance units in :const:`constants.DIST_UNITS`.
|
[
"Return",
"a",
"function",
"of",
"the",
"form"
] |
python
|
train
|
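Editor's note: the factory returns a closure that multiplies by one fixed conversion factor. A reduced self-contained sketch with just m/km/mi (the full table in the record also covers ft):

# Look the factor up once, return a closure that applies it.
def get_convert_dist(dist_units_in, dist_units_out):
    factors = {
        "m":  {"m": 1.0, "km": 1 / 1000, "mi": 1 / 1609.344},
        "km": {"m": 1000.0, "km": 1.0, "mi": 1 / 1.609344},
        "mi": {"m": 1609.344, "km": 1.609344, "mi": 1.0},
    }
    if dist_units_in not in factors or dist_units_out not in factors:
        raise ValueError(f"Distance units must lie in {sorted(factors)}")
    return lambda x: factors[dist_units_in][dist_units_out] * x

mi_to_km = get_convert_dist("mi", "km")
print(mi_to_km(26.2))   # marathon distance, about 42.16 km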
ThreatConnect-Inc/tcex
|
tcex/tcex_playbook.py
|
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_playbook.py#L59-L70
|
def _variable_pattern(self):
"""Regex pattern to match and parse a playbook variable."""
variable_pattern = r'#([A-Za-z]+)' # match literal (#App) at beginning of String
variable_pattern += r':([\d]+)' # app id (:7979)
variable_pattern += r':([A-Za-z0-9_\.\-\[\]]+)' # variable name (:variable_name)
variable_pattern += r'!(StringArray|BinaryArray|KeyValueArray' # variable type (array)
variable_pattern += r'|TCEntityArray|TCEnhancedEntityArray' # variable type (array)
variable_pattern += r'|String|Binary|KeyValue|TCEntity|TCEnhancedEntity' # variable type
variable_pattern += r'|(?:(?!String)(?!Binary)(?!KeyValue)' # non matching for custom
variable_pattern += r'(?!TCEntity)(?!TCEnhancedEntity)' # non matching for custom
variable_pattern += r'[A-Za-z0-9_-]+))' # variable type (custom)
return variable_pattern
|
[
"def",
"_variable_pattern",
"(",
"self",
")",
":",
"variable_pattern",
"=",
"r'#([A-Za-z]+)'",
"# match literal (#App) at beginning of String",
"variable_pattern",
"+=",
"r':([\\d]+)'",
"# app id (:7979)",
"variable_pattern",
"+=",
"r':([A-Za-z0-9_\\.\\-\\[\\]]+)'",
"# variable name (:variable_name)",
"variable_pattern",
"+=",
"r'!(StringArray|BinaryArray|KeyValueArray'",
"# variable type (array)",
"variable_pattern",
"+=",
"r'|TCEntityArray|TCEnhancedEntityArray'",
"# variable type (array)",
"variable_pattern",
"+=",
"r'|String|Binary|KeyValue|TCEntity|TCEnhancedEntity'",
"# variable type",
"variable_pattern",
"+=",
"r'|(?:(?!String)(?!Binary)(?!KeyValue)'",
"# non matching for custom",
"variable_pattern",
"+=",
"r'(?!TCEntity)(?!TCEnhancedEntity)'",
"# non matching for custom",
"variable_pattern",
"+=",
"r'[A-Za-z0-9_-]+))'",
"# variable type (custom)",
"return",
"variable_pattern"
] |
Regex pattern to match and parse a playbook variable.
|
[
"Regex",
"pattern",
"to",
"match",
"and",
"parse",
"a",
"playbook",
"variable",
"."
] |
python
|
train
|
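Editor's note: the concatenated pattern matches playbook references of the form ``#App:<id>:<name>!<Type>``. Rebuilding the same string and running it through ``re`` makes the four capture groups visible; the sample variable string is made up for illustration.

# The assembled pattern from the record above, applied to a sample value.
import re

variable_pattern = (
    r'#([A-Za-z]+)'                                   # literal prefix, e.g. #App
    r':([\d]+)'                                       # app id
    r':([A-Za-z0-9_\.\-\[\]]+)'                       # variable name
    r'!(StringArray|BinaryArray|KeyValueArray'
    r'|TCEntityArray|TCEnhancedEntityArray'
    r'|String|Binary|KeyValue|TCEntity|TCEnhancedEntity'
    r'|(?:(?!String)(?!Binary)(?!KeyValue)'
    r'(?!TCEntity)(?!TCEnhancedEntity)'
    r'[A-Za-z0-9_-]+))'                               # or a custom type
)

match = re.match(variable_pattern, '#App:7979:results.json!StringArray')
print(match.groups())   # ('App', '7979', 'results.json', 'StringArray')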
sorgerlab/indra
|
indra/assemblers/english/assembler.py
|
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/english/assembler.py#L325-L330
|
def _assemble_gef(stmt):
"""Assemble Gef statements into text."""
subj_str = _assemble_agent_str(stmt.gef)
obj_str = _assemble_agent_str(stmt.ras)
stmt_str = subj_str + ' is a GEF for ' + obj_str
return _make_sentence(stmt_str)
|
[
"def",
"_assemble_gef",
"(",
"stmt",
")",
":",
"subj_str",
"=",
"_assemble_agent_str",
"(",
"stmt",
".",
"gef",
")",
"obj_str",
"=",
"_assemble_agent_str",
"(",
"stmt",
".",
"ras",
")",
"stmt_str",
"=",
"subj_str",
"+",
"' is a GEF for '",
"+",
"obj_str",
"return",
"_make_sentence",
"(",
"stmt_str",
")"
] |
Assemble Gef statements into text.
|
[
"Assemble",
"Gef",
"statements",
"into",
"text",
"."
] |
python
|
train
|
brentp/cruzdb
|
cruzdb/sqlsoup.py
|
https://github.com/brentp/cruzdb/blob/9068d46e25952f4a929dde0242beb31fa4c7e89a/cruzdb/sqlsoup.py#L229-L238
|
def execute(self, stmt, **params):
"""Execute a SQL statement.
The statement may be a string SQL string,
an :func:`sqlalchemy.sql.expression.select` construct, or a
:func:`sqlalchemy.sql.expression.text`
construct.
"""
return self.session.execute(sql.text(stmt, bind=self.bind), **params)
|
[
"def",
"execute",
"(",
"self",
",",
"stmt",
",",
"*",
"*",
"params",
")",
":",
"return",
"self",
".",
"session",
".",
"execute",
"(",
"sql",
".",
"text",
"(",
"stmt",
",",
"bind",
"=",
"self",
".",
"bind",
")",
",",
"*",
"*",
"params",
")"
] |
Execute a SQL statement.
The statement may be a string SQL string,
an :func:`sqlalchemy.sql.expression.select` construct, or a
:func:`sqlalchemy.sql.expression.text`
construct.
|
[
"Execute",
"a",
"SQL",
"statement",
"."
] |
python
|
train
|
LonamiWebs/Telethon
|
telethon/extensions/html.py
|
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/extensions/html.py#L117-L131
|
def parse(html):
"""
Parses the given HTML message and returns its stripped representation
plus a list of the MessageEntity's that were found.
:param message: the message with HTML to be parsed.
:return: a tuple consisting of (clean message, [message entities]).
"""
if not html:
return html, []
parser = HTMLToTelegramParser()
parser.feed(_add_surrogate(html))
text = helpers.strip_text(parser.text, parser.entities)
return _del_surrogate(text), parser.entities
|
[
"def",
"parse",
"(",
"html",
")",
":",
"if",
"not",
"html",
":",
"return",
"html",
",",
"[",
"]",
"parser",
"=",
"HTMLToTelegramParser",
"(",
")",
"parser",
".",
"feed",
"(",
"_add_surrogate",
"(",
"html",
")",
")",
"text",
"=",
"helpers",
".",
"strip_text",
"(",
"parser",
".",
"text",
",",
"parser",
".",
"entities",
")",
"return",
"_del_surrogate",
"(",
"text",
")",
",",
"parser",
".",
"entities"
] |
Parses the given HTML message and returns its stripped representation
plus a list of the MessageEntity's that were found.
:param message: the message with HTML to be parsed.
:return: a tuple consisting of (clean message, [message entities]).
|
[
"Parses",
"the",
"given",
"HTML",
"message",
"and",
"returns",
"its",
"stripped",
"representation",
"plus",
"a",
"list",
"of",
"the",
"MessageEntity",
"s",
"that",
"were",
"found",
"."
] |
python
|
train
|
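Editor's note: a hedged usage sketch, assuming Telethon is installed; the module-level ``parse`` shown above strips the markup and reports offsets/lengths as MessageEntity objects. Exact entity reprs may differ between Telethon versions.

from telethon.extensions import html

text, entities = html.parse('<b>bold</b> and <i>italic</i>')
print(text)       # 'bold and italic'
print(entities)   # e.g. [MessageEntityBold(offset=0, length=4),
                  #       MessageEntityItalic(offset=9, length=6)]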
fermiPy/fermipy
|
fermipy/diffuse/gt_split_and_mktime.py
|
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/gt_split_and_mktime.py#L38-L63
|
def make_full_path(basedir, outkey, origname):
"""Make a full file path by combining tokens
Parameters
-----------
basedir : str
The top level output area
outkey : str
The key for the particular instance of the analysis
origname : str
Template for the output file name
Returns
-------
outpath : str
This will be <basedir>:<outkey>:<newname>.fits
Where newname = origname.replace('.fits', '_<outkey>.fits')
"""
return os.path.join(basedir, outkey,
os.path.basename(origname).replace('.fits',
'_%s.fits' % outkey))
|
[
"def",
"make_full_path",
"(",
"basedir",
",",
"outkey",
",",
"origname",
")",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"outkey",
",",
"os",
".",
"path",
".",
"basename",
"(",
"origname",
")",
".",
"replace",
"(",
"'.fits'",
",",
"'_%s.fits'",
"%",
"outkey",
")",
")"
] |
Make a full file path by combining tokens
Parameters
-----------
basedir : str
The top level output area
outkey : str
The key for the particular instance of the analysis
origname : str
Template for the output file name
Returns
-------
outpath : str
This will be <basedir>:<outkey>:<newname>.fits
Where newname = origname.replace('.fits', '_<outkey>.fits')
|
[
"Make",
"a",
"full",
"file",
"path",
"by",
"combining",
"tokens"
] |
python
|
train
|
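Editor's note: the helper splices the key into both the directory and the file name. A stand-alone restatement (with made-up input values) shows the shape of the output path:

import os

def make_full_path(basedir, outkey, origname):
    return os.path.join(basedir, outkey,
                        os.path.basename(origname).replace('.fits',
                                                           '_%s.fits' % outkey))

print(make_full_path('/work/diffuse', 'zmax105', 'counts/ccube.fits'))
# /work/diffuse/zmax105/ccube_zmax105.fits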
yyuu/botornado
|
botornado/sqs/connection.py
|
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/botornado/sqs/connection.py#L98-L122
|
def get_queue_attributes(self, queue, attribute='All', callback=None):
"""
Gets one or all attributes of a Queue
:type queue: A Queue object
:param queue: The SQS queue to be deleted
:type attribute: str
:param attribute: The specific attribute requested. If not supplied,
the default is to return all attributes.
Valid attributes are:
ApproximateNumberOfMessages|
ApproximateNumberOfMessagesNotVisible|
VisibilityTimeout|
CreatedTimestamp|
LastModifiedTimestamp|
Policy
:rtype: :class:`boto.sqs.attributes.Attributes`
:return: An Attributes object containing request value(s).
"""
params = {'AttributeName' : attribute}
return self.get_object('GetQueueAttributes', params,
Attributes, queue.id, callback=callback)
|
[
"def",
"get_queue_attributes",
"(",
"self",
",",
"queue",
",",
"attribute",
"=",
"'All'",
",",
"callback",
"=",
"None",
")",
":",
"params",
"=",
"{",
"'AttributeName'",
":",
"attribute",
"}",
"return",
"self",
".",
"get_object",
"(",
"'GetQueueAttributes'",
",",
"params",
",",
"Attributes",
",",
"queue",
".",
"id",
",",
"callback",
"=",
"callback",
")"
] |
Gets one or all attributes of a Queue
:type queue: A Queue object
:param queue: The SQS queue to be deleted
:type attribute: str
:param attribute: The specific attribute requested. If not supplied,
the default is to return all attributes.
Valid attributes are:
ApproximateNumberOfMessages|
ApproximateNumberOfMessagesNotVisible|
VisibilityTimeout|
CreatedTimestamp|
LastModifiedTimestamp|
Policy
:rtype: :class:`boto.sqs.attributes.Attributes`
:return: An Attributes object containing request value(s).
|
[
"Gets",
"one",
"or",
"all",
"attributes",
"of",
"a",
"Queue",
":",
"type",
"queue",
":",
"A",
"Queue",
"object",
":",
"param",
"queue",
":",
"The",
"SQS",
"queue",
"to",
"be",
"deleted"
] |
python
|
train
|
tango-controls/pytango
|
tango/device_class.py
|
https://github.com/tango-controls/pytango/blob/9cf78c517c9cdc1081ff6d080a9646a740cc1d36/tango/device_class.py#L190-L206
|
def get_property_type(self, prop_name, properties):
"""
get_property_type(self, prop_name, properties) -> CmdArgType
Gets the property type for the given property name using the
information given in properties
Parameters :
- prop_name : (str) property name
- properties : (dict<str,data>) property data
Return : (CmdArgType) the tango type for the given property"""
try:
tg_type = properties[prop_name][0]
except:
tg_type = CmdArgType.DevVoid
return tg_type
|
[
"def",
"get_property_type",
"(",
"self",
",",
"prop_name",
",",
"properties",
")",
":",
"try",
":",
"tg_type",
"=",
"properties",
"[",
"prop_name",
"]",
"[",
"0",
"]",
"except",
":",
"tg_type",
"=",
"CmdArgType",
".",
"DevVoid",
"return",
"tg_type"
] |
get_property_type(self, prop_name, properties) -> CmdArgType
Gets the property type for the given property name using the
information given in properties
Parameters :
- prop_name : (str) property name
- properties : (dict<str,data>) property data
Return : (CmdArgType) the tango type for the given property
|
[
"get_property_type",
"(",
"self",
"prop_name",
"properties",
")",
"-",
">",
"CmdArgType"
] |
python
|
train
|
idank/bashlex
|
bashlex/parser.py
|
https://github.com/idank/bashlex/blob/800cb7e3c634eaa3c81f8a8648fd7fd4e27050ac/bashlex/parser.py#L355-L365
|
def p_elif_clause(p):
'''elif_clause : ELIF compound_list THEN compound_list
| ELIF compound_list THEN compound_list ELSE compound_list
| ELIF compound_list THEN compound_list elif_clause'''
parts = []
for i in range(1, len(p)):
if isinstance(p[i], ast.node):
parts.append(p[i])
else:
parts.append(ast.node(kind='reservedword', word=p[i], pos=p.lexspan(i)))
p[0] = parts
|
[
"def",
"p_elif_clause",
"(",
"p",
")",
":",
"parts",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"p",
")",
")",
":",
"if",
"isinstance",
"(",
"p",
"[",
"i",
"]",
",",
"ast",
".",
"node",
")",
":",
"parts",
".",
"append",
"(",
"p",
"[",
"i",
"]",
")",
"else",
":",
"parts",
".",
"append",
"(",
"ast",
".",
"node",
"(",
"kind",
"=",
"'reservedword'",
",",
"word",
"=",
"p",
"[",
"i",
"]",
",",
"pos",
"=",
"p",
".",
"lexspan",
"(",
"i",
")",
")",
")",
"p",
"[",
"0",
"]",
"=",
"parts"
] |
elif_clause : ELIF compound_list THEN compound_list
| ELIF compound_list THEN compound_list ELSE compound_list
| ELIF compound_list THEN compound_list elif_clause
|
[
"elif_clause",
":",
"ELIF",
"compound_list",
"THEN",
"compound_list",
"|",
"ELIF",
"compound_list",
"THEN",
"compound_list",
"ELSE",
"compound_list",
"|",
"ELIF",
"compound_list",
"THEN",
"compound_list",
"elif_clause"
] |
python
|
train
|
pyviz/holoviews
|
examples/gallery/apps/bokeh/mandelbrot.py
|
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/examples/gallery/apps/bokeh/mandelbrot.py#L17-L31
|
def mandel(x, y, max_iters):
"""
Given the real and imaginary parts of a complex number,
determine if it is a candidate for membership in the Mandelbrot
set given a fixed number of iterations.
"""
i = 0
c = complex(x,y)
z = 0.0j
for i in range(max_iters):
z = z*z + c
if (z.real*z.real + z.imag*z.imag) >= 4:
return i
return 255
|
[
"def",
"mandel",
"(",
"x",
",",
"y",
",",
"max_iters",
")",
":",
"i",
"=",
"0",
"c",
"=",
"complex",
"(",
"x",
",",
"y",
")",
"z",
"=",
"0.0j",
"for",
"i",
"in",
"range",
"(",
"max_iters",
")",
":",
"z",
"=",
"z",
"*",
"z",
"+",
"c",
"if",
"(",
"z",
".",
"real",
"*",
"z",
".",
"real",
"+",
"z",
".",
"imag",
"*",
"z",
".",
"imag",
")",
">=",
"4",
":",
"return",
"i",
"return",
"255"
] |
Given the real and imaginary parts of a complex number,
determine if it is a candidate for membership in the Mandelbrot
set given a fixed number of iterations.
|
[
"Given",
"the",
"real",
"and",
"imaginary",
"parts",
"of",
"a",
"complex",
"number",
"determine",
"if",
"it",
"is",
"a",
"candidate",
"for",
"membership",
"in",
"the",
"Mandelbrot",
"set",
"given",
"a",
"fixed",
"number",
"of",
"iterations",
"."
] |
python
|
train
|
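Two hand-checkable calls of the mandel function above (illustrative values only, assuming the function is in scope): the origin never escapes, so the loop runs to completion and 255 is returned, while a point far outside the set escapes on the first iteration.

print(mandel(0.0, 0.0, 100))   # 255 (z stays at 0, so |z|**2 never reaches 4)
print(mandel(2.0, 2.0, 100))   # 0   (first iteration gives z = 2+2j, |z|**2 == 8 >= 4)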
LettError/MutatorMath
|
Lib/mutatorMath/objects/location.py
|
https://github.com/LettError/MutatorMath/blob/10318fc4e7c9cee9df6130826829baea3054a42b/Lib/mutatorMath/objects/location.py#L606-L644
|
def sortLocations(locations):
""" Sort the locations by ranking:
1. all on-axis points
2. all off-axis points which project onto on-axis points
these would be involved in master to master interpolations
necessary for patching. Projecting off-axis masters have
at least one coordinate in common with an on-axis master.
3. non-projecting off-axis points, 'wild' off axis points
These would be involved in projecting limits and need to be patched.
"""
onAxis = []
onAxisValues = {}
offAxis = []
offAxis_projecting = []
offAxis_wild = []
# first get the on-axis points
for l in locations:
if l.isOrigin():
continue
if l.isOnAxis():
onAxis.append(l)
for axis in l.keys():
if axis not in onAxisValues:
onAxisValues[axis] = []
onAxisValues[axis].append(l[axis])
else:
offAxis.append(l)
for l in offAxis:
ok = False
for axis in l.keys():
if axis not in onAxisValues:
continue
if l[axis] in onAxisValues[axis]:
ok = True
if ok:
offAxis_projecting.append(l)
else:
offAxis_wild.append(l)
return onAxis, offAxis_projecting, offAxis_wild
|
[
"def",
"sortLocations",
"(",
"locations",
")",
":",
"onAxis",
"=",
"[",
"]",
"onAxisValues",
"=",
"{",
"}",
"offAxis",
"=",
"[",
"]",
"offAxis_projecting",
"=",
"[",
"]",
"offAxis_wild",
"=",
"[",
"]",
"# first get the on-axis points",
"for",
"l",
"in",
"locations",
":",
"if",
"l",
".",
"isOrigin",
"(",
")",
":",
"continue",
"if",
"l",
".",
"isOnAxis",
"(",
")",
":",
"onAxis",
".",
"append",
"(",
"l",
")",
"for",
"axis",
"in",
"l",
".",
"keys",
"(",
")",
":",
"if",
"axis",
"not",
"in",
"onAxisValues",
":",
"onAxisValues",
"[",
"axis",
"]",
"=",
"[",
"]",
"onAxisValues",
"[",
"axis",
"]",
".",
"append",
"(",
"l",
"[",
"axis",
"]",
")",
"else",
":",
"offAxis",
".",
"append",
"(",
"l",
")",
"for",
"l",
"in",
"offAxis",
":",
"ok",
"=",
"False",
"for",
"axis",
"in",
"l",
".",
"keys",
"(",
")",
":",
"if",
"axis",
"not",
"in",
"onAxisValues",
":",
"continue",
"if",
"l",
"[",
"axis",
"]",
"in",
"onAxisValues",
"[",
"axis",
"]",
":",
"ok",
"=",
"True",
"if",
"ok",
":",
"offAxis_projecting",
".",
"append",
"(",
"l",
")",
"else",
":",
"offAxis_wild",
".",
"append",
"(",
"l",
")",
"return",
"onAxis",
",",
"offAxis_projecting",
",",
"offAxis_wild"
] |
Sort the locations by ranking:
1. all on-axis points
2. all off-axis points which project onto on-axis points
these would be involved in master to master interpolations
necessary for patching. Projecting off-axis masters have
at least one coordinate in common with an on-axis master.
3. non-projecting off-axis points, 'wild' off axis points
These would be involved in projecting limits and need to be patched.
|
[
"Sort",
"the",
"locations",
"by",
"ranking",
":",
"1",
".",
"all",
"on",
"-",
"axis",
"points",
"2",
".",
"all",
"off",
"-",
"axis",
"points",
"which",
"project",
"onto",
"on",
"-",
"axis",
"points",
"these",
"would",
"be",
"involved",
"in",
"master",
"to",
"master",
"interpolations",
"necessary",
"for",
"patching",
".",
"Projecting",
"off",
"-",
"axis",
"masters",
"have",
"at",
"least",
"one",
"coordinate",
"in",
"common",
"with",
"an",
"on",
"-",
"axis",
"master",
".",
"3",
".",
"non",
"-",
"projecting",
"off",
"-",
"axis",
"points",
"wild",
"off",
"axis",
"points",
"These",
"would",
"be",
"involved",
"in",
"projecting",
"limits",
"and",
"need",
"to",
"be",
"patched",
"."
] |
python
|
train
|
opendatateam/udata
|
udata/i18n.py
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/i18n.py#L203-L209
|
def redirect_to_lang(*args, **kwargs):
'''Redirect non lang-prefixed urls to default language.'''
endpoint = request.endpoint.replace('_redirect', '')
kwargs = multi_to_dict(request.args)
kwargs.update(request.view_args)
kwargs['lang_code'] = default_lang
return redirect(url_for(endpoint, **kwargs))
|
[
"def",
"redirect_to_lang",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"endpoint",
"=",
"request",
".",
"endpoint",
".",
"replace",
"(",
"'_redirect'",
",",
"''",
")",
"kwargs",
"=",
"multi_to_dict",
"(",
"request",
".",
"args",
")",
"kwargs",
".",
"update",
"(",
"request",
".",
"view_args",
")",
"kwargs",
"[",
"'lang_code'",
"]",
"=",
"default_lang",
"return",
"redirect",
"(",
"url_for",
"(",
"endpoint",
",",
"*",
"*",
"kwargs",
")",
")"
] |
Redirect non lang-prefixed urls to default language.
|
[
"Redirect",
"non",
"lang",
"-",
"prefixed",
"urls",
"to",
"default",
"language",
"."
] |
python
|
train
|
inasafe/inasafe
|
safe/datastore/geopackage.py
|
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/datastore/geopackage.py#L103-L117
|
def _vector_layers(self):
"""Return a list of vector layers available.
:return: List of vector layers available in the geopackage.
:rtype: list
.. versionadded:: 4.0
"""
layers = []
vector_datasource = self.vector_driver.Open(
self.uri.absoluteFilePath())
if vector_datasource:
for i in range(vector_datasource.GetLayerCount()):
layers.append(vector_datasource.GetLayer(i).GetName())
return layers
|
[
"def",
"_vector_layers",
"(",
"self",
")",
":",
"layers",
"=",
"[",
"]",
"vector_datasource",
"=",
"self",
".",
"vector_driver",
".",
"Open",
"(",
"self",
".",
"uri",
".",
"absoluteFilePath",
"(",
")",
")",
"if",
"vector_datasource",
":",
"for",
"i",
"in",
"range",
"(",
"vector_datasource",
".",
"GetLayerCount",
"(",
")",
")",
":",
"layers",
".",
"append",
"(",
"vector_datasource",
".",
"GetLayer",
"(",
"i",
")",
".",
"GetName",
"(",
")",
")",
"return",
"layers"
] |
Return a list of vector layers available.
:return: List of vector layers available in the geopackage.
:rtype: list
.. versionadded:: 4.0
|
[
"Return",
"a",
"list",
"of",
"vector",
"layers",
"available",
"."
] |
python
|
train
|
chainside/btcpy
|
btcpy/lib/base58.py
|
https://github.com/chainside/btcpy/blob/8e75c630dacf0f997ed0e0e8739bed428a95d7b1/btcpy/lib/base58.py#L37-L54
|
def b58decode(v: str) -> bytes:
'''Decode a Base58 encoded string'''
origlen = len(v)
v = v.lstrip(alphabet[0])
newlen = len(v)
p, acc = 1, 0
for c in v[::-1]:
acc += p * alphabet.index(c)
p *= 58
result = []
while acc > 0:
acc, mod = divmod(acc, 256)
result.append(mod)
return (bseq(result) + b'\0' * (origlen - newlen))[::-1]
|
[
"def",
"b58decode",
"(",
"v",
":",
"str",
")",
"->",
"bytes",
":",
"origlen",
"=",
"len",
"(",
"v",
")",
"v",
"=",
"v",
".",
"lstrip",
"(",
"alphabet",
"[",
"0",
"]",
")",
"newlen",
"=",
"len",
"(",
"v",
")",
"p",
",",
"acc",
"=",
"1",
",",
"0",
"for",
"c",
"in",
"v",
"[",
":",
":",
"-",
"1",
"]",
":",
"acc",
"+=",
"p",
"*",
"alphabet",
".",
"index",
"(",
"c",
")",
"p",
"*=",
"58",
"result",
"=",
"[",
"]",
"while",
"acc",
">",
"0",
":",
"acc",
",",
"mod",
"=",
"divmod",
"(",
"acc",
",",
"256",
")",
"result",
".",
"append",
"(",
"mod",
")",
"return",
"(",
"bseq",
"(",
"result",
")",
"+",
"b'\\0'",
"*",
"(",
"origlen",
"-",
"newlen",
")",
")",
"[",
":",
":",
"-",
"1",
"]"
] |
Decode a Base58 encoded string
|
[
"Decode",
"a",
"Base58",
"encoded",
"string"
] |
python
|
train
|
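Two small hand-checkable calls of b58decode (a sketch, assuming the module-level alphabet is the usual Bitcoin Base58 alphabet starting with '1' and that bseq builds a bytes object from the digit list):

print(b58decode('11'))   # b'\x00\x00' (leading '1' characters map to zero bytes)
print(b58decode('2'))    # b'\x01'     ('2' has index 1 in the alphabet)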
allenai/allennlp
|
allennlp/semparse/domain_languages/domain_language.py
|
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/domain_languages/domain_language.py#L315-L333
|
def execute_action_sequence(self, action_sequence: List[str], side_arguments: List[Dict] = None):
"""
Executes the program defined by an action sequence directly, without needing the overhead
of translating to a logical form first. For any given program, :func:`execute` and this
function are equivalent, they just take different representations of the program, so you
can use whichever is more efficient.
Also, if you have state or side arguments associated with particular production rules
(e.g., the decoder's attention on an input utterance when a predicate was predicted), you
`must` use this function to execute the logical form, instead of :func:`execute`, so that
we can match the side arguments with the right functions.
"""
# We'll strip off the first action, because it doesn't matter for execution.
first_action = action_sequence[0]
left_side = first_action.split(' -> ')[0]
if left_side != '@start@':
raise ExecutionError('invalid action sequence')
remaining_side_args = side_arguments[1:] if side_arguments else None
return self._execute_sequence(action_sequence[1:], remaining_side_args)[0]
|
[
"def",
"execute_action_sequence",
"(",
"self",
",",
"action_sequence",
":",
"List",
"[",
"str",
"]",
",",
"side_arguments",
":",
"List",
"[",
"Dict",
"]",
"=",
"None",
")",
":",
"# We'll strip off the first action, because it doesn't matter for execution.",
"first_action",
"=",
"action_sequence",
"[",
"0",
"]",
"left_side",
"=",
"first_action",
".",
"split",
"(",
"' -> '",
")",
"[",
"0",
"]",
"if",
"left_side",
"!=",
"'@start@'",
":",
"raise",
"ExecutionError",
"(",
"'invalid action sequence'",
")",
"remaining_side_args",
"=",
"side_arguments",
"[",
"1",
":",
"]",
"if",
"side_arguments",
"else",
"None",
"return",
"self",
".",
"_execute_sequence",
"(",
"action_sequence",
"[",
"1",
":",
"]",
",",
"remaining_side_args",
")",
"[",
"0",
"]"
] |
Executes the program defined by an action sequence directly, without needing the overhead
of translating to a logical form first. For any given program, :func:`execute` and this
function are equivalent, they just take different representations of the program, so you
can use whichever is more efficient.
Also, if you have state or side arguments associated with particular production rules
(e.g., the decoder's attention on an input utterance when a predicate was predicted), you
`must` use this function to execute the logical form, instead of :func:`execute`, so that
we can match the side arguments with the right functions.
|
[
"Executes",
"the",
"program",
"defined",
"by",
"an",
"action",
"sequence",
"directly",
"without",
"needing",
"the",
"overhead",
"of",
"translating",
"to",
"a",
"logical",
"form",
"first",
".",
"For",
"any",
"given",
"program",
":",
"func",
":",
"execute",
"and",
"this",
"function",
"are",
"equivalent",
"they",
"just",
"take",
"different",
"representations",
"of",
"the",
"program",
"so",
"you",
"can",
"use",
"whichever",
"is",
"more",
"efficient",
"."
] |
python
|
train
|
aparo/pyes
|
pyes/managers.py
|
https://github.com/aparo/pyes/blob/712eb6095961755067b2b5baa262008ade6584b3/pyes/managers.py#L610-L619
|
def nodes_info(self, nodes=None):
"""
The cluster :ref:`nodes info <es-guide-reference-api-admin-cluster-state>` API allows to retrieve one or more (or all) of
the cluster nodes information.
"""
parts = ["_cluster", "nodes"]
if nodes:
parts.append(",".join(nodes))
path = make_path(*parts)
return self.conn._send_request('GET', path)
|
[
"def",
"nodes_info",
"(",
"self",
",",
"nodes",
"=",
"None",
")",
":",
"parts",
"=",
"[",
"\"_cluster\"",
",",
"\"nodes\"",
"]",
"if",
"nodes",
":",
"parts",
".",
"append",
"(",
"\",\"",
".",
"join",
"(",
"nodes",
")",
")",
"path",
"=",
"make_path",
"(",
"*",
"parts",
")",
"return",
"self",
".",
"conn",
".",
"_send_request",
"(",
"'GET'",
",",
"path",
")"
] |
The cluster :ref:`nodes info <es-guide-reference-api-admin-cluster-state>` API allows to retrieve one or more (or all) of
the cluster nodes information.
|
[
"The",
"cluster",
":",
"ref",
":",
"nodes",
"info",
"<es",
"-",
"guide",
"-",
"reference",
"-",
"api",
"-",
"admin",
"-",
"cluster",
"-",
"state",
">",
"API",
"allows",
"to",
"retrieve",
"one",
"or",
"more",
"(",
"or",
"all",
")",
"of",
"the",
"cluster",
"nodes",
"information",
"."
] |
python
|
train
|
gem/oq-engine
|
openquake/hazardlib/gsim/cauzzi_2014.py
|
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/cauzzi_2014.py#L132-L136
|
def _get_distance_scaling_term(self, C, mag, rrup):
"""
Returns the distance scaling parameter
"""
return (C["r1"] + C["r2"] * mag) * np.log10(rrup + C["r3"])
|
[
"def",
"_get_distance_scaling_term",
"(",
"self",
",",
"C",
",",
"mag",
",",
"rrup",
")",
":",
"return",
"(",
"C",
"[",
"\"r1\"",
"]",
"+",
"C",
"[",
"\"r2\"",
"]",
"*",
"mag",
")",
"*",
"np",
".",
"log10",
"(",
"rrup",
"+",
"C",
"[",
"\"r3\"",
"]",
")"
] |
Returns the distance scaling parameter
|
[
"Returns",
"the",
"distance",
"scaling",
"parameter"
] |
python
|
train
|
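The term above is simply (r1 + r2 * mag) * log10(rrup + r3); a standalone check with hypothetical coefficients (the actual Cauzzi et al. 2014 coefficient table is not shown here):

import numpy as np

C = {"r1": -3.0, "r2": 0.2, "r3": 10.0}   # hypothetical values, for illustration only
mag = 6.5
rrup = np.array([10.0, 50.0, 100.0])
print((C["r1"] + C["r2"] * mag) * np.log10(rrup + C["r3"]))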
wal-e/wal-e
|
wal_e/operator/backup.py
|
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/operator/backup.py#L248-L291
|
def wal_archive(self, wal_path, concurrency=1):
"""
Uploads a WAL file to S3 or Windows Azure Blob Service
This code is intended to typically be called from Postgres's
archive_command feature.
"""
# Upload the segment expressly indicated. It's special
# relative to other uploads when parallel wal-push is enabled,
# in that it's not desirable to tweak its .ready/.done files
# in archive_status.
xlog_dir = os.path.dirname(wal_path)
segment = WalSegment(wal_path, explicit=True)
uploader = WalUploader(self.layout, self.creds, self.gpg_key_id)
group = WalTransferGroup(uploader)
group.start(segment)
# Upload any additional wal segments up to the specified
# concurrency by scanning the Postgres archive_status
# directory.
started = 1
seg_stream = WalSegment.from_ready_archive_status(xlog_dir)
while started < concurrency:
try:
other_segment = next(seg_stream)
except StopIteration:
break
if other_segment.path != wal_path:
group.start(other_segment)
started += 1
try:
# Wait for uploads to finish.
group.join()
except EnvironmentError as e:
if e.errno == errno.ENOENT:
print(e)
raise UserException(
msg='could not find file for wal-push',
detail=('The operating system reported: {0} {1}'
.format(e.strerror, repr(e.filename))))
raise
|
[
"def",
"wal_archive",
"(",
"self",
",",
"wal_path",
",",
"concurrency",
"=",
"1",
")",
":",
"# Upload the segment expressly indicated. It's special",
"# relative to other uploads when parallel wal-push is enabled,",
"# in that it's not desirable to tweak its .ready/.done files",
"# in archive_status.",
"xlog_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"wal_path",
")",
"segment",
"=",
"WalSegment",
"(",
"wal_path",
",",
"explicit",
"=",
"True",
")",
"uploader",
"=",
"WalUploader",
"(",
"self",
".",
"layout",
",",
"self",
".",
"creds",
",",
"self",
".",
"gpg_key_id",
")",
"group",
"=",
"WalTransferGroup",
"(",
"uploader",
")",
"group",
".",
"start",
"(",
"segment",
")",
"# Upload any additional wal segments up to the specified",
"# concurrency by scanning the Postgres archive_status",
"# directory.",
"started",
"=",
"1",
"seg_stream",
"=",
"WalSegment",
".",
"from_ready_archive_status",
"(",
"xlog_dir",
")",
"while",
"started",
"<",
"concurrency",
":",
"try",
":",
"other_segment",
"=",
"next",
"(",
"seg_stream",
")",
"except",
"StopIteration",
":",
"break",
"if",
"other_segment",
".",
"path",
"!=",
"wal_path",
":",
"group",
".",
"start",
"(",
"other_segment",
")",
"started",
"+=",
"1",
"try",
":",
"# Wait for uploads to finish.",
"group",
".",
"join",
"(",
")",
"except",
"EnvironmentError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"==",
"errno",
".",
"ENOENT",
":",
"print",
"(",
"e",
")",
"raise",
"UserException",
"(",
"msg",
"=",
"'could not find file for wal-push'",
",",
"detail",
"=",
"(",
"'The operating system reported: {0} {1}'",
".",
"format",
"(",
"e",
".",
"strerror",
",",
"repr",
"(",
"e",
".",
"filename",
")",
")",
")",
")",
"raise"
] |
Uploads a WAL file to S3 or Windows Azure Blob Service
This code is intended to typically be called from Postgres's
archive_command feature.
|
[
"Uploads",
"a",
"WAL",
"file",
"to",
"S3",
"or",
"Windows",
"Azure",
"Blob",
"Service"
] |
python
|
train
|
inveniosoftware/invenio-pidrelations
|
invenio_pidrelations/serializers/schemas.py
|
https://github.com/inveniosoftware/invenio-pidrelations/blob/a49f3725cf595b663c5b04814280b231f88bc333/invenio_pidrelations/serializers/schemas.py#L68-L71
|
def dump_previous(self, obj):
"""Dump the parent of a PID."""
if self._is_child(obj) and obj.index(self.context['pid']) > 0:
return self._dump_relative(obj.previous_child(self.context['pid']))
|
[
"def",
"dump_previous",
"(",
"self",
",",
"obj",
")",
":",
"if",
"self",
".",
"_is_child",
"(",
"obj",
")",
"and",
"obj",
".",
"index",
"(",
"self",
".",
"context",
"[",
"'pid'",
"]",
")",
">",
"0",
":",
"return",
"self",
".",
"_dump_relative",
"(",
"obj",
".",
"previous_child",
"(",
"self",
".",
"context",
"[",
"'pid'",
"]",
")",
")"
] |
Dump the parent of a PID.
|
[
"Dump",
"the",
"parent",
"of",
"a",
"PID",
"."
] |
python
|
train
|
audreyr/design
|
design/clouds.py
|
https://github.com/audreyr/design/blob/3bc801dceeadfa4935a1a17f4083fbe09a03cbac/design/clouds.py#L22-L31
|
def draw_circle(ctx, x, y, radius, cairo_color):
"""
Draw a circle.
:param radius: radius in pixels
:param cairo_color: normalized rgb color
"""
ctx.new_path()
ctx.set_source_rgb(cairo_color.red, cairo_color.green, cairo_color.blue)
ctx.arc(x, y, radius, 0, 2 * pi)
ctx.fill()
|
[
"def",
"draw_circle",
"(",
"ctx",
",",
"x",
",",
"y",
",",
"radius",
",",
"cairo_color",
")",
":",
"ctx",
".",
"new_path",
"(",
")",
"ctx",
".",
"set_source_rgb",
"(",
"cairo_color",
".",
"red",
",",
"cairo_color",
".",
"green",
",",
"cairo_color",
".",
"blue",
")",
"ctx",
".",
"arc",
"(",
"x",
",",
"y",
",",
"radius",
",",
"0",
",",
"2",
"*",
"pi",
")",
"ctx",
".",
"fill",
"(",
")"
] |
Draw a circle.
:param radius: radius in pixels
:param cairo_color: normalized rgb color
|
[
"Draw",
"a",
"circle",
".",
":",
"param",
"radius",
":",
"radius",
"in",
"pixels",
":",
"param",
"cairo_color",
":",
"normalized",
"rgb",
"color"
] |
python
|
train
|
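A minimal pycairo sketch exercising draw_circle above; the Color namedtuple is a stand-in for whatever normalized-rgb object the project actually passes (an assumption), and the surface size and output filename are arbitrary.

import cairo
from collections import namedtuple
from math import pi   # draw_circle itself relies on pi being in scope

Color = namedtuple('Color', 'red green blue')   # components normalized to 0..1

surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 200, 200)
ctx = cairo.Context(surface)
draw_circle(ctx, 100, 100, 50, Color(0.2, 0.4, 0.8))
surface.write_to_png('circle.png')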
vertexproject/synapse
|
synapse/lib/link.py
|
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/lib/link.py#L49-L55
|
async def unixconnect(path):
'''
Connect to a PF_UNIX server listening on the given path.
'''
reader, writer = await asyncio.open_unix_connection(path=path)
info = {'path': path, 'unix': True}
return await Link.anit(reader, writer, info=info)
|
[
"async",
"def",
"unixconnect",
"(",
"path",
")",
":",
"reader",
",",
"writer",
"=",
"await",
"asyncio",
".",
"open_unix_connection",
"(",
"path",
"=",
"path",
")",
"info",
"=",
"{",
"'path'",
":",
"path",
",",
"'unix'",
":",
"True",
"}",
"return",
"await",
"Link",
".",
"anit",
"(",
"reader",
",",
"writer",
",",
"info",
"=",
"info",
")"
] |
Connect to a PF_UNIX server listening on the given path.
|
[
"Connect",
"to",
"a",
"PF_UNIX",
"server",
"listening",
"on",
"the",
"given",
"path",
"."
] |
python
|
train
|
StefanKopieczek/ticker
|
ticker/ticker_parsers.py
|
https://github.com/StefanKopieczek/ticker/blob/6dcc1bf8f55bf8612986833097531ecf021b687c/ticker/ticker_parsers.py#L19-L29
|
def parse_float(float_str):
"""Parse a string of the form 305.48b into a Python float.
The terminal letter, if present, indicates e.g. billions."""
factor = __get_factor(float_str)
if factor != 1:
float_str = float_str[:-1]
try:
return float(float_str.replace(',', '')) * factor
except ValueError:
return None
|
[
"def",
"parse_float",
"(",
"float_str",
")",
":",
"factor",
"=",
"__get_factor",
"(",
"float_str",
")",
"if",
"factor",
"!=",
"1",
":",
"float_str",
"=",
"float_str",
"[",
":",
"-",
"1",
"]",
"try",
":",
"return",
"float",
"(",
"float_str",
".",
"replace",
"(",
"','",
",",
"''",
")",
")",
"*",
"factor",
"except",
"ValueError",
":",
"return",
"None"
] |
Parse a string of the form 305.48b into a Python float.
The terminal letter, if present, indicates e.g. billions.
|
[
"Parse",
"a",
"string",
"of",
"the",
"form",
"305",
".",
"48b",
"into",
"a",
"Python",
"float",
".",
"The",
"terminal",
"letter",
"if",
"present",
"indicates",
"e",
".",
"g",
".",
"billions",
"."
] |
python
|
train
|
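Illustrative calls of parse_float, assuming __get_factor (not shown here) maps a trailing 'b' to 1e9 and returns 1 when there is no suffix, as the docstring implies:

print(parse_float('1.5b'))      # 1500000000.0
print(parse_float('1,234.5'))   # 1234.5 (commas are stripped)
print(parse_float('n/a'))       # None   (the ValueError is swallowed)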
minio/minio-py
|
minio/parsers.py
|
https://github.com/minio/minio-py/blob/7107c84183cf5fb4deff68c0a16ab9f1c0b4c37e/minio/parsers.py#L96-L111
|
def get_child_text(self, name, strict=True):
"""Extract text of a child element. If strict, and child element is
not present, raises InvalidXMLError and otherwise returns
None.
"""
if strict:
try:
return self.element.find('s3:{}'.format(name), _S3_NS).text
except _ETREE_EXCEPTIONS as error:
raise InvalidXMLError(
('Invalid XML provided for "{}" - erroring tag <{}>. '
'Message: {}').format(self.root_name, name, error.message)
)
else:
return self.element.findtext('s3:{}'.format(name), None, _S3_NS)
|
[
"def",
"get_child_text",
"(",
"self",
",",
"name",
",",
"strict",
"=",
"True",
")",
":",
"if",
"strict",
":",
"try",
":",
"return",
"self",
".",
"element",
".",
"find",
"(",
"'s3:{}'",
".",
"format",
"(",
"name",
")",
",",
"_S3_NS",
")",
".",
"text",
"except",
"_ETREE_EXCEPTIONS",
"as",
"error",
":",
"raise",
"InvalidXMLError",
"(",
"(",
"'Invalid XML provided for \"{}\" - erroring tag <{}>. '",
"'Message: {}'",
")",
".",
"format",
"(",
"self",
".",
"root_name",
",",
"name",
",",
"error",
".",
"message",
")",
")",
"else",
":",
"return",
"self",
".",
"element",
".",
"findtext",
"(",
"'s3:{}'",
".",
"format",
"(",
"name",
")",
",",
"None",
",",
"_S3_NS",
")"
] |
Extract text of a child element. If strict, and child element is
not present, raises InvalidXMLError and otherwise returns
None.
|
[
"Extract",
"text",
"of",
"a",
"child",
"element",
".",
"If",
"strict",
"and",
"child",
"element",
"is",
"not",
"present",
"raises",
"InvalidXMLError",
"and",
"otherwise",
"returns",
"None",
"."
] |
python
|
train
|
arteria/django-hijack
|
hijack/helpers.py
|
https://github.com/arteria/django-hijack/blob/64a3a1dd0655d9fee9786d62628add132073b946/hijack/helpers.py#L20-L35
|
def no_update_last_login():
"""
Disconnect any signals to update_last_login() for the scope of the context
manager, then restore.
"""
kw = {'receiver': update_last_login}
kw_id = {'receiver': update_last_login, 'dispatch_uid': 'update_last_login'}
was_connected = user_logged_in.disconnect(**kw)
was_connected_id = not was_connected and user_logged_in.disconnect(**kw_id)
yield
# Restore signal if needed
if was_connected:
user_logged_in.connect(**kw)
elif was_connected_id:
user_logged_in.connect(**kw_id)
|
[
"def",
"no_update_last_login",
"(",
")",
":",
"kw",
"=",
"{",
"'receiver'",
":",
"update_last_login",
"}",
"kw_id",
"=",
"{",
"'receiver'",
":",
"update_last_login",
",",
"'dispatch_uid'",
":",
"'update_last_login'",
"}",
"was_connected",
"=",
"user_logged_in",
".",
"disconnect",
"(",
"*",
"*",
"kw",
")",
"was_connected_id",
"=",
"not",
"was_connected",
"and",
"user_logged_in",
".",
"disconnect",
"(",
"*",
"*",
"kw_id",
")",
"yield",
"# Restore signal if needed",
"if",
"was_connected",
":",
"user_logged_in",
".",
"connect",
"(",
"*",
"*",
"kw",
")",
"elif",
"was_connected_id",
":",
"user_logged_in",
".",
"connect",
"(",
"*",
"*",
"kw_id",
")"
] |
Disconnect any signals to update_last_login() for the scope of the context
manager, then restore.
|
[
"Disconnect",
"any",
"signals",
"to",
"update_last_login",
"()",
"for",
"the",
"scope",
"of",
"the",
"context",
"manager",
"then",
"restore",
"."
] |
python
|
train
|
coursera-dl/coursera-dl
|
coursera/network.py
|
https://github.com/coursera-dl/coursera-dl/blob/9b434bcf3c4011bf3181429fe674633ae5fb7d4d/coursera/network.py#L12-L58
|
def get_reply(session, url, post=False, data=None, headers=None, quiet=False):
"""
Download an HTML page using the requests session. Low-level function
that allows for flexible request configuration.
@param session: Requests session.
@type session: requests.Session
@param url: URL pattern with optional keywords to format.
@type url: str
@param post: Flag that indicates whether POST request should be sent.
@type post: bool
@param data: Payload data that is sent with request (in request body).
@type data: object
@param headers: Additional headers to send with request.
@type headers: dict
@param quiet: Flag that tells whether to print error message when status
code != 200.
@type quiet: bool
@return: Requests response.
@rtype: requests.Response
"""
request_headers = {} if headers is None else headers
request = requests.Request('POST' if post else 'GET',
url,
data=data,
headers=request_headers)
prepared_request = session.prepare_request(request)
reply = session.send(prepared_request)
try:
reply.raise_for_status()
except requests.exceptions.HTTPError as e:
if not quiet:
logging.error("Error %s getting page %s", e, url)
logging.error("The server replied: %s", reply.text)
raise
return reply
|
[
"def",
"get_reply",
"(",
"session",
",",
"url",
",",
"post",
"=",
"False",
",",
"data",
"=",
"None",
",",
"headers",
"=",
"None",
",",
"quiet",
"=",
"False",
")",
":",
"request_headers",
"=",
"{",
"}",
"if",
"headers",
"is",
"None",
"else",
"headers",
"request",
"=",
"requests",
".",
"Request",
"(",
"'POST'",
"if",
"post",
"else",
"'GET'",
",",
"url",
",",
"data",
"=",
"data",
",",
"headers",
"=",
"request_headers",
")",
"prepared_request",
"=",
"session",
".",
"prepare_request",
"(",
"request",
")",
"reply",
"=",
"session",
".",
"send",
"(",
"prepared_request",
")",
"try",
":",
"reply",
".",
"raise_for_status",
"(",
")",
"except",
"requests",
".",
"exceptions",
".",
"HTTPError",
"as",
"e",
":",
"if",
"not",
"quiet",
":",
"logging",
".",
"error",
"(",
"\"Error %s getting page %s\"",
",",
"e",
",",
"url",
")",
"logging",
".",
"error",
"(",
"\"The server replied: %s\"",
",",
"reply",
".",
"text",
")",
"raise",
"return",
"reply"
] |
Download an HTML page using the requests session. Low-level function
that allows for flexible request configuration.
@param session: Requests session.
@type session: requests.Session
@param url: URL pattern with optional keywords to format.
@type url: str
@param post: Flag that indicates whether POST request should be sent.
@type post: bool
@param data: Payload data that is sent with request (in request body).
@type data: object
@param headers: Additional headers to send with request.
@type headers: dict
@param quiet: Flag that tells whether to print error message when status
code != 200.
@type quiet: bool
@return: Requests response.
@rtype: requests.Response
|
[
"Download",
"an",
"HTML",
"page",
"using",
"the",
"requests",
"session",
".",
"Low",
"-",
"level",
"function",
"that",
"allows",
"for",
"flexible",
"request",
"configuration",
"."
] |
python
|
train
|
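Minimal usage sketch for get_reply (the URLs are placeholders); failures are logged and then re-raised as requests.exceptions.HTTPError.

import requests

session = requests.Session()
reply = get_reply(session, 'https://httpbin.org/get')
print(reply.status_code)

# POST with a payload and an extra header
reply = get_reply(session, 'https://httpbin.org/post', post=True,
                  data={'q': 'hello'}, headers={'X-Debug': '1'})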
genialis/resolwe
|
resolwe/flow/utils/docs/autoprocess.py
|
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/utils/docs/autoprocess.py#L121-L148
|
def make_field(self, field_name, field_body):
"""Fill content into nodes.
:param string field_name: Field name of the field
:param field_body: Field body of the field
:type field_body: str or instance of docutils.nodes
:return: field instance filled with given name and body
:rtype: nodes.field
"""
name = nodes.field_name()
name += nodes.Text(field_name)
paragraph = nodes.paragraph()
if isinstance(field_body, str):
# This is the case when field_body is just a string:
paragraph += nodes.Text(field_body)
else:
# This is the case when field_body is a complex node:
# useful when constructing nested field lists
paragraph += field_body
body = nodes.field_body()
body += paragraph
field = nodes.field()
field.extend([name, body])
return field
|
[
"def",
"make_field",
"(",
"self",
",",
"field_name",
",",
"field_body",
")",
":",
"name",
"=",
"nodes",
".",
"field_name",
"(",
")",
"name",
"+=",
"nodes",
".",
"Text",
"(",
"field_name",
")",
"paragraph",
"=",
"nodes",
".",
"paragraph",
"(",
")",
"if",
"isinstance",
"(",
"field_body",
",",
"str",
")",
":",
"# This is the case when field_body is just a string:",
"paragraph",
"+=",
"nodes",
".",
"Text",
"(",
"field_body",
")",
"else",
":",
"# This is the case when field_body is a complex node:",
"# useful when constructing nested field lists",
"paragraph",
"+=",
"field_body",
"body",
"=",
"nodes",
".",
"field_body",
"(",
")",
"body",
"+=",
"paragraph",
"field",
"=",
"nodes",
".",
"field",
"(",
")",
"field",
".",
"extend",
"(",
"[",
"name",
",",
"body",
"]",
")",
"return",
"field"
] |
Fill content into nodes.
:param string field_name: Field name of the field
:param field_body: Field body of the field
:type field_body: str or instance of docutils.nodes
:return: field instance filled with given name and body
:rtype: nodes.field
|
[
"Fill",
"content",
"into",
"nodes",
"."
] |
python
|
train
|
xoolive/traffic
|
traffic/data/adsb/opensky_impala.py
|
https://github.com/xoolive/traffic/blob/d1a8878098f16759f6b6e0e8d8b8f32e34a680a8/traffic/data/adsb/opensky_impala.py#L263-L399
|
def history(
self,
start: timelike,
stop: Optional[timelike] = None,
*args, # more reasonable to be explicit about arguments
date_delta: timedelta = timedelta(hours=1),
callsign: Union[None, str, Iterable[str]] = None,
icao24: Union[None, str, Iterable[str]] = None,
serials: Union[None, str, Iterable[str]] = None,
bounds: Union[
BaseGeometry, Tuple[float, float, float, float], None
] = None,
cached: bool = True,
count: bool = False,
other_tables: str = "",
other_params: str = "",
progressbar: Callable[[Iterable], Iterable] = iter,
) -> Optional[Union[Traffic, Flight]]:
"""Get Traffic from the OpenSky Impala shell.
The method builds appropriate SQL requests, caches results and formats
data into a proper pandas DataFrame. Requests are split by hour (by
default) in case the connection fails.
Args:
start: a string, epoch or datetime
stop (optional): a string, epoch or datetime, by default, one day
after start
date_delta (optional): how to split the requests (default: one hour)
callsign (optional): a string or a list of strings (default: empty)
icao24 (optional): a string or a list of strings identifying the
transponder code of the aircraft (default: empty)
serials (optional): a string or a list of strings identifying the
sensors receiving the data. (default: empty)
bounds (optional): a shape (requires the bounds attribute) or a
tuple of floats (west, south, east, north) to put a geographical
limit on the request. (default: empty)
cached (boolean): whether to look first whether the request has been
cached (default: True)
count (boolean): add a column stating how many sensors received each
line (default: False)
Returns:
a Traffic structure wrapping the dataframe
"""
return_flight = False
start = to_datetime(start)
if stop is not None:
stop = to_datetime(stop)
else:
stop = start + timedelta(days=1)
if progressbar == iter and stop - start > timedelta(hours=1):
progressbar = tqdm
if isinstance(serials, Iterable):
other_tables += ", state_vectors_data4.serials s "
other_params += "and s.ITEM in {} ".format(tuple(serials))
if isinstance(icao24, str):
other_params += "and icao24='{}' ".format(icao24)
elif isinstance(icao24, Iterable):
icao24 = ",".join("'{}'".format(c) for c in icao24)
other_params += "and icao24 in ({}) ".format(icao24)
if isinstance(callsign, str):
other_params += "and callsign='{:<8s}' ".format(callsign)
return_flight = True
elif isinstance(callsign, Iterable):
callsign = ",".join("'{:<8s}'".format(c) for c in callsign)
other_params += "and callsign in ({}) ".format(callsign)
if bounds is not None:
try:
# thinking of shapely bounds attribute (in this order)
# I just don't want to add the shapely dependency here
west, south, east, north = bounds.bounds # type: ignore
except AttributeError:
west, south, east, north = bounds
other_params += "and lon>={} and lon<={} ".format(west, east)
other_params += "and lat>={} and lat<={} ".format(south, north)
cumul = []
sequence = list(split_times(start, stop, date_delta))
columns = ", ".join(self._impala_columns)
parse_columns = ", ".join(self._impala_columns)
if count is True:
other_params += "group by " + columns
columns = "count(*) as count, " + columns
parse_columns = "count, " + parse_columns
other_tables += ", state_vectors_data4.serials s"
for bt, at, bh, ah in progressbar(sequence):
logging.info(
f"Sending request between time {bt} and {at} "
f"and hour {bh} and {ah}"
)
request = self.basic_request.format(
columns=columns,
before_time=bt.timestamp(),
after_time=at.timestamp(),
before_hour=bh.timestamp(),
after_hour=ah.timestamp(),
other_tables=other_tables,
other_params=other_params,
)
df = self._impala(request, columns=parse_columns, cached=cached)
if df is None:
continue
df = self._format_history(df)
df = self._format_dataframe(df)
cumul.append(df)
if len(cumul) == 0:
return None
df = pd.concat(cumul, sort=True).sort_values("timestamp")
if count is True:
df = df.assign(count=lambda df: df["count"].astype(int))
if return_flight:
return Flight(df)
return Traffic(df)
|
[
"def",
"history",
"(",
"self",
",",
"start",
":",
"timelike",
",",
"stop",
":",
"Optional",
"[",
"timelike",
"]",
"=",
"None",
",",
"*",
"args",
",",
"# more reasonable to be explicit about arguments",
"date_delta",
":",
"timedelta",
"=",
"timedelta",
"(",
"hours",
"=",
"1",
")",
",",
"callsign",
":",
"Union",
"[",
"None",
",",
"str",
",",
"Iterable",
"[",
"str",
"]",
"]",
"=",
"None",
",",
"icao24",
":",
"Union",
"[",
"None",
",",
"str",
",",
"Iterable",
"[",
"str",
"]",
"]",
"=",
"None",
",",
"serials",
":",
"Union",
"[",
"None",
",",
"str",
",",
"Iterable",
"[",
"str",
"]",
"]",
"=",
"None",
",",
"bounds",
":",
"Union",
"[",
"BaseGeometry",
",",
"Tuple",
"[",
"float",
",",
"float",
",",
"float",
",",
"float",
"]",
",",
"None",
"]",
"=",
"None",
",",
"cached",
":",
"bool",
"=",
"True",
",",
"count",
":",
"bool",
"=",
"False",
",",
"other_tables",
":",
"str",
"=",
"\"\"",
",",
"other_params",
":",
"str",
"=",
"\"\"",
",",
"progressbar",
":",
"Callable",
"[",
"[",
"Iterable",
"]",
",",
"Iterable",
"]",
"=",
"iter",
",",
")",
"->",
"Optional",
"[",
"Union",
"[",
"Traffic",
",",
"Flight",
"]",
"]",
":",
"return_flight",
"=",
"False",
"start",
"=",
"to_datetime",
"(",
"start",
")",
"if",
"stop",
"is",
"not",
"None",
":",
"stop",
"=",
"to_datetime",
"(",
"stop",
")",
"else",
":",
"stop",
"=",
"start",
"+",
"timedelta",
"(",
"days",
"=",
"1",
")",
"if",
"progressbar",
"==",
"iter",
"and",
"stop",
"-",
"start",
">",
"timedelta",
"(",
"hours",
"=",
"1",
")",
":",
"progressbar",
"=",
"tqdm",
"if",
"isinstance",
"(",
"serials",
",",
"Iterable",
")",
":",
"other_tables",
"+=",
"\", state_vectors_data4.serials s \"",
"other_params",
"+=",
"\"and s.ITEM in {} \"",
".",
"format",
"(",
"tuple",
"(",
"serials",
")",
")",
"if",
"isinstance",
"(",
"icao24",
",",
"str",
")",
":",
"other_params",
"+=",
"\"and icao24='{}' \"",
".",
"format",
"(",
"icao24",
")",
"elif",
"isinstance",
"(",
"icao24",
",",
"Iterable",
")",
":",
"icao24",
"=",
"\",\"",
".",
"join",
"(",
"\"'{}'\"",
".",
"format",
"(",
"c",
")",
"for",
"c",
"in",
"icao24",
")",
"other_params",
"+=",
"\"and icao24 in ({}) \"",
".",
"format",
"(",
"icao24",
")",
"if",
"isinstance",
"(",
"callsign",
",",
"str",
")",
":",
"other_params",
"+=",
"\"and callsign='{:<8s}' \"",
".",
"format",
"(",
"callsign",
")",
"return_flight",
"=",
"True",
"elif",
"isinstance",
"(",
"callsign",
",",
"Iterable",
")",
":",
"callsign",
"=",
"\",\"",
".",
"join",
"(",
"\"'{:<8s}'\"",
".",
"format",
"(",
"c",
")",
"for",
"c",
"in",
"callsign",
")",
"other_params",
"+=",
"\"and callsign in ({}) \"",
".",
"format",
"(",
"callsign",
")",
"if",
"bounds",
"is",
"not",
"None",
":",
"try",
":",
"# thinking of shapely bounds attribute (in this order)",
"# I just don't want to add the shapely dependency here",
"west",
",",
"south",
",",
"east",
",",
"north",
"=",
"bounds",
".",
"bounds",
"# type: ignore",
"except",
"AttributeError",
":",
"west",
",",
"south",
",",
"east",
",",
"north",
"=",
"bounds",
"other_params",
"+=",
"\"and lon>={} and lon<={} \"",
".",
"format",
"(",
"west",
",",
"east",
")",
"other_params",
"+=",
"\"and lat>={} and lat<={} \"",
".",
"format",
"(",
"south",
",",
"north",
")",
"cumul",
"=",
"[",
"]",
"sequence",
"=",
"list",
"(",
"split_times",
"(",
"start",
",",
"stop",
",",
"date_delta",
")",
")",
"columns",
"=",
"\", \"",
".",
"join",
"(",
"self",
".",
"_impala_columns",
")",
"parse_columns",
"=",
"\", \"",
".",
"join",
"(",
"self",
".",
"_impala_columns",
")",
"if",
"count",
"is",
"True",
":",
"other_params",
"+=",
"\"group by \"",
"+",
"columns",
"columns",
"=",
"\"count(*) as count, \"",
"+",
"columns",
"parse_columns",
"=",
"\"count, \"",
"+",
"parse_columns",
"other_tables",
"+=",
"\", state_vectors_data4.serials s\"",
"for",
"bt",
",",
"at",
",",
"bh",
",",
"ah",
"in",
"progressbar",
"(",
"sequence",
")",
":",
"logging",
".",
"info",
"(",
"f\"Sending request between time {bt} and {at} \"",
"f\"and hour {bh} and {ah}\"",
")",
"request",
"=",
"self",
".",
"basic_request",
".",
"format",
"(",
"columns",
"=",
"columns",
",",
"before_time",
"=",
"bt",
".",
"timestamp",
"(",
")",
",",
"after_time",
"=",
"at",
".",
"timestamp",
"(",
")",
",",
"before_hour",
"=",
"bh",
".",
"timestamp",
"(",
")",
",",
"after_hour",
"=",
"ah",
".",
"timestamp",
"(",
")",
",",
"other_tables",
"=",
"other_tables",
",",
"other_params",
"=",
"other_params",
",",
")",
"df",
"=",
"self",
".",
"_impala",
"(",
"request",
",",
"columns",
"=",
"parse_columns",
",",
"cached",
"=",
"cached",
")",
"if",
"df",
"is",
"None",
":",
"continue",
"df",
"=",
"self",
".",
"_format_history",
"(",
"df",
")",
"df",
"=",
"self",
".",
"_format_dataframe",
"(",
"df",
")",
"cumul",
".",
"append",
"(",
"df",
")",
"if",
"len",
"(",
"cumul",
")",
"==",
"0",
":",
"return",
"None",
"df",
"=",
"pd",
".",
"concat",
"(",
"cumul",
",",
"sort",
"=",
"True",
")",
".",
"sort_values",
"(",
"\"timestamp\"",
")",
"if",
"count",
"is",
"True",
":",
"df",
"=",
"df",
".",
"assign",
"(",
"count",
"=",
"lambda",
"df",
":",
"df",
"[",
"\"count\"",
"]",
".",
"astype",
"(",
"int",
")",
")",
"if",
"return_flight",
":",
"return",
"Flight",
"(",
"df",
")",
"return",
"Traffic",
"(",
"df",
")"
] |
Get Traffic from the OpenSky Impala shell.
The method builds appropriate SQL requests, caches results and formats
data into a proper pandas DataFrame. Requests are split by hour (by
default) in case the connection fails.
Args:
start: a string, epoch or datetime
stop (optional): a string, epoch or datetime, by default, one day
after start
date_delta (optional): how to split the requests (default: one hour)
callsign (optional): a string or a list of strings (default: empty)
icao24 (optional): a string or a list of strings identifying the
transponder code of the aircraft (default: empty)
serials (optional): a string or a list of strings identifying the
sensors receiving the data. (default: empty)
bounds (optional): a shape (requires the bounds attribute) or a
tuple of floats (west, south, east, north) to put a geographical
limit on the request. (default: empty)
cached (boolean): whether to look first whether the request has been
cached (default: True)
count (boolean): add a column stating how many sensors received each
line (default: False)
Returns:
a Traffic structure wrapping the dataframe
|
[
"Get",
"Traffic",
"from",
"the",
"OpenSky",
"Impala",
"shell",
"."
] |
python
|
train
|
adaptive-learning/proso-apps
|
proso_models/models.py
|
https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_models/models.py#L651-L694
|
def get_leaves(self, item_ids=None, language=None, forbidden_item_ids=None):
"""
Get mapping of items to their reachable leaves. Leaves having
inactive relations to other items are omitted.
Args:
item_ids (list): items which are taken as roots for the reachability
language (str): if specified, filter out items which are not
available in the given language
Returns:
dict: item id -> list of items (reachable leaves)
"""
forbidden_item_ids = set() if forbidden_item_ids is None else set(forbidden_item_ids)
children = self.get_children_graph(item_ids, language=language, forbidden_item_ids=forbidden_item_ids)
counts = self.get_children_counts(active=None)
if item_ids is None:
# not leaves
item_ids = set(children.keys())
def _get_leaves(item_id):
leaves = set()
def __search(item_ids):
result = set(flatten([children.get(item_id, []) for item_id in item_ids]))
new_leaves = {item_id for item_id in result if item_id not in children.keys()}
leaves.update(new_leaves)
return result - new_leaves
fixed_point(
is_zero=lambda to_visit: len(to_visit) == 0,
minus=lambda to_visit, visited: to_visit - visited,
plus=lambda visited_x, visited_y: visited_x | visited_y,
f=__search,
x={item_id}
)
leaves = {leaf for leaf in leaves if counts[leaf] == 0}
if len(leaves) > 0:
return leaves
if counts[item_id] == 0 and item_id not in forbidden_item_ids:
return {item_id}
return set()
return {item_id: _get_leaves(item_id) for item_id in item_ids}
|
[
"def",
"get_leaves",
"(",
"self",
",",
"item_ids",
"=",
"None",
",",
"language",
"=",
"None",
",",
"forbidden_item_ids",
"=",
"None",
")",
":",
"forbidden_item_ids",
"=",
"set",
"(",
")",
"if",
"forbidden_item_ids",
"is",
"None",
"else",
"set",
"(",
"forbidden_item_ids",
")",
"children",
"=",
"self",
".",
"get_children_graph",
"(",
"item_ids",
",",
"language",
"=",
"language",
",",
"forbidden_item_ids",
"=",
"forbidden_item_ids",
")",
"counts",
"=",
"self",
".",
"get_children_counts",
"(",
"active",
"=",
"None",
")",
"if",
"item_ids",
"is",
"None",
":",
"# not leaves",
"item_ids",
"=",
"set",
"(",
"children",
".",
"keys",
"(",
")",
")",
"def",
"_get_leaves",
"(",
"item_id",
")",
":",
"leaves",
"=",
"set",
"(",
")",
"def",
"__search",
"(",
"item_ids",
")",
":",
"result",
"=",
"set",
"(",
"flatten",
"(",
"[",
"children",
".",
"get",
"(",
"item_id",
",",
"[",
"]",
")",
"for",
"item_id",
"in",
"item_ids",
"]",
")",
")",
"new_leaves",
"=",
"{",
"item_id",
"for",
"item_id",
"in",
"result",
"if",
"item_id",
"not",
"in",
"children",
".",
"keys",
"(",
")",
"}",
"leaves",
".",
"update",
"(",
"new_leaves",
")",
"return",
"result",
"-",
"new_leaves",
"fixed_point",
"(",
"is_zero",
"=",
"lambda",
"to_visit",
":",
"len",
"(",
"to_visit",
")",
"==",
"0",
",",
"minus",
"=",
"lambda",
"to_visit",
",",
"visited",
":",
"to_visit",
"-",
"visited",
",",
"plus",
"=",
"lambda",
"visited_x",
",",
"visited_y",
":",
"visited_x",
"|",
"visited_y",
",",
"f",
"=",
"__search",
",",
"x",
"=",
"{",
"item_id",
"}",
")",
"leaves",
"=",
"{",
"leaf",
"for",
"leaf",
"in",
"leaves",
"if",
"counts",
"[",
"leaf",
"]",
"==",
"0",
"}",
"if",
"len",
"(",
"leaves",
")",
">",
"0",
":",
"return",
"leaves",
"if",
"counts",
"[",
"item_id",
"]",
"==",
"0",
"and",
"item_id",
"not",
"in",
"forbidden_item_ids",
":",
"return",
"{",
"item_id",
"}",
"return",
"set",
"(",
")",
"return",
"{",
"item_id",
":",
"_get_leaves",
"(",
"item_id",
")",
"for",
"item_id",
"in",
"item_ids",
"}"
] |
Get mapping of items to their reachable leaves. Leaves having
inactive relations to other items are omitted.
Args:
item_ids (list): items which are taken as roots for the reachability
language (str): if specified, filter out items which are not
available in the given language
Returns:
dict: item id -> list of items (reachable leaves)
|
[
"Get",
"mapping",
"of",
"items",
"to",
"their",
"reachable",
"leaves",
".",
"Leaves",
"having",
"inactive",
"relations",
"to",
"other",
"items",
"are",
"omitted",
"."
] |
python
|
train
|
wmayner/pyphi
|
pyphi/models/fmt.py
|
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/models/fmt.py#L79-L102
|
def indent(lines, amount=2, char=' '):
r"""Indent a string.
Prepends whitespace to every line in the passed string. (Lines are
separated by newline characters.)
Args:
lines (str): The string to indent.
Keyword Args:
amount (int): The number of columns to indent by.
char (str): The character to use as the indentation.
Returns:
str: The indented string.
Example:
>>> print(indent('line1\nline2', char='*'))
**line1
**line2
"""
lines = str(lines)
padding = amount * char
return padding + ('\n' + padding).join(lines.split('\n'))
|
[
"def",
"indent",
"(",
"lines",
",",
"amount",
"=",
"2",
",",
"char",
"=",
"' '",
")",
":",
"lines",
"=",
"str",
"(",
"lines",
")",
"padding",
"=",
"amount",
"*",
"char",
"return",
"padding",
"+",
"(",
"'\\n'",
"+",
"padding",
")",
".",
"join",
"(",
"lines",
".",
"split",
"(",
"'\\n'",
")",
")"
] |
r"""Indent a string.
Prepends whitespace to every line in the passed string. (Lines are
separated by newline characters.)
Args:
lines (str): The string to indent.
Keyword Args:
amount (int): The number of columns to indent by.
char (str): The character to use as the indentation.
Returns:
str: The indented string.
Example:
>>> print(indent('line1\nline2', char='*'))
**line1
**line2
|
[
"r",
"Indent",
"a",
"string",
"."
] |
python
|
train
|
decryptus/sonicprobe
|
sonicprobe/helpers.py
|
https://github.com/decryptus/sonicprobe/blob/72f73f3a40d2982d79ad68686e36aa31d94b76f8/sonicprobe/helpers.py#L357-L423
|
def linesubst(line, variables):
"""
In a string, substitute '{{varname}}' occurrences with the value of
variables['varname'], '\\' being an escaping char...
If at first you don't understand this function, draw its finite state
machine and everything will become crystal clear :)
"""
# trivial no substitution early detection:
if '{{' not in line and '\\' not in line:
return line
st = NORM
out = ""
curvar = ""
for c in line:
if st is NORM:
if c == '{':
st = ONE
elif c == '\\':
st = LIT
else:
out += c
elif st is LIT:
out += c
st = NORM
elif st is ONE:
if c == '{':
st = TWO
elif c == '\\':
out += '{'
st = LIT
else:
out += '{' + c
st = NORM
elif st is TWO:
if c == '\\':
st = TLIT
elif c == '}':
st = TERM
else:
curvar += c
elif st is TLIT:
curvar += c
st = TWO
elif st is TERM:
if c == '}':
if curvar not in variables:
LOG.warning("Unknown variable %r detected, will just be replaced by an empty string", curvar)
else:
LOG.debug("Substitution of {{%s}} by %r", curvar, variables[curvar])
value = variables[curvar]
if isinstance(value, (float, int, long)):
value = str(value)
out += value
curvar = ''
st = NORM
elif c == '\\':
curvar += '}'
st = TLIT
else:
curvar += '}' + c
st = TWO
if st is not NORM:
LOG.warning("st is not NORM at end of line: " + line)
LOG.warning("returned substitution: " + out)
return out
|
[
"def",
"linesubst",
"(",
"line",
",",
"variables",
")",
":",
"# trivial no substitution early detection:",
"if",
"'{{'",
"not",
"in",
"line",
"and",
"'\\\\'",
"not",
"in",
"line",
":",
"return",
"line",
"st",
"=",
"NORM",
"out",
"=",
"\"\"",
"curvar",
"=",
"\"\"",
"for",
"c",
"in",
"line",
":",
"if",
"st",
"is",
"NORM",
":",
"if",
"c",
"==",
"'{'",
":",
"st",
"=",
"ONE",
"elif",
"c",
"==",
"'\\\\'",
":",
"st",
"=",
"LIT",
"else",
":",
"out",
"+=",
"c",
"elif",
"st",
"is",
"LIT",
":",
"out",
"+=",
"c",
"st",
"=",
"NORM",
"elif",
"st",
"is",
"ONE",
":",
"if",
"c",
"==",
"'{'",
":",
"st",
"=",
"TWO",
"elif",
"c",
"==",
"'\\\\'",
":",
"out",
"+=",
"'{'",
"st",
"=",
"LIT",
"else",
":",
"out",
"+=",
"'{'",
"+",
"c",
"st",
"=",
"NORM",
"elif",
"st",
"is",
"TWO",
":",
"if",
"c",
"==",
"'\\\\'",
":",
"st",
"=",
"TLIT",
"elif",
"c",
"==",
"'}'",
":",
"st",
"=",
"TERM",
"else",
":",
"curvar",
"+=",
"c",
"elif",
"st",
"is",
"TLIT",
":",
"curvar",
"+=",
"c",
"st",
"=",
"TWO",
"elif",
"st",
"is",
"TERM",
":",
"if",
"c",
"==",
"'}'",
":",
"if",
"curvar",
"not",
"in",
"variables",
":",
"LOG",
".",
"warning",
"(",
"\"Unknown variable %r detected, will just be replaced by an empty string\"",
",",
"curvar",
")",
"else",
":",
"LOG",
".",
"debug",
"(",
"\"Substitution of {{%s}} by %r\"",
",",
"curvar",
",",
"variables",
"[",
"curvar",
"]",
")",
"value",
"=",
"variables",
"[",
"curvar",
"]",
"if",
"isinstance",
"(",
"value",
",",
"(",
"float",
",",
"int",
",",
"long",
")",
")",
":",
"value",
"=",
"str",
"(",
"value",
")",
"out",
"+=",
"value",
"curvar",
"=",
"''",
"st",
"=",
"NORM",
"elif",
"c",
"==",
"'\\\\'",
":",
"curvar",
"+=",
"'}'",
"st",
"=",
"TLIT",
"else",
":",
"curvar",
"+=",
"'}'",
"+",
"c",
"st",
"=",
"TWO",
"if",
"st",
"is",
"not",
"NORM",
":",
"LOG",
".",
"warning",
"(",
"\"st is not NORM at end of line: \"",
"+",
"line",
")",
"LOG",
".",
"warning",
"(",
"\"returned substitution: \"",
"+",
"out",
")",
"return",
"out"
] |
In a string, substitute '{{varname}}' occurrences with the value of
variables['varname'], '\\' being an escaping char...
If at first you don't understand this function, draw its finite state
machine and everything will become crystal clear :)
|
[
"In",
"a",
"string",
"substitute",
"{{",
"varname",
"}}",
"occurrences",
"with",
"the",
"value",
"of",
"variables",
"[",
"varname",
"]",
"\\\\",
"being",
"an",
"escaping",
"char",
"...",
"If",
"at",
"first",
"you",
"don",
"t",
"understand",
"this",
"function",
"draw",
"its",
"finite",
"state",
"machine",
"and",
"everything",
"will",
"become",
"crystal",
"clear",
":",
")"
] |
python
|
train
|
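Hand-checkable calls of linesubst (assuming the module-level state constants NORM/LIT/ONE/TWO/TLIT/TERM and LOG are set up as in the source); an unknown variable is replaced by an empty string and only logged:

print(linesubst('Hello {{name}}, id={{id}}', {'name': 'World', 'id': 42}))
# Hello World, id=42
print(linesubst('missing: {{nope}}!', {}))
# missing: !   (a warning is logged for the unknown variable 'nope')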
mitsei/dlkit
|
dlkit/json_/assessment/mixins.py
|
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/mixins.py#L222-L232
|
def _update_from_database(self):
"""Updates map to latest state in database.
Should be called prior to major object events to assure that an
assessment being taken on multiple devices is reasonably synchronized.
"""
collection = JSONClientValidated('assessment',
collection='AssessmentSection',
runtime=self._runtime)
self._my_map = collection.find_one({'_id': self._my_map['_id']})
|
[
"def",
"_update_from_database",
"(",
"self",
")",
":",
"collection",
"=",
"JSONClientValidated",
"(",
"'assessment'",
",",
"collection",
"=",
"'AssessmentSection'",
",",
"runtime",
"=",
"self",
".",
"_runtime",
")",
"self",
".",
"_my_map",
"=",
"collection",
".",
"find_one",
"(",
"{",
"'_id'",
":",
"self",
".",
"_my_map",
"[",
"'_id'",
"]",
"}",
")"
] |
Updates map to latest state in database.
Should be called prior to major object events to assure that an
assessment being taken on multiple devices is reasonably synchronized.
|
[
"Updates",
"map",
"to",
"latest",
"state",
"in",
"database",
"."
] |
python
|
train
|
boriel/zxbasic
|
api/check.py
|
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/api/check.py#L414-L463
|
def common_type(a, b):
""" Returns a type which is common for both a and b types.
Returns None if no common types allowed.
"""
from symbols.type_ import SymbolBASICTYPE as BASICTYPE
from symbols.type_ import Type as TYPE
from symbols.type_ import SymbolTYPE
if a is None or b is None:
return None
if not isinstance(a, SymbolTYPE):
a = a.type_
if not isinstance(b, SymbolTYPE):
b = b.type_
if a == b: # Both types are the same?
return a # Returns it
if a == TYPE.unknown and b == TYPE.unknown:
return BASICTYPE(global_.DEFAULT_TYPE)
if a == TYPE.unknown:
return b
if b == TYPE.unknown:
return a
# TODO: This will removed / expanded in the future
assert a.is_basic
assert b.is_basic
types = (a, b)
if TYPE.float_ in types:
return TYPE.float_
if TYPE.fixed in types:
return TYPE.fixed
if TYPE.string in types: # TODO: Check this ??
return TYPE.unknown
result = a if a.size > b.size else b
if not TYPE.is_unsigned(a) or not TYPE.is_unsigned(b):
result = TYPE.to_signed(result)
return result
|
[
"def",
"common_type",
"(",
"a",
",",
"b",
")",
":",
"from",
"symbols",
".",
"type_",
"import",
"SymbolBASICTYPE",
"as",
"BASICTYPE",
"from",
"symbols",
".",
"type_",
"import",
"Type",
"as",
"TYPE",
"from",
"symbols",
".",
"type_",
"import",
"SymbolTYPE",
"if",
"a",
"is",
"None",
"or",
"b",
"is",
"None",
":",
"return",
"None",
"if",
"not",
"isinstance",
"(",
"a",
",",
"SymbolTYPE",
")",
":",
"a",
"=",
"a",
".",
"type_",
"if",
"not",
"isinstance",
"(",
"b",
",",
"SymbolTYPE",
")",
":",
"b",
"=",
"b",
".",
"type_",
"if",
"a",
"==",
"b",
":",
"# Both types are the same?",
"return",
"a",
"# Returns it",
"if",
"a",
"==",
"TYPE",
".",
"unknown",
"and",
"b",
"==",
"TYPE",
".",
"unknown",
":",
"return",
"BASICTYPE",
"(",
"global_",
".",
"DEFAULT_TYPE",
")",
"if",
"a",
"==",
"TYPE",
".",
"unknown",
":",
"return",
"b",
"if",
"b",
"==",
"TYPE",
".",
"unknown",
":",
"return",
"a",
"# TODO: This will removed / expanded in the future",
"assert",
"a",
".",
"is_basic",
"assert",
"b",
".",
"is_basic",
"types",
"=",
"(",
"a",
",",
"b",
")",
"if",
"TYPE",
".",
"float_",
"in",
"types",
":",
"return",
"TYPE",
".",
"float_",
"if",
"TYPE",
".",
"fixed",
"in",
"types",
":",
"return",
"TYPE",
".",
"fixed",
"if",
"TYPE",
".",
"string",
"in",
"types",
":",
"# TODO: Check this ??",
"return",
"TYPE",
".",
"unknown",
"result",
"=",
"a",
"if",
"a",
".",
"size",
">",
"b",
".",
"size",
"else",
"b",
"if",
"not",
"TYPE",
".",
"is_unsigned",
"(",
"a",
")",
"or",
"not",
"TYPE",
".",
"is_unsigned",
"(",
"b",
")",
":",
"result",
"=",
"TYPE",
".",
"to_signed",
"(",
"result",
")",
"return",
"result"
] |
Returns a type which is common for both a and b types.
Returns None if no common types allowed.
|
[
"Returns",
"a",
"type",
"which",
"is",
"common",
"for",
"both",
"a",
"and",
"b",
"types",
".",
"Returns",
"None",
"if",
"no",
"common",
"types",
"allowed",
"."
] |
python
|
train
|
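Illustrative sketch, not part of the dataset row above: the promotion rules documented in the common_type record (float wins over fixed, otherwise the wider type wins, and a signed operand forces a signed result) can be restated without the zxbasic symbol classes. The type names and lookup tables below are invented stand-ins, and string/unknown handling is omitted.

SIZE = {'byte': 1, 'ubyte': 1, 'integer': 2, 'uinteger': 2, 'long': 4, 'ulong': 4}
UNSIGNED = {'ubyte', 'uinteger', 'ulong'}
TO_SIGNED = {'ubyte': 'byte', 'uinteger': 'integer', 'ulong': 'long'}

def common(a, b):
    # Standalone sketch of the promotion order: float > fixed > wider type,
    # with the result switched to signed when either operand is signed.
    if 'float' in (a, b):
        return 'float'
    if 'fixed' in (a, b):
        return 'fixed'
    result = a if SIZE[a] > SIZE[b] else b
    if a not in UNSIGNED or b not in UNSIGNED:
        result = TO_SIGNED.get(result, result)
    return result

# common('ubyte', 'integer') -> 'integer'
# common('uinteger', 'fixed') -> 'fixed'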
psss/did
|
did/stats.py
|
https://github.com/psss/did/blob/04e4ee6f1aa14c0cae3ba9f9803871f3f98279cb/did/stats.py#L74-L88
|
def check(self):
""" Check the stats if enabled. """
if not self.enabled():
return
try:
self.fetch()
except (xmlrpclib.Fault, did.base.ConfigError) as error:
log.error(error)
self._error = True
# Raise the exception if debugging
if not self.options or self.options.debug:
raise
# Show the results stats (unless merging)
if self.options and not self.options.merge:
self.show()
|
[
"def",
"check",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"enabled",
"(",
")",
":",
"return",
"try",
":",
"self",
".",
"fetch",
"(",
")",
"except",
"(",
"xmlrpclib",
".",
"Fault",
",",
"did",
".",
"base",
".",
"ConfigError",
")",
"as",
"error",
":",
"log",
".",
"error",
"(",
"error",
")",
"self",
".",
"_error",
"=",
"True",
"# Raise the exception if debugging",
"if",
"not",
"self",
".",
"options",
"or",
"self",
".",
"options",
".",
"debug",
":",
"raise",
"# Show the results stats (unless merging)",
"if",
"self",
".",
"options",
"and",
"not",
"self",
".",
"options",
".",
"merge",
":",
"self",
".",
"show",
"(",
")"
] |
Check the stats if enabled.
|
[
"Check",
"the",
"stats",
"if",
"enabled",
"."
] |
python
|
train
|
sass/libsass-python
|
sassutils/builder.py
|
https://github.com/sass/libsass-python/blob/fde5b18bc761f0253e71685ee5489e4beb8a403e/sassutils/builder.py#L201-L223
|
def unresolve_filename(self, package_dir, filename):
"""Retrieves the probable source path from the output filename. Pass
in a .css path to get out a .scss path.
:param package_dir: the path of the package directory
:type package_dir: :class:`str`
:param filename: the css filename
:type filename: :class:`str`
:returns: the scss filename
:rtype: :class:`str`
"""
filename, _ = os.path.splitext(filename)
if self.strip_extension:
for ext in ('.scss', '.sass'):
test_path = os.path.join(
package_dir, self.sass_path, filename + ext,
)
if os.path.exists(test_path):
return filename + ext
else: # file not found, let it error with `.scss` extension
return filename + '.scss'
else:
return filename
|
[
"def",
"unresolve_filename",
"(",
"self",
",",
"package_dir",
",",
"filename",
")",
":",
"filename",
",",
"_",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"if",
"self",
".",
"strip_extension",
":",
"for",
"ext",
"in",
"(",
"'.scss'",
",",
"'.sass'",
")",
":",
"test_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"package_dir",
",",
"self",
".",
"sass_path",
",",
"filename",
"+",
"ext",
",",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"test_path",
")",
":",
"return",
"filename",
"+",
"ext",
"else",
":",
"# file not found, let it error with `.scss` extension",
"return",
"filename",
"+",
"'.scss'",
"else",
":",
"return",
"filename"
] |
Retrieves the probable source path from the output filename. Pass
in a .css path to get out a .scss path.
:param package_dir: the path of the package directory
:type package_dir: :class:`str`
:param filename: the css filename
:type filename: :class:`str`
:returns: the scss filename
:rtype: :class:`str`
|
[
"Retrieves",
"the",
"probable",
"source",
"path",
"from",
"the",
"output",
"filename",
".",
"Pass",
"in",
"a",
".",
"css",
"path",
"to",
"get",
"out",
"a",
".",
"scss",
"path",
"."
] |
python
|
train
|
ARMmbed/mbed-cloud-sdk-python
|
src/mbed_cloud/filters.py
|
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/filters.py#L61-L95
|
def _normalise_key_values(filter_obj, attr_map=None):
"""Converts nested dictionary filters into django-style key value pairs
Map filter operators and aliases to operator-land
Additionally, perform replacements according to attribute map
Automatically assumes __eq if not explicitly defined
"""
new_filter = {}
for key, constraints in filter_obj.items():
aliased_key = key
if attr_map is not None:
aliased_key = attr_map.get(key)
if aliased_key is None:
raise CloudValueError(
'Invalid key %r for filter attribute; must be one of:\n%s' % (
key,
attr_map.keys()
)
)
if not isinstance(constraints, dict):
constraints = {'eq': constraints}
for operator, value in constraints.items():
# FIXME: deprecate this $ nonsense
canonical_operator = FILTER_OPERATOR_ALIASES.get(operator.lstrip('$'))
if canonical_operator is None:
raise CloudValueError(
'Invalid operator %r for filter key %s; must be one of:\n%s' % (
operator,
key,
FILTER_OPERATOR_ALIASES.keys()
)
)
canonical_key = str('%s__%s' % (aliased_key, canonical_operator))
new_filter[canonical_key] = _normalise_value(value)
return new_filter
|
[
"def",
"_normalise_key_values",
"(",
"filter_obj",
",",
"attr_map",
"=",
"None",
")",
":",
"new_filter",
"=",
"{",
"}",
"for",
"key",
",",
"constraints",
"in",
"filter_obj",
".",
"items",
"(",
")",
":",
"aliased_key",
"=",
"key",
"if",
"attr_map",
"is",
"not",
"None",
":",
"aliased_key",
"=",
"attr_map",
".",
"get",
"(",
"key",
")",
"if",
"aliased_key",
"is",
"None",
":",
"raise",
"CloudValueError",
"(",
"'Invalid key %r for filter attribute; must be one of:\\n%s'",
"%",
"(",
"key",
",",
"attr_map",
".",
"keys",
"(",
")",
")",
")",
"if",
"not",
"isinstance",
"(",
"constraints",
",",
"dict",
")",
":",
"constraints",
"=",
"{",
"'eq'",
":",
"constraints",
"}",
"for",
"operator",
",",
"value",
"in",
"constraints",
".",
"items",
"(",
")",
":",
"# FIXME: deprecate this $ nonsense",
"canonical_operator",
"=",
"FILTER_OPERATOR_ALIASES",
".",
"get",
"(",
"operator",
".",
"lstrip",
"(",
"'$'",
")",
")",
"if",
"canonical_operator",
"is",
"None",
":",
"raise",
"CloudValueError",
"(",
"'Invalid operator %r for filter key %s; must be one of:\\n%s'",
"%",
"(",
"operator",
",",
"key",
",",
"FILTER_OPERATOR_ALIASES",
".",
"keys",
"(",
")",
")",
")",
"canonical_key",
"=",
"str",
"(",
"'%s__%s'",
"%",
"(",
"aliased_key",
",",
"canonical_operator",
")",
")",
"new_filter",
"[",
"canonical_key",
"]",
"=",
"_normalise_value",
"(",
"value",
")",
"return",
"new_filter"
] |
Converts nested dictionary filters into django-style key value pairs
Map filter operators and aliases to operator-land
Additionally, perform replacements according to attribute map
Automatically assumes __eq if not explicitly defined
|
[
"Converts",
"nested",
"dictionary",
"filters",
"into",
"django",
"-",
"style",
"key",
"value",
"pairs"
] |
python
|
train
|
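Illustrative sketch, not part of the dataset row above: the flattening performed by _normalise_key_values is easier to read as a stripped-down standalone version. The sketch below is hypothetical; it drops the attribute map and value normalisation and keeps only the operator handling.

FILTER_OPERATOR_ALIASES = {'eq': 'eq', 'neq': 'neq', 'gte': 'gte', 'lte': 'lte'}

def to_django_style(filters):
    # Bare constraints default to an equality test; '$' prefixes are
    # tolerated on operators, and each pair becomes 'key__operator'.
    out = {}
    for key, constraints in filters.items():
        if not isinstance(constraints, dict):
            constraints = {'eq': constraints}
        for op, value in constraints.items():
            out['%s__%s' % (key, FILTER_OPERATOR_ALIASES[op.lstrip('$')])] = value
    return out

# to_django_style({'created_at': {'$gte': '2019-01-01'}, 'state': 'registered'})
# -> {'created_at__gte': '2019-01-01', 'state__eq': 'registered'}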
Azure/azure-event-hubs-python
|
azure/eventprocessorhost/partition_context.py
|
https://github.com/Azure/azure-event-hubs-python/blob/737c5f966557ada2cf10fa0d8f3c19671ae96348/azure/eventprocessorhost/partition_context.py#L76-L100
|
async def checkpoint_async_event_data(self, event_data, event_processor_context=None):
"""
Stores the offset and sequenceNumber from the provided received EventData instance,
then writes those values to the checkpoint store via the checkpoint manager.
Optionally stores the state of the Event Processor along the checkpoint.
:param event_data: A received EventData with valid offset and sequenceNumber.
:type event_data: ~azure.eventhub.common.EventData
:param event_processor_context An optional custom state value for the Event Processor.
This data must be in a JSON serializable format.
:type event_processor_context: str or dict
:raises: ValueError if suplied event_data is None.
:raises: ValueError if the sequenceNumber is less than the last checkpointed value.
"""
if not event_data:
raise ValueError("event_data")
if event_data.sequence_number > self.sequence_number:
#We have never seen this sequence number yet
raise ValueError("Argument Out Of Range event_data x-opt-sequence-number")
await self.persist_checkpoint_async(Checkpoint(self.partition_id,
event_data.offset.value,
event_data.sequence_number),
event_processor_context)
self.event_processor_context = event_processor_context
|
[
"async",
"def",
"checkpoint_async_event_data",
"(",
"self",
",",
"event_data",
",",
"event_processor_context",
"=",
"None",
")",
":",
"if",
"not",
"event_data",
":",
"raise",
"ValueError",
"(",
"\"event_data\"",
")",
"if",
"event_data",
".",
"sequence_number",
">",
"self",
".",
"sequence_number",
":",
"#We have never seen this sequence number yet",
"raise",
"ValueError",
"(",
"\"Argument Out Of Range event_data x-opt-sequence-number\"",
")",
"await",
"self",
".",
"persist_checkpoint_async",
"(",
"Checkpoint",
"(",
"self",
".",
"partition_id",
",",
"event_data",
".",
"offset",
".",
"value",
",",
"event_data",
".",
"sequence_number",
")",
",",
"event_processor_context",
")",
"self",
".",
"event_processor_context",
"=",
"event_processor_context"
] |
Stores the offset and sequenceNumber from the provided received EventData instance,
then writes those values to the checkpoint store via the checkpoint manager.
Optionally stores the state of the Event Processor along the checkpoint.
:param event_data: A received EventData with valid offset and sequenceNumber.
:type event_data: ~azure.eventhub.common.EventData
:param event_processor_context An optional custom state value for the Event Processor.
This data must be in a JSON serializable format.
:type event_processor_context: str or dict
:raises: ValueError if suplied event_data is None.
:raises: ValueError if the sequenceNumber is less than the last checkpointed value.
|
[
"Stores",
"the",
"offset",
"and",
"sequenceNumber",
"from",
"the",
"provided",
"received",
"EventData",
"instance",
"then",
"writes",
"those",
"values",
"to",
"the",
"checkpoint",
"store",
"via",
"the",
"checkpoint",
"manager",
".",
"Optionally",
"stores",
"the",
"state",
"of",
"the",
"Event",
"Processor",
"along",
"the",
"checkpoint",
"."
] |
python
|
train
|
saltant-org/saltant-py
|
saltant/models/base_task_instance.py
|
https://github.com/saltant-org/saltant-py/blob/bf3bdbc4ec9c772c7f621f8bd6a76c5932af68be/saltant/models/base_task_instance.py#L312-L338
|
def wait_until_finished(
self, uuid, refresh_period=DEFAULT_TASK_INSTANCE_WAIT_REFRESH_PERIOD
):
"""Wait until a task instance with the given UUID is finished.
Args:
uuid (str): The UUID of the task instance to wait for.
refresh_period (float, optional): How many seconds to wait
in between checking the task's status. Defaults to 5
seconds.
Returns:
:class:`saltant.models.base_task_instance.BaseTaskInstance`:
A task instance model instance representing the task
instance which we waited for.
"""
# Wait for the task to finish
task_instance = self.get(uuid)
while task_instance.state not in TASK_INSTANCE_FINISH_STATUSES:
# Wait a bit
time.sleep(refresh_period)
# Query again
task_instance = self.get(uuid)
return task_instance
|
[
"def",
"wait_until_finished",
"(",
"self",
",",
"uuid",
",",
"refresh_period",
"=",
"DEFAULT_TASK_INSTANCE_WAIT_REFRESH_PERIOD",
")",
":",
"# Wait for the task to finish",
"task_instance",
"=",
"self",
".",
"get",
"(",
"uuid",
")",
"while",
"task_instance",
".",
"state",
"not",
"in",
"TASK_INSTANCE_FINISH_STATUSES",
":",
"# Wait a bit",
"time",
".",
"sleep",
"(",
"refresh_period",
")",
"# Query again",
"task_instance",
"=",
"self",
".",
"get",
"(",
"uuid",
")",
"return",
"task_instance"
] |
Wait until a task instance with the given UUID is finished.
Args:
uuid (str): The UUID of the task instance to wait for.
refresh_period (float, optional): How many seconds to wait
in between checking the task's status. Defaults to 5
seconds.
Returns:
:class:`saltant.models.base_task_instance.BaseTaskInstance`:
A task instance model instance representing the task
instance which we waited for.
|
[
"Wait",
"until",
"a",
"task",
"instance",
"with",
"the",
"given",
"UUID",
"is",
"finished",
"."
] |
python
|
train
|
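Illustrative sketch, not part of the dataset row above: wait_until_finished is a plain poll-and-sleep loop. A generic version of the same pattern, independent of the saltant client, looks like this; the job object in the usage comment is invented.

import time

def wait_until(get_state, finished_states, refresh_period=5.0):
    # Re-query the state until it lands in a terminal set, sleeping
    # between checks; returns the final state that was observed.
    state = get_state()
    while state not in finished_states:
        time.sleep(refresh_period)
        state = get_state()
    return state

# wait_until(lambda: job.status(), {'successful', 'failed'}, refresh_period=5)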
msmbuilder/msmbuilder
|
msmbuilder/msm/_metzner_mcmc_slow.py
|
https://github.com/msmbuilder/msmbuilder/blob/556a93a170782f47be53f4a1e9d740fb1c8272b3/msmbuilder/msm/_metzner_mcmc_slow.py#L5-L99
|
def metzner_mcmc_slow(Z, n_samples, n_thin=1, random_state=None):
"""Metropolis Markov chain Monte Carlo sampler for reversible transition
matrices
Parameters
----------
Z : np.array, shape=(n_states, n_states)
The effective count matrix, the number of observed transitions
between states plus the number of prior counts
n_samples : int
Number of steps to iterate the chain for
n_thin : int
Yield every ``n_thin``-th sample from the MCMC chain
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Notes
-----
The transition matrix posterior distribution is ::
P(T | Z) \propto \Prod_{ij} T_{ij}^{Z_{ij}}
and constrained to be reversible, such that there exists a \pi s.t. ::
\pi_i T_{ij} = \pi_j T_{ji}
Yields
------
T : np.array, shape=(n_states, n_states)
This generator yields samples from the transition matrix posterior
References
----------
.. [1] P. Metzner, F. Noe and C. Schutte, "Estimating the sampling error:
Distribution of transition matrices and functions of transition
matrices for given trajectory data." Phys. Rev. E 80 021106 (2009)
See Also
--------
metzner_mcmc_fast
"""
# Upper and lower bounds on the sum of the K matrix, to ensure proper
# proposal weights. See Eq. 17 of [1].
K_MINUS = 0.9
K_PLUS = 1.1
Z = np.asarray(Z)
n_states = Z.shape[0]
if not Z.ndim == 2 and Z.shape[1] == n_states:
raise ValueError("Z must be square. Z.shape=%s" % str(Z.shape))
K = 0.5 * (Z + Z.T) / np.sum(Z, dtype=float)
random = check_random_state(random_state)
n_accept = 0
for t in range(n_samples):
# proposal
# Select two indices in [0...n_states). We draw them by drawing a
# random floats in [0,1) and then rounding to int so that this method
# is exactly analogous to `metzner_mcmc_fast`, which, for each MCMC
# iteration, draws 4 random floats in [0,1) from the same numpy PSRNG,
# and then inside the C step kernel (src/metzner_mcmc.c) uses two of
# them like this. This ensures that this function and
# `metzner_mcmc_fast` give _exactly_ the same sequence of transition
# matricies, given the same random seed.
i, j = (random.rand(2) * n_states).astype(np.int)
sc = np.sum(K)
if i == j:
a, b = max(-K[i,j], K_MINUS - sc), K_PLUS - sc
else:
a, b = max(-K[i,j], 0.5*(K_MINUS - sc)), 0.5*(K_PLUS - sc)
epsilon = random.uniform(a, b)
K_proposal = np.copy(K)
K_proposal[i, j] += epsilon
if i != j:
K_proposal[j, i] += epsilon
# acceptance?
cutoff = np.exp(_logprob_T(_K_to_T(K_proposal), Z) -
_logprob_T(_K_to_T(K), Z))
r = random.rand()
# print 'i', i, 'j', j
# print 'a', a, 'b', b
# print 'cutoff', cutoff
# print 'r', r
# print 'sc', sc
if r < cutoff:
n_accept += 1
K = K_proposal
if (t+1) % n_thin == 0:
yield _K_to_T(K)
|
[
"def",
"metzner_mcmc_slow",
"(",
"Z",
",",
"n_samples",
",",
"n_thin",
"=",
"1",
",",
"random_state",
"=",
"None",
")",
":",
"# Upper and lower bounds on the sum of the K matrix, to ensure proper",
"# proposal weights. See Eq. 17 of [1].",
"K_MINUS",
"=",
"0.9",
"K_PLUS",
"=",
"1.1",
"Z",
"=",
"np",
".",
"asarray",
"(",
"Z",
")",
"n_states",
"=",
"Z",
".",
"shape",
"[",
"0",
"]",
"if",
"not",
"Z",
".",
"ndim",
"==",
"2",
"and",
"Z",
".",
"shape",
"[",
"1",
"]",
"==",
"n_states",
":",
"raise",
"ValueError",
"(",
"\"Z must be square. Z.shape=%s\"",
"%",
"str",
"(",
"Z",
".",
"shape",
")",
")",
"K",
"=",
"0.5",
"*",
"(",
"Z",
"+",
"Z",
".",
"T",
")",
"/",
"np",
".",
"sum",
"(",
"Z",
",",
"dtype",
"=",
"float",
")",
"random",
"=",
"check_random_state",
"(",
"random_state",
")",
"n_accept",
"=",
"0",
"for",
"t",
"in",
"range",
"(",
"n_samples",
")",
":",
"# proposal",
"# Select two indices in [0...n_states). We draw them by drawing a",
"# random floats in [0,1) and then rounding to int so that this method",
"# is exactly analogous to `metzner_mcmc_fast`, which, for each MCMC",
"# iteration, draws 4 random floats in [0,1) from the same numpy PSRNG,",
"# and then inside the C step kernel (src/metzner_mcmc.c) uses two of",
"# them like this. This ensures that this function and",
"# `metzner_mcmc_fast` give _exactly_ the same sequence of transition",
"# matricies, given the same random seed.",
"i",
",",
"j",
"=",
"(",
"random",
".",
"rand",
"(",
"2",
")",
"*",
"n_states",
")",
".",
"astype",
"(",
"np",
".",
"int",
")",
"sc",
"=",
"np",
".",
"sum",
"(",
"K",
")",
"if",
"i",
"==",
"j",
":",
"a",
",",
"b",
"=",
"max",
"(",
"-",
"K",
"[",
"i",
",",
"j",
"]",
",",
"K_MINUS",
"-",
"sc",
")",
",",
"K_PLUS",
"-",
"sc",
"else",
":",
"a",
",",
"b",
"=",
"max",
"(",
"-",
"K",
"[",
"i",
",",
"j",
"]",
",",
"0.5",
"*",
"(",
"K_MINUS",
"-",
"sc",
")",
")",
",",
"0.5",
"*",
"(",
"K_PLUS",
"-",
"sc",
")",
"epsilon",
"=",
"random",
".",
"uniform",
"(",
"a",
",",
"b",
")",
"K_proposal",
"=",
"np",
".",
"copy",
"(",
"K",
")",
"K_proposal",
"[",
"i",
",",
"j",
"]",
"+=",
"epsilon",
"if",
"i",
"!=",
"j",
":",
"K_proposal",
"[",
"j",
",",
"i",
"]",
"+=",
"epsilon",
"# acceptance?",
"cutoff",
"=",
"np",
".",
"exp",
"(",
"_logprob_T",
"(",
"_K_to_T",
"(",
"K_proposal",
")",
",",
"Z",
")",
"-",
"_logprob_T",
"(",
"_K_to_T",
"(",
"K",
")",
",",
"Z",
")",
")",
"r",
"=",
"random",
".",
"rand",
"(",
")",
"# print 'i', i, 'j', j",
"# print 'a', a, 'b', b",
"# print 'cutoff', cutoff",
"# print 'r', r",
"# print 'sc', sc",
"if",
"r",
"<",
"cutoff",
":",
"n_accept",
"+=",
"1",
"K",
"=",
"K_proposal",
"if",
"(",
"t",
"+",
"1",
")",
"%",
"n_thin",
"==",
"0",
":",
"yield",
"_K_to_T",
"(",
"K",
")"
] |
Metropolis Markov chain Monte Carlo sampler for reversible transition
matrices
Parameters
----------
Z : np.array, shape=(n_states, n_states)
The effective count matrix, the number of observed transitions
between states plus the number of prior counts
n_samples : int
Number of steps to iterate the chain for
n_thin : int
Yield every ``n_thin``-th sample from the MCMC chain
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Notes
-----
The transition matrix posterior distribution is ::
P(T | Z) \propto \Prod_{ij} T_{ij}^{Z_{ij}}
and constrained to be reversible, such that there exists a \pi s.t. ::
\pi_i T_{ij} = \pi_j T_{ji}
Yields
------
T : np.array, shape=(n_states, n_states)
This generator yields samples from the transition matrix posterior
References
----------
.. [1] P. Metzner, F. Noe and C. Schutte, "Estimating the sampling error:
Distribution of transition matrices and functions of transition
matrices for given trajectory data." Phys. Rev. E 80 021106 (2009)
See Also
--------
metzner_mcmc_fast
|
[
"Metropolis",
"Markov",
"chain",
"Monte",
"Carlo",
"sampler",
"for",
"reversible",
"transition",
"matrices"
] |
python
|
train
|
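Illustrative sketch, not part of the dataset row above: metzner_mcmc_slow is a generator, so a caller iterates it to collect posterior samples. The snippet below assumes the private module path from the record's metadata is importable; the count matrix values are invented.

import numpy as np
from msmbuilder.msm._metzner_mcmc_slow import metzner_mcmc_slow

# Toy 2-state effective count matrix.
Z = np.array([[10.0, 2.0],
              [3.0, 8.0]])

# Each yielded T is a reversible transition matrix sampled from P(T | Z);
# with n_thin=10 the chain is thinned to every 10th step.
samples = list(metzner_mcmc_slow(Z, n_samples=200, n_thin=10, random_state=0))
mean_T = np.mean(samples, axis=0)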
fastai/fastai
|
fastai/callback.py
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/callback.py#L36-L42
|
def new_with_params(self, param_groups:Collection[Collection[nn.Parameter]]):
"Create a new `OptimWrapper` from `self` with another `layer_groups` but the same hyper-parameters."
opt_func = getattr(self, 'opt_func', self.opt.__class__)
opt = opt_func([{'params': p, 'lr':0} for p in param_groups])
opt = self.__class__(opt, wd=self.wd, true_wd=self.true_wd, bn_wd=self.bn_wd)
opt.lr,opt.opt_func,opt.mom,opt.beta = self.lr,opt_func,self.mom,self.beta
return opt
|
[
"def",
"new_with_params",
"(",
"self",
",",
"param_groups",
":",
"Collection",
"[",
"Collection",
"[",
"nn",
".",
"Parameter",
"]",
"]",
")",
":",
"opt_func",
"=",
"getattr",
"(",
"self",
",",
"'opt_func'",
",",
"self",
".",
"opt",
".",
"__class__",
")",
"opt",
"=",
"opt_func",
"(",
"[",
"{",
"'params'",
":",
"p",
",",
"'lr'",
":",
"0",
"}",
"for",
"p",
"in",
"param_groups",
"]",
")",
"opt",
"=",
"self",
".",
"__class__",
"(",
"opt",
",",
"wd",
"=",
"self",
".",
"wd",
",",
"true_wd",
"=",
"self",
".",
"true_wd",
",",
"bn_wd",
"=",
"self",
".",
"bn_wd",
")",
"opt",
".",
"lr",
",",
"opt",
".",
"opt_func",
",",
"opt",
".",
"mom",
",",
"opt",
".",
"beta",
"=",
"self",
".",
"lr",
",",
"opt_func",
",",
"self",
".",
"mom",
",",
"self",
".",
"beta",
"return",
"opt"
] |
Create a new `OptimWrapper` from `self` with another `layer_groups` but the same hyper-parameters.
|
[
"Create",
"a",
"new",
"OptimWrapper",
"from",
"self",
"with",
"another",
"layer_groups",
"but",
"the",
"same",
"hyper",
"-",
"parameters",
"."
] |
python
|
train
|
cocagne/txdbus
|
doc/examples/fd_server.py
|
https://github.com/cocagne/txdbus/blob/eb424918764b7b93eecd2a4e2e5c2d0b2944407b/doc/examples/fd_server.py#L46-L53
|
def dbus_lenFD(self, fd):
"""
Returns the byte count after reading till EOF.
"""
f = os.fdopen(fd, 'rb')
result = len(f.read())
f.close()
return result
|
[
"def",
"dbus_lenFD",
"(",
"self",
",",
"fd",
")",
":",
"f",
"=",
"os",
".",
"fdopen",
"(",
"fd",
",",
"'rb'",
")",
"result",
"=",
"len",
"(",
"f",
".",
"read",
"(",
")",
")",
"f",
".",
"close",
"(",
")",
"return",
"result"
] |
Returns the byte count after reading till EOF.
|
[
"Returns",
"the",
"byte",
"count",
"after",
"reading",
"till",
"EOF",
"."
] |
python
|
train
|
Autodesk/cryptorito
|
cryptorito/__init__.py
|
https://github.com/Autodesk/cryptorito/blob/277fc7cc42c31c5bc37e26d8bf5a2ac746a6ea85/cryptorito/__init__.py#L252-L260
|
def has_gpg_key(fingerprint):
"""Checks to see if we have this gpg fingerprint"""
if len(fingerprint) > 8:
fingerprint = fingerprint[-8:]
fingerprint = fingerprint.upper()
cmd = flatten([gnupg_bin(), gnupg_home(), "--list-public-keys"])
lines = stderr_output(cmd).split('\n')
return len([key for key in lines if key.find(fingerprint) > -1]) == 1
|
[
"def",
"has_gpg_key",
"(",
"fingerprint",
")",
":",
"if",
"len",
"(",
"fingerprint",
")",
">",
"8",
":",
"fingerprint",
"=",
"fingerprint",
"[",
"-",
"8",
":",
"]",
"fingerprint",
"=",
"fingerprint",
".",
"upper",
"(",
")",
"cmd",
"=",
"flatten",
"(",
"[",
"gnupg_bin",
"(",
")",
",",
"gnupg_home",
"(",
")",
",",
"\"--list-public-keys\"",
"]",
")",
"lines",
"=",
"stderr_output",
"(",
"cmd",
")",
".",
"split",
"(",
"'\\n'",
")",
"return",
"len",
"(",
"[",
"key",
"for",
"key",
"in",
"lines",
"if",
"key",
".",
"find",
"(",
"fingerprint",
")",
">",
"-",
"1",
"]",
")",
"==",
"1"
] |
Checks to see if we have this gpg fingerprint
|
[
"Checks",
"to",
"see",
"if",
"we",
"have",
"this",
"gpg",
"fingerprint"
] |
python
|
train
|
openego/ding0
|
ding0/core/__init__.py
|
https://github.com/openego/ding0/blob/e2d6528f96255e4bb22ba15514a4f1883564ed5d/ding0/core/__init__.py#L270-L325
|
def get_mvgd_lvla_lvgd_obj_from_id(self):
""" Build dict with mapping from LVLoadAreaDing0 id to LVLoadAreaDing0 object,
MVGridDistrictDing0 id to MVGridDistrictDing0 object,
LVGridDistrictDing0 id to LVGridDistrictDing0 object and
LVStationDing0 id to LVStationDing0 object
Returns
-------
:obj:`dict`
mv_grid_districts_dict::
{
mv_grid_district_id_1: mv_grid_district_obj_1,
...,
mv_grid_district_id_n: mv_grid_district_obj_n
}
:obj:`dict`
lv_load_areas_dict::
{
lv_load_area_id_1: lv_load_area_obj_1,
...,
lv_load_area_id_n: lv_load_area_obj_n
}
:obj:`dict`
lv_grid_districts_dict::
{
lv_grid_district_id_1: lv_grid_district_obj_1,
...,
lv_grid_district_id_n: lv_grid_district_obj_n
}
:obj:`dict`
lv_stations_dict::
{
lv_station_id_1: lv_station_obj_1,
...,
lv_station_id_n: lv_station_obj_n
}
"""
mv_grid_districts_dict = {}
lv_load_areas_dict = {}
lv_grid_districts_dict = {}
lv_stations_dict = {}
for mv_grid_district in self.mv_grid_districts():
mv_grid_districts_dict[mv_grid_district.id_db] = mv_grid_district
for lv_load_area in mv_grid_district.lv_load_areas():
lv_load_areas_dict[lv_load_area.id_db] = lv_load_area
for lv_grid_district in lv_load_area.lv_grid_districts():
lv_grid_districts_dict[lv_grid_district.id_db] = lv_grid_district
lv_stations_dict[lv_grid_district.lv_grid.station().id_db] = lv_grid_district.lv_grid.station()
return mv_grid_districts_dict, lv_load_areas_dict, lv_grid_districts_dict, lv_stations_dict
|
[
"def",
"get_mvgd_lvla_lvgd_obj_from_id",
"(",
"self",
")",
":",
"mv_grid_districts_dict",
"=",
"{",
"}",
"lv_load_areas_dict",
"=",
"{",
"}",
"lv_grid_districts_dict",
"=",
"{",
"}",
"lv_stations_dict",
"=",
"{",
"}",
"for",
"mv_grid_district",
"in",
"self",
".",
"mv_grid_districts",
"(",
")",
":",
"mv_grid_districts_dict",
"[",
"mv_grid_district",
".",
"id_db",
"]",
"=",
"mv_grid_district",
"for",
"lv_load_area",
"in",
"mv_grid_district",
".",
"lv_load_areas",
"(",
")",
":",
"lv_load_areas_dict",
"[",
"lv_load_area",
".",
"id_db",
"]",
"=",
"lv_load_area",
"for",
"lv_grid_district",
"in",
"lv_load_area",
".",
"lv_grid_districts",
"(",
")",
":",
"lv_grid_districts_dict",
"[",
"lv_grid_district",
".",
"id_db",
"]",
"=",
"lv_grid_district",
"lv_stations_dict",
"[",
"lv_grid_district",
".",
"lv_grid",
".",
"station",
"(",
")",
".",
"id_db",
"]",
"=",
"lv_grid_district",
".",
"lv_grid",
".",
"station",
"(",
")",
"return",
"mv_grid_districts_dict",
",",
"lv_load_areas_dict",
",",
"lv_grid_districts_dict",
",",
"lv_stations_dict"
] |
Build dict with mapping from LVLoadAreaDing0 id to LVLoadAreaDing0 object,
MVGridDistrictDing0 id to MVGridDistrictDing0 object,
LVGridDistrictDing0 id to LVGridDistrictDing0 object and
LVStationDing0 id to LVStationDing0 object
Returns
-------
:obj:`dict`
mv_grid_districts_dict::
{
mv_grid_district_id_1: mv_grid_district_obj_1,
...,
mv_grid_district_id_n: mv_grid_district_obj_n
}
:obj:`dict`
lv_load_areas_dict::
{
lv_load_area_id_1: lv_load_area_obj_1,
...,
lv_load_area_id_n: lv_load_area_obj_n
}
:obj:`dict`
lv_grid_districts_dict::
{
lv_grid_district_id_1: lv_grid_district_obj_1,
...,
lv_grid_district_id_n: lv_grid_district_obj_n
}
:obj:`dict`
lv_stations_dict::
{
lv_station_id_1: lv_station_obj_1,
...,
lv_station_id_n: lv_station_obj_n
}
|
[
"Build",
"dict",
"with",
"mapping",
"from",
"LVLoadAreaDing0",
"id",
"to",
"LVLoadAreaDing0",
"object",
"MVGridDistrictDing0",
"id",
"to",
"MVGridDistrictDing0",
"object",
"LVGridDistrictDing0",
"id",
"to",
"LVGridDistrictDing0",
"object",
"and",
"LVStationDing0",
"id",
"to",
"LVStationDing0",
"object"
] |
python
|
train
|
DarkEnergySurvey/ugali
|
ugali/scratch/simulation/survey_selection_function.py
|
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/scratch/simulation/survey_selection_function.py#L117-L132
|
def meanFracdet(map_fracdet, lon_population, lat_population, radius_population):
"""
Compute the mean fracdet within circular aperture (radius specified in decimal degrees)
lon, lat, and radius are taken to be arrays of the same length
"""
nside_fracdet = hp.npix2nside(len(map_fracdet))
map_fracdet_zero = np.where(map_fracdet >= 0., map_fracdet, 0.)
fracdet_population = np.empty(len(lon_population))
for ii in range(0, len(lon_population)):
fracdet_population[ii] = np.mean(map_fracdet_zero[angToDisc(nside_fracdet,
lon_population[ii],
lat_population[ii],
radius_population if np.isscalar(radius_population) else radius_population[ii],
inclusive=True)])
return fracdet_population
|
[
"def",
"meanFracdet",
"(",
"map_fracdet",
",",
"lon_population",
",",
"lat_population",
",",
"radius_population",
")",
":",
"nside_fracdet",
"=",
"hp",
".",
"npix2nside",
"(",
"len",
"(",
"map_fracdet",
")",
")",
"map_fracdet_zero",
"=",
"np",
".",
"where",
"(",
"map_fracdet",
">=",
"0.",
",",
"map_fracdet",
",",
"0.",
")",
"fracdet_population",
"=",
"np",
".",
"empty",
"(",
"len",
"(",
"lon_population",
")",
")",
"for",
"ii",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"lon_population",
")",
")",
":",
"fracdet_population",
"[",
"ii",
"]",
"=",
"np",
".",
"mean",
"(",
"map_fracdet_zero",
"[",
"angToDisc",
"(",
"nside_fracdet",
",",
"lon_population",
"[",
"ii",
"]",
",",
"lat_population",
"[",
"ii",
"]",
",",
"radius_population",
"if",
"np",
".",
"isscalar",
"(",
"radius_population",
")",
"else",
"radius_population",
"[",
"ii",
"]",
",",
"inclusive",
"=",
"True",
")",
"]",
")",
"return",
"fracdet_population"
] |
Compute the mean fracdet within circular aperture (radius specified in decimal degrees)
lon, lat, and radius are taken to be arrays of the same length
|
[
"Compute",
"the",
"mean",
"fracdet",
"within",
"circular",
"aperture",
"(",
"radius",
"specified",
"in",
"decimal",
"degrees",
")"
] |
python
|
train
|
kerrickstaley/genanki
|
genanki/model.py
|
https://github.com/kerrickstaley/genanki/blob/624a9ffaf2f7868721cf7e7e55f53b238df9a04a/genanki/model.py#L27-L78
|
def _req(self):
"""
List of required fields for each template. Format is [tmpl_idx, "all"|"any", [req_field_1, req_field_2, ...]].
Partial reimplementation of req computing logic from Anki. We use pystache instead of Anki's custom mustache
implementation.
The goal is to figure out which fields are "required", i.e. if they are missing then the front side of the note
doesn't contain any meaningful content.
"""
sentinel = 'SeNtInEl'
field_names = [field['name'] for field in self.fields]
req = []
for template_ord, template in enumerate(self.templates):
field_values = {field: sentinel for field in field_names}
required_fields = []
for field_ord, field in enumerate(field_names):
fvcopy = copy(field_values)
fvcopy[field] = ''
rendered = pystache.render(template['qfmt'], fvcopy)
if sentinel not in rendered:
# when this field is missing, there is no meaningful content (no field values) in the question, so this field
# is required
required_fields.append(field_ord)
if required_fields:
req.append([template_ord, 'all', required_fields])
continue
# there are no required fields, so an "all" is not appropriate, switch to checking for "any"
field_values = {field: '' for field in field_names}
for field_ord, field in enumerate(field_names):
fvcopy = copy(field_values)
fvcopy[field] = sentinel
rendered = pystache.render(template['qfmt'], fvcopy)
if sentinel in rendered:
# when this field is present, there is meaningful content in the question
required_fields.append(field_ord)
if not required_fields:
raise Exception(
'Could not compute required fields for this template; please check the formatting of "qfmt": {}'.format(
template))
req.append([template_ord, 'any', required_fields])
return req
|
[
"def",
"_req",
"(",
"self",
")",
":",
"sentinel",
"=",
"'SeNtInEl'",
"field_names",
"=",
"[",
"field",
"[",
"'name'",
"]",
"for",
"field",
"in",
"self",
".",
"fields",
"]",
"req",
"=",
"[",
"]",
"for",
"template_ord",
",",
"template",
"in",
"enumerate",
"(",
"self",
".",
"templates",
")",
":",
"field_values",
"=",
"{",
"field",
":",
"sentinel",
"for",
"field",
"in",
"field_names",
"}",
"required_fields",
"=",
"[",
"]",
"for",
"field_ord",
",",
"field",
"in",
"enumerate",
"(",
"field_names",
")",
":",
"fvcopy",
"=",
"copy",
"(",
"field_values",
")",
"fvcopy",
"[",
"field",
"]",
"=",
"''",
"rendered",
"=",
"pystache",
".",
"render",
"(",
"template",
"[",
"'qfmt'",
"]",
",",
"fvcopy",
")",
"if",
"sentinel",
"not",
"in",
"rendered",
":",
"# when this field is missing, there is no meaningful content (no field values) in the question, so this field",
"# is required",
"required_fields",
".",
"append",
"(",
"field_ord",
")",
"if",
"required_fields",
":",
"req",
".",
"append",
"(",
"[",
"template_ord",
",",
"'all'",
",",
"required_fields",
"]",
")",
"continue",
"# there are no required fields, so an \"all\" is not appropriate, switch to checking for \"any\"",
"field_values",
"=",
"{",
"field",
":",
"''",
"for",
"field",
"in",
"field_names",
"}",
"for",
"field_ord",
",",
"field",
"in",
"enumerate",
"(",
"field_names",
")",
":",
"fvcopy",
"=",
"copy",
"(",
"field_values",
")",
"fvcopy",
"[",
"field",
"]",
"=",
"sentinel",
"rendered",
"=",
"pystache",
".",
"render",
"(",
"template",
"[",
"'qfmt'",
"]",
",",
"fvcopy",
")",
"if",
"sentinel",
"in",
"rendered",
":",
"# when this field is present, there is meaningful content in the question",
"required_fields",
".",
"append",
"(",
"field_ord",
")",
"if",
"not",
"required_fields",
":",
"raise",
"Exception",
"(",
"'Could not compute required fields for this template; please check the formatting of \"qfmt\": {}'",
".",
"format",
"(",
"template",
")",
")",
"req",
".",
"append",
"(",
"[",
"template_ord",
",",
"'any'",
",",
"required_fields",
"]",
")",
"return",
"req"
] |
List of required fields for each template. Format is [tmpl_idx, "all"|"any", [req_field_1, req_field_2, ...]].
Partial reimplementation of req computing logic from Anki. We use pystache instead of Anki's custom mustache
implementation.
The goal is to figure out which fields are "required", i.e. if they are missing then the front side of the note
doesn't contain any meaningful content.
|
[
"List",
"of",
"required",
"fields",
"for",
"each",
"template",
".",
"Format",
"is",
"[",
"tmpl_idx",
"all",
"|",
"any",
"[",
"req_field_1",
"req_field_2",
"...",
"]]",
"."
] |
python
|
train
|
zomux/deepy
|
deepy/dataset/padding.py
|
https://github.com/zomux/deepy/blob/090fbad22a08a809b12951cd0d4984f5bd432698/deepy/dataset/padding.py#L7-L17
|
def pad_dataset(subset, side="right", length=-1):
"""
Pad data set to specified length.
Parameters:
length - max length, a just to the max length in the batch if length is -1
"""
assert length == -1 or length > 0
if type(subset[0][0][0]) in [float, int, np.int64, np.int32, np.float32]:
return _pad_2d(subset, side, length)
else:
return _pad_3d(subset, side, length)
|
[
"def",
"pad_dataset",
"(",
"subset",
",",
"side",
"=",
"\"right\"",
",",
"length",
"=",
"-",
"1",
")",
":",
"assert",
"length",
"==",
"-",
"1",
"or",
"length",
">",
"0",
"if",
"type",
"(",
"subset",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"in",
"[",
"float",
",",
"int",
",",
"np",
".",
"int64",
",",
"np",
".",
"int32",
",",
"np",
".",
"float32",
"]",
":",
"return",
"_pad_2d",
"(",
"subset",
",",
"side",
",",
"length",
")",
"else",
":",
"return",
"_pad_3d",
"(",
"subset",
",",
"side",
",",
"length",
")"
] |
Pad data set to specified length.
Parameters:
length - max length, a just to the max length in the batch if length is -1
|
[
"Pad",
"data",
"set",
"to",
"specified",
"length",
".",
"Parameters",
":",
"length",
"-",
"max",
"length",
"a",
"just",
"to",
"the",
"max",
"length",
"in",
"the",
"batch",
"if",
"length",
"is",
"-",
"1"
] |
python
|
test
|
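Illustrative sketch, not part of the dataset row above: the padding described by pad_dataset ("pad to the max length in the batch if length is -1") reduces to a one-liner per sequence. The helper below is a hypothetical 2-D sketch, not the deepy implementation.

def pad_right(batch, value=0, length=-1):
    # Pad every sequence on the right with `value`, either to `length`
    # or, when length is -1, to the longest sequence in the batch.
    target = max(len(seq) for seq in batch) if length == -1 else length
    return [list(seq) + [value] * (target - len(seq)) for seq in batch]

# pad_right([[1, 2], [3]]) -> [[1, 2], [3, 0]]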
jopohl/urh
|
src/urh/controller/GeneratorTabController.py
|
https://github.com/jopohl/urh/blob/2eb33b125c8407964cd1092843cde5010eb88aae/src/urh/controller/GeneratorTabController.py#L499-L517
|
def refresh_existing_encodings(self, encodings_from_file):
"""
Refresh existing encodings for messages, when encoding was changed by user in dialog
:return:
"""
update = False
for msg in self.table_model.protocol.messages:
i = next((i for i, d in enumerate(encodings_from_file) if d.name == msg.decoder.name), 0)
if msg.decoder != encodings_from_file[i]:
update = True
msg.decoder = encodings_from_file[i]
msg.clear_decoded_bits()
msg.clear_encoded_bits()
if update:
self.refresh_table()
self.refresh_estimated_time()
|
[
"def",
"refresh_existing_encodings",
"(",
"self",
",",
"encodings_from_file",
")",
":",
"update",
"=",
"False",
"for",
"msg",
"in",
"self",
".",
"table_model",
".",
"protocol",
".",
"messages",
":",
"i",
"=",
"next",
"(",
"(",
"i",
"for",
"i",
",",
"d",
"in",
"enumerate",
"(",
"encodings_from_file",
")",
"if",
"d",
".",
"name",
"==",
"msg",
".",
"decoder",
".",
"name",
")",
",",
"0",
")",
"if",
"msg",
".",
"decoder",
"!=",
"encodings_from_file",
"[",
"i",
"]",
":",
"update",
"=",
"True",
"msg",
".",
"decoder",
"=",
"encodings_from_file",
"[",
"i",
"]",
"msg",
".",
"clear_decoded_bits",
"(",
")",
"msg",
".",
"clear_encoded_bits",
"(",
")",
"if",
"update",
":",
"self",
".",
"refresh_table",
"(",
")",
"self",
".",
"refresh_estimated_time",
"(",
")"
] |
Refresh existing encodings for messages, when encoding was changed by user in dialog
:return:
|
[
"Refresh",
"existing",
"encodings",
"for",
"messages",
"when",
"encoding",
"was",
"changed",
"by",
"user",
"in",
"dialog"
] |
python
|
train
|
wmayner/pyphi
|
pyphi/jsonify.py
|
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/jsonify.py#L233-L248
|
def _load_model(self, dct):
"""Load a serialized PyPhi model.
The object is memoized for reuse elsewhere in the object graph.
"""
classname, version, _ = _pop_metadata(dct)
_check_version(version)
cls = self._models[classname]
# Use `from_json` if available
if hasattr(cls, 'from_json'):
return cls.from_json(dct)
# Default to object constructor
return cls(**dct)
|
[
"def",
"_load_model",
"(",
"self",
",",
"dct",
")",
":",
"classname",
",",
"version",
",",
"_",
"=",
"_pop_metadata",
"(",
"dct",
")",
"_check_version",
"(",
"version",
")",
"cls",
"=",
"self",
".",
"_models",
"[",
"classname",
"]",
"# Use `from_json` if available",
"if",
"hasattr",
"(",
"cls",
",",
"'from_json'",
")",
":",
"return",
"cls",
".",
"from_json",
"(",
"dct",
")",
"# Default to object constructor",
"return",
"cls",
"(",
"*",
"*",
"dct",
")"
] |
Load a serialized PyPhi model.
The object is memoized for reuse elsewhere in the object graph.
|
[
"Load",
"a",
"serialized",
"PyPhi",
"model",
"."
] |
python
|
train
|
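Illustrative sketch, not part of the dataset row above: the dispatch in _load_model (look the class up in a registry, prefer a from_json hook, fall back to the constructor) is a small pattern worth isolating. The registry and class below are invented examples, not PyPhi types.

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

    @classmethod
    def from_json(cls, dct):
        return cls(**dct)

REGISTRY = {'Point': Point}

def load(dct):
    # Pop the class name, then prefer a from_json hook over the constructor.
    cls = REGISTRY[dct.pop('__class__')]
    if hasattr(cls, 'from_json'):
        return cls.from_json(dct)
    return cls(**dct)

# load({'__class__': 'Point', 'x': 1, 'y': 2}) -> Point(1, 2)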
Alignak-monitoring/alignak
|
alignak/dispatcher.py
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/dispatcher.py#L946-L976
|
def stop_request(self, stop_now=False):
"""Send a stop request to all the daemons
:param stop_now: stop now or go to stop wait mode
:type stop_now: bool
:return: True if all daemons are reachable
"""
all_ok = True
for daemon_link in self.all_daemons_links:
logger.debug("Stopping: %s (%s)", daemon_link, stop_now)
if daemon_link == self.arbiter_link:
# I exclude myself from the process, I know we are going to stop ;)
continue
if not daemon_link.active:
# I exclude the daemons that are not active
continue
# Send a stop request to the daemon
try:
stop_ok = daemon_link.stop_request(stop_now=stop_now)
except LinkError:
stop_ok = True
logger.warning("Daemon stop request failed, %s probably stopped!", daemon_link)
all_ok = all_ok and stop_ok
daemon_link.stopping = True
self.stop_request_sent = all_ok
return self.stop_request_sent
|
[
"def",
"stop_request",
"(",
"self",
",",
"stop_now",
"=",
"False",
")",
":",
"all_ok",
"=",
"True",
"for",
"daemon_link",
"in",
"self",
".",
"all_daemons_links",
":",
"logger",
".",
"debug",
"(",
"\"Stopping: %s (%s)\"",
",",
"daemon_link",
",",
"stop_now",
")",
"if",
"daemon_link",
"==",
"self",
".",
"arbiter_link",
":",
"# I exclude myself from the process, I know we are going to stop ;)",
"continue",
"if",
"not",
"daemon_link",
".",
"active",
":",
"# I exclude the daemons that are not active",
"continue",
"# Send a stop request to the daemon",
"try",
":",
"stop_ok",
"=",
"daemon_link",
".",
"stop_request",
"(",
"stop_now",
"=",
"stop_now",
")",
"except",
"LinkError",
":",
"stop_ok",
"=",
"True",
"logger",
".",
"warning",
"(",
"\"Daemon stop request failed, %s probably stopped!\"",
",",
"daemon_link",
")",
"all_ok",
"=",
"all_ok",
"and",
"stop_ok",
"daemon_link",
".",
"stopping",
"=",
"True",
"self",
".",
"stop_request_sent",
"=",
"all_ok",
"return",
"self",
".",
"stop_request_sent"
] |
Send a stop request to all the daemons
:param stop_now: stop now or go to stop wait mode
:type stop_now: bool
:return: True if all daemons are reachable
|
[
"Send",
"a",
"stop",
"request",
"to",
"all",
"the",
"daemons"
] |
python
|
train
|