repo | path | url | code | language | partition
---|---|---|---|---|---|
ipfs/py-ipfs-api
|
ipfsapi/client.py
|
https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/client.py#L2194-L2221
|
def add_pyobj(self, py_obj, **kwargs):
"""Adds a picklable Python object as a file to IPFS.
.. deprecated:: 0.4.2
The ``*_pyobj`` APIs allow for arbitrary code execution if abused.
Either switch to :meth:`~ipfsapi.Client.add_json` or use
``client.add_bytes(pickle.dumps(py_obj))`` instead.
Please see :meth:`~ipfsapi.Client.get_pyobj` for the
**security risks** of using these methods!
.. code-block:: python
>>> c.add_pyobj([0, 1.0, 2j, '3', 4e5])
'QmWgXZSUTNNDD8LdkdJ8UXSn55KfFnNvTP1r7SyaQd74Ji'
Parameters
----------
py_obj : object
A picklable Python object
Returns
-------
str : Hash of the added IPFS object
"""
warnings.warn("Using `*_pyobj` on untrusted data is a security risk",
DeprecationWarning)
return self.add_bytes(encoding.Pickle().encode(py_obj), **kwargs)
|
python
|
train
|
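A usage sketch of the safe alternatives named in the deprecation note above: `add_json` for JSON-serializable data, or explicit pickling plus `add_bytes`. The connection call assumes a local IPFS daemon on the default port.

import pickle

import ipfsapi

c = ipfsapi.connect()  # assumed: daemon on localhost:5001

# JSON-serializable data: prefer add_json (no arbitrary-code-execution risk)
json_hash = c.add_json([0, 1.0, '3'])

# arbitrary picklable objects: pickle explicitly, then add the raw bytes
obj_hash = c.add_bytes(pickle.dumps([0, 1.0, 2j, '3', 4e5]))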
geophysics-ubonn/crtomo_tools
|
lib/crtomo/plotManager.py
|
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/plotManager.py#L290-L498
|
def plot_elements_to_ax(self, cid, ax=None, **kwargs):
"""Plot element data (parameter sets).
If the parameter *ax* is not set, then a new figure will be created
with a corresponding axes.
Parameters
----------
cid : int or :py:class:`numpy.ndarray`
if *cid* is an int, then treat it as the id of the parameter set
stored in self.parman. Otherwise, expect it to be the data to plot.
At the moment no checks are made that the data fits the grid.
ax : matplotlib.Axes, optional
plot to this axes object, if provided
cid_alpha : int, optional
if given, use the corresponding dataset in self.parman as the alpha
channel. No checks are made that all values of this data set lie
between 0 and 1 (0 being fully transparent, and 1 being opaque).
xmin : float, optional
minimal x limit to plot
xmax : float, optional
maximal x limit to plot
zmin : float, optional
minimal z limit to plot
zmax : float, optional
maximal z limit to plot
converter : function, optional
if given, then use this function to convert the data into another
representation. The given function must work with a numpy array.
Default: None
norm : norm object, optional
the norm object for matplotlib plotting can be provided here
cmap_name : string, optional
name of the colormap to use. Default is "viridis". To reverse
colors, use the _r version "viridis_r"
cbposition : string, optional
position of the colorbar: 'vertical' (default) or 'horizontal'
cblabel : string, optional
colorbar label
cbsegments : int, optional
if given, discretize the colormap into this many segments
cbnrticks : int, optional
maximum number of colorbar ticks. Default: 3
cbmin : float, optional
lower limit of the colorbar. Default: minimum of the plotted data
cbmax : float, optional
upper limit of the colorbar. Default: maximum of the plotted data
over : color, optional
color to use for values above the current cb-limit. Default: 'orange'
under : color, optional
color to use for values below the current cb-limit. Default: 'mediumblue'
bad : color, optional
color to use for nan-values. Default: 'white'
plot_colorbar : bool, optional
if true, plot a colorbar next to the plot
title : string, optional
plot title string
xlabel : string, optional
Set xlabel of the resulting plot
zlabel : string, optional
Set the z axis label of the resulting plot
no_elecs : bool, optional
If True, plot no electrodes
rasterize: bool, optional
if True, rasterize the plot. Default: False
Returns
-------
fig : matplotlib.Figure
the figure used for plotting
ax : matplotlib.Axes
the axes used for plotting
cnorm : matplotlib.colors.Normalize
the data normalization used
cmap : matplotlib colormap
the colormap used
cb : colorbar instance, optional
only if plot_colorbar is True
scalarMap : matplotlib.cm.ScalarMappable
can be used to create custom colorbars
"""
rasterize = kwargs.get('rasterize', False)
xmin = kwargs.get('xmin', self.grid.grid['x'].min())
xmax = kwargs.get('xmax', self.grid.grid['x'].max())
zmin = kwargs.get('zmin', self.grid.grid['z'].min())
zmax = kwargs.get('zmax', self.grid.grid['z'].max())
# try to create a suitable default figure size
if ax is None:
# 15 cm
sizex = 15 / 2.54
sizez = sizex * (np.abs(zmax - zmin) / np.abs(xmax - xmin) * 1.1)
# add 1 inch to accommodate colorbar
sizez += 1.3
fig, ax = plt.subplots(figsize=(sizex, sizez))
else:
fig = ax.get_figure()
sizex, sizez = fig.get_size_inches()
# get data
if isinstance(cid, int):
subdata = self.parman.parsets[cid]
else:
subdata = cid
if 'converter' in kwargs:
subdata = kwargs['converter'](subdata)
# color map
cmap_name = kwargs.get('cmap_name', 'viridis')
cmap = mpl.cm.get_cmap(
cmap_name,
kwargs.get('cbsegments', None)
)
over = kwargs.get('over', 'orange')
under = kwargs.get('under', 'mediumblue')
bad = kwargs.get('bad', 'white')
cmap.set_over(over)
cmap.set_under(under)
cmap.set_bad(bad)
# normalize data
data_min = kwargs.get('cbmin', subdata.min())
data_max = kwargs.get('cbmax', subdata.max())
if(data_min is not None and data_max is not None and
data_min == data_max):
data_min -= 1
data_max += 1
cnorm = mpl.colors.Normalize(vmin=data_min, vmax=data_max)
scalarMap = mpl.cm.ScalarMappable(norm=cnorm, cmap=cmap)
fcolors = scalarMap.to_rgba(subdata)
scalarMap.set_array(subdata)
# if applicable, apply alpha values
alpha_cid = kwargs.get('cid_alpha', None)
if isinstance(alpha_cid, int):
print('applying alpha')
alpha = self.parman.parsets[alpha_cid]
# make sure this data set is normalized between 0 and 1
if np.nanmin(alpha) < 0 or np.nanmax(alpha) > 1:
raise Exception(
'alpha data set must be normalized between 0 and 1'
)
fcolors[:, 3] = alpha
all_xz = []
for x, z in zip(self.grid.grid['x'], self.grid.grid['z']):
tmp = np.vstack((x, z)).T
all_xz.append(tmp)
norm = kwargs.get('norm', None)
collection = mpl.collections.PolyCollection(
all_xz,
edgecolor=fcolors,
facecolor=fcolors,
linewidth=0.0,
cmap=cmap,
norm=norm,
rasterized=rasterize,
)
collection.set_cmap(cmap)
ax.add_collection(collection)
no_elecs = kwargs.get('no_elecs', False)
if self.grid.electrodes is not None and no_elecs is not True:
ax.scatter(
self.grid.electrodes[:, 1],
self.grid.electrodes[:, 2],
color=self.grid.props['electrode_color'],
# clip_on=False,
)
ax.set_xlim(xmin, xmax)
ax.set_ylim(zmin, zmax)
ax.set_xlabel(kwargs.get('xlabel', 'x'))
ax.set_ylabel(kwargs.get('zlabel', 'z'))
ax.set_aspect('equal')
ax.set_title(
kwargs.get('title', '')
)
if kwargs.get('plot_colorbar', False):
divider = make_axes_locatable(ax)
cbposition = kwargs.get('cbposition', 'vertical')
if cbposition == 'horizontal':
ax_cb = divider.new_vertical(
size=0.1, pad=0.4, pack_start=True
)
elif cbposition == 'vertical':
ax_cb = divider.new_horizontal(
size=0.1, pad=0.4,
)
else:
raise Exception('cbposition not recognized')
ax.get_figure().add_axes(ax_cb)
cb = fig.colorbar(
scalarMap,
cax=ax_cb,
orientation=cbposition,
label=kwargs.get('cblabel', ''),
ticks=mpl.ticker.MaxNLocator(kwargs.get('cbnrticks', 3)),
format=kwargs.get('cbformat', None),
extend='both',
)
return fig, ax, cnorm, cmap, cb, scalarMap
return fig, ax, cnorm, cmap, scalarMap
|
python
|
train
|
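A minimal, self-contained sketch of the core technique used by `plot_elements_to_ax`: per-element data is mapped to RGBA colors through a `Normalize` + `ScalarMappable` pair and drawn as a `PolyCollection`. The grid and data below are made up.

import matplotlib.pyplot as plt
import numpy as np
from matplotlib.cm import ScalarMappable
from matplotlib.collections import PolyCollection
from matplotlib.colors import Normalize

# four quadrilateral elements, each a (4, 2) array of (x, z) vertices
elements = [np.array([[i, 0], [i + 1, 0], [i + 1, 1], [i, 1]]) for i in range(4)]
data = np.array([1.0, 2.0, 3.0, 4.0])  # one value per element

# map data values to RGBA colors, as the method above does
cnorm = Normalize(vmin=data.min(), vmax=data.max())
smap = ScalarMappable(norm=cnorm, cmap='viridis')
fcolors = smap.to_rgba(data)
smap.set_array(data)

fig, ax = plt.subplots()
ax.add_collection(PolyCollection(elements, facecolor=fcolors,
                                 edgecolor=fcolors, linewidth=0.0))
ax.autoscale_view()
ax.set_aspect('equal')
fig.colorbar(smap, ax=ax, label='parameter value')
plt.show()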
lpantano/seqcluster
|
seqcluster/libs/thinkbayes.py
|
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1283-L1296
|
def MakeSuiteFromDict(d, name=''):
"""Makes a suite from a map from values to probabilities.
Args:
d: dictionary that maps values to probabilities
name: string name for this suite
Returns:
Suite object
"""
suite = Suite(name=name)
suite.SetDict(d)
suite.Normalize()
return suite
|
python
|
train
|
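An illustrative plain-dict equivalent of what MakeSuiteFromDict does (SetDict followed by Normalize), without the Suite class:

def make_normalized_dict(d):
    # rescale so the probabilities sum to 1 (what Suite.Normalize() does)
    total = float(sum(d.values()))
    return {value: p / total for value, p in d.items()}

pmf = make_normalized_dict({'heads': 2.0, 'tails': 1.0})
# {'heads': 0.666..., 'tails': 0.333...}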
idlesign/uwsgiconf
|
uwsgiconf/options/caching.py
|
https://github.com/idlesign/uwsgiconf/blob/475407acb44199edbf7e0a66261bfeb51de1afae/uwsgiconf/options/caching.py#L83-L196
|
def add_cache(
self, name, max_items, expires=None, store=None, store_sync_interval=None, store_delete=None,
hash_algo=None, hash_size=None, key_size=None, udp_clients=None, udp_servers=None,
block_size=None, block_count=None, sync_from=None, mode_bitmap=None, use_lastmod=None,
full_silent=None, full_purge_lru=None):
"""Creates cache. Default mode: single block.
.. note:: This uses new generation ``cache2`` option available since uWSGI 1.9.
.. note:: When at least one cache is configured without ``full_purge_lru``
and the master is enabled, a thread named "the cache sweeper" is started.
Its main purpose is deleting expired keys from the cache.
If you want auto-expiring you need to enable the master.
:param str|unicode name: Set the name of the cache. Must be unique in an instance.
:param int max_items: Set the maximum number of cache items.
.. note:: Effective number of items is **max_items - 1** -
the first item of the cache is always internally used as "NULL/None/undef".
:param int expires: The number of seconds after which the object is no longer valid
(it will then be removed by the cache sweeper when ``full_purge_lru`` is not set).
:param str|unicode store: Set the filename for the persistent storage.
If it doesn't exist, the system assumes an empty cache and the file will be created.
:param int store_sync_interval: Set the number of seconds after which msync() is called
to flush memory cache on disk when in persistent mode.
By default it is disabled leaving the decision-making to the kernel.
:param bool store_delete: uWSGI, by default, will not start if a cache file exists
and the store file does not match the configured items/blocksize.
Setting this option will make uWSGI delete the existing file upon mismatch
and create a new one.
:param str|unicode hash_algo: Set the hash algorithm used in the hash table. Current options are:
* djb33x (default)
* murmur2
:param int hash_size: This is the size of the hash table in bytes.
Generally 65536 (the default) is a good value.
.. note:: Change it only if you know what you are doing
or if you have a lot of collisions in your cache.
:param int key_size: Set the maximum size of a key, in bytes. Default: 2048.
:param str|unicode|list udp_clients: List of UDP servers which will receive UDP cache updates.
:param str|unicode|list udp_servers: List of UDP addresses on which to bind the cache
to wait for UDP updates.
:param int block_size: Set the size (in bytes) of a single block.
.. note:: It's a good idea to use a multiple of 4096 (common memory page size).
:param int block_count: Set the number of blocks in the cache. Useful only in bitmap mode,
otherwise the number of blocks is equal to the maximum number of items.
:param str|unicode|list sync_from: List of uWSGI addresses which the cache subsystem will connect to
for getting a full dump of the cache. It can be used for initial cache synchronization.
The first node sending a valid dump will stop the procedure.
:param bool mode_bitmap: Enable (more versatile but relatively slower) bitmap mode.
http://uwsgi-docs.readthedocs.io/en/latest/Caching.html#single-block-faster-vs-bitmaps-slower
.. warning:: Considered production ready only from uWSGI 2.0.2.
:param bool use_lastmod: Enabling will update last_modified_at timestamp of each cache
on every cache item modification. Enable it if you want to track this value
or if other features depend on it. This value will then be accessible via the stats socket.
:param bool full_silent: By default uWSGI will print a warning message on every cache set
operation if the cache is full. To disable this warning set this option.
.. note:: Available since 2.0.4.
:param bool full_purge_lru: Allows the caching framework to evict the Least Recently Used
(LRU) item when you try to add a new item to a cache storage that is full.
.. note:: ``expires`` argument will be ignored.
"""
value = KeyValue(
locals(),
keys=[
'name', 'max_items', 'expires', 'store', 'store_sync_interval', 'store_delete',
'hash_algo', 'hash_size', 'key_size', 'udp_clients', 'udp_servers',
'block_size', 'block_count', 'sync_from', 'mode_bitmap', 'use_lastmod',
'full_silent', 'full_purge_lru',
],
aliases={
'max_items': 'maxitems',
'store_sync_interval': 'storesync',
'hash_algo': 'hash',
'udp_clients': 'nodes',
'block_size': 'blocksize',
'block_count': 'blocks',
'sync_from': 'sync',
'mode_bitmap': 'bitmap',
'use_lastmod': 'lastmod',
'full_silent': 'ignore_full',
'full_purge_lru': 'purge_lru',
},
bool_keys=['store_delete', 'mode_bitmap', 'use_lastmod', 'full_silent', 'full_purge_lru'],
list_keys=['udp_clients', 'udp_servers', 'sync_from'],
)
self._set('cache2', value, multi=True)
return self._section
|
python
|
train
|
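A hedged sketch, not uwsgiconf's actual KeyValue implementation, of how a ``cache2`` option value could be assembled from the keys and aliases listed above: selected non-None parameters are renamed through the alias map and joined into ``key=value`` pairs.

def key_value(params, keys, aliases):
    # join selected, non-None parameters into "key=value" pairs,
    # renaming keys through the aliases mapping
    pairs = []
    for key in keys:
        value = params.get(key)
        if value is not None:
            pairs.append('%s=%s' % (aliases.get(key, key), value))
    return ','.join(pairs)

print(key_value(
    {'name': 'mycache', 'max_items': 100, 'expires': None},
    keys=['name', 'max_items', 'expires'],
    aliases={'max_items': 'maxitems'},
))
# -> name=mycache,maxitems=100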
LIVVkit/LIVVkit
|
livvkit/util/elements.py
|
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/util/elements.py#L92-L126
|
def tab(tab_name, element_list=None, section_list=None):
"""
Returns a dictionary representing a new tab to display elements.
This can be thought of as a simple container for displaying multiple
types of information.
Args:
tab_name: The title to display
element_list: The list of elements to display. If a single element is
given it will be wrapped in a list.
section_list: A list of sections to display.
Returns:
A dictionary with metadata specifying that it is to be rendered
as a page containing multiple elements and/or sections.
"""
_tab = {
'Type': 'Tab',
'Title': tab_name,
}
if element_list is not None:
if isinstance(element_list, list):
_tab['Elements'] = element_list
else:
_tab['Elements'] = [element_list]
if section_list is not None:
if isinstance(section_list, list):
_tab['Sections'] = section_list
else:
# wrap a single section in a list, mirroring the element_list handling above
_tab['Sections'] = [section_list]
return _tab
|
python
|
train
|
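A usage sketch for tab with a single (non-list) element; the element dict is made up, and the result follows from the wrapping branch above:

single = {'Type': 'Table', 'Title': 'Errors'}  # a made-up element dict
page = tab('Verification', element_list=single)
# page == {'Type': 'Tab', 'Title': 'Verification',
#          'Elements': [{'Type': 'Table', 'Title': 'Errors'}]}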
codenerix/django-codenerix
|
codenerix/authbackend.py
|
https://github.com/codenerix/django-codenerix/blob/1f5527b352141caaee902b37b2648791a06bd57d/codenerix/authbackend.py#L407-L429
|
def debug(self, msg):
'''
Handle the debugging to a file
'''
# If debug is not disabled
if self.__debug is not False:
# If never was set, try to set it up
if self.__debug is None:
# Check what do we have inside settings
debug_filename = getattr(settings, "AD_DEBUG_FILE", None)
if debug_filename:
# Open the debug file pointer
self.__debug = open(debug_filename, 'a')
else:
# Disable debugging forever
self.__debug = False
if self.__debug:
# Debug the given message
self.__debug.write("{}\n".format(msg))
self.__debug.flush()
|
python
|
train
|
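A self-contained sketch of the lazy-open pattern used above: the handle doubles as a tri-state flag (None = undecided, False = disabled forever, file object = enabled). Class and attribute names here are illustrative.

class DebugLog(object):
    def __init__(self, path=None):
        self._debug = None  # None: undecided, False: disabled, file: enabled
        self._path = path

    def debug(self, msg):
        if self._debug is not False:
            if self._debug is None:
                # the first call decides: open the log file or disable forever
                self._debug = open(self._path, 'a') if self._path else False
            if self._debug:
                self._debug.write("{}\n".format(msg))
                self._debug.flush()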
nickmckay/LiPD-utilities
|
Python/lipd/directory.py
|
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/directory.py#L192-L202
|
def collect_metadata_file(full_path):
"""
Create the metadata entry for a single file
:param str full_path: full path to the file
:return dict: the file metadata (full path, filename with and without extension, and directory)
"""
fne = os.path.basename(full_path)
fn = os.path.splitext(fne)[0]
obj = {"full_path": full_path, "filename_ext": fne, "filename_no_ext": fn, "dir": os.path.dirname(full_path)}
return obj
|
python
|
train
|
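An example of the returned metadata (the path is made up):

meta = collect_metadata_file("/data/ODP1098B.lpd")
# meta == {'full_path': '/data/ODP1098B.lpd',
#          'filename_ext': 'ODP1098B.lpd',
#          'filename_no_ext': 'ODP1098B',
#          'dir': '/data'}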
rshk/python-libxdo
|
xdo/__init__.py
|
https://github.com/rshk/python-libxdo/blob/84cafa5943b005bc423edd28203a5266b3579ac3/xdo/__init__.py#L596-L604
|
def select_window_with_click(self):
"""
Get a window ID by clicking on it.
This function blocks until a selection is made.
"""
window_ret = window_t(0)
_libxdo.xdo_select_window_with_click(
self._xdo, ctypes.byref(window_ret))
return window_ret.value
|
python
|
train
|
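A hypothetical usage sketch, assuming the Xdo wrapper class exported by this module and a running X session:

from xdo import Xdo

xdo = Xdo()
print('Click on a window to select it...')
window_id = xdo.select_window_with_click()  # blocks until the click
print('Selected window id: 0x%x' % window_id)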
bububa/pyTOP
|
pyTOP/simba.py
|
https://github.com/bububa/pyTOP/blob/1e48009bcfe886be392628244b370e6374e1f2b2/pyTOP/simba.py#L310-L322
|
def add(self, campaign_id, item_id, default_price, title, img_url, nick=None):
'''xxxxx.xxxxx.adgroup.add
===================================
Create an ad group.'''
request = TOPRequest('xxxxx.xxxxx.adgroup.add')
request['campaign_id'] = campaign_id
request['item_id'] = item_id
request['default_price'] = default_price
request['title'] = title
request['img_url'] = img_url
if nick is not None: request['nick'] = nick
self.create(self.execute(request), fields=['success','result','result_code','result_message'], models={'result':ADGroup})
return self.result
|
python
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/text_encoder.py
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/text_encoder.py#L750-L866
|
def build_from_token_counts(self,
token_counts,
min_count,
num_iterations=4,
reserved_tokens=None,
max_subtoken_length=None):
"""Train a SubwordTextEncoder based on a dictionary of word counts.
Args:
token_counts: a dictionary of Unicode strings to int.
min_count: an integer - discard subtokens with lower counts.
num_iterations: an integer; how many iterations of refinement to run.
reserved_tokens: List of reserved tokens. The global variable
`RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
argument is `None`, it will use `RESERVED_TOKENS`.
max_subtoken_length: Maximum length of a subtoken. If this is not set,
then the runtime and memory use of creating the vocab is quadratic in
the length of the longest token. If this is set, then it is instead
O(max_subtoken_length * length of longest token).
Raises:
ValueError: if `RESERVED_TOKENS` is not a prefix of `reserved_tokens`. In
this case it is not clear what the space is being reserved for, or when it
will be filled in.
"""
if reserved_tokens is None:
reserved_tokens = RESERVED_TOKENS
else:
# There is not complete freedom in replacing RESERVED_TOKENS.
for default, proposed in zip(RESERVED_TOKENS, reserved_tokens):
if default != proposed:
raise ValueError("RESERVED_TOKENS must be a prefix of "
"reserved_tokens.")
# Initialize the alphabet. Note, this must include reserved tokens or it can
# result in encoding failures.
alphabet_tokens = chain(six.iterkeys(token_counts),
[native_to_unicode(t) for t in reserved_tokens])
self._init_alphabet_from_tokens(alphabet_tokens)
# Bootstrap the initial list of subtokens with the characters from the
# alphabet plus the escaping characters.
self._init_subtokens_from_list(list(self._alphabet),
reserved_tokens=reserved_tokens)
# We build iteratively. On each iteration, we segment all the words,
# then count the resulting potential subtokens, keeping the ones
# with high enough counts for our new vocabulary.
if min_count < 1:
min_count = 1
for i in range(num_iterations):
tf.logging.info("Iteration {0}".format(i))
# Collect all substrings of the encoded token that break along current
# subtoken boundaries.
subtoken_counts = collections.defaultdict(int)
for token, count in six.iteritems(token_counts):
iter_start_time = time.time()
escaped_token = _escape_token(token, self._alphabet)
subtokens = self._escaped_token_to_subtoken_strings(escaped_token)
start = 0
for subtoken in subtokens:
last_position = len(escaped_token) + 1
if max_subtoken_length is not None:
last_position = min(last_position, start + max_subtoken_length)
for end in range(start + 1, last_position):
new_subtoken = escaped_token[start:end]
subtoken_counts[new_subtoken] += count
start += len(subtoken)
iter_time_secs = time.time() - iter_start_time
if iter_time_secs > 0.1:
tf.logging.info(u"Processing token [{0}] took {1} seconds, consider "
"setting Text2TextProblem.max_subtoken_length to a "
"smaller value.".format(token, iter_time_secs))
# Array of sets of candidate subtoken strings, by length.
len_to_subtoken_strings = []
for subtoken_string, count in six.iteritems(subtoken_counts):
lsub = len(subtoken_string)
if count >= min_count:
while len(len_to_subtoken_strings) <= lsub:
len_to_subtoken_strings.append(set())
len_to_subtoken_strings[lsub].add(subtoken_string)
# Consider the candidates longest to shortest, so that if we accept
# a longer subtoken string, we can decrement the counts of its prefixes.
new_subtoken_strings = []
for lsub in range(len(len_to_subtoken_strings) - 1, 0, -1):
subtoken_strings = len_to_subtoken_strings[lsub]
for subtoken_string in subtoken_strings:
count = subtoken_counts[subtoken_string]
if count >= min_count:
# Exclude alphabet tokens here, as they must be included later,
# explicitly, regardless of count.
if subtoken_string not in self._alphabet:
new_subtoken_strings.append((count, subtoken_string))
for l in range(1, lsub):
subtoken_counts[subtoken_string[:l]] -= count
# Include the alphabet explicitly to guarantee all strings are encodable.
new_subtoken_strings.extend((subtoken_counts.get(a, 0), a)
for a in self._alphabet)
new_subtoken_strings.sort(reverse=True)
# Reinitialize to the candidate vocabulary.
new_subtoken_strings = [subtoken for _, subtoken in new_subtoken_strings]
if reserved_tokens:
escaped_reserved_tokens = [
_escape_token(native_to_unicode(t), self._alphabet)
for t in reserved_tokens
]
new_subtoken_strings = escaped_reserved_tokens + new_subtoken_strings
self._init_subtokens_from_list(new_subtoken_strings)
tf.logging.info("vocab_size = %d" % self.vocab_size)
|
python
|
train
|
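A stripped-down sketch of the inner counting step of build_from_token_counts: given word counts and the current segmentation, count every substring that starts on a subtoken boundary, optionally capped at max_subtoken_length. The segment callable stands in for _escaped_token_to_subtoken_strings (escaping omitted here).

import collections

def count_candidate_subtokens(token_counts, segment, max_subtoken_length=None):
    # count every substring starting on a current subtoken boundary,
    # weighted by the word count, as in the inner loop above
    subtoken_counts = collections.defaultdict(int)
    for token, count in token_counts.items():
        start = 0
        for subtoken in segment(token):
            last_position = len(token) + 1
            if max_subtoken_length is not None:
                last_position = min(last_position, start + max_subtoken_length)
            for end in range(start + 1, last_position):
                subtoken_counts[token[start:end]] += count
            start += len(subtoken)
    return subtoken_counts

counts = count_candidate_subtokens({'low': 5, 'lower': 2}, segment=list)
# counts['l'] == 7, counts['low'] == 7, counts['lower'] == 2, ...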
sendwithus/sendwithus_python
|
sendwithus/__init__.py
|
https://github.com/sendwithus/sendwithus_python/blob/8ae50d514febd44f7d9be3c838b4d92f99412832/sendwithus/__init__.py#L755-L774
|
def _api_request(self, endpoint, http_method, *args, **kwargs):
"""Private method for api requests"""
logger.debug(' > Queueing batch api request for endpoint: %s' % endpoint)
path = self._build_request_path(endpoint, absolute=False)
logger.debug('\tpath: %s' % path)
data = None
if 'payload' in kwargs:
data = kwargs['payload']
logger.debug('\tdata: %s' % data)
command = {
"path": path,
"method": http_method
}
if data:
command['body'] = data
self._commands.append(command)
|
python
|
valid
|
alvarogzp/telegram-bot-framework
|
bot/multithreading/scheduler.py
|
https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/multithreading/scheduler.py#L156-L166
|
def new_worker_pool(self, name: str, min_workers: int = 0, max_workers: int = 1,
max_seconds_idle: int = DEFAULT_WORKER_POOL_MAX_SECONDS_IDLE):
"""
Creates a new worker pool and starts it.
Returns the Worker that schedules work to the pool.
"""
if not self.running:
return self.immediate_worker
worker = self._new_worker_pool(name, min_workers, max_workers, max_seconds_idle)
self._start_worker_pool(worker)
return worker
|
python
|
train
|
apache/incubator-mxnet
|
python/mxnet/optimizer/optimizer.py
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/optimizer/optimizer.py#L1701-L1710
|
def get_states(self, dump_optimizer=False):
"""Gets updater states.
Parameters
----------
dump_optimizer : bool, default False
Whether to also save the optimizer itself. This would also save optimizer
information such as learning rate and weight decay schedules.
"""
return pickle.dumps((self.states, self.optimizer) if dump_optimizer else self.states)
|
python
|
train
|
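A hypothetical round trip, assuming mx.optimizer.get_updater and the set_states counterpart on the same updater class:

import mxnet as mx

updater = mx.optimizer.get_updater(mx.optimizer.SGD(learning_rate=0.1))
state = updater.get_states(dump_optimizer=True)  # pickled bytes
# ... later, possibly in another process:
updater.set_states(state)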
spyder-ide/spyder
|
spyder/widgets/fileswitcher.py
|
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/widgets/fileswitcher.py#L565-L575
|
def get_widget(self, index=None, path=None, tabs=None):
"""Get widget by index.
If tabs and an index are not both specified, the currently active widget is returned.
"""
if (index and tabs) or (path and tabs):
return tabs.widget(index)
elif self.plugin:
return self.get_plugin_tabwidget(self.plugin).currentWidget()
else:
return self.plugins_tabs[0][0].currentWidget()
|
python
|
train
|
ARMmbed/yotta
|
yotta/lib/component.py
|
https://github.com/ARMmbed/yotta/blob/56bc1e56c602fa20307b23fe27518e9cd6c11af1/yotta/lib/component.py#L41-L65
|
def _truthyConfValue(v):
''' Determine yotta-config truthiness. In yotta config land truthiness is
different to python or json truthiness (in order to map nicely onto
preprocessor and CMake definedness):
json -> python -> truthy/falsey
false -> False -> Falsey
null -> None -> Falsey
undefined -> None -> Falsey
0 -> 0 -> Falsey
"" -> "" -> Truthy (different from python)
"0" -> "0" -> Truthy
{} -> {} -> Truthy (different from python)
[] -> [] -> Truthy (different from python)
everything else is truthy
'''
if v is False:
return False
elif v is None:
return False
elif v == 0:
return False
else:
# everything else is truthy!
return True
|
python
|
valid
|
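The docstring's truth table, restated as assertions:

assert _truthyConfValue(False) is False
assert _truthyConfValue(None) is False
assert _truthyConfValue(0) is False
assert _truthyConfValue("") is True    # differs from Python
assert _truthyConfValue("0") is True
assert _truthyConfValue({}) is True    # differs from Python
assert _truthyConfValue([]) is True    # differs from Python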
datacats/datacats
|
datacats/environment.py
|
https://github.com/datacats/datacats/blob/e4bae503efa997660fb3f34fe166699569653157/datacats/environment.py#L295-L304
|
def create_ckan_ini(self):
"""
Use make-config to generate an initial development.ini file
"""
self.run_command(
command='/scripts/run_as_user.sh /usr/lib/ckan/bin/paster make-config'
' ckan /project/development.ini',
rw_project=True,
ro={scripts.get_script_path('run_as_user.sh'): '/scripts/run_as_user.sh'},
)
|
python
|
train
|
PmagPy/PmagPy
|
SPD/lib/lib_arai_plot_statistics.py
|
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/SPD/lib/lib_arai_plot_statistics.py#L297-L307
|
def get_normed_points(point_array, norm): # good to go
"""
input: point_array, norm
output: normed array
"""
norm = float(norm)
points = old_div(numpy.array(point_array), norm)
return points
|
python
|
train
|
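A quick example; old_div comes from past.utils (an assumed import for this module) and behaves like true division for non-integer operands:

import numpy
from past.utils import old_div  # assumed import

print(get_normed_points([1, 2, 4], 2))
# -> [0.5 1.  2. ]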
Vagrants/blackbird
|
blackbird/utils/configread.py
|
https://github.com/Vagrants/blackbird/blob/3b38cd5650caae362e0668dbd38bf8f88233e079/blackbird/utils/configread.py#L229-L250
|
def add_default_module_dir(self):
"""
Add directory to store built-in plugins to `module_dir` parameter.
Default directory to store plugins is `BLACKBIRD_INSTALL_DIR/plugins`.
:rtype: None
:return: None
"""
default_module_dir = os.path.join(
os.path.abspath(os.path.curdir),
'plugins'
)
module_dir_params = {
'module_dir': [default_module_dir]
}
if 'module_dir' in self.config['global']:
module_dir_params['module_dir'].append(
self.config['global']['module_dir']
)
self.config['global'].update(
module_dir_params
)
|
[
"def",
"add_default_module_dir",
"(",
"self",
")",
":",
"default_module_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"curdir",
")",
",",
"'plugins'",
")",
"module_dir_params",
"=",
"{",
"'module_dir'",
":",
"[",
"default_module_dir",
"]",
"}",
"if",
"'module_dir'",
"in",
"self",
".",
"config",
"[",
"'global'",
"]",
":",
"module_dir_params",
"[",
"'module_dir'",
"]",
".",
"append",
"(",
"self",
".",
"config",
"[",
"'global'",
"]",
"[",
"'module_dir'",
"]",
")",
"self",
".",
"config",
"[",
"'global'",
"]",
".",
"update",
"(",
"module_dir_params",
")"
] |
Add directory to store built-in plugins to `module_dir` parameter.
Default directory to store plugins is `BLACKBIRD_INSTALL_DIR/plugins`.
:rtype: None
:return: None
|
[
"Add",
"directory",
"to",
"store",
"built",
"-",
"in",
"plugins",
"to",
"module_dir",
"parameter",
".",
"Default",
"directory",
"to",
"store",
"plugins",
"is",
"BLACKBIRD_INSTALL_DIR",
"/",
"plugins",
".",
":",
"rtype",
":",
"None",
":",
"return",
":",
"None"
] |
python
|
train
|
jayvdb/flake8-putty
|
flake8_putty/config.py
|
https://github.com/jayvdb/flake8-putty/blob/854b2c6daef409974c2f5e9c5acaf0a069b0ff23/flake8_putty/config.py#L267-L278
|
def match(self, filename, line, codes):
"""Match rule."""
if ((not self.file_selectors or self.file_match_any(filename)) and
(not self.environment_marker_selector or
self.environment_marker_evaluate()) and
(not self.code_selectors or self.codes_match_any(codes))):
if self.regex_selectors:
return super(Rule, self).match(filename, line, codes)
else:
return True
return False
|
[
"def",
"match",
"(",
"self",
",",
"filename",
",",
"line",
",",
"codes",
")",
":",
"if",
"(",
"(",
"not",
"self",
".",
"file_selectors",
"or",
"self",
".",
"file_match_any",
"(",
"filename",
")",
")",
"and",
"(",
"not",
"self",
".",
"environment_marker_selector",
"or",
"self",
".",
"environment_marker_evaluate",
"(",
")",
")",
"and",
"(",
"not",
"self",
".",
"code_selectors",
"or",
"self",
".",
"codes_match_any",
"(",
"codes",
")",
")",
")",
":",
"if",
"self",
".",
"regex_selectors",
":",
"return",
"super",
"(",
"Rule",
",",
"self",
")",
".",
"match",
"(",
"filename",
",",
"line",
",",
"codes",
")",
"else",
":",
"return",
"True",
"return",
"False"
] |
Match rule.
|
[
"Match",
"rule",
"."
] |
python
|
train
|
dereneaton/ipyrad
|
ipyrad/analysis/bucky.py
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/bucky.py#L599-L608
|
def _resolveambig(subseq):
"""
Randomly resolves iupac hetero codes. This is a shortcut
for now, we could instead use the phased alleles in RAD loci.
"""
N = []
for col in subseq:
rand = np.random.binomial(1, 0.5)
N.append([_AMBIGS[i][rand] for i in col])
return np.array(N)
|
[
"def",
"_resolveambig",
"(",
"subseq",
")",
":",
"N",
"=",
"[",
"]",
"for",
"col",
"in",
"subseq",
":",
"rand",
"=",
"np",
".",
"random",
".",
"binomial",
"(",
"1",
",",
"0.5",
")",
"N",
".",
"append",
"(",
"[",
"_AMBIGS",
"[",
"i",
"]",
"[",
"rand",
"]",
"for",
"i",
"in",
"col",
"]",
")",
"return",
"np",
".",
"array",
"(",
"N",
")"
] |
Randomly resolves iupac hetero codes. This is a shortcut
for now, we could instead use the phased alleles in RAD loci.
|
[
"Randomly",
"resolves",
"iupac",
"hetero",
"codes",
".",
"This",
"is",
"a",
"shortcut",
"for",
"now",
"we",
"could",
"instead",
"use",
"the",
"phased",
"alleles",
"in",
"RAD",
"loci",
"."
] |
python
|
valid
|
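Self-contained sketch of the random IUPAC resolution above; the small _AMBIGS table here is an assumption (the real module maps every hetero code):

import numpy as np

_AMBIGS = {"R": ("A", "G"), "Y": ("C", "T"),
           "A": ("A", "A"), "C": ("C", "C")}  # toy subset, for illustration

def resolveambig(subseq):
    resolved = []
    for col in subseq:
        rand = np.random.binomial(1, 0.5)  # one coin flip per column
        resolved.append([_AMBIGS[i][rand] for i in col])
    return np.array(resolved)

print(resolveambig([["R", "A"], ["Y", "C"]]))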
pypa/pipenv
|
pipenv/project.py
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/project.py#L159-L164
|
def path_to(self, p):
"""Returns the absolute path to a given relative path."""
if os.path.isabs(p):
return p
return os.sep.join([self._original_dir, p])
|
[
"def",
"path_to",
"(",
"self",
",",
"p",
")",
":",
"if",
"os",
".",
"path",
".",
"isabs",
"(",
"p",
")",
":",
"return",
"p",
"return",
"os",
".",
"sep",
".",
"join",
"(",
"[",
"self",
".",
"_original_dir",
",",
"p",
"]",
")"
] |
Returns the absolute path to a given relative path.
|
[
"Returns",
"the",
"absolute",
"path",
"to",
"a",
"given",
"relative",
"path",
"."
] |
python
|
train
|
gholt/swiftly
|
swiftly/client/standardclient.py
|
https://github.com/gholt/swiftly/blob/5bcc1c65323b1caf1f85adbefd9fc4988c072149/swiftly/client/standardclient.py#L222-L250
|
def auth(self):
"""
See :py:func:`swiftly.client.client.Client.auth`
"""
self.reset()
if not self.auth_url:
raise ValueError('No Auth URL has been provided.')
funcs = []
if self.auth_methods:
for method in self.auth_methods.split(','):
funcs.append(getattr(self, '_' + method))
if not funcs:
if '1.0' in self.auth_url:
funcs = [self._auth1, self._auth2key, self._auth2password]
if not self.auth_tenant:
funcs.append(self._auth2password_force_tenant)
else:
funcs = [self._auth2key, self._auth2password]
if not self.auth_tenant:
funcs.append(self._auth2password_force_tenant)
funcs.append(self._auth1)
info = []
for func in funcs:
status, reason = func()
info.append('%s %s' % (status, reason))
if status // 100 == 2:
break
else:
raise self.HTTPException('Auth failure %r.' % info)
|
[
"def",
"auth",
"(",
"self",
")",
":",
"self",
".",
"reset",
"(",
")",
"if",
"not",
"self",
".",
"auth_url",
":",
"raise",
"ValueError",
"(",
"'No Auth URL has been provided.'",
")",
"funcs",
"=",
"[",
"]",
"if",
"self",
".",
"auth_methods",
":",
"for",
"method",
"in",
"self",
".",
"auth_methods",
".",
"split",
"(",
"','",
")",
":",
"funcs",
".",
"append",
"(",
"getattr",
"(",
"self",
",",
"'_'",
"+",
"method",
")",
")",
"if",
"not",
"funcs",
":",
"if",
"'1.0'",
"in",
"self",
".",
"auth_url",
":",
"funcs",
"=",
"[",
"self",
".",
"_auth1",
",",
"self",
".",
"_auth2key",
",",
"self",
".",
"_auth2password",
"]",
"if",
"not",
"self",
".",
"auth_tenant",
":",
"funcs",
".",
"append",
"(",
"self",
".",
"_auth2password_force_tenant",
")",
"else",
":",
"funcs",
"=",
"[",
"self",
".",
"_auth2key",
",",
"self",
".",
"_auth2password",
"]",
"if",
"not",
"self",
".",
"auth_tenant",
":",
"funcs",
".",
"append",
"(",
"self",
".",
"_auth2password_force_tenant",
")",
"funcs",
".",
"append",
"(",
"self",
".",
"_auth1",
")",
"info",
"=",
"[",
"]",
"for",
"func",
"in",
"funcs",
":",
"status",
",",
"reason",
"=",
"func",
"(",
")",
"info",
".",
"append",
"(",
"'%s %s'",
"%",
"(",
"status",
",",
"reason",
")",
")",
"if",
"status",
"//",
"100",
"==",
"2",
":",
"break",
"else",
":",
"raise",
"self",
".",
"HTTPException",
"(",
"'Auth failure %r.'",
"%",
"info",
")"
] |
See :py:func:`swiftly.client.client.Client.auth`
|
[
"See",
":",
"py",
":",
"func",
":",
"swiftly",
".",
"client",
".",
"client",
".",
"Client",
".",
"auth"
] |
python
|
test
|
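The try-each-until-2xx fallback pattern inside auth(), reduced to a standalone sketch with stub authenticators:

def try_auth_methods(funcs):
    info = []
    for func in funcs:
        status, reason = func()
        info.append('%s %s' % (status, reason))
        if status // 100 == 2:   # any 2xx status means success
            break
    else:
        # no candidate succeeded: report every attempt
        raise RuntimeError('Auth failure %r.' % info)
    return status

print(try_auth_methods([lambda: (401, 'Unauthorized'),
                        lambda: (200, 'OK')]))  # 200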
fake-name/ChromeController
|
ChromeController/Generator/Generated.py
|
https://github.com/fake-name/ChromeController/blob/914dd136184e8f1165c7aa6ef30418aaf10c61f0/ChromeController/Generator/Generated.py#L6830-L6866
|
def Runtime_compileScript(self, expression, sourceURL, persistScript, **kwargs
):
"""
Function path: Runtime.compileScript
Domain: Runtime
Method name: compileScript
Parameters:
Required arguments:
'expression' (type: string) -> Expression to compile.
'sourceURL' (type: string) -> Source url to be set for the script.
'persistScript' (type: boolean) -> Specifies whether the compiled script should be persisted.
Optional arguments:
'executionContextId' (type: ExecutionContextId) -> Specifies in which execution context to perform script run. If the parameter is omitted the evaluation will be performed in the context of the inspected page.
Returns:
'scriptId' (type: ScriptId) -> Id of the script.
'exceptionDetails' (type: ExceptionDetails) -> Exception details.
Description: Compiles expression.
"""
assert isinstance(expression, (str,)
), "Argument 'expression' must be of type '['str']'. Received type: '%s'" % type(
expression)
assert isinstance(sourceURL, (str,)
), "Argument 'sourceURL' must be of type '['str']'. Received type: '%s'" % type(
sourceURL)
assert isinstance(persistScript, (bool,)
), "Argument 'persistScript' must be of type '['bool']'. Received type: '%s'" % type(
persistScript)
expected = ['executionContextId']
passed_keys = list(kwargs.keys())
assert all([(key in expected) for key in passed_keys]
), "Allowed kwargs are ['executionContextId']. Passed kwargs: %s" % passed_keys
subdom_funcs = self.synchronous_command('Runtime.compileScript',
expression=expression, sourceURL=sourceURL, persistScript=
persistScript, **kwargs)
return subdom_funcs
|
[
"def",
"Runtime_compileScript",
"(",
"self",
",",
"expression",
",",
"sourceURL",
",",
"persistScript",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"isinstance",
"(",
"expression",
",",
"(",
"str",
",",
")",
")",
",",
"\"Argument 'expression' must be of type '['str']'. Received type: '%s'\"",
"%",
"type",
"(",
"expression",
")",
"assert",
"isinstance",
"(",
"sourceURL",
",",
"(",
"str",
",",
")",
")",
",",
"\"Argument 'sourceURL' must be of type '['str']'. Received type: '%s'\"",
"%",
"type",
"(",
"sourceURL",
")",
"assert",
"isinstance",
"(",
"persistScript",
",",
"(",
"bool",
",",
")",
")",
",",
"\"Argument 'persistScript' must be of type '['bool']'. Received type: '%s'\"",
"%",
"type",
"(",
"persistScript",
")",
"expected",
"=",
"[",
"'executionContextId'",
"]",
"passed_keys",
"=",
"list",
"(",
"kwargs",
".",
"keys",
"(",
")",
")",
"assert",
"all",
"(",
"[",
"(",
"key",
"in",
"expected",
")",
"for",
"key",
"in",
"passed_keys",
"]",
")",
",",
"\"Allowed kwargs are ['executionContextId']. Passed kwargs: %s\"",
"%",
"passed_keys",
"subdom_funcs",
"=",
"self",
".",
"synchronous_command",
"(",
"'Runtime.compileScript'",
",",
"expression",
"=",
"expression",
",",
"sourceURL",
"=",
"sourceURL",
",",
"persistScript",
"=",
"persistScript",
",",
"*",
"*",
"kwargs",
")",
"return",
"subdom_funcs"
] |
Function path: Runtime.compileScript
Domain: Runtime
Method name: compileScript
Parameters:
Required arguments:
'expression' (type: string) -> Expression to compile.
'sourceURL' (type: string) -> Source url to be set for the script.
'persistScript' (type: boolean) -> Specifies whether the compiled script should be persisted.
Optional arguments:
'executionContextId' (type: ExecutionContextId) -> Specifies in which execution context to perform script run. If the parameter is omitted the evaluation will be performed in the context of the inspected page.
Returns:
'scriptId' (type: ScriptId) -> Id of the script.
'exceptionDetails' (type: ExceptionDetails) -> Exception details.
Description: Compiles expression.
|
[
"Function",
"path",
":",
"Runtime",
".",
"compileScript",
"Domain",
":",
"Runtime",
"Method",
"name",
":",
"compileScript",
"Parameters",
":",
"Required",
"arguments",
":",
"expression",
"(",
"type",
":",
"string",
")",
"-",
">",
"Expression",
"to",
"compile",
".",
"sourceURL",
"(",
"type",
":",
"string",
")",
"-",
">",
"Source",
"url",
"to",
"be",
"set",
"for",
"the",
"script",
".",
"persistScript",
"(",
"type",
":",
"boolean",
")",
"-",
">",
"Specifies",
"whether",
"the",
"compiled",
"script",
"should",
"be",
"persisted",
".",
"Optional",
"arguments",
":",
"executionContextId",
"(",
"type",
":",
"ExecutionContextId",
")",
"-",
">",
"Specifies",
"in",
"which",
"execution",
"context",
"to",
"perform",
"script",
"run",
".",
"If",
"the",
"parameter",
"is",
"omitted",
"the",
"evaluation",
"will",
"be",
"performed",
"in",
"the",
"context",
"of",
"the",
"inspected",
"page",
".",
"Returns",
":",
"scriptId",
"(",
"type",
":",
"ScriptId",
")",
"-",
">",
"Id",
"of",
"the",
"script",
".",
"exceptionDetails",
"(",
"type",
":",
"ExceptionDetails",
")",
"-",
">",
"Exception",
"details",
".",
"Description",
":",
"Compiles",
"expression",
"."
] |
python
|
train
|
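The validate-then-forward pattern of the generated method above, shown standalone (a hypothetical stub stands in for the CDP round trip):

def compile_script(expression, sourceURL, persistScript, **kwargs):
    # type-check the required arguments, then whitelist the optional kwargs
    assert isinstance(expression, str), "expression must be str"
    assert isinstance(sourceURL, str), "sourceURL must be str"
    assert isinstance(persistScript, bool), "persistScript must be bool"
    expected = ['executionContextId']
    passed_keys = list(kwargs.keys())
    assert all(key in expected for key in passed_keys), \
        "Allowed kwargs are %s. Passed kwargs: %s" % (expected, passed_keys)
    # stub: the real method forwards these to synchronous_command(...)
    return dict(expression=expression, sourceURL=sourceURL,
                persistScript=persistScript, **kwargs)

print(compile_script("1 + 1", "test://snippet", False,
                     executionContextId=3))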
saltstack/salt
|
salt/modules/rh_ip.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rh_ip.py#L963-L968
|
def _write_file_network(data, filename):
'''
Writes a file to disk
'''
with salt.utils.files.fopen(filename, 'w') as fp_:
fp_.write(salt.utils.stringutils.to_str(data))
|
[
"def",
"_write_file_network",
"(",
"data",
",",
"filename",
")",
":",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"filename",
",",
"'w'",
")",
"as",
"fp_",
":",
"fp_",
".",
"write",
"(",
"salt",
".",
"utils",
".",
"stringutils",
".",
"to_str",
"(",
"data",
")",
")"
] |
Writes a file to disk
|
[
"Writes",
"a",
"file",
"to",
"disk"
] |
python
|
train
|
Robpol86/libnl
|
libnl/list_.py
|
https://github.com/Robpol86/libnl/blob/274e9fdaa39822d06ef70b799ed4a95937a4d923/libnl/list_.py#L62-L69
|
def nl_list_del(obj):
"""https://github.com/thom311/libnl/blob/libnl3_2_25/include/netlink/list.h#L49.
Positional arguments:
obj -- nl_list_head class instance.
"""
obj.next_.prev = obj.prev
obj.prev.next_ = obj.next_
|
[
"def",
"nl_list_del",
"(",
"obj",
")",
":",
"obj",
".",
"next",
".",
"prev",
"=",
"obj",
".",
"prev",
"obj",
".",
"prev",
".",
"next_",
"=",
"obj",
".",
"next_"
] |
https://github.com/thom311/libnl/blob/libnl3_2_25/include/netlink/list.h#L49.
Positional arguments:
obj -- nl_list_head class instance.
|
[
"https",
":",
"//",
"github",
".",
"com",
"/",
"thom311",
"/",
"libnl",
"/",
"blob",
"/",
"libnl3_2_25",
"/",
"include",
"/",
"netlink",
"/",
"list",
".",
"h#L49",
"."
] |
python
|
train
|
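Self-contained sketch of the circular doubly-linked-list unlink above; the Node class is hypothetical, with next_ mirroring nl_list_head's attribute naming:

class Node(object):
    def __init__(self, name):
        self.name = name
        self.next_ = self.prev = self  # circular: a lone node points to itself

head, item = Node("head"), Node("item")
# splice item in after head
item.next_, item.prev = head.next_, head
head.next_.prev = item
head.next_ = item

def nl_list_del(obj):
    obj.next_.prev = obj.prev
    obj.prev.next_ = obj.next_

nl_list_del(item)
print(head.next_.name, head.prev.name)  # head head -- the list is empty again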
duguyue100/minesweeper
|
minesweeper/gui.py
|
https://github.com/duguyue100/minesweeper/blob/38b1910f4c34d0275ac10a300285aba6f1d91d61/minesweeper/gui.py#L117-L132
|
def update_grid(self):
"""Update grid according to info map."""
info_map = self.ms_game.get_info_map()
for i in xrange(self.ms_game.board_height):
for j in xrange(self.ms_game.board_width):
self.grid_wgs[(i, j)].info_label(info_map[i, j])
self.ctrl_wg.move_counter.display(self.ms_game.num_moves)
if self.ms_game.game_status == 2:
self.ctrl_wg.reset_button.setIcon(QtGui.QIcon(CONTINUE_PATH))
elif self.ms_game.game_status == 1:
self.ctrl_wg.reset_button.setIcon(QtGui.QIcon(WIN_PATH))
self.timer.stop()
elif self.ms_game.game_status == 0:
self.ctrl_wg.reset_button.setIcon(QtGui.QIcon(LOSE_PATH))
self.timer.stop()
|
[
"def",
"update_grid",
"(",
"self",
")",
":",
"info_map",
"=",
"self",
".",
"ms_game",
".",
"get_info_map",
"(",
")",
"for",
"i",
"in",
"xrange",
"(",
"self",
".",
"ms_game",
".",
"board_height",
")",
":",
"for",
"j",
"in",
"xrange",
"(",
"self",
".",
"ms_game",
".",
"board_width",
")",
":",
"self",
".",
"grid_wgs",
"[",
"(",
"i",
",",
"j",
")",
"]",
".",
"info_label",
"(",
"info_map",
"[",
"i",
",",
"j",
"]",
")",
"self",
".",
"ctrl_wg",
".",
"move_counter",
".",
"display",
"(",
"self",
".",
"ms_game",
".",
"num_moves",
")",
"if",
"self",
".",
"ms_game",
".",
"game_status",
"==",
"2",
":",
"self",
".",
"ctrl_wg",
".",
"reset_button",
".",
"setIcon",
"(",
"QtGui",
".",
"QIcon",
"(",
"CONTINUE_PATH",
")",
")",
"elif",
"self",
".",
"ms_game",
".",
"game_status",
"==",
"1",
":",
"self",
".",
"ctrl_wg",
".",
"reset_button",
".",
"setIcon",
"(",
"QtGui",
".",
"QIcon",
"(",
"WIN_PATH",
")",
")",
"self",
".",
"timer",
".",
"stop",
"(",
")",
"elif",
"self",
".",
"ms_game",
".",
"game_status",
"==",
"0",
":",
"self",
".",
"ctrl_wg",
".",
"reset_button",
".",
"setIcon",
"(",
"QtGui",
".",
"QIcon",
"(",
"LOSE_PATH",
")",
")",
"self",
".",
"timer",
".",
"stop",
"(",
")"
] |
Update grid according to info map.
|
[
"Update",
"grid",
"according",
"to",
"info",
"map",
"."
] |
python
|
train
|
ninuxorg/nodeshot
|
nodeshot/networking/net/views.py
|
https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/networking/net/views.py#L35-L55
|
def get_queryset(self):
"""
Optionally restricts the returned devices
by filtering against a `search` query parameter in the URL.
"""
# retrieve all devices which are published and accessible to current user
# and use joins to retrieve related fields
queryset = super(DeviceList, self).get_queryset()#.select_related('layer', 'status', 'user')
# retrieve value of querystring parameter "search"
search = self.request.query_params.get('search', None)
if search is not None:
search_query = (
Q(name__icontains=search) |
Q(description__icontains=search)
)
# add instructions for search to queryset
queryset = queryset.filter(search_query)
return queryset
|
[
"def",
"get_queryset",
"(",
"self",
")",
":",
"# retrieve all devices which are published and accessible to current user",
"# and use joins to retrieve related fields",
"queryset",
"=",
"super",
"(",
"DeviceList",
",",
"self",
")",
".",
"get_queryset",
"(",
")",
"#.select_related('layer', 'status', 'user')",
"# retrieve value of querystring parameter \"search\"",
"search",
"=",
"self",
".",
"request",
".",
"query_params",
".",
"get",
"(",
"'search'",
",",
"None",
")",
"if",
"search",
"is",
"not",
"None",
":",
"search_query",
"=",
"(",
"Q",
"(",
"name__icontains",
"=",
"search",
")",
"|",
"Q",
"(",
"description__icontains",
"=",
"search",
")",
")",
"# add instructions for search to queryset",
"queryset",
"=",
"queryset",
".",
"filter",
"(",
"search_query",
")",
"return",
"queryset"
] |
Optionally restricts the returned devices
by filtering against a `search` query parameter in the URL.
|
[
"Optionally",
"restricts",
"the",
"returned",
"devices",
"by",
"filtering",
"against",
"a",
"search",
"query",
"parameter",
"in",
"the",
"URL",
"."
] |
python
|
train
|
CamDavidsonPilon/lifelines
|
lifelines/plotting.py
|
https://github.com/CamDavidsonPilon/lifelines/blob/bdf6be6f1d10eea4c46365ee0ee6a47d8c30edf8/lifelines/plotting.py#L409-L446
|
def plot_loglogs(cls, loc=None, iloc=None, show_censors=False, censor_styles=None, **kwargs):
"""
Specifies a plot of the log(-log(SV)) versus log(time) where SV is the estimated survival function.
"""
def loglog(s):
return np.log(-np.log(s))
if (loc is not None) and (iloc is not None):
raise ValueError("Cannot set both loc and iloc in call to .plot().")
if censor_styles is None:
censor_styles = {}
set_kwargs_ax(kwargs)
set_kwargs_color(kwargs)
set_kwargs_drawstyle(kwargs)
kwargs["logx"] = True
dataframe_slicer = create_dataframe_slicer(iloc, loc)
# plot censors
ax = kwargs["ax"]
colour = kwargs["c"]
if show_censors and cls.event_table["censored"].sum() > 0:
cs = {"marker": "+", "ms": 12, "mew": 1}
cs.update(censor_styles)
times = dataframe_slicer(cls.event_table.loc[(cls.event_table["censored"] > 0)]).index.values.astype(float)
v = cls.predict(times)
# don't log times, as Pandas will take care of all log-scaling later.
ax.plot(times, loglog(v), linestyle="None", color=colour, **cs)
# plot estimate
dataframe_slicer(loglog(cls.survival_function_)).plot(**kwargs)
ax.set_xlabel("log(timeline)")
ax.set_ylabel("log(-log(survival_function_))")
return ax
|
[
"def",
"plot_loglogs",
"(",
"cls",
",",
"loc",
"=",
"None",
",",
"iloc",
"=",
"None",
",",
"show_censors",
"=",
"False",
",",
"censor_styles",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"loglog",
"(",
"s",
")",
":",
"return",
"np",
".",
"log",
"(",
"-",
"np",
".",
"log",
"(",
"s",
")",
")",
"if",
"(",
"loc",
"is",
"not",
"None",
")",
"and",
"(",
"iloc",
"is",
"not",
"None",
")",
":",
"raise",
"ValueError",
"(",
"\"Cannot set both loc and iloc in call to .plot().\"",
")",
"if",
"censor_styles",
"is",
"None",
":",
"censor_styles",
"=",
"{",
"}",
"set_kwargs_ax",
"(",
"kwargs",
")",
"set_kwargs_color",
"(",
"kwargs",
")",
"set_kwargs_drawstyle",
"(",
"kwargs",
")",
"kwargs",
"[",
"\"logx\"",
"]",
"=",
"True",
"dataframe_slicer",
"=",
"create_dataframe_slicer",
"(",
"iloc",
",",
"loc",
")",
"# plot censors",
"ax",
"=",
"kwargs",
"[",
"\"ax\"",
"]",
"colour",
"=",
"kwargs",
"[",
"\"c\"",
"]",
"if",
"show_censors",
"and",
"cls",
".",
"event_table",
"[",
"\"censored\"",
"]",
".",
"sum",
"(",
")",
">",
"0",
":",
"cs",
"=",
"{",
"\"marker\"",
":",
"\"+\"",
",",
"\"ms\"",
":",
"12",
",",
"\"mew\"",
":",
"1",
"}",
"cs",
".",
"update",
"(",
"censor_styles",
")",
"times",
"=",
"dataframe_slicer",
"(",
"cls",
".",
"event_table",
".",
"loc",
"[",
"(",
"cls",
".",
"event_table",
"[",
"\"censored\"",
"]",
">",
"0",
")",
"]",
")",
".",
"index",
".",
"values",
".",
"astype",
"(",
"float",
")",
"v",
"=",
"cls",
".",
"predict",
"(",
"times",
")",
"# don't log times, as Pandas will take care of all log-scaling later.",
"ax",
".",
"plot",
"(",
"times",
",",
"loglog",
"(",
"v",
")",
",",
"linestyle",
"=",
"\"None\"",
",",
"color",
"=",
"colour",
",",
"*",
"*",
"cs",
")",
"# plot estimate",
"dataframe_slicer",
"(",
"loglog",
"(",
"cls",
".",
"survival_function_",
")",
")",
".",
"plot",
"(",
"*",
"*",
"kwargs",
")",
"ax",
".",
"set_xlabel",
"(",
"\"log(timeline)\"",
")",
"ax",
".",
"set_ylabel",
"(",
"\"log(-log(survival_function_))\"",
")",
"return",
"ax"
] |
Specifies a plot of the log(-log(SV)) versus log(time) where SV is the estimated survival function.
|
[
"Specifies",
"a",
"plot",
"of",
"the",
"log",
"(",
"-",
"log",
"(",
"SV",
"))",
"versus",
"log",
"(",
"time",
")",
"where",
"SV",
"is",
"the",
"estimated",
"survival",
"function",
"."
] |
python
|
train
|
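The core transform behind plot_loglogs, independent of matplotlib — log(-log S(t)) computed on hypothetical survival values:

import numpy as np

survival = np.array([0.95, 0.80, 0.55, 0.20])  # hypothetical S(t) values
print(np.log(-np.log(survival)))  # rises as survival falls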
PyGithub/PyGithub
|
github/Repository.py
|
https://github.com/PyGithub/PyGithub/blob/f716df86bbe7dc276c6596699fa9712b61ef974c/github/Repository.py#L2674-L2682
|
def subscribe_to_hub(self, event, callback, secret=github.GithubObject.NotSet):
"""
:calls: `POST /hub <http://developer.github.com/>`_
:param event: string
:param callback: string
:param secret: string
:rtype: None
"""
return self._hub("subscribe", event, callback, secret)
|
[
"def",
"subscribe_to_hub",
"(",
"self",
",",
"event",
",",
"callback",
",",
"secret",
"=",
"github",
".",
"GithubObject",
".",
"NotSet",
")",
":",
"return",
"self",
".",
"_hub",
"(",
"\"subscribe\"",
",",
"event",
",",
"callback",
",",
"secret",
")"
] |
:calls: `POST /hub <http://developer.github.com/>`_
:param event: string
:param callback: string
:param secret: string
:rtype: None
|
[
":",
"calls",
":",
"POST",
"/",
"hub",
"<http",
":",
"//",
"developer",
".",
"github",
".",
"com",
"/",
">",
"_",
":",
"param",
"event",
":",
"string",
":",
"param",
"callback",
":",
"string",
":",
"param",
"secret",
":",
"string",
":",
"rtype",
":",
"None"
] |
python
|
train
|
pypa/pipenv
|
pipenv/vendor/pexpect/pty_spawn.py
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/pexpect/pty_spawn.py#L557-L561
|
def _log_control(self, s):
"""Write control characters to the appropriate log files"""
if self.encoding is not None:
s = s.decode(self.encoding, 'replace')
self._log(s, 'send')
|
[
"def",
"_log_control",
"(",
"self",
",",
"s",
")",
":",
"if",
"self",
".",
"encoding",
"is",
"not",
"None",
":",
"s",
"=",
"s",
".",
"decode",
"(",
"self",
".",
"encoding",
",",
"'replace'",
")",
"self",
".",
"_log",
"(",
"s",
",",
"'send'",
")"
] |
Write control characters to the appropriate log files
|
[
"Write",
"control",
"characters",
"to",
"the",
"appropriate",
"log",
"files"
] |
python
|
train
|
numenta/nupic
|
src/nupic/algorithms/knn_classifier.py
|
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/knn_classifier.py#L840-L870
|
def getPattern(self, idx, sparseBinaryForm=False, cat=None):
"""Gets a training pattern either by index or category number.
:param idx: Index of the training pattern
:param sparseBinaryForm: If true, returns a list of the indices of the
non-zero bits in the training pattern
:param cat: If not None, get the first pattern belonging to category cat. If
this is specified, idx must be None.
:returns: The training pattern with specified index
"""
if cat is not None:
assert idx is None
idx = self._categoryList.index(cat)
if not self.useSparseMemory:
pattern = self._Memory[idx]
if sparseBinaryForm:
pattern = pattern.nonzero()[0]
else:
(nz, values) = self._Memory.rowNonZeros(idx)
if not sparseBinaryForm:
pattern = numpy.zeros(self._Memory.nCols())
numpy.put(pattern, nz, 1)
else:
pattern = nz
return pattern
|
[
"def",
"getPattern",
"(",
"self",
",",
"idx",
",",
"sparseBinaryForm",
"=",
"False",
",",
"cat",
"=",
"None",
")",
":",
"if",
"cat",
"is",
"not",
"None",
":",
"assert",
"idx",
"is",
"None",
"idx",
"=",
"self",
".",
"_categoryList",
".",
"index",
"(",
"cat",
")",
"if",
"not",
"self",
".",
"useSparseMemory",
":",
"pattern",
"=",
"self",
".",
"_Memory",
"[",
"idx",
"]",
"if",
"sparseBinaryForm",
":",
"pattern",
"=",
"pattern",
".",
"nonzero",
"(",
")",
"[",
"0",
"]",
"else",
":",
"(",
"nz",
",",
"values",
")",
"=",
"self",
".",
"_Memory",
".",
"rowNonZeros",
"(",
"idx",
")",
"if",
"not",
"sparseBinaryForm",
":",
"pattern",
"=",
"numpy",
".",
"zeros",
"(",
"self",
".",
"_Memory",
".",
"nCols",
"(",
")",
")",
"numpy",
".",
"put",
"(",
"pattern",
",",
"nz",
",",
"1",
")",
"else",
":",
"pattern",
"=",
"nz",
"return",
"pattern"
] |
Gets a training pattern either by index or category number.
:param idx: Index of the training pattern
:param sparseBinaryForm: If true, returns a list of the indices of the
non-zero bits in the training pattern
:param cat: If not None, get the first pattern belonging to category cat. If
this is specified, idx must be None.
:returns: The training pattern with specified index
|
[
"Gets",
"a",
"training",
"pattern",
"either",
"by",
"index",
"or",
"category",
"number",
"."
] |
python
|
valid
|
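The dense/sparse round trip at the heart of getPattern, shown standalone:

import numpy as np

dense = np.array([0, 1, 0, 0, 1])
sparse = dense.nonzero()[0]        # sparse binary form: indices of the set bits
restored = np.zeros(len(dense))
np.put(restored, sparse, 1)        # back to the dense 0/1 vector
print(sparse, restored)            # [1 4] [0. 1. 0. 0. 1.]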
JasonKessler/scattertext
|
scattertext/characteristic/DenseRankCharacteristicness.py
|
https://github.com/JasonKessler/scattertext/blob/cacf1f687d218ee8cae3fc05cc901db824bb1b81/scattertext/characteristic/DenseRankCharacteristicness.py#L37-L74
|
def get_scores(self, corpus):
'''
Parameters
----------
corpus
Returns
-------
float, pd.Series
float: point on x-axis at even characteristicness
pd.Series: term -> value between 0 and 1, sorted by score in a descending manner
Background scores from corpus
'''
term_ranks = self.term_ranker(corpus).get_ranks()
freq_df = pd.DataFrame({
'corpus': term_ranks.sum(axis=1),
'standard': self.background_frequencies.get_background_frequency_df()['background']}
).dropna()
corpus_rank = rankdata(freq_df.corpus, 'dense')
standard_rank = rankdata(freq_df.standard, 'dense')
scores = corpus_rank/corpus_rank.max() - standard_rank/standard_rank.max()
#scores = RankDifference().get_scores(bg['corpus'], bg['bg']).sort_values()
# import pdb; pdb.set_trace()
if self.rerank_ranks:
rank_scores, zero_marker = self._rerank_scores(scores)
freq_df['score'] = pd.Series(rank_scores, index=freq_df.index)
else:
if scores.min() < 0 and scores.max() > 0:
zero_marker = -scores.min() / (scores.max() - scores.min())
elif scores.min() > 0:
zero_marker = 0
else:
zero_marker = 1
freq_df['score'] = scale(scores)
return zero_marker, freq_df.sort_values(by='score', ascending=False)['score']
|
[
"def",
"get_scores",
"(",
"self",
",",
"corpus",
")",
":",
"term_ranks",
"=",
"self",
".",
"term_ranker",
"(",
"corpus",
")",
".",
"get_ranks",
"(",
")",
"freq_df",
"=",
"pd",
".",
"DataFrame",
"(",
"{",
"'corpus'",
":",
"term_ranks",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
",",
"'standard'",
":",
"self",
".",
"background_frequencies",
".",
"get_background_frequency_df",
"(",
")",
"[",
"'background'",
"]",
"}",
")",
".",
"dropna",
"(",
")",
"corpus_rank",
"=",
"rankdata",
"(",
"freq_df",
".",
"corpus",
",",
"'dense'",
")",
"standard_rank",
"=",
"rankdata",
"(",
"freq_df",
".",
"standard",
",",
"'dense'",
")",
"scores",
"=",
"corpus_rank",
"/",
"corpus_rank",
".",
"max",
"(",
")",
"-",
"standard_rank",
"/",
"standard_rank",
".",
"max",
"(",
")",
"#scores = RankDifference().get_scores(bg['corpus'], bg['bg']).sort_values()",
"# import pdb; pdb.set_trace()",
"if",
"self",
".",
"rerank_ranks",
":",
"rank_scores",
",",
"zero_marker",
"=",
"self",
".",
"_rerank_scores",
"(",
"scores",
")",
"freq_df",
"[",
"'score'",
"]",
"=",
"pd",
".",
"Series",
"(",
"rank_scores",
",",
"index",
"=",
"freq_df",
".",
"index",
")",
"else",
":",
"if",
"scores",
".",
"min",
"(",
")",
"<",
"0",
"and",
"scores",
".",
"max",
"(",
")",
">",
"0",
":",
"zero_marker",
"=",
"-",
"scores",
".",
"min",
"(",
")",
"/",
"(",
"scores",
".",
"max",
"(",
")",
"-",
"scores",
".",
"min",
"(",
")",
")",
"elif",
"scores",
".",
"min",
"(",
")",
">",
"0",
":",
"zero_marker",
"=",
"0",
"else",
":",
"zero_marker",
"=",
"1",
"freq_df",
"[",
"'score'",
"]",
"=",
"scale",
"(",
"scores",
")",
"return",
"zero_marker",
",",
"freq_df",
".",
"sort_values",
"(",
"by",
"=",
"'score'",
",",
"ascending",
"=",
"False",
")",
"[",
"'score'",
"]"
] |
Parameters
----------
corpus
Returns
-------
float, pd.Series
float: point on x-axis at even characteristicness
pd.Series: term -> value between 0 and 1, sorted by score in a descending manner
Background scores from corpus
|
[
"Parameters",
"----------",
"corpus"
] |
python
|
train
|
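The dense-rank difference computed inside get_scores, on toy frequencies (scipy's rankdata, as used above):

import numpy as np
from scipy.stats import rankdata

corpus = np.array([10, 3, 3, 1])       # term frequencies in the corpus
standard = np.array([5, 5, 2, 9])      # background frequencies
corpus_rank = rankdata(corpus, 'dense')
standard_rank = rankdata(standard, 'dense')
scores = corpus_rank / corpus_rank.max() - standard_rank / standard_rank.max()
print(scores)  # positive -> over-represented relative to the background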
edoburu/django-debugtools
|
debugtools/formatter.py
|
https://github.com/edoburu/django-debugtools/blob/5c609c00fa9954330cd135fc62a1e18b8e7fea8a/debugtools/formatter.py#L129-L152
|
def _style_text(text):
"""
Apply some HTML highlighting to the contents.
This can't be done in the
"""
# Escape text and apply some formatting.
# To have really good highlighting, pprint would have to be re-implemented.
text = escape(text)
text = text.replace(' <iterator object>', " <small><<var>this object can be used in a 'for' loop</var>></small>")
text = text.replace(' <dynamic item>', ' <small><<var>this object may have extra field names</var>></small>')
text = text.replace(' <dynamic attribute>', ' <small><<var>this object may have extra field names</var>></small>')
text = RE_PROXY.sub('\g<1><small><<var>proxy object</var>></small>', text)
text = RE_FUNCTION.sub('\g<1><small><<var>object method</var>></small>', text)
text = RE_GENERATOR.sub("\g<1><small><<var>generator, use 'for' to traverse it</var>></small>", text)
text = RE_OBJECT_ADDRESS.sub('\g<1><small><<var>\g<2> object</var>></small>', text)
text = RE_MANAGER.sub('\g<1><small><<var>manager, use <kbd>.all</kbd> to traverse it</var>></small>', text)
text = RE_CLASS_REPR.sub('\g<1><small><<var>\g<2> class</var>></small>', text)
# Since Django's WSGIRequest does a pprint like format for it's __repr__, make that styling consistent
text = RE_REQUEST_FIELDNAME.sub('\g<1>:\n <strong style="color: #222;">\g<2></strong>: ', text)
text = RE_REQUEST_CLEANUP1.sub('\g<1>', text)
text = RE_REQUEST_CLEANUP2.sub(')', text)
return mark_safe(text)
|
[
"def",
"_style_text",
"(",
"text",
")",
":",
"# Escape text and apply some formatting.",
"# To have really good highlighting, pprint would have to be re-implemented.",
"text",
"=",
"escape",
"(",
"text",
")",
"text",
"=",
"text",
".",
"replace",
"(",
"' <iterator object>'",
",",
"\" <small><<var>this object can be used in a 'for' loop</var>></small>\"",
")",
"text",
"=",
"text",
".",
"replace",
"(",
"' <dynamic item>'",
",",
"' <small><<var>this object may have extra field names</var>></small>'",
")",
"text",
"=",
"text",
".",
"replace",
"(",
"' <dynamic attribute>'",
",",
"' <small><<var>this object may have extra field names</var>></small>'",
")",
"text",
"=",
"RE_PROXY",
".",
"sub",
"(",
"'\\g<1><small><<var>proxy object</var>></small>'",
",",
"text",
")",
"text",
"=",
"RE_FUNCTION",
".",
"sub",
"(",
"'\\g<1><small><<var>object method</var>></small>'",
",",
"text",
")",
"text",
"=",
"RE_GENERATOR",
".",
"sub",
"(",
"\"\\g<1><small><<var>generator, use 'for' to traverse it</var>></small>\"",
",",
"text",
")",
"text",
"=",
"RE_OBJECT_ADDRESS",
".",
"sub",
"(",
"'\\g<1><small><<var>\\g<2> object</var>></small>'",
",",
"text",
")",
"text",
"=",
"RE_MANAGER",
".",
"sub",
"(",
"'\\g<1><small><<var>manager, use <kbd>.all</kbd> to traverse it</var>></small>'",
",",
"text",
")",
"text",
"=",
"RE_CLASS_REPR",
".",
"sub",
"(",
"'\\g<1><small><<var>\\g<2> class</var>></small>'",
",",
"text",
")",
"# Since Django's WSGIRequest does a pprint like format for it's __repr__, make that styling consistent",
"text",
"=",
"RE_REQUEST_FIELDNAME",
".",
"sub",
"(",
"'\\g<1>:\\n <strong style=\"color: #222;\">\\g<2></strong>: '",
",",
"text",
")",
"text",
"=",
"RE_REQUEST_CLEANUP1",
".",
"sub",
"(",
"'\\g<1>'",
",",
"text",
")",
"text",
"=",
"RE_REQUEST_CLEANUP2",
".",
"sub",
"(",
"')'",
",",
"text",
")",
"return",
"mark_safe",
"(",
"text",
")"
] |
Apply some HTML highlighting to the contents.
This can't be done in the
|
[
"Apply",
"some",
"HTML",
"highlighting",
"to",
"the",
"contents",
".",
"This",
"can",
"t",
"be",
"done",
"in",
"the"
] |
python
|
test
|
rwl/pylon
|
pyreto/util.py
|
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pyreto/util.py#L96-L106
|
def weighted_choice(lst):
""" Makes weighted choices. Accepts a list of tuples with the item and
probability as a pair like:
>>> x = [('one', 0.25), ('two', 0.25), ('three', 0.5)]
>>> y=windex(x) """
n = random.uniform(0, 1)
for item, weight in lst:
if n < weight:
break
n = n - weight
return item
|
[
"def",
"weighted_choice",
"(",
"lst",
")",
":",
"n",
"=",
"random",
".",
"uniform",
"(",
"0",
",",
"1",
")",
"for",
"item",
",",
"weight",
"in",
"lst",
":",
"if",
"n",
"<",
"weight",
":",
"break",
"n",
"=",
"n",
"-",
"weight",
"return",
"item"
] |
Makes weighted choices. Accepts a list of tuples with the item and
probability as a pair like:
>>> x = [('one', 0.25), ('two', 0.25), ('three', 0.5)]
>>> y=windex(x)
|
[
"Makes",
"weighted",
"choices",
".",
"Accepts",
"a",
"list",
"of",
"tuples",
"with",
"the",
"item",
"and",
"probability",
"as",
"a",
"pair",
"like",
":",
">>>",
"x",
"=",
"[",
"(",
"one",
"0",
".",
"25",
")",
"(",
"two",
"0",
".",
"25",
")",
"(",
"three",
"0",
".",
"5",
")",
"]",
">>>",
"y",
"=",
"windex",
"(",
"x",
")"
] |
python
|
train
|
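Quick usage sketch for weighted_choice; weights are assumed non-negative and summing to 1:

import random
from collections import Counter

def weighted_choice(lst):
    n = random.uniform(0, 1)
    for item, weight in lst:
        if n < weight:
            break
        n = n - weight
    return item

x = [('one', 0.25), ('two', 0.25), ('three', 0.5)]
print(Counter(weighted_choice(x) for _ in range(10000)))
# roughly 2500 / 2500 / 5000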
portantier/habu
|
habu/lib/ip2asn.py
|
https://github.com/portantier/habu/blob/87091e389dc6332fe1b82830c22b2eefc55816f2/habu/lib/ip2asn.py#L7-L77
|
def ip2asn(ipaddr):
"""Returns the ASN data associated with an IP (v4 or v6)
>>> from pprint import pprint
>>> pprint(ip2asn('8.8.8.8'))
{'asn': '15169',
'asname': 'GOOGLE - Google Inc., US',
'cc': 'US',
'net': '8.8.8.0/24',
'rir': 'ARIN'}
>>> pprint(ip2asn('2001:4860:4860::8888'))
{'asn': '15169',
'asname': 'GOOGLE - Google Inc., US',
'cc': 'US',
'net': '2001:4860::/32',
'rir': 'ARIN'}
>>> pprint(ip2asn('unk'))
None
"""
try:
ip = ipaddress.ip_network(ipaddr)
except ValueError:
return None
if ip.is_private:
return None
if ip.version == 4:
a, b, c, d = str(ip.exploded).split('/')[0].split('.')
reversed = "%s.%s.%s.%s" % (d, c, b, a)
name = "%s.origin.asn.cymru.com" % (reversed)
else:
only_addr = str(ip.exploded).split('/')[0].replace(':', '')
reversed = ''
for number in only_addr[::-1]:
reversed += number
reversed += '.'
reversed = reversed.rstrip('.')
name = "%s.origin6.asn.cymru.com" % (reversed)
try:
response = dns.resolver.query(name, 'TXT')
except:
return None
# "15169 | 8.8.4.0/24 | US | arin |"
r = {}
r['asn'] = response[0].to_text().split('|')[0].strip(" \"").split(' ')[0]
r['net'] = response[0].to_text().split('|')[1].strip(" \"")
r['cc'] = response[0].to_text().split('|')[2].strip(" \"")
r['rir'] = response[0].to_text().split('|')[3].strip(" \"").upper()
r['asname'] = 'unknown'
# Get AS Name
# "15169 | US | arin | 2000-03-30 | GOOGLE - Google Inc.,US"
try:
name = "AS%s.asn.cymru.com" % (r['asn'])
response = dns.resolver.query(name, 'TXT')
r['asname'] = response[0].to_text().split('|')[4].strip(" \"")
except:
pass
return(r)
|
[
"def",
"ip2asn",
"(",
"ipaddr",
")",
":",
"try",
":",
"ip",
"=",
"ipaddress",
".",
"ip_network",
"(",
"ipaddr",
")",
"except",
"ValueError",
":",
"return",
"None",
"if",
"ip",
".",
"is_private",
":",
"return",
"None",
"if",
"ip",
".",
"version",
"==",
"4",
":",
"a",
",",
"b",
",",
"c",
",",
"d",
"=",
"str",
"(",
"ip",
".",
"exploded",
")",
".",
"split",
"(",
"'/'",
")",
"[",
"0",
"]",
".",
"split",
"(",
"'.'",
")",
"reversed",
"=",
"\"%s.%s.%s.%s\"",
"%",
"(",
"d",
",",
"c",
",",
"b",
",",
"a",
")",
"name",
"=",
"\"%s.origin.asn.cymru.com\"",
"%",
"(",
"reversed",
")",
"else",
":",
"only_addr",
"=",
"str",
"(",
"ip",
".",
"exploded",
")",
".",
"split",
"(",
"'/'",
")",
"[",
"0",
"]",
".",
"replace",
"(",
"':'",
",",
"''",
")",
"reversed",
"=",
"''",
"for",
"number",
"in",
"only_addr",
"[",
":",
":",
"-",
"1",
"]",
":",
"reversed",
"+=",
"number",
"reversed",
"+=",
"'.'",
"reversed",
"=",
"reversed",
".",
"rstrip",
"(",
"'.'",
")",
"name",
"=",
"\"%s.origin6.asn.cymru.com\"",
"%",
"(",
"reversed",
")",
"try",
":",
"response",
"=",
"dns",
".",
"resolver",
".",
"query",
"(",
"name",
",",
"'TXT'",
")",
"except",
":",
"return",
"None",
"# \"15169 | 8.8.4.0/24 | US | arin |\"",
"r",
"=",
"{",
"}",
"r",
"[",
"'asn'",
"]",
"=",
"response",
"[",
"0",
"]",
".",
"to_text",
"(",
")",
".",
"split",
"(",
"'|'",
")",
"[",
"0",
"]",
".",
"strip",
"(",
"\" \\\"\"",
")",
".",
"split",
"(",
"' '",
")",
"[",
"0",
"]",
"r",
"[",
"'net'",
"]",
"=",
"response",
"[",
"0",
"]",
".",
"to_text",
"(",
")",
".",
"split",
"(",
"'|'",
")",
"[",
"1",
"]",
".",
"strip",
"(",
"\" \\\"\"",
")",
"r",
"[",
"'cc'",
"]",
"=",
"response",
"[",
"0",
"]",
".",
"to_text",
"(",
")",
".",
"split",
"(",
"'|'",
")",
"[",
"2",
"]",
".",
"strip",
"(",
"\" \\\"\"",
")",
"r",
"[",
"'rir'",
"]",
"=",
"response",
"[",
"0",
"]",
".",
"to_text",
"(",
")",
".",
"split",
"(",
"'|'",
")",
"[",
"3",
"]",
".",
"strip",
"(",
"\" \\\"\"",
")",
".",
"upper",
"(",
")",
"r",
"[",
"'asname'",
"]",
"=",
"'unknown'",
"# Get AS Name",
"# \"15169 | US | arin | 2000-03-30 | GOOGLE - Google Inc.,US\"",
"try",
":",
"name",
"=",
"\"AS%s.asn.cymru.com\"",
"%",
"(",
"r",
"[",
"'asn'",
"]",
")",
"response",
"=",
"dns",
".",
"resolver",
".",
"query",
"(",
"name",
",",
"'TXT'",
")",
"r",
"[",
"'asname'",
"]",
"=",
"response",
"[",
"0",
"]",
".",
"to_text",
"(",
")",
".",
"split",
"(",
"'|'",
")",
"[",
"4",
"]",
".",
"strip",
"(",
"\" \\\"\"",
")",
"except",
":",
"pass",
"return",
"(",
"r",
")"
] |
Returns the ASN data associated with an IP (v4 or v6)
>>> from pprint import pprint
>>> pprint(ip2asn('8.8.8.8'))
{'asn': '15169',
'asname': 'GOOGLE - Google Inc., US',
'cc': 'US',
'net': '8.8.8.0/24',
'rir': 'ARIN'}
>>> pprint(ip2asn('2001:4860:4860::8888'))
{'asn': '15169',
'asname': 'GOOGLE - Google Inc., US',
'cc': 'US',
'net': '2001:4860::/32',
'rir': 'ARIN'}
>>> pprint(ip2asn('unk'))
None
|
[
"Returns",
"the",
"ASN",
"data",
"associated",
"with",
"an",
"IP",
"(",
"v4",
"or",
"v6",
")"
] |
python
|
train
|
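The reversed-octet DNS query name that ip2asn builds for IPv4 lookups against Team Cymru, shown standalone:

import ipaddress

ip = ipaddress.ip_network('8.8.4.4')            # a /32 network for a host
a, b, c, d = str(ip.exploded).split('/')[0].split('.')
print("%s.%s.%s.%s.origin.asn.cymru.com" % (d, c, b, a))
# -> 4.4.8.8.origin.asn.cymru.com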
Robpol86/sphinxcontrib-versioning
|
sphinxcontrib/versioning/__main__.py
|
https://github.com/Robpol86/sphinxcontrib-versioning/blob/920edec0ac764081b583a2ecf4e6952762b9dbf2/sphinxcontrib/versioning/__main__.py#L210-L234
|
def override_root_main_ref(config, remotes, banner):
"""Override root_ref or banner_main_ref with tags in config if user requested.
:param sphinxcontrib.versioning.lib.Config config: Runtime configuration.
:param iter remotes: List of dicts from Versions.remotes.
:param bool banner: Evaluate banner main ref instead of root ref.
:return: If root/main ref exists.
:rtype: bool
"""
log = logging.getLogger(__name__)
greatest_tag = config.banner_greatest_tag if banner else config.greatest_tag
recent_tag = config.banner_recent_tag if banner else config.recent_tag
if greatest_tag or recent_tag:
candidates = [r for r in remotes if r['kind'] == 'tags']
if candidates:
multi_sort(candidates, ['semver' if greatest_tag else 'time'])
config.update({'banner_main_ref' if banner else 'root_ref': candidates[0]['name']}, overwrite=True)
else:
flag = '--banner-main-ref' if banner else '--root-ref'
log.warning('No git tags with docs found in remote. Falling back to %s value.', flag)
ref = config.banner_main_ref if banner else config.root_ref
return ref in [r['name'] for r in remotes]
|
[
"def",
"override_root_main_ref",
"(",
"config",
",",
"remotes",
",",
"banner",
")",
":",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"greatest_tag",
"=",
"config",
".",
"banner_greatest_tag",
"if",
"banner",
"else",
"config",
".",
"greatest_tag",
"recent_tag",
"=",
"config",
".",
"banner_recent_tag",
"if",
"banner",
"else",
"config",
".",
"recent_tag",
"if",
"greatest_tag",
"or",
"recent_tag",
":",
"candidates",
"=",
"[",
"r",
"for",
"r",
"in",
"remotes",
"if",
"r",
"[",
"'kind'",
"]",
"==",
"'tags'",
"]",
"if",
"candidates",
":",
"multi_sort",
"(",
"candidates",
",",
"[",
"'semver'",
"if",
"greatest_tag",
"else",
"'time'",
"]",
")",
"config",
".",
"update",
"(",
"{",
"'banner_main_ref'",
"if",
"banner",
"else",
"'root_ref'",
":",
"candidates",
"[",
"0",
"]",
"[",
"'name'",
"]",
"}",
",",
"overwrite",
"=",
"True",
")",
"else",
":",
"flag",
"=",
"'--banner-main-ref'",
"if",
"banner",
"else",
"'--root-ref'",
"log",
".",
"warning",
"(",
"'No git tags with docs found in remote. Falling back to %s value.'",
",",
"flag",
")",
"ref",
"=",
"config",
".",
"banner_main_ref",
"if",
"banner",
"else",
"config",
".",
"root_ref",
"return",
"ref",
"in",
"[",
"r",
"[",
"'name'",
"]",
"for",
"r",
"in",
"remotes",
"]"
] |
Override root_ref or banner_main_ref with tags in config if user requested.
:param sphinxcontrib.versioning.lib.Config config: Runtime configuration.
:param iter remotes: List of dicts from Versions.remotes.
:param bool banner: Evaluate banner main ref instead of root ref.
:return: If root/main ref exists.
:rtype: bool
|
[
"Override",
"root_ref",
"or",
"banner_main_ref",
"with",
"tags",
"in",
"config",
"if",
"user",
"requested",
"."
] |
python
|
train
|
xxtea/xxtea-python
|
xxtea/__init__.py
|
https://github.com/xxtea/xxtea-python/blob/35bd893cb42dce338631d051be9302fcbc21b7fc/xxtea/__init__.py#L42-L51
|
def decrypt(data, key):
'''decrypt the data with the key'''
data_len = len(data)
data = ffi.from_buffer(data)
key = ffi.from_buffer(__tobytes(key))
out_len = ffi.new('size_t *')
result = lib.xxtea_decrypt(data, data_len, key, out_len)
ret = ffi.buffer(result, out_len[0])[:]
lib.free(result)
return ret
|
[
"def",
"decrypt",
"(",
"data",
",",
"key",
")",
":",
"data_len",
"=",
"len",
"(",
"data",
")",
"data",
"=",
"ffi",
".",
"from_buffer",
"(",
"data",
")",
"key",
"=",
"ffi",
".",
"from_buffer",
"(",
"__tobytes",
"(",
"key",
")",
")",
"out_len",
"=",
"ffi",
".",
"new",
"(",
"'size_t *'",
")",
"result",
"=",
"lib",
".",
"xxtea_decrypt",
"(",
"data",
",",
"data_len",
",",
"key",
",",
"out_len",
")",
"ret",
"=",
"ffi",
".",
"buffer",
"(",
"result",
",",
"out_len",
"[",
"0",
"]",
")",
"[",
":",
"]",
"lib",
".",
"free",
"(",
"result",
")",
"return",
"ret"
] |
decrypt the data with the key
|
[
"decrypt",
"the",
"data",
"with",
"the",
"key"
] |
python
|
train
|
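Encrypt/decrypt round trip through the package's public API (assumes the xxtea package is installed; encrypt is the counterpart defined alongside decrypt):

import xxtea

key = "0123456789abcdef"                 # 16-byte key
ciphertext = xxtea.encrypt("Hello World!", key)
print(xxtea.decrypt(ciphertext, key))    # raw bytes: b'Hello World!'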
trendmicro/flask-ini
|
flask_ini.py
|
https://github.com/trendmicro/flask-ini/blob/a1e4baa598c9a01021a1333d9c15e4d99c8334dd/flask_ini.py#L38-L58
|
def _load_item(self, key):
'''Load the specified item from the [flask] section. Type is
determined by the type of the equivalent value in app.default_config
or string if unknown.'''
key_u = key.upper()
default = current_app.default_config.get(key_u)
# One of the default config vars is a timedelta - interpret it
# as an int and construct using it
if isinstance(default, datetime.timedelta):
current_app.config[key_u] = datetime.timedelta(self.getint('flask', key))
elif isinstance(default, bool):
current_app.config[key_u] = self.getboolean('flask', key)
elif isinstance(default, float):
current_app.config[key_u] = self.getfloat('flask', key)
elif isinstance(default, int):
current_app.config[key_u] = self.getint('flask', key)
else:
# All the string keys need to be coerced into str()
# because Flask expects some of them not to be unicode
current_app.config[key_u] = str(self.get('flask', key))
|
[
"def",
"_load_item",
"(",
"self",
",",
"key",
")",
":",
"key_u",
"=",
"key",
".",
"upper",
"(",
")",
"default",
"=",
"current_app",
".",
"default_config",
".",
"get",
"(",
"key_u",
")",
"# One of the default config vars is a timedelta - interpret it",
"# as an int and construct using it",
"if",
"isinstance",
"(",
"default",
",",
"datetime",
".",
"timedelta",
")",
":",
"current_app",
".",
"config",
"[",
"key_u",
"]",
"=",
"datetime",
".",
"timedelta",
"(",
"self",
".",
"getint",
"(",
"'flask'",
",",
"key",
")",
")",
"elif",
"isinstance",
"(",
"default",
",",
"bool",
")",
":",
"current_app",
".",
"config",
"[",
"key_u",
"]",
"=",
"self",
".",
"getboolean",
"(",
"'flask'",
",",
"key",
")",
"elif",
"isinstance",
"(",
"default",
",",
"float",
")",
":",
"current_app",
".",
"config",
"[",
"key_u",
"]",
"=",
"self",
".",
"getfloat",
"(",
"'flask'",
",",
"key",
")",
"elif",
"isinstance",
"(",
"default",
",",
"int",
")",
":",
"current_app",
".",
"config",
"[",
"key_u",
"]",
"=",
"self",
".",
"getint",
"(",
"'flask'",
",",
"key",
")",
"else",
":",
"# All the string keys need to be coerced into str()",
"# because Flask expects some of them not to be unicode",
"current_app",
".",
"config",
"[",
"key_u",
"]",
"=",
"str",
"(",
"self",
".",
"get",
"(",
"'flask'",
",",
"key",
")",
")"
] |
Load the specified item from the [flask] section. Type is
determined by the type of the equivalent value in app.default_config
or string if unknown.
|
[
"Load",
"the",
"specified",
"item",
"from",
"the",
"[",
"flask",
"]",
"section",
".",
"Type",
"is",
"determined",
"by",
"the",
"type",
"of",
"the",
"equivalent",
"value",
"in",
"app",
".",
"default_config",
"or",
"string",
"if",
"unknown",
"."
] |
python
|
train
|
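The type dispatch of _load_item as a standalone helper; the bool parsing here approximates ConfigParser.getboolean and is an assumption:

import datetime

def coerce(default, raw):
    # bool must be tested before int: bool is a subclass of int
    if isinstance(default, datetime.timedelta):
        return datetime.timedelta(int(raw))   # days, as getint would parse
    elif isinstance(default, bool):
        return raw.lower() in ('1', 'yes', 'true', 'on')
    elif isinstance(default, float):
        return float(raw)
    elif isinstance(default, int):
        return int(raw)
    return str(raw)

print(coerce(31, "7"), coerce(False, "true"), coerce(0.0, "2.5"))
# 7 True 2.5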
photo/openphoto-python
|
trovebox/api/api_tag.py
|
https://github.com/photo/openphoto-python/blob/209a1da27c8d8c88dbcf4ea6c6f57031ea1bc44b/trovebox/api/api_tag.py#L9-L17
|
def list(self, **kwds):
"""
Endpoint: /tags/list.json
Returns a list of Tag objects.
"""
tags = self._client.get("/tags/list.json", **kwds)["result"]
tags = self._result_to_list(tags)
return [Tag(self._client, tag) for tag in tags]
|
[
"def",
"list",
"(",
"self",
",",
"*",
"*",
"kwds",
")",
":",
"tags",
"=",
"self",
".",
"_client",
".",
"get",
"(",
"\"/tags/list.json\"",
",",
"*",
"*",
"kwds",
")",
"[",
"\"result\"",
"]",
"tags",
"=",
"self",
".",
"_result_to_list",
"(",
"tags",
")",
"return",
"[",
"Tag",
"(",
"self",
".",
"_client",
",",
"tag",
")",
"for",
"tag",
"in",
"tags",
"]"
] |
Endpoint: /tags/list.json
Returns a list of Tag objects.
|
[
"Endpoint",
":",
"/",
"tags",
"/",
"list",
".",
"json"
] |
python
|
train
|
Esri/ArcREST
|
src/arcresthelper/common.py
|
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcresthelper/common.py#L434-L457
|
def unicode_convert(obj):
"""Converts unicode objects to anscii.
Args:
obj (object): The object to convert.
Returns:
The object converted to ascii, if possible. For ``dict`` and ``list``, the object type is maintained.
"""
try:
if isinstance(obj, dict):
return {unicode_convert(key): unicode_convert(value) for key, value in obj.items()}
elif isinstance(obj, list):
return [unicode_convert(element) for element in obj]
elif isinstance(obj, str):
return obj
elif isinstance(obj, six.text_type):
return obj.encode('utf-8')
elif isinstance(obj, six.integer_types):
return obj
else:
return obj
except:
return obj
|
[
"def",
"unicode_convert",
"(",
"obj",
")",
":",
"try",
":",
"if",
"isinstance",
"(",
"obj",
",",
"dict",
")",
":",
"return",
"{",
"unicode_convert",
"(",
"key",
")",
":",
"unicode_convert",
"(",
"value",
")",
"for",
"key",
",",
"value",
"in",
"obj",
".",
"items",
"(",
")",
"}",
"elif",
"isinstance",
"(",
"obj",
",",
"list",
")",
":",
"return",
"[",
"unicode_convert",
"(",
"element",
")",
"for",
"element",
"in",
"obj",
"]",
"elif",
"isinstance",
"(",
"obj",
",",
"str",
")",
":",
"return",
"obj",
"elif",
"isinstance",
"(",
"obj",
",",
"six",
".",
"text_type",
")",
":",
"return",
"obj",
".",
"encode",
"(",
"'utf-8'",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"six",
".",
"integer_types",
")",
":",
"return",
"obj",
"else",
":",
"return",
"obj",
"except",
":",
"return",
"obj"
] |
Converts unicode objects to ascii.
Args:
obj (object): The object to convert.
Returns:
The object converted to ascii, if possible. For ``dict`` and ``list``, the object type is maintained.
|
[
"Converts",
"unicode",
"objects",
"to",
"anscii",
"."
] |
python
|
train
|
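Behaviour sketch of unicode_convert on toy input (assumes six is installed); under Python 3 the str branch fires first, so text passes through unchanged and only Python 2 unicode would be UTF-8 encoded:

import six

def unicode_convert(obj):
    if isinstance(obj, dict):
        return {unicode_convert(k): unicode_convert(v)
                for k, v in obj.items()}
    elif isinstance(obj, list):
        return [unicode_convert(e) for e in obj]
    elif isinstance(obj, str):
        return obj                      # Python 3 text passes through
    elif isinstance(obj, six.text_type):
        return obj.encode('utf-8')      # Python 2 unicode gets encoded
    return obj

print(unicode_convert({u'key': [u'value', 1, 2.5]}))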
apple/turicreate
|
src/external/coremltools_wrap/coremltools/coremltools/models/pipeline.py
|
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/pipeline.py#L61-L79
|
def add_model(self, spec):
"""
Add a protobuf spec or :py:class:`models.MLModel` instance to the pipeline.
All input features of this model must either match the input_features
of the pipeline, or match the outputs of a previous model.
Parameters
----------
spec: [MLModel, Model_pb2]
A protobuf spec or MLModel instance containing a model.
"""
if isinstance(spec, _model.MLModel):
spec = spec._spec
pipeline = self.spec.pipeline
step_spec = pipeline.models.add()
step_spec.CopyFrom(spec)
|
[
"def",
"add_model",
"(",
"self",
",",
"spec",
")",
":",
"if",
"isinstance",
"(",
"spec",
",",
"_model",
".",
"MLModel",
")",
":",
"spec",
"=",
"spec",
".",
"_spec",
"pipeline",
"=",
"self",
".",
"spec",
".",
"pipeline",
"step_spec",
"=",
"pipeline",
".",
"models",
".",
"add",
"(",
")",
"step_spec",
".",
"CopyFrom",
"(",
"spec",
")"
] |
Add a protobuf spec or :py:class:`models.MLModel` instance to the pipeline.
All input features of this model must either match the input_features
of the pipeline, or match the outputs of a previous model.
Parameters
----------
spec: [MLModel, Model_pb2]
A protobuf spec or MLModel instance containing a model.
|
[
"Add",
"a",
"protobuf",
"spec",
"or",
":",
"py",
":",
"class",
":",
"models",
".",
"MLModel",
"instance",
"to",
"the",
"pipeline",
"."
] |
python
|
train
|
inspirehep/inspire-crawler
|
inspire_crawler/cli.py
|
https://github.com/inspirehep/inspire-crawler/blob/36d5cc0cd87cc597ba80e680b7de7254b120173a/inspire_crawler/cli.py#L256-L309
|
def schedule_crawl_cli(spider_name, workflow_name, dont_force_crawl, kwarg):
"""Schedule a new crawl.
Note:
Currently the oaiharvesting is done on inspire side, before this, so
it's not supported here yet.
"""
extra_kwargs = {}
for extra_kwarg in kwarg:
if '=' not in extra_kwarg:
raise TypeError(
'Bad formatted kwarg (%s), it should be in the form:\n'
' --kwarg key=value' % extra_kwarg
)
key, value = extra_kwarg.split('=', 1)
extra_kwargs[key] = value
settings = {'CRAWL_ONCE_ENABLED': False}
if dont_force_crawl:
settings = {}
try:
crawler_job_uid = schedule_crawl(
spider=spider_name,
workflow=workflow_name,
crawler_settings=settings,
**extra_kwargs
)
except ScrapydResponseError as error:
message = str(error)
if 'spider' in message and 'not found' in message:
click.echo('%s' % error)
click.echo('\n Available spiders:')
spiders = list_spiders()
click.echo('\n'.join(spiders))
raise click.Abort()
else:
raise
crawler_job = models.CrawlerJob.query.filter_by(
job_id=crawler_job_uid
).one()
click.echo(
'Once the job is started, you can see the logs of the job with the '
'command:\n'
' inspirehep crawler job list\n'
' inspirehep crawler job logs %s\n'
'\n'
'and for the associated workflow (it\'s job_id should be %s):\n'
' inspirehep crawler workflow list\n'
% (crawler_job.id, crawler_job_uid)
)
|
[
"def",
"schedule_crawl_cli",
"(",
"spider_name",
",",
"workflow_name",
",",
"dont_force_crawl",
",",
"kwarg",
")",
":",
"extra_kwargs",
"=",
"{",
"}",
"for",
"extra_kwarg",
"in",
"kwarg",
":",
"if",
"'='",
"not",
"in",
"extra_kwarg",
":",
"raise",
"TypeError",
"(",
"'Bad formatted kwarg (%s), it should be in the form:\\n'",
"' --kwarg key=value'",
"%",
"extra_kwarg",
")",
"key",
",",
"value",
"=",
"extra_kwarg",
".",
"split",
"(",
"'='",
",",
"1",
")",
"extra_kwargs",
"[",
"key",
"]",
"=",
"value",
"settings",
"=",
"{",
"'CRAWL_ONCE_ENABLED'",
":",
"False",
"}",
"if",
"dont_force_crawl",
":",
"settings",
"=",
"{",
"}",
"try",
":",
"crawler_job_uid",
"=",
"schedule_crawl",
"(",
"spider",
"=",
"spider_name",
",",
"workflow",
"=",
"workflow_name",
",",
"crawler_settings",
"=",
"settings",
",",
"*",
"*",
"extra_kwargs",
")",
"except",
"ScrapydResponseError",
"as",
"error",
":",
"message",
"=",
"str",
"(",
"error",
")",
"if",
"'spider'",
"in",
"message",
"and",
"'not found'",
"in",
"message",
":",
"click",
".",
"echo",
"(",
"'%s'",
"%",
"error",
")",
"click",
".",
"echo",
"(",
"'\\n Available spiders:'",
")",
"spiders",
"=",
"list_spiders",
"(",
")",
"click",
".",
"echo",
"(",
"'\\n'",
".",
"join",
"(",
"spiders",
")",
")",
"raise",
"click",
".",
"Abort",
"(",
")",
"else",
":",
"raise",
"crawler_job",
"=",
"models",
".",
"CrawlerJob",
".",
"query",
".",
"filter_by",
"(",
"job_id",
"=",
"crawler_job_uid",
")",
".",
"one",
"(",
")",
"click",
".",
"echo",
"(",
"'Once the job is started, you can see the logs of the job with the '",
"'command:\\n'",
"' inspirehep crawler job list\\n'",
"' inspirehep crawler job logs %s\\n'",
"'\\n'",
"'and for the associated workflow (it\\'s job_id should be %s):\\n'",
"' inspirehep crawler workflow list\\n'",
"%",
"(",
"crawler_job",
".",
"id",
",",
"crawler_job_uid",
")",
")"
] |
Schedule a new crawl.
Note:
Currently the oaiharvesting is done on inspire side, before this, so
it's not supported here yet.
|
[
"Schedule",
"a",
"new",
"crawl",
"."
] |
python
|
train
|
materialsproject/pymatgen
|
pymatgen/io/lammps/data.py
|
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/lammps/data.py#L1117-L1130
|
def to_file(self, filename):
"""
Saves object to a file in YAML format.
Args:
filename (str): Filename.
"""
d = {"mass_info": self.mass_info,
"nonbond_coeffs": self.nonbond_coeffs,
"topo_coeffs": self.topo_coeffs}
yaml = YAML(typ="safe")
with open(filename, "w") as f:
yaml.dump(d, f)
|
[
"def",
"to_file",
"(",
"self",
",",
"filename",
")",
":",
"d",
"=",
"{",
"\"mass_info\"",
":",
"self",
".",
"mass_info",
",",
"\"nonbond_coeffs\"",
":",
"self",
".",
"nonbond_coeffs",
",",
"\"topo_coeffs\"",
":",
"self",
".",
"topo_coeffs",
"}",
"yaml",
"=",
"YAML",
"(",
"typ",
"=",
"\"safe\"",
")",
"with",
"open",
"(",
"filename",
",",
"\"w\"",
")",
"as",
"f",
":",
"yaml",
".",
"dump",
"(",
"d",
",",
"f",
")"
] |
Saves object to a file in YAML format.
Args:
filename (str): Filename.
|
[
"Saves",
"object",
"to",
"a",
"file",
"in",
"YAML",
"format",
"."
] |
python
|
train
|
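Minimal sketch of the safe-typed YAML dump used by to_file (assumes ruamel.yaml, which provides the YAML class above):

from ruamel.yaml import YAML

d = {"mass_info": [["H", 1.008]],      # toy stand-ins for the real fields
     "nonbond_coeffs": [[0.1, 2.5]],
     "topo_coeffs": {}}
yaml = YAML(typ="safe")                 # safe representer: plain containers only
with open("force_field.yaml", "w") as f:
    yaml.dump(d, f)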
evhub/coconut
|
coconut/command/command.py
|
https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/command/command.py#L470-L485
|
def get_input(self, more=False):
"""Prompt for code input."""
received = None
try:
received = self.prompt.input(more)
except KeyboardInterrupt:
print()
printerr("KeyboardInterrupt")
except EOFError:
print()
self.exit_runner()
else:
if received.startswith(exit_chars):
self.exit_runner()
received = None
return received
|
[
"def",
"get_input",
"(",
"self",
",",
"more",
"=",
"False",
")",
":",
"received",
"=",
"None",
"try",
":",
"received",
"=",
"self",
".",
"prompt",
".",
"input",
"(",
"more",
")",
"except",
"KeyboardInterrupt",
":",
"print",
"(",
")",
"printerr",
"(",
"\"KeyboardInterrupt\"",
")",
"except",
"EOFError",
":",
"print",
"(",
")",
"self",
".",
"exit_runner",
"(",
")",
"else",
":",
"if",
"received",
".",
"startswith",
"(",
"exit_chars",
")",
":",
"self",
".",
"exit_runner",
"(",
")",
"received",
"=",
"None",
"return",
"received"
] |
Prompt for code input.
|
[
"Prompt",
"for",
"code",
"input",
"."
] |
python
|
train
|
opendatateam/udata
|
udata/harvest/actions.py
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/actions.py#L160-L187
|
def preview_from_config(name, url, backend,
description=None,
frequency=DEFAULT_HARVEST_FREQUENCY,
owner=None,
organization=None,
config=None,
):
'''Preview a harvesting from a source created with the given parameters'''
if owner and not isinstance(owner, User):
owner = User.get(owner)
if organization and not isinstance(organization, Organization):
organization = Organization.get(organization)
source = HarvestSource(
name=name,
url=url,
backend=backend,
description=description,
frequency=frequency or DEFAULT_HARVEST_FREQUENCY,
owner=owner,
organization=organization,
config=config,
)
cls = backends.get(current_app, source.backend)
max_items = current_app.config['HARVEST_PREVIEW_MAX_ITEMS']
backend = cls(source, dryrun=True, max_items=max_items)
return backend.harvest()
|
[
"def",
"preview_from_config",
"(",
"name",
",",
"url",
",",
"backend",
",",
"description",
"=",
"None",
",",
"frequency",
"=",
"DEFAULT_HARVEST_FREQUENCY",
",",
"owner",
"=",
"None",
",",
"organization",
"=",
"None",
",",
"config",
"=",
"None",
",",
")",
":",
"if",
"owner",
"and",
"not",
"isinstance",
"(",
"owner",
",",
"User",
")",
":",
"owner",
"=",
"User",
".",
"get",
"(",
"owner",
")",
"if",
"organization",
"and",
"not",
"isinstance",
"(",
"organization",
",",
"Organization",
")",
":",
"organization",
"=",
"Organization",
".",
"get",
"(",
"organization",
")",
"source",
"=",
"HarvestSource",
"(",
"name",
"=",
"name",
",",
"url",
"=",
"url",
",",
"backend",
"=",
"backend",
",",
"description",
"=",
"description",
",",
"frequency",
"=",
"frequency",
"or",
"DEFAULT_HARVEST_FREQUENCY",
",",
"owner",
"=",
"owner",
",",
"organization",
"=",
"organization",
",",
"config",
"=",
"config",
",",
")",
"cls",
"=",
"backends",
".",
"get",
"(",
"current_app",
",",
"source",
".",
"backend",
")",
"max_items",
"=",
"current_app",
".",
"config",
"[",
"'HARVEST_PREVIEW_MAX_ITEMS'",
"]",
"backend",
"=",
"cls",
"(",
"source",
",",
"dryrun",
"=",
"True",
",",
"max_items",
"=",
"max_items",
")",
"return",
"backend",
".",
"harvest",
"(",
")"
] |
Preview a harvest from a source created with the given parameters
|
[
"Preview",
"an",
"harvesting",
"from",
"a",
"source",
"created",
"with",
"the",
"given",
"parameters"
] |
python
|
train
|
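The owner/organization handling above follows an "instance or id" coercion pattern; a self-contained sketch (User here is a hypothetical stand-in, not the udata model):

class User:
    _db = {}
    def __init__(self, uid):
        self.uid = uid
        User._db[uid] = self
    @classmethod
    def get(cls, uid):
        return cls._db[uid]

def normalize_owner(owner):
    # pass instances through, resolve bare ids via .get()
    if owner and not isinstance(owner, User):
        owner = User.get(owner)
    return owner

alice = User("alice")
assert normalize_owner("alice") is alice   # id resolved to instance
assert normalize_owner(alice) is alice     # instance passed through
assert normalize_owner(None) is None       # falsy values left alone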
BetterWorks/django-anonymizer
|
anonymizer/replacers.py
|
https://github.com/BetterWorks/django-anonymizer/blob/2d25bb6e8b5e4230c58031c4b6d10cc536669b3e/anonymizer/replacers.py#L53-L57
|
def datetime(anon, obj, field, val):
"""
Returns a random datetime
"""
return anon.faker.datetime(field=field)
|
[
"def",
"datetime",
"(",
"anon",
",",
"obj",
",",
"field",
",",
"val",
")",
":",
"return",
"anon",
".",
"faker",
".",
"datetime",
"(",
"field",
"=",
"field",
")"
] |
Returns a random datetime
|
[
"Returns",
"a",
"random",
"datetime"
] |
python
|
train
|
kervi/kervi-devices
|
kervi/devices/displays/dummy_display_driver.py
|
https://github.com/kervi/kervi-devices/blob/c6aaddc6da1d0bce0ea2b0c6eb8393ba10aefa56/kervi/devices/displays/dummy_display_driver.py#L105-L116
|
def image(self, image):
"""Set buffer to value of Python Imaging Library image. The image should
be in 1 bit mode and a size equal to the display size.
"""
if image.mode != '1':
raise ValueError('Image must be in mode 1.')
imwidth, imheight = image.size
if imwidth != self.width or imheight != self.height:
raise ValueError('Image must be same dimensions as display ({0}x{1}).' \
.format(self.width, self.height))
print("bitmap display: image")
image.save("dummydisplay.bmp")
|
[
"def",
"image",
"(",
"self",
",",
"image",
")",
":",
"if",
"image",
".",
"mode",
"!=",
"'1'",
":",
"raise",
"ValueError",
"(",
"'Image must be in mode 1.'",
")",
"imwidth",
",",
"imheight",
"=",
"image",
".",
"size",
"if",
"imwidth",
"!=",
"self",
".",
"width",
"or",
"imheight",
"!=",
"self",
".",
"height",
":",
"raise",
"ValueError",
"(",
"'Image must be same dimensions as display ({0}x{1}).'",
".",
"format",
"(",
"self",
".",
"width",
",",
"self",
".",
"height",
")",
")",
"print",
"(",
"\"bitmap display: image\"",
")",
"image",
".",
"save",
"(",
"\"dummydisplay.bmp\"",
")"
] |
Set buffer to value of Python Imaging Library image. The image should
be in 1 bit mode and a size equal to the display size.
|
[
"Set",
"buffer",
"to",
"value",
"of",
"Python",
"Imaging",
"Library",
"image",
".",
"The",
"image",
"should",
"be",
"in",
"1",
"bit",
"mode",
"and",
"a",
"size",
"equal",
"to",
"the",
"display",
"size",
"."
] |
python
|
train
|
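Feeding the driver above requires a mode-'1' image with the display's exact dimensions; a short sketch with Pillow (assumed installed; the 128x64 size is illustrative):

from PIL import Image, ImageDraw

img = Image.new('1', (128, 64), 0)        # 1-bit image, all pixels off
draw = ImageDraw.Draw(img)
draw.rectangle((10, 10, 50, 30), fill=1)  # light up a rectangle
assert img.mode == '1' and img.size == (128, 64)
# display.image(img) would now pass both checks and write dummydisplay.bmp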
PythonCharmers/python-future
|
src/future/backports/datetime.py
|
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/datetime.py#L1354-L1379
|
def fromtimestamp(cls, t, tz=None):
"""Construct a datetime from a POSIX timestamp (like time.time()).
A timezone info object may be passed in as well.
"""
_check_tzinfo_arg(tz)
converter = _time.localtime if tz is None else _time.gmtime
t, frac = divmod(t, 1.0)
us = int(frac * 1e6)
# If timestamp is less than one microsecond smaller than a
# full second, us can be rounded up to 1000000. In this case,
# roll over to seconds, otherwise, ValueError is raised
# by the constructor.
if us == 1000000:
t += 1
us = 0
y, m, d, hh, mm, ss, weekday, jday, dst = converter(t)
ss = min(ss, 59) # clamp out leap seconds if the platform has them
result = cls(y, m, d, hh, mm, ss, us, tz)
if tz is not None:
result = tz.fromutc(result)
return result
|
[
"def",
"fromtimestamp",
"(",
"cls",
",",
"t",
",",
"tz",
"=",
"None",
")",
":",
"_check_tzinfo_arg",
"(",
"tz",
")",
"converter",
"=",
"_time",
".",
"localtime",
"if",
"tz",
"is",
"None",
"else",
"_time",
".",
"gmtime",
"t",
",",
"frac",
"=",
"divmod",
"(",
"t",
",",
"1.0",
")",
"us",
"=",
"int",
"(",
"frac",
"*",
"1e6",
")",
"# If timestamp is less than one microsecond smaller than a",
"# full second, us can be rounded up to 1000000. In this case,",
"# roll over to seconds, otherwise, ValueError is raised",
"# by the constructor.",
"if",
"us",
"==",
"1000000",
":",
"t",
"+=",
"1",
"us",
"=",
"0",
"y",
",",
"m",
",",
"d",
",",
"hh",
",",
"mm",
",",
"ss",
",",
"weekday",
",",
"jday",
",",
"dst",
"=",
"converter",
"(",
"t",
")",
"ss",
"=",
"min",
"(",
"ss",
",",
"59",
")",
"# clamp out leap seconds if the platform has them",
"result",
"=",
"cls",
"(",
"y",
",",
"m",
",",
"d",
",",
"hh",
",",
"mm",
",",
"ss",
",",
"us",
",",
"tz",
")",
"if",
"tz",
"is",
"not",
"None",
":",
"result",
"=",
"tz",
".",
"fromutc",
"(",
"result",
")",
"return",
"result"
] |
Construct a datetime from a POSIX timestamp (like time.time()).
A timezone info object may be passed in as well.
|
[
"Construct",
"a",
"datetime",
"from",
"a",
"POSIX",
"timestamp",
"(",
"like",
"time",
".",
"time",
"()",
")",
"."
] |
python
|
train
|
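The seconds/microseconds split in fromtimestamp can be exercised directly; the us == 1000000 branch is a defensive rollover for fractional parts that round up to a full second:

import time

t = time.time()
secs, frac = divmod(t, 1.0)
us = int(frac * 1e6)
if us == 1000000:     # mirror the rollover guard above
    secs += 1
    us = 0
print(int(secs), us)  # whole seconds and microseconds, us always < 1000000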
blockstack-packages/blockstack-profiles-py
|
blockstack_profiles/token_verifying.py
|
https://github.com/blockstack-packages/blockstack-profiles-py/blob/103783798df78cf0f007801e79ec6298f00b2817/blockstack_profiles/token_verifying.py#L102-L124
|
def get_profile_from_tokens(token_records, public_key_or_address,
hierarchical_keys=False):
""" A function for extracting a profile from a list of tokens.
"""
if hierarchical_keys:
raise NotImplementedError("Hierarchical key support not implemented")
profile = {}
for token_record in token_records:
# print token_record
try:
decoded_token = verify_token_record(token_record, public_key_or_address)
except ValueError:
# traceback.print_exc()
continue
else:
if "payload" in decoded_token:
if "claim" in decoded_token["payload"]:
claim = decoded_token["payload"]["claim"]
profile.update(claim)
return profile
|
[
"def",
"get_profile_from_tokens",
"(",
"token_records",
",",
"public_key_or_address",
",",
"hierarchical_keys",
"=",
"False",
")",
":",
"if",
"hierarchical_keys",
":",
"raise",
"NotImplementedError",
"(",
"\"Hierarchical key support not implemented\"",
")",
"profile",
"=",
"{",
"}",
"for",
"token_record",
"in",
"token_records",
":",
"# print token_record",
"try",
":",
"decoded_token",
"=",
"verify_token_record",
"(",
"token_record",
",",
"public_key_or_address",
")",
"except",
"ValueError",
":",
"# traceback.print_exc()",
"continue",
"else",
":",
"if",
"\"payload\"",
"in",
"decoded_token",
":",
"if",
"\"claim\"",
"in",
"decoded_token",
"[",
"\"payload\"",
"]",
":",
"claim",
"=",
"decoded_token",
"[",
"\"payload\"",
"]",
"[",
"\"claim\"",
"]",
"profile",
".",
"update",
"(",
"claim",
")",
"return",
"profile"
] |
A function for extracting a profile from a list of tokens.
|
[
"A",
"function",
"for",
"extracting",
"a",
"profile",
"from",
"a",
"list",
"of",
"tokens",
"."
] |
python
|
train
|
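The merge loop above reduces to "union of every verified claim"; a sketch with signature verification stubbed out (the token dicts below are illustrative):

def merge_claims(decoded_tokens):
    profile = {}
    for tok in decoded_tokens:
        claim = tok.get("payload", {}).get("claim")
        if claim:
            profile.update(claim)
    return profile

tokens = [
    {"payload": {"claim": {"name": "Alice"}}},
    {"payload": {"claim": {"website": "https://example.com"}}},
    {"payload": {}},  # no claim: skipped
]
print(merge_claims(tokens))  # {'name': 'Alice', 'website': 'https://example.com'}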
roclark/sportsreference
|
sportsreference/ncaab/boxscore.py
|
https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/ncaab/boxscore.py#L255-L282
|
def _parse_game_date_and_location(self, field, boxscore):
"""
Retrieve the game's date and location.
The date and location of the game follow a more complicated parsing
scheme and should be handled differently from other tags. Both fields
are separated by a newline character ('\n') with the first line being
the date and the second being the location.
Parameters
----------
field : string
The name of the attribute to parse
boxscore : PyQuery object
A PyQuery object containing all of the HTML data from the boxscore.
Returns
-------
string
Depending on the requested field, returns a text representation of
either the date or location of the game.
"""
scheme = BOXSCORE_SCHEME[field]
items = [i.text() for i in boxscore(scheme).items()]
game_info = items[0].split('\n')
if len(game_info) < 3 and field == 'location':
return None
return game_info[BOXSCORE_ELEMENT_INDEX[field]]
|
[
"def",
"_parse_game_date_and_location",
"(",
"self",
",",
"field",
",",
"boxscore",
")",
":",
"scheme",
"=",
"BOXSCORE_SCHEME",
"[",
"field",
"]",
"items",
"=",
"[",
"i",
".",
"text",
"(",
")",
"for",
"i",
"in",
"boxscore",
"(",
"scheme",
")",
".",
"items",
"(",
")",
"]",
"game_info",
"=",
"items",
"[",
"0",
"]",
".",
"split",
"(",
"'\\n'",
")",
"if",
"len",
"(",
"game_info",
")",
"<",
"3",
"and",
"field",
"==",
"'location'",
":",
"return",
"None",
"return",
"game_info",
"[",
"BOXSCORE_ELEMENT_INDEX",
"[",
"field",
"]",
"]"
] |
Retrieve the game's date and location.
The date and location of the game follow a more complicated parsing
scheme and should be handled differently from other tags. Both fields
are separated by a newline character ('\n') with the first line being
the date and the second being the location.
Parameters
----------
field : string
The name of the attribute to parse
boxscore : PyQuery object
A PyQuery object containing all of the HTML data from the boxscore.
Returns
-------
string
Depending on the requested field, returns a text representation of
either the date or location of the game.
|
[
"Retrieve",
"the",
"game",
"s",
"date",
"and",
"location",
"."
] |
python
|
train
|
fhcrc/seqmagick
|
seqmagick/transform.py
|
https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/transform.py#L362-L368
|
def ungap_sequences(records, gap_chars=GAP_TABLE):
"""
Remove gaps from sequences, given an alignment.
"""
logging.info('Applying _ungap_sequences generator: removing all gap characters')
for record in records:
yield ungap_all(record, gap_chars)
|
[
"def",
"ungap_sequences",
"(",
"records",
",",
"gap_chars",
"=",
"GAP_TABLE",
")",
":",
"logging",
".",
"info",
"(",
"'Applying _ungap_sequences generator: removing all gap characters'",
")",
"for",
"record",
"in",
"records",
":",
"yield",
"ungap_all",
"(",
"record",
",",
"gap_chars",
")"
] |
Remove gaps from sequences, given an alignment.
|
[
"Remove",
"gaps",
"from",
"sequences",
"given",
"an",
"alignment",
"."
] |
python
|
train
|
cisco-sas/kitty
|
kitty/model/low_level/container.py
|
https://github.com/cisco-sas/kitty/blob/cb0760989dcdfe079e43ac574d872d0b18953a32/kitty/model/low_level/container.py#L294-L302
|
def pop(self):
'''
Remove the top container from the container stack
'''
if not self._containers:
raise KittyException('no container to pop')
self._containers.pop()
if self._container():
self._container().pop()
|
[
"def",
"pop",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_containers",
":",
"raise",
"KittyException",
"(",
"'no container to pop'",
")",
"self",
".",
"_containers",
".",
"pop",
"(",
")",
"if",
"self",
".",
"_container",
"(",
")",
":",
"self",
".",
"_container",
"(",
")",
".",
"pop",
"(",
")"
] |
Remove the top container from the container stack
|
[
"Remove",
"a",
"the",
"top",
"container",
"from",
"the",
"container",
"stack"
] |
python
|
train
|
krzysiekfonal/grammaregex
|
grammaregex/grammaregex.py
|
https://github.com/krzysiekfonal/grammaregex/blob/5212075433fc5201da628acf09cdf5bf73aa1ad0/grammaregex/grammaregex.py#L79-L111
|
def match_tree(sentence, pattern):
"""Matches given sentence with provided pattern.
:param sentence: sentence from Spacy(see: http://spacy.io/docs/#doc-spans-sents) representing complete statement
:param pattern: pattern to which sentence will be compared
:return: True if the sentence matches the pattern, False otherwise
:raises: PatternSyntaxException: if pattern has wrong syntax
"""
if not verify_pattern(pattern):
raise PatternSyntaxException(pattern)
def _match_node(t, p):
pat_node = p.pop(0) if p else ""
return not pat_node or (_match_token(t, pat_node, False) and _match_edge(t.children,p))
def _match_edge(edges,p):
pat_edge = p.pop(0) if p else ""
if not pat_edge:
return True
elif not edges:
return False
else:
for (t) in edges:
if (_match_token(t, pat_edge, True)) and _match_node(t, list(p)):
return True
elif pat_edge == "**" and _match_edge(t.children, ["**"] + p):
return True
return False
return _match_node(sentence.root, pattern.split("/"))
|
[
"def",
"match_tree",
"(",
"sentence",
",",
"pattern",
")",
":",
"if",
"not",
"verify_pattern",
"(",
"pattern",
")",
":",
"raise",
"PatternSyntaxException",
"(",
"pattern",
")",
"def",
"_match_node",
"(",
"t",
",",
"p",
")",
":",
"pat_node",
"=",
"p",
".",
"pop",
"(",
"0",
")",
"if",
"p",
"else",
"\"\"",
"return",
"not",
"pat_node",
"or",
"(",
"_match_token",
"(",
"t",
",",
"pat_node",
",",
"False",
")",
"and",
"_match_edge",
"(",
"t",
".",
"children",
",",
"p",
")",
")",
"def",
"_match_edge",
"(",
"edges",
",",
"p",
")",
":",
"pat_edge",
"=",
"p",
".",
"pop",
"(",
"0",
")",
"if",
"p",
"else",
"\"\"",
"if",
"not",
"pat_edge",
":",
"return",
"True",
"elif",
"not",
"edges",
":",
"return",
"False",
"else",
":",
"for",
"(",
"t",
")",
"in",
"edges",
":",
"if",
"(",
"_match_token",
"(",
"t",
",",
"pat_edge",
",",
"True",
")",
")",
"and",
"_match_node",
"(",
"t",
",",
"list",
"(",
"p",
")",
")",
":",
"return",
"True",
"elif",
"pat_edge",
"==",
"\"**\"",
"and",
"_match_edge",
"(",
"t",
".",
"children",
",",
"[",
"\"**\"",
"]",
"+",
"p",
")",
":",
"return",
"True",
"return",
"False",
"return",
"_match_node",
"(",
"sentence",
".",
"root",
",",
"pattern",
".",
"split",
"(",
"\"/\"",
")",
")"
] |
Matches given sentence with provided pattern.
:param sentence: sentence from Spacy(see: http://spacy.io/docs/#doc-spans-sents) representing complete statement
:param pattern: pattern to which sentence will be compared
:return: True if the sentence matches the pattern, False otherwise
:raises: PatternSyntaxException: if pattern has wrong syntax
|
[
"Matches",
"given",
"sentence",
"with",
"provided",
"pattern",
"."
] |
python
|
train
|
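A hypothetical invocation (requires spaCy plus an English model; the edge/node pattern syntax is inferred from the docstring and code above and may differ in detail from the library's docs):

import spacy
from grammaregex import match_tree

nlp = spacy.load("en_core_web_sm")
doc = nlp("The quick brown fox jumps over the lazy dog.")
sentence = next(doc.sents)
# root node label, then alternating edge/node labels, joined with '/'
print(match_tree(sentence, "VERB/nsubj/NOUN"))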
pkgw/pwkit
|
pwkit/dulk_models.py
|
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/dulk_models.py#L175-L205
|
def calc_gs_nu_pk(b, ne, delta, sinth, depth):
"""Calculate the frequency of peak synchrotron emission, ν_pk.
This is Dulk (1985) equation 39, which is a fitting function assuming a
power-law electron population. Arguments are:
b
Magnetic field strength in Gauss
ne
The density of electrons per cubic centimeter with energies greater than 10 keV.
delta
The power-law index defining the energy distribution of the electron population,
with ``n(E) ~ E^(-delta)``. The equation is valid for ``2 <~ delta <~ 7``.
sinth
The sine of the angle between the line of sight and the magnetic field direction.
The equation is valid for θ > 20° or ``sinth > 0.34`` or so.
depth
The path length through the emitting medium, in cm.
The return value is peak frequency in Hz.
No complaints are raised if you attempt to use the equation outside of its
range of validity.
"""
coldens = ne * depth
return (2.72e3 *
10**(0.27 * delta) *
sinth**(0.41 + 0.03 * delta) *
coldens**(0.32 - 0.03 * delta) *
b**(0.68 + 0.03 * delta))
|
[
"def",
"calc_gs_nu_pk",
"(",
"b",
",",
"ne",
",",
"delta",
",",
"sinth",
",",
"depth",
")",
":",
"coldens",
"=",
"ne",
"*",
"depth",
"return",
"(",
"2.72e3",
"*",
"10",
"**",
"(",
"0.27",
"*",
"delta",
")",
"*",
"sinth",
"**",
"(",
"0.41",
"+",
"0.03",
"*",
"delta",
")",
"*",
"coldens",
"**",
"(",
"0.32",
"-",
"0.03",
"*",
"delta",
")",
"*",
"b",
"**",
"(",
"0.68",
"+",
"0.03",
"*",
"delta",
")",
")"
] |
Calculate the frequency of peak synchrotron emission, ν_pk.
This is Dulk (1985) equation 39, which is a fitting function assuming a
power-law electron population. Arguments are:
b
Magnetic field strength in Gauss
ne
The density of electrons per cubic centimeter with energies greater than 10 keV.
delta
The power-law index defining the energy distribution of the electron population,
with ``n(E) ~ E^(-delta)``. The equation is valid for ``2 <~ delta <~ 7``.
sinth
The sine of the angle between the line of sight and the magnetic field direction.
The equation is valid for θ > 20° or ``sinth > 0.34`` or so.
depth
The path length through the emitting medium, in cm.
The return value is peak frequency in Hz.
No complaints are raised if you attempt to use the equation outside of its
range of validity.
|
[
"Calculate",
"the",
"frequency",
"of",
"peak",
"synchrotron",
"emission",
"ν_pk",
"."
] |
python
|
train
|
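A worked number for the fit above (input values are illustrative only, chosen inside the stated validity ranges):

import math

def calc_gs_nu_pk(b, ne, delta, sinth, depth):
    coldens = ne * depth
    return (2.72e3 * 10**(0.27 * delta) * sinth**(0.41 + 0.03 * delta)
            * coldens**(0.32 - 0.03 * delta) * b**(0.68 + 0.03 * delta))

# B = 100 G, n_e = 1e6 cm^-3 (E > 10 keV), delta = 3, theta = 45 deg, L = 1e9 cm
nu_pk = calc_gs_nu_pk(100.0, 1e6, 3.0, math.sin(math.radians(45.0)), 1e9)
print("nu_pk ~ %.3e Hz" % nu_pk)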
GoogleCloudPlatform/appengine-mapreduce
|
python/src/mapreduce/namespace_range.py
|
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/namespace_range.py#L324-L330
|
def to_json_object(self):
"""Returns a dict representation that can be serialized to JSON."""
obj_dict = dict(namespace_start=self.namespace_start,
namespace_end=self.namespace_end)
if self.app is not None:
obj_dict['app'] = self.app
return obj_dict
|
[
"def",
"to_json_object",
"(",
"self",
")",
":",
"obj_dict",
"=",
"dict",
"(",
"namespace_start",
"=",
"self",
".",
"namespace_start",
",",
"namespace_end",
"=",
"self",
".",
"namespace_end",
")",
"if",
"self",
".",
"app",
"is",
"not",
"None",
":",
"obj_dict",
"[",
"'app'",
"]",
"=",
"self",
".",
"app",
"return",
"obj_dict"
] |
Returns a dict representation that can be serialized to JSON.
|
[
"Returns",
"a",
"dict",
"representation",
"that",
"can",
"be",
"serialized",
"to",
"JSON",
"."
] |
python
|
train
|
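A minimal round-trip of the convention above (NSRange is a stand-in with the same fields; note that 'app' is omitted when None, so the JSON stays compact):

import json

class NSRange:
    def __init__(self, namespace_start, namespace_end, app=None):
        self.namespace_start = namespace_start
        self.namespace_end = namespace_end
        self.app = app
    def to_json_object(self):
        obj_dict = dict(namespace_start=self.namespace_start,
                        namespace_end=self.namespace_end)
        if self.app is not None:
            obj_dict['app'] = self.app
        return obj_dict

print(json.dumps(NSRange("a", "m").to_json_object()))             # no 'app' key
print(json.dumps(NSRange("a", "m", app="s~x").to_json_object()))  # with 'app'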
has2k1/plotnine
|
plotnine/guides/guide.py
|
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/guides/guide.py#L93-L105
|
def _default(self, key, default=None):
"""
Lookup value of *key* themeable
If *key* not in themeable or value is None,
return the *default* value.
"""
try:
value = self.theme.themeables.property(key)
except KeyError:
value = None
return value if value is not None else default
|
[
"def",
"_default",
"(",
"self",
",",
"key",
",",
"default",
"=",
"None",
")",
":",
"try",
":",
"value",
"=",
"self",
".",
"theme",
".",
"themeables",
".",
"property",
"(",
"key",
")",
"except",
"KeyError",
":",
"value",
"=",
"None",
"return",
"value",
"if",
"value",
"is",
"not",
"None",
"else",
"default"
] |
Lookup value of *key* themeable
If *key* not in themeable or value is None,
return the *default* value.
|
[
"Lookup",
"value",
"of",
"*",
"key",
"*",
"themeable"
] |
python
|
train
|
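The try/except-with-fallback lookup above, reduced to a dict-backed stub (plotnine's themeables.property() also raises KeyError on a missing key):

class ThemeStub:
    props = {"legend_title": "My legend"}
    def _default(self, key, default=None):
        try:
            value = self.props[key]   # stands in for theme.themeables.property(key)
        except KeyError:
            value = None
        return value if value is not None else default

t = ThemeStub()
print(t._default("legend_title"))              # 'My legend'
print(t._default("legend_position", "right"))  # falls back to 'right'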
datacats/datacats
|
datacats/cli/manage.py
|
https://github.com/datacats/datacats/blob/e4bae503efa997660fb3f34fe166699569653157/datacats/cli/manage.py#L206-L244
|
def logs(environment, opts):
"""Display or follow container logs
Usage:
datacats logs [--postgres | --solr | --datapusher] [-s NAME] [-tr] [--tail=LINES] [ENVIRONMENT]
datacats logs -f [--postgres | --solr | --datapusher] [-s NAME] [-r] [ENVIRONMENT]
Options:
--datapusher Show logs for datapusher instead of web logs
--postgres Show postgres database logs instead of web logs
-f --follow Follow logs instead of exiting immediately
--solr Show solr search logs instead of web logs
-t --timestamps Add timestamps to log lines
-s --site=NAME Specify a site for logs if needed [default: primary]
--tail=LINES Number of lines to show [default: all]
ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.'
"""
container = 'web'
if opts['--solr']:
container = 'solr'
if opts['--postgres']:
container = 'postgres'
if opts['--datapusher']:
container = 'datapusher'
tail = opts['--tail']
if tail != 'all':
tail = int(tail)
l = environment.logs(container, tail, opts['--follow'],
opts['--timestamps'])
if not opts['--follow']:
print l
return
try:
for message in l:
write(message)
except KeyboardInterrupt:
print
|
[
"def",
"logs",
"(",
"environment",
",",
"opts",
")",
":",
"container",
"=",
"'web'",
"if",
"opts",
"[",
"'--solr'",
"]",
":",
"container",
"=",
"'solr'",
"if",
"opts",
"[",
"'--postgres'",
"]",
":",
"container",
"=",
"'postgres'",
"if",
"opts",
"[",
"'--datapusher'",
"]",
":",
"container",
"=",
"'datapusher'",
"tail",
"=",
"opts",
"[",
"'--tail'",
"]",
"if",
"tail",
"!=",
"'all'",
":",
"tail",
"=",
"int",
"(",
"tail",
")",
"l",
"=",
"environment",
".",
"logs",
"(",
"container",
",",
"tail",
",",
"opts",
"[",
"'--follow'",
"]",
",",
"opts",
"[",
"'--timestamps'",
"]",
")",
"if",
"not",
"opts",
"[",
"'--follow'",
"]",
":",
"print",
"l",
"return",
"try",
":",
"for",
"message",
"in",
"l",
":",
"write",
"(",
"message",
")",
"except",
"KeyboardInterrupt",
":",
"print"
] |
Display or follow container logs
Usage:
datacats logs [--postgres | --solr | --datapusher] [-s NAME] [-tr] [--tail=LINES] [ENVIRONMENT]
datacats logs -f [--postgres | --solr | --datapusher] [-s NAME] [-r] [ENVIRONMENT]
Options:
--datapusher Show logs for datapusher instead of web logs
--postgres Show postgres database logs instead of web logs
-f --follow Follow logs instead of exiting immediately
--solr Show solr search logs instead of web logs
-t --timestamps Add timestamps to log lines
-s --site=NAME Specify a site for logs if needed [default: primary]
--tail=LINES Number of lines to show [default: all]
ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.'
|
[
"Display",
"or",
"follow",
"container",
"logs"
] |
python
|
train
|
andy-esch/sqterritory
|
sqterritory/territory.py
|
https://github.com/andy-esch/sqterritory/blob/53bcf7c8946f5d216d1ceccf55f9f339125b8205/sqterritory/territory.py#L74-L102
|
def _get_target_nearest(self):
"""Get nearest target for each origin"""
reps_query = """
SELECT DISTINCT ON(g2.cartodb_id)
g1.cartodb_id As origin_id,
g2.the_geom,
g2.cartodb_id + {maxorigin} as cartodb_id,
g2.the_geom_webmercator
FROM {origin_table} As g1, {target_table} As g2
ORDER BY g2.cartodb_id, g1.the_geom <-> g2.the_geom
""".format(
maxorigin=self.origins.index.max(),
origin_table=self.origin_table,
target_table=self.target_table
)
nearest_reps = self.context.query(
reps_query,
decode_geom=True
)
nearest_reps = gpd.GeoDataFrame(nearest_reps, geometry='geometry')
init_labels = nearest_reps['origin_id'].values
# update with new information
self.targets['labels'] = init_labels
logging.info('nearest targets retrieved')
return nearest_reps
|
[
"def",
"_get_target_nearest",
"(",
"self",
")",
":",
"reps_query",
"=",
"\"\"\"\n SELECT DISTINCT ON(g2.cartodb_id)\n g1.cartodb_id As origin_id,\n g2.the_geom,\n g2.cartodb_id + {maxorigin} as cartodb_id,\n g2.the_geom_webmercator\n FROM {origin_table} As g1, {target_table} As g2\n ORDER BY g2.cartodb_id, g1.the_geom <-> g2.the_geom\n \"\"\"",
".",
"format",
"(",
"maxorigin",
"=",
"self",
".",
"origins",
".",
"index",
".",
"max",
"(",
")",
",",
"origin_table",
"=",
"self",
".",
"origin_table",
",",
"target_table",
"=",
"self",
".",
"target_table",
")",
"nearest_reps",
"=",
"self",
".",
"context",
".",
"query",
"(",
"reps_query",
",",
"decode_geom",
"=",
"True",
")",
"nearest_reps",
"=",
"gpd",
".",
"GeoDataFrame",
"(",
"nearest_reps",
",",
"geometry",
"=",
"'geometry'",
")",
"init_labels",
"=",
"nearest_reps",
"[",
"'origin_id'",
"]",
".",
"values",
"# update with new information",
"self",
".",
"targets",
"[",
"'labels'",
"]",
"=",
"init_labels",
"logging",
".",
"info",
"(",
"'nearest targets retrieved'",
")",
"return",
"nearest_reps"
] |
Get nearest target for each origin
|
[
"Get",
"nearest",
"target",
"for",
"each",
"origin"
] |
python
|
train
|
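In the SQL above, '<->' is PostgreSQL/PostGIS's KNN distance operator, and DISTINCT ON keeps only the first (nearest) row per target. The same pairing over plain tuples, as a self-contained analogue:

origins = {1: (0.0, 0.0), 2: (10.0, 10.0)}
targets = {101: (1.0, 2.0), 102: (9.0, 8.0)}

def nearest_origin(pt):
    # mirrors ORDER BY distance, taking the first row
    return min(origins, key=lambda o: (origins[o][0] - pt[0]) ** 2
                                      + (origins[o][1] - pt[1]) ** 2)

labels = {t: nearest_origin(p) for t, p in targets.items()}
print(labels)  # {101: 1, 102: 2}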
Erotemic/utool
|
utool/util_cplat.py
|
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_cplat.py#L544-L622
|
def view_directory(dname=None, fname=None, verbose=True):
"""
View a directory in the operating system file browser. Currently supports
windows explorer, mac open, and linux nautilus.
Args:
dname (str): directory name
fname (str): a filename to select in the directory (nautilus only)
verbose (bool):
CommandLine:
python -m utool.util_cplat --test-view_directory
Example:
>>> # DISABLE_DOCTEST
>>> # DOCTEST_DISABLE
>>> from utool.util_cplat import * # NOQA
>>> import utool as ut
>>> dname = ut.truepath('~')
>>> verbose = True
>>> view_directory(dname, verbose)
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_cplat import * # NOQA
>>> import utool as ut
>>> base = ut.ensure_app_cache_dir('utool', 'test_vd')
>>> dirs = [
>>> '',
>>> 'dir1',
>>> 'has space',
>>> 'space at end ',
>>> ' space at start ',
>>> '"quotes and spaces"',
>>> "'single quotes and spaces'",
>>> 'Frogram Piles (y2K)',
>>> ]
>>> dirs_ = [ut.ensuredir(join(base, d)) for d in dirs]
>>> for dname in dirs_:
>>> ut.view_directory(dname, verbose=False)
>>> fpath = join(base, 'afile.txt')
>>> ut.touch(fpath)
>>> ut.view_directory(base, fpath, verbose=False)
"""
from utool.util_arg import STRICT
from utool.util_path import checkpath
# from utool.util_str import SINGLE_QUOTE, DOUBLE_QUOTE
if HAVE_PATHLIB and isinstance(dname, pathlib.Path):
dname = str(dname)
if verbose:
print('[cplat] view_directory(%r) ' % dname)
dname = os.getcwd() if dname is None else dname
open_prog = {
'win32': 'explorer.exe',
'linux': 'nautilus',
'darwin': 'open'
}[OS_TYPE]
dname = normpath(dname)
if STRICT:
assert checkpath(dname, verbose=verbose), 'directory doesnt exist'
if fname is not None and OS_TYPE == 'linux':
arg = join(dname, fname)
else:
arg = dname
# if ' ' in dname and not dname.startswith((SINGLE_QUOTE, DOUBLE_QUOTE)):
# # Ensure quotations
# dname = '"%s"' % dname
# if not WIN32:
# arg = dname
# # arg = subprocess.list2cmdline([dname])
# # arg = pipes.quote(dname)
# else:
# arg = dname
# spawn and detach process
args = (open_prog, arg)
print(subprocess.list2cmdline(args))
subprocess.Popen(args)
|
[
"def",
"view_directory",
"(",
"dname",
"=",
"None",
",",
"fname",
"=",
"None",
",",
"verbose",
"=",
"True",
")",
":",
"from",
"utool",
".",
"util_arg",
"import",
"STRICT",
"from",
"utool",
".",
"util_path",
"import",
"checkpath",
"# from utool.util_str import SINGLE_QUOTE, DOUBLE_QUOTE",
"if",
"HAVE_PATHLIB",
"and",
"isinstance",
"(",
"dname",
",",
"pathlib",
".",
"Path",
")",
":",
"dname",
"=",
"str",
"(",
"dname",
")",
"if",
"verbose",
":",
"print",
"(",
"'[cplat] view_directory(%r) '",
"%",
"dname",
")",
"dname",
"=",
"os",
".",
"getcwd",
"(",
")",
"if",
"dname",
"is",
"None",
"else",
"dname",
"open_prog",
"=",
"{",
"'win32'",
":",
"'explorer.exe'",
",",
"'linux'",
":",
"'nautilus'",
",",
"'darwin'",
":",
"'open'",
"}",
"[",
"OS_TYPE",
"]",
"dname",
"=",
"normpath",
"(",
"dname",
")",
"if",
"STRICT",
":",
"assert",
"checkpath",
"(",
"dname",
",",
"verbose",
"=",
"verbose",
")",
",",
"'directory doesnt exit'",
"if",
"fname",
"is",
"not",
"None",
"and",
"OS_TYPE",
"==",
"'linux'",
":",
"arg",
"=",
"join",
"(",
"dname",
",",
"fname",
")",
"else",
":",
"arg",
"=",
"dname",
"# if ' ' in dname and not dname.startswith((SINGLE_QUOTE, DOUBLE_QUOTE)):",
"# # Ensure quotations",
"# dname = '\"%s\"' % dname",
"# if not WIN32:",
"# arg = dname",
"# # arg = subprocess.list2cmdline([dname])",
"# # arg = pipes.quote(dname)",
"# else:",
"# arg = dname",
"# spawn and detatch process",
"args",
"=",
"(",
"open_prog",
",",
"arg",
")",
"print",
"(",
"subprocess",
".",
"list2cmdline",
"(",
"args",
")",
")",
"subprocess",
".",
"Popen",
"(",
"args",
")"
] |
View a directory in the operating system file browser. Currently supports
windows explorer, mac open, and linux nautilus.
Args:
dname (str): directory name
fname (str): a filename to select in the directory (nautilus only)
verbose (bool):
CommandLine:
python -m utool.util_cplat --test-view_directory
Example:
>>> # DISABLE_DOCTEST
>>> # DOCTEST_DISABLE
>>> from utool.util_cplat import * # NOQA
>>> import utool as ut
>>> dname = ut.truepath('~')
>>> verbose = True
>>> view_directory(dname, verbose)
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_cplat import * # NOQA
>>> import utool as ut
>>> base = ut.ensure_app_cache_dir('utool', 'test_vd')
>>> dirs = [
>>> '',
>>> 'dir1',
>>> 'has space',
>>> 'space at end ',
>>> ' space at start ',
>>> '"quotes and spaces"',
>>> "'single quotes and spaces'",
>>> 'Frogram Piles (y2K)',
>>> ]
>>> dirs_ = [ut.ensuredir(join(base, d)) for d in dirs]
>>> for dname in dirs_:
>>> ut.view_directory(dname, verbose=False)
>>> fpath = join(base, 'afile.txt')
>>> ut.touch(fpath)
>>> ut.view_directory(base, fpath, verbose=False)
|
[
"View",
"a",
"directory",
"in",
"the",
"operating",
"system",
"file",
"browser",
".",
"Currently",
"supports",
"windows",
"explorer",
"mac",
"open",
"and",
"linux",
"nautlius",
"."
] |
python
|
train
|
AtsushiSakai/SimpleTkGUIKit
|
SimpleTkGUIKit/SimpleTkGUIKit.py
|
https://github.com/AtsushiSakai/SimpleTkGUIKit/blob/e7cbb06ff32afb165cdaa4fe396ca2f172c66ff0/SimpleTkGUIKit/SimpleTkGUIKit.py#L134-L167
|
def GetEntries(dataList, title="Select", msg=""):
"""
Get entries of the list
title: Window name
msg: Label of the check button
return data dictionary like:
{'y': '5.0', 'x': '100', 'z': 'save'}
"""
root = tkinter.Tk()
root.title(title)
label = tkinter.Label(root, text=msg)
label.pack()
entries = []
for item in dataList:
tkinter.Label(root, text=item).pack()
entry = tkinter.Entry(root)
entry.pack()
entries.append(entry)
# print entries
tkinter.Button(root, text="OK", fg="black", command=root.quit).pack()
root.mainloop()
result = {}
for (entry, data) in zip(entries, dataList):
result[data] = entry.get()
root.destroy()
print(result)
return result
|
[
"def",
"GetEntries",
"(",
"dataList",
",",
"title",
"=",
"\"Select\"",
",",
"msg",
"=",
"\"\"",
")",
":",
"root",
"=",
"tkinter",
".",
"Tk",
"(",
")",
"root",
".",
"title",
"(",
"title",
")",
"label",
"=",
"tkinter",
".",
"Label",
"(",
"root",
",",
"text",
"=",
"msg",
")",
"label",
".",
"pack",
"(",
")",
"entries",
"=",
"[",
"]",
"for",
"item",
"in",
"dataList",
":",
"tkinter",
".",
"Label",
"(",
"root",
",",
"text",
"=",
"item",
")",
".",
"pack",
"(",
")",
"entry",
"=",
"tkinter",
".",
"Entry",
"(",
"root",
")",
"entry",
".",
"pack",
"(",
")",
"entries",
".",
"append",
"(",
"entry",
")",
"# print entries",
"tkinter",
".",
"Button",
"(",
"root",
",",
"text",
"=",
"\"OK\"",
",",
"fg",
"=",
"\"black\"",
",",
"command",
"=",
"root",
".",
"quit",
")",
".",
"pack",
"(",
")",
"root",
".",
"mainloop",
"(",
")",
"result",
"=",
"{",
"}",
"for",
"(",
"entry",
",",
"data",
")",
"in",
"zip",
"(",
"entries",
",",
"dataList",
")",
":",
"result",
"[",
"data",
"]",
"=",
"entry",
".",
"get",
"(",
")",
"root",
".",
"destroy",
"(",
")",
"print",
"(",
"result",
")",
"return",
"result"
] |
Get entries of the list
title: Window name
msg: Label of the check button
return data dictionary like:
{'y': '5.0', 'x': '100', 'z': 'save'}
|
[
"Get",
"entries",
"of",
"the",
"list"
] |
python
|
train
|
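A hypothetical invocation (the import path is assumed; this opens a Tk window, so it needs a display and blocks until OK is pressed):

from SimpleTkGUIKit.SimpleTkGUIKit import GetEntries

if __name__ == "__main__":
    values = GetEntries(["x", "y", "z"], title="Params", msg="Enter values")
    # e.g. values == {'x': '100', 'y': '5.0', 'z': 'save'}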
yyuu/botornado
|
boto/connection.py
|
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/connection.py#L699-L797
|
def _mexe(self, request, sender=None, override_num_retries=None,
retry_handler=None):
"""
mexe - Multi-execute inside a loop, retrying multiple times to handle
transient Internet errors by simply trying again.
Also handles redirects.
This code was inspired by the S3Utils classes posted to the boto-users
Google group by Larry Bates. Thanks!
"""
boto.log.debug('Method: %s' % request.method)
boto.log.debug('Path: %s' % request.path)
boto.log.debug('Data: %s' % request.body)
boto.log.debug('Headers: %s' % request.headers)
boto.log.debug('Host: %s' % request.host)
response = None
body = None
e = None
if override_num_retries is None:
num_retries = config.getint('Boto', 'num_retries', self.num_retries)
else:
num_retries = override_num_retries
i = 0
connection = self.get_http_connection(request.host, self.is_secure)
while i <= num_retries:
# Use binary exponential backoff to desynchronize client requests
next_sleep = random.random() * (2 ** i)
try:
# we now re-sign each request before it is retried
boto.log.debug('Token: %s' % self.provider.security_token)
request.authorize(connection=self)
if callable(sender):
response = sender(connection, request.method, request.path,
request.body, request.headers)
else:
connection.request(request.method, request.path,
request.body, request.headers)
response = connection.getresponse()
location = response.getheader('location')
# -- gross hack --
# httplib gets confused with chunked responses to HEAD requests
# so I have to fake it out
if request.method == 'HEAD' and getattr(response,
'chunked', False):
response.chunked = 0
if callable(retry_handler):
status = retry_handler(response, i, next_sleep)
if status:
msg, i, next_sleep = status
if msg:
boto.log.debug(msg)
time.sleep(next_sleep)
continue
if response.status == 500 or response.status == 503:
msg = 'Received %d response. ' % response.status
msg += 'Retrying in %3.1f seconds' % next_sleep
boto.log.debug(msg)
body = response.read()
elif response.status < 300 or response.status >= 400 or \
not location:
self.put_http_connection(request.host, self.is_secure,
connection)
return response
else:
scheme, request.host, request.path, \
params, query, fragment = urlparse.urlparse(location)
if query:
request.path += '?' + query
msg = 'Redirecting: %s' % scheme + '://'
msg += request.host + request.path
boto.log.debug(msg)
connection = self.get_http_connection(request.host,
scheme == 'https')
continue
except self.http_exceptions, e:
for unretryable in self.http_unretryable_exceptions:
if isinstance(e, unretryable):
boto.log.debug(
'encountered unretryable %s exception, re-raising' %
e.__class__.__name__)
raise e
boto.log.debug('encountered %s exception, reconnecting' % \
e.__class__.__name__)
connection = self.new_http_connection(request.host,
self.is_secure)
time.sleep(next_sleep)
i += 1
# If we made it here, it's because we have exhausted our retries
# and still haven't succeeded. So, if we have a response object,
# use it to raise an exception.
# Otherwise, raise the exception that must have already happened.
if response:
raise BotoServerError(response.status, response.reason, body)
elif e:
raise e
else:
msg = 'Please report this exception as a Boto Issue!'
raise BotoClientError(msg)
|
[
"def",
"_mexe",
"(",
"self",
",",
"request",
",",
"sender",
"=",
"None",
",",
"override_num_retries",
"=",
"None",
",",
"retry_handler",
"=",
"None",
")",
":",
"boto",
".",
"log",
".",
"debug",
"(",
"'Method: %s'",
"%",
"request",
".",
"method",
")",
"boto",
".",
"log",
".",
"debug",
"(",
"'Path: %s'",
"%",
"request",
".",
"path",
")",
"boto",
".",
"log",
".",
"debug",
"(",
"'Data: %s'",
"%",
"request",
".",
"body",
")",
"boto",
".",
"log",
".",
"debug",
"(",
"'Headers: %s'",
"%",
"request",
".",
"headers",
")",
"boto",
".",
"log",
".",
"debug",
"(",
"'Host: %s'",
"%",
"request",
".",
"host",
")",
"response",
"=",
"None",
"body",
"=",
"None",
"e",
"=",
"None",
"if",
"override_num_retries",
"is",
"None",
":",
"num_retries",
"=",
"config",
".",
"getint",
"(",
"'Boto'",
",",
"'num_retries'",
",",
"self",
".",
"num_retries",
")",
"else",
":",
"num_retries",
"=",
"override_num_retries",
"i",
"=",
"0",
"connection",
"=",
"self",
".",
"get_http_connection",
"(",
"request",
".",
"host",
",",
"self",
".",
"is_secure",
")",
"while",
"i",
"<=",
"num_retries",
":",
"# Use binary exponential backoff to desynchronize client requests",
"next_sleep",
"=",
"random",
".",
"random",
"(",
")",
"*",
"(",
"2",
"**",
"i",
")",
"try",
":",
"# we now re-sign each request before it is retried",
"boto",
".",
"log",
".",
"debug",
"(",
"'Token: %s'",
"%",
"self",
".",
"provider",
".",
"security_token",
")",
"request",
".",
"authorize",
"(",
"connection",
"=",
"self",
")",
"if",
"callable",
"(",
"sender",
")",
":",
"response",
"=",
"sender",
"(",
"connection",
",",
"request",
".",
"method",
",",
"request",
".",
"path",
",",
"request",
".",
"body",
",",
"request",
".",
"headers",
")",
"else",
":",
"connection",
".",
"request",
"(",
"request",
".",
"method",
",",
"request",
".",
"path",
",",
"request",
".",
"body",
",",
"request",
".",
"headers",
")",
"response",
"=",
"connection",
".",
"getresponse",
"(",
")",
"location",
"=",
"response",
".",
"getheader",
"(",
"'location'",
")",
"# -- gross hack --",
"# httplib gets confused with chunked responses to HEAD requests",
"# so I have to fake it out",
"if",
"request",
".",
"method",
"==",
"'HEAD'",
"and",
"getattr",
"(",
"response",
",",
"'chunked'",
",",
"False",
")",
":",
"response",
".",
"chunked",
"=",
"0",
"if",
"callable",
"(",
"retry_handler",
")",
":",
"status",
"=",
"retry_handler",
"(",
"response",
",",
"i",
",",
"next_sleep",
")",
"if",
"status",
":",
"msg",
",",
"i",
",",
"next_sleep",
"=",
"status",
"if",
"msg",
":",
"boto",
".",
"log",
".",
"debug",
"(",
"msg",
")",
"time",
".",
"sleep",
"(",
"next_sleep",
")",
"continue",
"if",
"response",
".",
"status",
"==",
"500",
"or",
"response",
".",
"status",
"==",
"503",
":",
"msg",
"=",
"'Received %d response. '",
"%",
"response",
".",
"status",
"msg",
"+=",
"'Retrying in %3.1f seconds'",
"%",
"next_sleep",
"boto",
".",
"log",
".",
"debug",
"(",
"msg",
")",
"body",
"=",
"response",
".",
"read",
"(",
")",
"elif",
"response",
".",
"status",
"<",
"300",
"or",
"response",
".",
"status",
">=",
"400",
"or",
"not",
"location",
":",
"self",
".",
"put_http_connection",
"(",
"request",
".",
"host",
",",
"self",
".",
"is_secure",
",",
"connection",
")",
"return",
"response",
"else",
":",
"scheme",
",",
"request",
".",
"host",
",",
"request",
".",
"path",
",",
"params",
",",
"query",
",",
"fragment",
"=",
"urlparse",
".",
"urlparse",
"(",
"location",
")",
"if",
"query",
":",
"request",
".",
"path",
"+=",
"'?'",
"+",
"query",
"msg",
"=",
"'Redirecting: %s'",
"%",
"scheme",
"+",
"'://'",
"msg",
"+=",
"request",
".",
"host",
"+",
"request",
".",
"path",
"boto",
".",
"log",
".",
"debug",
"(",
"msg",
")",
"connection",
"=",
"self",
".",
"get_http_connection",
"(",
"request",
".",
"host",
",",
"scheme",
"==",
"'https'",
")",
"continue",
"except",
"self",
".",
"http_exceptions",
",",
"e",
":",
"for",
"unretryable",
"in",
"self",
".",
"http_unretryable_exceptions",
":",
"if",
"isinstance",
"(",
"e",
",",
"unretryable",
")",
":",
"boto",
".",
"log",
".",
"debug",
"(",
"'encountered unretryable %s exception, re-raising'",
"%",
"e",
".",
"__class__",
".",
"__name__",
")",
"raise",
"e",
"boto",
".",
"log",
".",
"debug",
"(",
"'encountered %s exception, reconnecting'",
"%",
"e",
".",
"__class__",
".",
"__name__",
")",
"connection",
"=",
"self",
".",
"new_http_connection",
"(",
"request",
".",
"host",
",",
"self",
".",
"is_secure",
")",
"time",
".",
"sleep",
"(",
"next_sleep",
")",
"i",
"+=",
"1",
"# If we made it here, it's because we have exhausted our retries",
"# and stil haven't succeeded. So, if we have a response object,",
"# use it to raise an exception.",
"# Otherwise, raise the exception that must have already h#appened.",
"if",
"response",
":",
"raise",
"BotoServerError",
"(",
"response",
".",
"status",
",",
"response",
".",
"reason",
",",
"body",
")",
"elif",
"e",
":",
"raise",
"e",
"else",
":",
"msg",
"=",
"'Please report this exception as a Boto Issue!'",
"raise",
"BotoClientError",
"(",
"msg",
")"
] |
mexe - Multi-execute inside a loop, retrying multiple times to handle
transient Internet errors by simply trying again.
Also handles redirects.
This code was inspired by the S3Utils classes posted to the boto-users
Google group by Larry Bates. Thanks!
|
[
"mexe",
"-",
"Multi",
"-",
"execute",
"inside",
"a",
"loop",
"retrying",
"multiple",
"times",
"to",
"handle",
"transient",
"Internet",
"errors",
"by",
"simply",
"trying",
"again",
".",
"Also",
"handles",
"redirects",
"."
] |
python
|
train
|
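The retry skeleton in _mexe, isolated: binary exponential backoff with a random factor so concurrent clients desynchronize (a sketch, not boto's code; OSError stands in for transient network errors):

import random
import time

def with_retries(fn, num_retries=3):
    last_exc = None
    for i in range(num_retries + 1):
        try:
            return fn()
        except OSError as exc:
            last_exc = exc
            time.sleep(random.random() * (2 ** i))  # 0..1s, 0..2s, 0..4s, ...
    raise last_exc

# with_retries(lambda: flaky_http_call()) retries up to 3 times before raising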
cloud9ers/gurumate
|
environment/lib/python2.7/site-packages/MySQL_python-1.2.4c1-py2.7-linux-x86_64.egg/MySQLdb/cursors.py
|
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/MySQL_python-1.2.4c1-py2.7-linux-x86_64.egg/MySQLdb/cursors.py#L206-L254
|
def executemany(self, query, args):
"""Execute a multi-row query.
query -- string, query to execute on server
args
Sequence of sequences or mappings, parameters to use with
query.
Returns long integer rows affected, if any.
This method improves performance on multiple-row INSERT and
REPLACE. Otherwise it is equivalent to looping over args with
execute().
"""
del self.messages[:]
db = self._get_db()
if not args: return
if isinstance(query, unicode):
query = query.encode(db.unicode_literal.charset)
m = insert_values.search(query)
if not m:
r = 0
for a in args:
r = r + self.execute(query, a)
return r
p = m.start(1)
e = m.end(1)
qv = m.group(1)
try:
q = [ qv % db.literal(a) for a in args ]
except TypeError, msg:
if msg.args[0] in ("not enough arguments for format string",
"not all arguments converted"):
self.errorhandler(self, ProgrammingError, msg.args[0])
else:
self.errorhandler(self, TypeError, msg)
except (SystemExit, KeyboardInterrupt):
raise
except:
exc, value, tb = sys.exc_info()
del tb
self.errorhandler(self, exc, value)
r = self._query('\n'.join([query[:p], ',\n'.join(q), query[e:]]))
if not self._defer_warnings: self._warning_check()
return r
|
[
"def",
"executemany",
"(",
"self",
",",
"query",
",",
"args",
")",
":",
"del",
"self",
".",
"messages",
"[",
":",
"]",
"db",
"=",
"self",
".",
"_get_db",
"(",
")",
"if",
"not",
"args",
":",
"return",
"if",
"isinstance",
"(",
"query",
",",
"unicode",
")",
":",
"query",
"=",
"query",
".",
"encode",
"(",
"db",
".",
"unicode_literal",
".",
"charset",
")",
"m",
"=",
"insert_values",
".",
"search",
"(",
"query",
")",
"if",
"not",
"m",
":",
"r",
"=",
"0",
"for",
"a",
"in",
"args",
":",
"r",
"=",
"r",
"+",
"self",
".",
"execute",
"(",
"query",
",",
"a",
")",
"return",
"r",
"p",
"=",
"m",
".",
"start",
"(",
"1",
")",
"e",
"=",
"m",
".",
"end",
"(",
"1",
")",
"qv",
"=",
"m",
".",
"group",
"(",
"1",
")",
"try",
":",
"q",
"=",
"[",
"qv",
"%",
"db",
".",
"literal",
"(",
"a",
")",
"for",
"a",
"in",
"args",
"]",
"except",
"TypeError",
",",
"msg",
":",
"if",
"msg",
".",
"args",
"[",
"0",
"]",
"in",
"(",
"\"not enough arguments for format string\"",
",",
"\"not all arguments converted\"",
")",
":",
"self",
".",
"errorhandler",
"(",
"self",
",",
"ProgrammingError",
",",
"msg",
".",
"args",
"[",
"0",
"]",
")",
"else",
":",
"self",
".",
"errorhandler",
"(",
"self",
",",
"TypeError",
",",
"msg",
")",
"except",
"(",
"SystemExit",
",",
"KeyboardInterrupt",
")",
":",
"raise",
"except",
":",
"exc",
",",
"value",
",",
"tb",
"=",
"sys",
".",
"exc_info",
"(",
")",
"del",
"tb",
"self",
".",
"errorhandler",
"(",
"self",
",",
"exc",
",",
"value",
")",
"r",
"=",
"self",
".",
"_query",
"(",
"'\\n'",
".",
"join",
"(",
"[",
"query",
"[",
":",
"p",
"]",
",",
"',\\n'",
".",
"join",
"(",
"q",
")",
",",
"query",
"[",
"e",
":",
"]",
"]",
")",
")",
"if",
"not",
"self",
".",
"_defer_warnings",
":",
"self",
".",
"_warning_check",
"(",
")",
"return",
"r"
] |
Execute a multi-row query.
query -- string, query to execute on server
args
Sequence of sequences or mappings, parameters to use with
query.
Returns long integer rows affected, if any.
This method improves performance on multiple-row INSERT and
REPLACE. Otherwise it is equivalent to looping over args with
execute().
|
[
"Execute",
"a",
"multi",
"-",
"row",
"query",
".",
"query",
"--",
"string",
"query",
"to",
"execute",
"on",
"server"
] |
python
|
test
|
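executemany is standard DB-API; a runnable illustration with the stdlib's sqlite3 (placeholder style differs: MySQLdb uses %s, sqlite3 uses ?):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE people (name TEXT, age INTEGER)")
cur = conn.executemany("INSERT INTO people (name, age) VALUES (?, ?)",
                       [("alice", 30), ("bob", 25)])
print(cur.rowcount)  # 2 rows affected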
dariusbakunas/rawdisk
|
rawdisk/filesystems/detector.py
|
https://github.com/dariusbakunas/rawdisk/blob/1dc9d0b377fe5da3c406ccec4abc238c54167403/rawdisk/filesystems/detector.py#L67-L77
|
def register_mbr_plugin(self, fs_id, plugin):
"""Used in plugin's registration routine,
to associate its detection method with given filesystem id
Args:
fs_id: filesystem id that is read from MBR partition entry
plugin: plugin that supports this filesystem
"""
self.logger.debug('MBR: {}, FS ID: {}'
.format(self.__get_plugin_name(plugin), fs_id))
self.__mbr_plugins[fs_id].append(plugin)
|
[
"def",
"register_mbr_plugin",
"(",
"self",
",",
"fs_id",
",",
"plugin",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"'MBR: {}, FS ID: {}'",
".",
"format",
"(",
"self",
".",
"__get_plugin_name",
"(",
"plugin",
")",
",",
"fs_id",
")",
")",
"self",
".",
"__mbr_plugins",
"[",
"fs_id",
"]",
".",
"append",
"(",
"plugin",
")"
] |
Used in plugin's registration routine,
to associate its detection method with given filesystem id
Args:
fs_id: filesystem id that is read from MBR partition entry
plugin: plugin that supports this filesystem
|
[
"Used",
"in",
"plugin",
"s",
"registration",
"routine",
"to",
"associate",
"it",
"s",
"detection",
"method",
"with",
"given",
"filesystem",
"id"
] |
python
|
train
|
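The __mbr_plugins mapping appended to above behaves like a defaultdict(list); the registry pattern in isolation (Detector here is a stub, not rawdisk's class):

from collections import defaultdict

class Detector:
    def __init__(self):
        self._mbr_plugins = defaultdict(list)
    def register_mbr_plugin(self, fs_id, plugin):
        self._mbr_plugins[fs_id].append(plugin)

d = Detector()
d.register_mbr_plugin(0x07, "ntfs_plugin")
print(d._mbr_plugins[0x07])  # ['ntfs_plugin']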
Hackerfleet/hfos
|
modules/library/hfos/library/manager.py
|
https://github.com/Hackerfleet/hfos/blob/b6df14eacaffb6be5c844108873ff8763ec7f0c9/modules/library/hfos/library/manager.py#L141-L215
|
def _augment_book(self, uuid, event):
"""
Checks if the newly created object is a book and only has an ISBN.
If so, tries to fetch the book data off the internet.
:param uuid: uuid of book to augment
:param event: the event from the requesting client
"""
try:
if not isbnmeta:
self.log(
"No isbntools found! Install it to get full "
"functionality!",
lvl=warn)
return
new_book = objectmodels['book'].find_one({'uuid': uuid})
try:
if len(new_book.isbn) != 0:
self.log('Got a lookup candidate: ', new_book._fields)
try:
meta = isbnmeta(
new_book.isbn,
service=self.config.isbnservice
)
mapping = libraryfieldmapping[
self.config.isbnservice
]
new_meta = {}
for key in meta.keys():
if key in mapping:
if isinstance(mapping[key], tuple):
name, conv = mapping[key]
try:
new_meta[name] = conv(meta[key])
except ValueError:
self.log(
'Bad value from lookup:',
name, conv, key
)
else:
new_meta[mapping[key]] = meta[key]
new_book.update(new_meta)
new_book.save()
self._notify_result(event, new_book)
self.log("Book successfully augmented from ",
self.config.isbnservice)
except Exception as e:
self.log("Error during meta lookup: ", e, type(e),
new_book.isbn, lvl=error, exc=True)
error_response = {
'component': 'hfos.alert.manager',
'action': 'notify',
'data': {
'type': 'error',
'message': 'Could not look up metadata, sorry:' + str(e)
}
}
self.log(event, event.client, pretty=True)
self.fireEvent(send(event.client.uuid, error_response))
except Exception as e:
self.log("Error during book update.", e, type(e),
exc=True, lvl=error)
except Exception as e:
self.log("Book creation notification error: ", uuid, e, type(e),
lvl=error, exc=True)
|
[
"def",
"_augment_book",
"(",
"self",
",",
"uuid",
",",
"event",
")",
":",
"try",
":",
"if",
"not",
"isbnmeta",
":",
"self",
".",
"log",
"(",
"\"No isbntools found! Install it to get full \"",
"\"functionality!\"",
",",
"lvl",
"=",
"warn",
")",
"return",
"new_book",
"=",
"objectmodels",
"[",
"'book'",
"]",
".",
"find_one",
"(",
"{",
"'uuid'",
":",
"uuid",
"}",
")",
"try",
":",
"if",
"len",
"(",
"new_book",
".",
"isbn",
")",
"!=",
"0",
":",
"self",
".",
"log",
"(",
"'Got a lookup candidate: '",
",",
"new_book",
".",
"_fields",
")",
"try",
":",
"meta",
"=",
"isbnmeta",
"(",
"new_book",
".",
"isbn",
",",
"service",
"=",
"self",
".",
"config",
".",
"isbnservice",
")",
"mapping",
"=",
"libraryfieldmapping",
"[",
"self",
".",
"config",
".",
"isbnservice",
"]",
"new_meta",
"=",
"{",
"}",
"for",
"key",
"in",
"meta",
".",
"keys",
"(",
")",
":",
"if",
"key",
"in",
"mapping",
":",
"if",
"isinstance",
"(",
"mapping",
"[",
"key",
"]",
",",
"tuple",
")",
":",
"name",
",",
"conv",
"=",
"mapping",
"[",
"key",
"]",
"try",
":",
"new_meta",
"[",
"name",
"]",
"=",
"conv",
"(",
"meta",
"[",
"key",
"]",
")",
"except",
"ValueError",
":",
"self",
".",
"log",
"(",
"'Bad value from lookup:'",
",",
"name",
",",
"conv",
",",
"key",
")",
"else",
":",
"new_meta",
"[",
"mapping",
"[",
"key",
"]",
"]",
"=",
"meta",
"[",
"key",
"]",
"new_book",
".",
"update",
"(",
"new_meta",
")",
"new_book",
".",
"save",
"(",
")",
"self",
".",
"_notify_result",
"(",
"event",
",",
"new_book",
")",
"self",
".",
"log",
"(",
"\"Book successfully augmented from \"",
",",
"self",
".",
"config",
".",
"isbnservice",
")",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"log",
"(",
"\"Error during meta lookup: \"",
",",
"e",
",",
"type",
"(",
"e",
")",
",",
"new_book",
".",
"isbn",
",",
"lvl",
"=",
"error",
",",
"exc",
"=",
"True",
")",
"error_response",
"=",
"{",
"'component'",
":",
"'hfos.alert.manager'",
",",
"'action'",
":",
"'notify'",
",",
"'data'",
":",
"{",
"'type'",
":",
"'error'",
",",
"'message'",
":",
"'Could not look up metadata, sorry:'",
"+",
"str",
"(",
"e",
")",
"}",
"}",
"self",
".",
"log",
"(",
"event",
",",
"event",
".",
"client",
",",
"pretty",
"=",
"True",
")",
"self",
".",
"fireEvent",
"(",
"send",
"(",
"event",
".",
"client",
".",
"uuid",
",",
"error_response",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"log",
"(",
"\"Error during book update.\"",
",",
"e",
",",
"type",
"(",
"e",
")",
",",
"exc",
"=",
"True",
",",
"lvl",
"=",
"error",
")",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"log",
"(",
"\"Book creation notification error: \"",
",",
"uuid",
",",
"e",
",",
"type",
"(",
"e",
")",
",",
"lvl",
"=",
"error",
",",
"exc",
"=",
"True",
")"
] |
Checks if the newly created object is a book and only has an ISBN.
If so, tries to fetch the book data off the internet.
:param uuid: uuid of book to augment
:param event: the event from the requesting client
|
[
"Checks",
"if",
"the",
"newly",
"created",
"object",
"is",
"a",
"book",
"and",
"only",
"has",
"an",
"ISBN",
".",
"If",
"so",
"tries",
"to",
"fetch",
"the",
"book",
"data",
"off",
"the",
"internet",
"."
] |
python
|
train
|
saltstack/salt
|
salt/modules/win_iis.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_iis.py#L1547-L1586
|
def list_vdirs(site, app=_DEFAULT_APP):
'''
Get all configured IIS virtual directories for the specified site, or for
the combination of site and application.
Args:
site (str): The IIS site name.
app (str): The IIS application.
Returns:
dict: A dictionary of the virtual directory names and properties.
CLI Example:
.. code-block:: bash
salt '*' win_iis.list_vdirs site
'''
ret = dict()
ps_cmd = ['Get-WebVirtualDirectory',
'-Site', r"'{0}'".format(site),
'-Application', r"'{0}'".format(app),
'|', "Select-Object PhysicalPath, @{ Name = 'name';",
r"Expression = { $_.path.Split('/')[-1] } }"]
cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True)
try:
items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)
except ValueError:
raise CommandExecutionError('Unable to parse return data as Json.')
for item in items:
ret[item['name']] = {'sourcepath': item['physicalPath']}
if not ret:
log.warning('No vdirs found in output: %s', cmd_ret)
return ret
|
[
"def",
"list_vdirs",
"(",
"site",
",",
"app",
"=",
"_DEFAULT_APP",
")",
":",
"ret",
"=",
"dict",
"(",
")",
"ps_cmd",
"=",
"[",
"'Get-WebVirtualDirectory'",
",",
"'-Site'",
",",
"r\"'{0}'\"",
".",
"format",
"(",
"site",
")",
",",
"'-Application'",
",",
"r\"'{0}'\"",
".",
"format",
"(",
"app",
")",
",",
"'|'",
",",
"\"Select-Object PhysicalPath, @{ Name = 'name';\"",
",",
"r\"Expression = { $_.path.Split('/')[-1] } }\"",
"]",
"cmd_ret",
"=",
"_srvmgr",
"(",
"cmd",
"=",
"ps_cmd",
",",
"return_json",
"=",
"True",
")",
"try",
":",
"items",
"=",
"salt",
".",
"utils",
".",
"json",
".",
"loads",
"(",
"cmd_ret",
"[",
"'stdout'",
"]",
",",
"strict",
"=",
"False",
")",
"except",
"ValueError",
":",
"raise",
"CommandExecutionError",
"(",
"'Unable to parse return data as Json.'",
")",
"for",
"item",
"in",
"items",
":",
"ret",
"[",
"item",
"[",
"'name'",
"]",
"]",
"=",
"{",
"'sourcepath'",
":",
"item",
"[",
"'physicalPath'",
"]",
"}",
"if",
"not",
"ret",
":",
"log",
".",
"warning",
"(",
"'No vdirs found in output: %s'",
",",
"cmd_ret",
")",
"return",
"ret"
] |
Get all configured IIS virtual directories for the specified site, or for
the combination of site and application.
Args:
site (str): The IIS site name.
app (str): The IIS application.
Returns:
dict: A dictionary of the virtual directory names and properties.
CLI Example:
.. code-block:: bash
salt '*' win_iis.list_vdirs site
|
[
"Get",
"all",
"configured",
"IIS",
"virtual",
"directories",
"for",
"the",
"specified",
"site",
"or",
"for",
"the",
"combination",
"of",
"site",
"and",
"application",
"."
] |
python
|
train
|
h2oai/h2o-3
|
h2o-py/h2o/frame.py
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/frame.py#L2988-L2995
|
def signif(self, digits=6):
"""
Round doubles/floats to the given number of significant digits.
:param int digits: Number of significant digits to retain.
:returns: new H2OFrame with rounded values from the original frame.
"""
return H2OFrame._expr(expr=ExprNode("signif", self, digits), cache=self._ex._cache)
|
[
"def",
"signif",
"(",
"self",
",",
"digits",
"=",
"6",
")",
":",
"return",
"H2OFrame",
".",
"_expr",
"(",
"expr",
"=",
"ExprNode",
"(",
"\"signif\"",
",",
"self",
",",
"digits",
")",
",",
"cache",
"=",
"self",
".",
"_ex",
".",
"_cache",
")"
] |
Round doubles/floats to the given number of significant digits.
:param int digits: Number of significant digits to retain.
:returns: new H2OFrame with rounded values from the original frame.
|
[
"Round",
"doubles",
"/",
"floats",
"to",
"the",
"given",
"number",
"of",
"significant",
"digits",
"."
] |
python
|
test
|
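What signif computes, restated in plain Python: rounding to significant digits rather than decimal places (a reference sketch, not H2O's rapids implementation):

import math

def signif(x, digits=6):
    if x == 0:
        return 0.0
    return round(x, digits - 1 - int(math.floor(math.log10(abs(x)))))

print(signif(123456.789, 3))  # 123000.0
print(signif(0.00123456, 3))  # 0.00123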
seleniumbase/SeleniumBase
|
seleniumbase/fixtures/base_case.py
|
https://github.com/seleniumbase/SeleniumBase/blob/62e5b43ee1f90a9ed923841bdd53b1b38358f43a/seleniumbase/fixtures/base_case.py#L2344-L2350
|
def find_partial_link_text(self, partial_link_text,
timeout=settings.LARGE_TIMEOUT):
""" Same as wait_for_partial_link_text() - returns the element """
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return self.wait_for_partial_link_text(
partial_link_text, timeout=timeout)
|
[
"def",
"find_partial_link_text",
"(",
"self",
",",
"partial_link_text",
",",
"timeout",
"=",
"settings",
".",
"LARGE_TIMEOUT",
")",
":",
"if",
"self",
".",
"timeout_multiplier",
"and",
"timeout",
"==",
"settings",
".",
"LARGE_TIMEOUT",
":",
"timeout",
"=",
"self",
".",
"__get_new_timeout",
"(",
"timeout",
")",
"return",
"self",
".",
"wait_for_partial_link_text",
"(",
"partial_link_text",
",",
"timeout",
"=",
"timeout",
")"
] |
Same as wait_for_partial_link_text() - returns the element
|
[
"Same",
"as",
"wait_for_partial_link_text",
"()",
"-",
"returns",
"the",
"element"
] |
python
|
train
|
gwastro/pycbc
|
pycbc/inference/sampler/emcee_pt.py
|
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/sampler/emcee_pt.py#L119-L172
|
def from_config(cls, cp, model, nprocesses=1, use_mpi=False):
"""
Loads the sampler from the given config file.
For generating the temperature ladder to be used by emcee_pt, either
the number of temperatures (provided by the option 'ntemps'),
or the path to a file storing inverse temperature values (provided
under a subsection inverse-temperatures-file) can be loaded from the
config file. If the latter, the file should be of hdf format, having
an attribute named 'betas' storing the list of inverse temperature
values to be provided to emcee_pt. If the former, emcee_pt will
construct the ladder with "ntemps" geometrically spaced temperatures.
"""
section = "sampler"
# check name
assert cp.get(section, "name") == cls.name, (
"name in section [sampler] must match mine")
# get the number of walkers to use
nwalkers = int(cp.get(section, "nwalkers"))
if cp.has_option(section, "ntemps") and \
cp.has_option(section, "inverse-temperatures-file"):
raise ValueError("Must specify either ntemps or "
"inverse-temperatures-file, not both.")
if cp.has_option(section, "inverse-temperatures-file"):
# get the path of the file containing inverse temperatures values.
inverse_temperatures_file = cp.get(section,
"inverse-temperatures-file")
with h5py.File(inverse_temperatures_file, "r") as fp:
try:
betas = numpy.array(fp.attrs['betas'])
ntemps = betas.shape[0]
except KeyError:
raise AttributeError("No attribute called betas")
else:
# get the number of temperatures
betas = None
ntemps = int(cp.get(section, "ntemps"))
# get the checkpoint interval, if it's specified
checkpoint_interval = cls.checkpoint_from_config(cp, section)
checkpoint_signal = cls.ckpt_signal_from_config(cp, section)
# get the loglikelihood function
logl = get_optional_arg_from_config(cp, section, 'logl-function')
obj = cls(model, ntemps, nwalkers, betas=betas,
checkpoint_interval=checkpoint_interval,
checkpoint_signal=checkpoint_signal,
loglikelihood_function=logl, nprocesses=nprocesses,
use_mpi=use_mpi)
# set target
obj.set_target_from_config(cp, section)
# add burn-in if it's specified
obj.set_burn_in_from_config(cp)
# set prethin options
obj.set_thin_interval_from_config(cp, section)
return obj
|
[
"def",
"from_config",
"(",
"cls",
",",
"cp",
",",
"model",
",",
"nprocesses",
"=",
"1",
",",
"use_mpi",
"=",
"False",
")",
":",
"section",
"=",
"\"sampler\"",
"# check name",
"assert",
"cp",
".",
"get",
"(",
"section",
",",
"\"name\"",
")",
"==",
"cls",
".",
"name",
",",
"(",
"\"name in section [sampler] must match mine\"",
")",
"# get the number of walkers to use",
"nwalkers",
"=",
"int",
"(",
"cp",
".",
"get",
"(",
"section",
",",
"\"nwalkers\"",
")",
")",
"if",
"cp",
".",
"has_option",
"(",
"section",
",",
"\"ntemps\"",
")",
"and",
"cp",
".",
"has_option",
"(",
"section",
",",
"\"inverse-temperatures-file\"",
")",
":",
"raise",
"ValueError",
"(",
"\"Must specify either ntemps or \"",
"\"inverse-temperatures-file, not both.\"",
")",
"if",
"cp",
".",
"has_option",
"(",
"section",
",",
"\"inverse-temperatures-file\"",
")",
":",
"# get the path of the file containing inverse temperatures values.",
"inverse_temperatures_file",
"=",
"cp",
".",
"get",
"(",
"section",
",",
"\"inverse-temperatures-file\"",
")",
"with",
"h5py",
".",
"File",
"(",
"inverse_temperatures_file",
",",
"\"r\"",
")",
"as",
"fp",
":",
"try",
":",
"betas",
"=",
"numpy",
".",
"array",
"(",
"fp",
".",
"attrs",
"[",
"'betas'",
"]",
")",
"ntemps",
"=",
"betas",
".",
"shape",
"[",
"0",
"]",
"except",
"KeyError",
":",
"raise",
"AttributeError",
"(",
"\"No attribute called betas\"",
")",
"else",
":",
"# get the number of temperatures",
"betas",
"=",
"None",
"ntemps",
"=",
"int",
"(",
"cp",
".",
"get",
"(",
"section",
",",
"\"ntemps\"",
")",
")",
"# get the checkpoint interval, if it's specified",
"checkpoint_interval",
"=",
"cls",
".",
"checkpoint_from_config",
"(",
"cp",
",",
"section",
")",
"checkpoint_signal",
"=",
"cls",
".",
"ckpt_signal_from_config",
"(",
"cp",
",",
"section",
")",
"# get the loglikelihood function",
"logl",
"=",
"get_optional_arg_from_config",
"(",
"cp",
",",
"section",
",",
"'logl-function'",
")",
"obj",
"=",
"cls",
"(",
"model",
",",
"ntemps",
",",
"nwalkers",
",",
"betas",
"=",
"betas",
",",
"checkpoint_interval",
"=",
"checkpoint_interval",
",",
"checkpoint_signal",
"=",
"checkpoint_signal",
",",
"loglikelihood_function",
"=",
"logl",
",",
"nprocesses",
"=",
"nprocesses",
",",
"use_mpi",
"=",
"use_mpi",
")",
"# set target",
"obj",
".",
"set_target_from_config",
"(",
"cp",
",",
"section",
")",
"# add burn-in if it's specified",
"obj",
".",
"set_burn_in_from_config",
"(",
"cp",
")",
"# set prethin options",
"obj",
".",
"set_thin_interval_from_config",
"(",
"cp",
",",
"section",
")",
"return",
"obj"
] |
Loads the sampler from the given config file.
For generating the temperature ladder to be used by emcee_pt, either
the number of temperatures (provided by the option 'ntemps'),
or the path to a file storing inverse temperature values (provided
under a subsection inverse-temperatures-file) can be loaded from the
config file. If the latter, the file should be of hdf format, having
an attribute named 'betas' storing the list of inverse temperature
values to be provided to emcee_pt. If the former, emcee_pt will
construct the ladder with "ntemps" geometrically spaced temperatures.
|
[
"Loads",
"the",
"sampler",
"from",
"the",
"given",
"config",
"file",
"."
] |
python
|
train
|
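A minimal sketch of the kind of [sampler] section the from_config method above reads. Only the name, nwalkers and ntemps options are taken from the code; parsing with the stdlib ConfigParser (rather than pycbc's own config class) and the literal values are assumptions for illustration.

    from configparser import ConfigParser

    cp = ConfigParser()
    cp.read_string("""
    [sampler]
    name = emcee_pt
    nwalkers = 200
    ntemps = 4
    """)
    # the same lookups from_config performs on these options
    assert cp.get("sampler", "name") == "emcee_pt"
    print(int(cp.get("sampler", "nwalkers")), int(cp.get("sampler", "ntemps")))  # 200 4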
CamDavidsonPilon/lifelines
|
lifelines/utils/__init__.py
|
https://github.com/CamDavidsonPilon/lifelines/blob/bdf6be6f1d10eea4c46365ee0ee6a47d8c30edf8/lifelines/utils/__init__.py#L110-L145
|
def qth_survival_time(q, survival_function, cdf=False):
"""
Returns the time when a single survival function reaches the qth percentile.
Parameters
----------
q: float
a float between 0 and 1 that represents the time when the survival function hits the qth percentile.
survival_function: Series or single-column DataFrame.
cdf: boolean, optional
When doing left-censored data, cdf=True is used.
Returns
-------
float
See Also
--------
qth_survival_times, median_survival_times
"""
if type(survival_function) is pd.DataFrame: # pylint: disable=unidiomatic-typecheck
if survival_function.shape[1] > 1:
raise ValueError(
"Expecting a dataframe (or series) with a single column. Provide that or use utils.qth_survival_times."
)
survival_function = survival_function.T.squeeze()
if cdf:
if survival_function.iloc[0] > q:
return -np.inf
v = survival_function.index[survival_function.searchsorted([q])[0]]
else:
if survival_function.iloc[-1] > q:
return np.inf
v = survival_function.index[(-survival_function).searchsorted([-q])[0]]
return v
|
[
"def",
"qth_survival_time",
"(",
"q",
",",
"survival_function",
",",
"cdf",
"=",
"False",
")",
":",
"if",
"type",
"(",
"survival_function",
")",
"is",
"pd",
".",
"DataFrame",
":",
"# pylint: disable=unidiomatic-typecheck",
"if",
"survival_function",
".",
"shape",
"[",
"1",
"]",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"Expecting a dataframe (or series) with a single column. Provide that or use utils.qth_survival_times.\"",
")",
"survival_function",
"=",
"survival_function",
".",
"T",
".",
"squeeze",
"(",
")",
"if",
"cdf",
":",
"if",
"survival_function",
".",
"iloc",
"[",
"0",
"]",
">",
"q",
":",
"return",
"-",
"np",
".",
"inf",
"v",
"=",
"survival_function",
".",
"index",
"[",
"survival_function",
".",
"searchsorted",
"(",
"[",
"q",
"]",
")",
"[",
"0",
"]",
"]",
"else",
":",
"if",
"survival_function",
".",
"iloc",
"[",
"-",
"1",
"]",
">",
"q",
":",
"return",
"np",
".",
"inf",
"v",
"=",
"survival_function",
".",
"index",
"[",
"(",
"-",
"survival_function",
")",
".",
"searchsorted",
"(",
"[",
"-",
"q",
"]",
")",
"[",
"0",
"]",
"]",
"return",
"v"
] |
Returns the time when a single survival function reaches the qth percentile.
Parameters
----------
q: float
a float between 0 and 1 that represents the time when the survival function hits the qth percentile.
survival_function: Series or single-column DataFrame.
cdf: boolean, optional
When doing left-censored data, cdf=True is used.
Returns
-------
float
See Also
--------
qth_survival_times, median_survival_times
|
[
"Returns",
"the",
"time",
"when",
"a",
"single",
"survival",
"function",
"reaches",
"the",
"qth",
"percentile",
"."
] |
python
|
train
|
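A short usage sketch of qth_survival_time above; the survival curve values are made up for illustration.

    import pandas as pd
    from lifelines.utils import qth_survival_time

    sf = pd.Series([1.0, 0.8, 0.5, 0.2], index=[0, 1, 2, 3])  # S(t) indexed by time t
    print(qth_survival_time(0.5, sf))   # 2: the first time S(t) drops to 0.5
    print(qth_survival_time(0.05, sf))  # inf: the curve never falls below 0.2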
wandb/client
|
wandb/vendor/prompt_toolkit/contrib/telnet/server.py
|
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/contrib/telnet/server.py#L240-L246
|
def erase_screen(self):
"""
Erase output screen.
"""
self.vt100_output.erase_screen()
self.vt100_output.cursor_goto(0, 0)
self.vt100_output.flush()
|
[
"def",
"erase_screen",
"(",
"self",
")",
":",
"self",
".",
"vt100_output",
".",
"erase_screen",
"(",
")",
"self",
".",
"vt100_output",
".",
"cursor_goto",
"(",
"0",
",",
"0",
")",
"self",
".",
"vt100_output",
".",
"flush",
"(",
")"
] |
Erase output screen.
|
[
"Erase",
"output",
"screen",
"."
] |
python
|
train
|
visualfabriq/bquery
|
bquery/ctable.py
|
https://github.com/visualfabriq/bquery/blob/3702e974696e22876944a3339affad2f29e1ee06/bquery/ctable.py#L576-L654
|
def create_agg_ctable(self, groupby_cols, agg_list, expectedlen, rootdir):
'''Create a container for the output table, a dictionary describing its
columns and a list of tuples describing aggregation
operations to perform.
Args:
groupby_cols (list): a list of columns to groupby over
agg_list (list): the aggregation operations (see groupby for more info)
expectedlen (int): expected length of output table
rootdir (string): the directory to write the table to
Returns:
ctable: A table in the correct format for containing the output of
the specified aggregation operations.
dict: (dtype_dict) dictionary describing columns to create
list: (agg_ops) list of tuples of the form:
(input_col_name, output_col_name, agg_op)
input_col_name (string): name of the column to act on
output_col_name (string): name of the column to output to
agg_op (string): aggregation operation to perform
'''
dtype_dict = {}
# include all the groupby columns
for col in groupby_cols:
dtype_dict[col] = self[col].dtype
agg_ops_list = ['sum', 'count', 'count_distinct', 'sorted_count_distinct', 'mean', 'std']
agg_ops = []
for agg_info in agg_list:
if not isinstance(agg_info, list):
# example: ['m1', 'm2', ...]
# default operation (sum) and default output column name (same as input)
output_col_name = agg_info
input_col_name = agg_info
agg_op = 'sum'
else:
input_col_name = agg_info[0]
agg_op = agg_info[1]
if len(agg_info) == 2:
# example: [['m1', 'sum'], ['m2', 'mean], ...]
# default output column name
output_col_name = input_col_name
else:
# example: [['m1', 'sum', 'mnew1'], ['m1, 'mean','mnew2'], ...]
# fully specified
output_col_name = agg_info[2]
if agg_op not in agg_ops_list:
raise NotImplementedError(
'Unknown Aggregation Type: ' + str(agg_op))
# choose output column dtype based on aggregation operation and
# input column dtype
# TODO: check if the aggregation columns are numeric
# NB: we could build a concatenation for strings like pandas, but I would really prefer to see that as a
# separate operation
if agg_op in ('count', 'count_distinct', 'sorted_count_distinct'):
output_col_dtype = np.dtype(np.int64)
elif agg_op in ('mean', 'std'):
output_col_dtype = np.dtype(np.float64)
else:
output_col_dtype = self[input_col_name].dtype
dtype_dict[output_col_name] = output_col_dtype
# save output
agg_ops.append((input_col_name, output_col_name, agg_op))
# create aggregation table
ct_agg = bcolz.ctable(
np.zeros(expectedlen, [('tmp_col_bquery__', np.bool)]),
expectedlen=expectedlen,
rootdir=rootdir)
return ct_agg, dtype_dict, agg_ops
|
[
"def",
"create_agg_ctable",
"(",
"self",
",",
"groupby_cols",
",",
"agg_list",
",",
"expectedlen",
",",
"rootdir",
")",
":",
"dtype_dict",
"=",
"{",
"}",
"# include all the groupby columns",
"for",
"col",
"in",
"groupby_cols",
":",
"dtype_dict",
"[",
"col",
"]",
"=",
"self",
"[",
"col",
"]",
".",
"dtype",
"agg_ops_list",
"=",
"[",
"'sum'",
",",
"'count'",
",",
"'count_distinct'",
",",
"'sorted_count_distinct'",
",",
"'mean'",
",",
"'std'",
"]",
"agg_ops",
"=",
"[",
"]",
"for",
"agg_info",
"in",
"agg_list",
":",
"if",
"not",
"isinstance",
"(",
"agg_info",
",",
"list",
")",
":",
"# example: ['m1', 'm2', ...]",
"# default operation (sum) and default output column name (same is input)",
"output_col_name",
"=",
"agg_info",
"input_col_name",
"=",
"agg_info",
"agg_op",
"=",
"'sum'",
"else",
":",
"input_col_name",
"=",
"agg_info",
"[",
"0",
"]",
"agg_op",
"=",
"agg_info",
"[",
"1",
"]",
"if",
"len",
"(",
"agg_info",
")",
"==",
"2",
":",
"# example: [['m1', 'sum'], ['m2', 'mean], ...]",
"# default output column name",
"output_col_name",
"=",
"input_col_name",
"else",
":",
"# example: [['m1', 'sum', 'mnew1'], ['m1, 'mean','mnew2'], ...]",
"# fully specified",
"output_col_name",
"=",
"agg_info",
"[",
"2",
"]",
"if",
"agg_op",
"not",
"in",
"agg_ops_list",
":",
"raise",
"NotImplementedError",
"(",
"'Unknown Aggregation Type: '",
"+",
"str",
"(",
"agg_op",
")",
")",
"# choose output column dtype based on aggregation operation and",
"# input column dtype",
"# TODO: check if the aggregation columns is numeric",
"# NB: we could build a concatenation for strings like pandas, but I would really prefer to see that as a",
"# separate operation",
"if",
"agg_op",
"in",
"(",
"'count'",
",",
"'count_distinct'",
",",
"'sorted_count_distinct'",
")",
":",
"output_col_dtype",
"=",
"np",
".",
"dtype",
"(",
"np",
".",
"int64",
")",
"elif",
"agg_op",
"in",
"(",
"'mean'",
",",
"'std'",
")",
":",
"output_col_dtype",
"=",
"np",
".",
"dtype",
"(",
"np",
".",
"float64",
")",
"else",
":",
"output_col_dtype",
"=",
"self",
"[",
"input_col_name",
"]",
".",
"dtype",
"dtype_dict",
"[",
"output_col_name",
"]",
"=",
"output_col_dtype",
"# save output",
"agg_ops",
".",
"append",
"(",
"(",
"input_col_name",
",",
"output_col_name",
",",
"agg_op",
")",
")",
"# create aggregation table",
"ct_agg",
"=",
"bcolz",
".",
"ctable",
"(",
"np",
".",
"zeros",
"(",
"expectedlen",
",",
"[",
"(",
"'tmp_col_bquery__'",
",",
"np",
".",
"bool",
")",
"]",
")",
",",
"expectedlen",
"=",
"expectedlen",
",",
"rootdir",
"=",
"rootdir",
")",
"return",
"ct_agg",
",",
"dtype_dict",
",",
"agg_ops"
] |
Create a container for the output table, a dictionary describing its
columns and a list of tuples describing aggregation
operations to perform.
Args:
groupby_cols (list): a list of columns to groupby over
agg_list (list): the aggregation operations (see groupby for more info)
expectedlen (int): expected length of output table
rootdir (string): the directory to write the table to
Returns:
ctable: A table in the correct format for containing the output of
the specified aggregation operations.
dict: (dtype_dict) dictionary describing columns to create
list: (agg_ops) list of tuples of the form:
(input_col_name, output_col_name, agg_op)
input_col_name (string): name of the column to act on
output_col_name (string): name of the column to output to
agg_op (string): aggregation operation to perform
|
[
"Create",
"a",
"container",
"for",
"the",
"output",
"table",
"a",
"dictionary",
"describing",
"it",
"s",
"columns",
"and",
"a",
"list",
"of",
"tuples",
"describing",
"aggregation",
"operations",
"to",
"perform",
"."
] |
python
|
train
|
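For reference, the three agg_list shapes that the parsing branches above accept, with hypothetical column names:

    agg_list_short = ['m1', 'm2']                     # default op (sum), output name same as input
    agg_list_op    = [['m1', 'sum'], ['m2', 'mean']]  # explicit op, default output name
    agg_list_full  = [['m1', 'sum', 'm1_total'],      # fully specified:
                      ['m1', 'mean', 'm1_avg']]       # (input column, op, output column)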
LonamiWebs/Telethon
|
telethon/client/updates.py
|
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/client/updates.py#L70-L101
|
def add_event_handler(self, callback, event=None):
"""
Registers the given callback to be called on the specified event.
Args:
callback (`callable`):
The callable function accepting one parameter to be used.
Note that if you have used `telethon.events.register` in
the callback, ``event`` will be ignored, and instead the
events you previously registered will be used.
event (`_EventBuilder` | `type`, optional):
The event builder class or instance to be used,
for instance ``events.NewMessage``.
If left unspecified, `telethon.events.raw.Raw` (the
:tl:`Update` objects with no further processing) will
be passed instead.
"""
builders = events._get_handlers(callback)
if builders is not None:
for event in builders:
self._event_builders.append((event, callback))
return
if isinstance(event, type):
event = event()
elif not event:
event = events.Raw()
self._event_builders.append((event, callback))
|
[
"def",
"add_event_handler",
"(",
"self",
",",
"callback",
",",
"event",
"=",
"None",
")",
":",
"builders",
"=",
"events",
".",
"_get_handlers",
"(",
"callback",
")",
"if",
"builders",
"is",
"not",
"None",
":",
"for",
"event",
"in",
"builders",
":",
"self",
".",
"_event_builders",
".",
"append",
"(",
"(",
"event",
",",
"callback",
")",
")",
"return",
"if",
"isinstance",
"(",
"event",
",",
"type",
")",
":",
"event",
"=",
"event",
"(",
")",
"elif",
"not",
"event",
":",
"event",
"=",
"events",
".",
"Raw",
"(",
")",
"self",
".",
"_event_builders",
".",
"append",
"(",
"(",
"event",
",",
"callback",
")",
")"
] |
Registers the given callback to be called on the specified event.
Args:
callback (`callable`):
The callable function accepting one parameter to be used.
Note that if you have used `telethon.events.register` in
the callback, ``event`` will be ignored, and instead the
events you previously registered will be used.
event (`_EventBuilder` | `type`, optional):
The event builder class or instance to be used,
for instance ``events.NewMessage``.
If left unspecified, `telethon.events.raw.Raw` (the
:tl:`Update` objects with no further processing) will
be passed instead.
|
[
"Registers",
"the",
"given",
"callback",
"to",
"be",
"called",
"on",
"the",
"specified",
"event",
"."
] |
python
|
train
|
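A usage sketch for add_event_handler above. The session name, api_id and api_hash are placeholders; everything else follows the docstring.

    from telethon import TelegramClient, events

    client = TelegramClient('session', 12345, '0123456789abcdef0123456789abcdef')

    async def handler(event):
        print(event.raw_text)

    client.add_event_handler(handler, events.NewMessage)  # a class: it gets instantiated
    client.add_event_handler(handler)                     # no event: defaults to events.Raw()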
fumitoh/modelx
|
modelx/core/model.py
|
https://github.com/fumitoh/modelx/blob/0180da34d052c44fb94dab9e115e218bbebfc9c3/modelx/core/model.py#L191-L195
|
def clear_descendants(self, source, clear_source=True):
"""Clear values and nodes calculated from `source`."""
removed = self.cellgraph.clear_descendants(source, clear_source)
for node in removed:
del node[OBJ].data[node[KEY]]
|
[
"def",
"clear_descendants",
"(",
"self",
",",
"source",
",",
"clear_source",
"=",
"True",
")",
":",
"removed",
"=",
"self",
".",
"cellgraph",
".",
"clear_descendants",
"(",
"source",
",",
"clear_source",
")",
"for",
"node",
"in",
"removed",
":",
"del",
"node",
"[",
"OBJ",
"]",
".",
"data",
"[",
"node",
"[",
"KEY",
"]",
"]"
] |
Clear values and nodes calculated from `source`.
|
[
"Clear",
"values",
"and",
"nodes",
"calculated",
"from",
"source",
"."
] |
python
|
valid
|
saltstack/salt
|
salt/client/ssh/__init__.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/ssh/__init__.py#L1258-L1304
|
def shim_cmd(self, cmd_str, extension='py'):
'''
Run a shim command.
If tty is enabled, we must scp the shim to the target system and
execute it there
'''
if not self.tty and not self.winrm:
return self.shell.exec_cmd(cmd_str)
# Write the shim to a temporary file in the default temp directory
with tempfile.NamedTemporaryFile(mode='w+b',
prefix='shim_',
delete=False) as shim_tmp_file:
shim_tmp_file.write(salt.utils.stringutils.to_bytes(cmd_str))
# Copy shim to target system, under $HOME/.<randomized name>
target_shim_file = '.{0}.{1}'.format(
binascii.hexlify(os.urandom(6)).decode('ascii'),
extension
)
if self.winrm:
target_shim_file = saltwinshell.get_target_shim_file(self, target_shim_file)
self.shell.send(shim_tmp_file.name, target_shim_file, makedirs=True)
# Remove our shim file
try:
os.remove(shim_tmp_file.name)
except IOError:
pass
# Execute shim
if extension == 'ps1':
ret = self.shell.exec_cmd('"powershell {0}"'.format(target_shim_file))
else:
if not self.winrm:
ret = self.shell.exec_cmd('/bin/sh \'$HOME/{0}\''.format(target_shim_file))
else:
ret = saltwinshell.call_python(self, target_shim_file)
# Remove shim from target system
if not self.winrm:
self.shell.exec_cmd('rm \'$HOME/{0}\''.format(target_shim_file))
else:
self.shell.exec_cmd('del {0}'.format(target_shim_file))
return ret
|
[
"def",
"shim_cmd",
"(",
"self",
",",
"cmd_str",
",",
"extension",
"=",
"'py'",
")",
":",
"if",
"not",
"self",
".",
"tty",
"and",
"not",
"self",
".",
"winrm",
":",
"return",
"self",
".",
"shell",
".",
"exec_cmd",
"(",
"cmd_str",
")",
"# Write the shim to a temporary file in the default temp directory",
"with",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"mode",
"=",
"'w+b'",
",",
"prefix",
"=",
"'shim_'",
",",
"delete",
"=",
"False",
")",
"as",
"shim_tmp_file",
":",
"shim_tmp_file",
".",
"write",
"(",
"salt",
".",
"utils",
".",
"stringutils",
".",
"to_bytes",
"(",
"cmd_str",
")",
")",
"# Copy shim to target system, under $HOME/.<randomized name>",
"target_shim_file",
"=",
"'.{0}.{1}'",
".",
"format",
"(",
"binascii",
".",
"hexlify",
"(",
"os",
".",
"urandom",
"(",
"6",
")",
")",
".",
"decode",
"(",
"'ascii'",
")",
",",
"extension",
")",
"if",
"self",
".",
"winrm",
":",
"target_shim_file",
"=",
"saltwinshell",
".",
"get_target_shim_file",
"(",
"self",
",",
"target_shim_file",
")",
"self",
".",
"shell",
".",
"send",
"(",
"shim_tmp_file",
".",
"name",
",",
"target_shim_file",
",",
"makedirs",
"=",
"True",
")",
"# Remove our shim file",
"try",
":",
"os",
".",
"remove",
"(",
"shim_tmp_file",
".",
"name",
")",
"except",
"IOError",
":",
"pass",
"# Execute shim",
"if",
"extension",
"==",
"'ps1'",
":",
"ret",
"=",
"self",
".",
"shell",
".",
"exec_cmd",
"(",
"'\"powershell {0}\"'",
".",
"format",
"(",
"target_shim_file",
")",
")",
"else",
":",
"if",
"not",
"self",
".",
"winrm",
":",
"ret",
"=",
"self",
".",
"shell",
".",
"exec_cmd",
"(",
"'/bin/sh \\'$HOME/{0}\\''",
".",
"format",
"(",
"target_shim_file",
")",
")",
"else",
":",
"ret",
"=",
"saltwinshell",
".",
"call_python",
"(",
"self",
",",
"target_shim_file",
")",
"# Remove shim from target system",
"if",
"not",
"self",
".",
"winrm",
":",
"self",
".",
"shell",
".",
"exec_cmd",
"(",
"'rm \\'$HOME/{0}\\''",
".",
"format",
"(",
"target_shim_file",
")",
")",
"else",
":",
"self",
".",
"shell",
".",
"exec_cmd",
"(",
"'del {0}'",
".",
"format",
"(",
"target_shim_file",
")",
")",
"return",
"ret"
] |
Run a shim command.
If tty is enabled, we must scp the shim to the target system and
execute it there
|
[
"Run",
"a",
"shim",
"command",
"."
] |
python
|
train
|
tBuLi/symfit
|
symfit/contrib/interactive_guess/interactive_guess.py
|
https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/contrib/interactive_guess/interactive_guess.py#L117-L156
|
def _set_up_figure(self, x_mins, x_maxs, y_mins, y_maxs):
"""
Prepare the matplotlib figure: make all the subplots; adjust their
x and y range; plot the data; and plot a putative function.
"""
self.fig = plt.figure()
# Make room for the sliders:
bot = 0.1 + 0.05*len(self.model.params)
self.fig.subplots_adjust(bottom=bot)
# If these are not ints, matplotlib will crash and burn with an utterly
# vague error.
nrows = int(np.ceil(len(self._projections)**0.5))
ncols = int(np.ceil(len(self._projections)/nrows))
# Make all the subplots: set the x and y limits, scatter the data, and
# plot the putative function.
self._plots = {}
for plotnr, proj in enumerate(self._projections, 1):
x, y = proj
if Derivative(y, x) in self.model:
title_format = '$\\frac{{\\partial {dependant}}}{{\\partial {independant}}} = {expression}$'
else:
title_format = '${dependant}({independant}) = {expression}$'
plotlabel = title_format.format(
dependant=latex(y, mode='plain'),
independant=latex(x, mode='plain'),
expression=latex(self.model[y], mode='plain'))
ax = self.fig.add_subplot(ncols, nrows, plotnr,
label=plotlabel)
ax.set_title(ax.get_label())
ax.set_ylim(y_mins[y], y_maxs[y])
ax.set_xlim(x_mins[x], x_maxs[x])
ax.set_xlabel('${}$'.format(x))
ax.set_ylabel('${}$'.format(y))
self._plot_data(proj, ax)
plot = self._plot_model(proj, ax)
self._plots[proj] = plot
|
[
"def",
"_set_up_figure",
"(",
"self",
",",
"x_mins",
",",
"x_maxs",
",",
"y_mins",
",",
"y_maxs",
")",
":",
"self",
".",
"fig",
"=",
"plt",
".",
"figure",
"(",
")",
"# Make room for the sliders:",
"bot",
"=",
"0.1",
"+",
"0.05",
"*",
"len",
"(",
"self",
".",
"model",
".",
"params",
")",
"self",
".",
"fig",
".",
"subplots_adjust",
"(",
"bottom",
"=",
"bot",
")",
"# If these are not ints, matplotlib will crash and burn with an utterly",
"# vague error.",
"nrows",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"len",
"(",
"self",
".",
"_projections",
")",
"**",
"0.5",
")",
")",
"ncols",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"len",
"(",
"self",
".",
"_projections",
")",
"/",
"nrows",
")",
")",
"# Make all the subplots: set the x and y limits, scatter the data, and",
"# plot the putative function.",
"self",
".",
"_plots",
"=",
"{",
"}",
"for",
"plotnr",
",",
"proj",
"in",
"enumerate",
"(",
"self",
".",
"_projections",
",",
"1",
")",
":",
"x",
",",
"y",
"=",
"proj",
"if",
"Derivative",
"(",
"y",
",",
"x",
")",
"in",
"self",
".",
"model",
":",
"title_format",
"=",
"'$\\\\frac{{\\\\partial {dependant}}}{{\\\\partial {independant}}} = {expression}$'",
"else",
":",
"title_format",
"=",
"'${dependant}({independant}) = {expression}$'",
"plotlabel",
"=",
"title_format",
".",
"format",
"(",
"dependant",
"=",
"latex",
"(",
"y",
",",
"mode",
"=",
"'plain'",
")",
",",
"independant",
"=",
"latex",
"(",
"x",
",",
"mode",
"=",
"'plain'",
")",
",",
"expression",
"=",
"latex",
"(",
"self",
".",
"model",
"[",
"y",
"]",
",",
"mode",
"=",
"'plain'",
")",
")",
"ax",
"=",
"self",
".",
"fig",
".",
"add_subplot",
"(",
"ncols",
",",
"nrows",
",",
"plotnr",
",",
"label",
"=",
"plotlabel",
")",
"ax",
".",
"set_title",
"(",
"ax",
".",
"get_label",
"(",
")",
")",
"ax",
".",
"set_ylim",
"(",
"y_mins",
"[",
"y",
"]",
",",
"y_maxs",
"[",
"y",
"]",
")",
"ax",
".",
"set_xlim",
"(",
"x_mins",
"[",
"x",
"]",
",",
"x_maxs",
"[",
"x",
"]",
")",
"ax",
".",
"set_xlabel",
"(",
"'${}$'",
".",
"format",
"(",
"x",
")",
")",
"ax",
".",
"set_ylabel",
"(",
"'${}$'",
".",
"format",
"(",
"y",
")",
")",
"self",
".",
"_plot_data",
"(",
"proj",
",",
"ax",
")",
"plot",
"=",
"self",
".",
"_plot_model",
"(",
"proj",
",",
"ax",
")",
"self",
".",
"_plots",
"[",
"proj",
"]",
"=",
"plot"
] |
Prepare the matplotlib figure: make all the subplots; adjust their
x and y range; plot the data; and plot a putative function.
|
[
"Prepare",
"the",
"matplotlib",
"figure",
":",
"make",
"all",
"the",
"subplots",
";",
"adjust",
"their",
"x",
"and",
"y",
"range",
";",
"plot",
"the",
"data",
";",
"and",
"plot",
"an",
"putative",
"function",
"."
] |
python
|
train
|
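The nrows/ncols arithmetic above packs the projections into a near-square grid; a standalone check of the formula:

    import numpy as np

    for n in (1, 2, 3, 4, 5, 9):
        nrows = int(np.ceil(n ** 0.5))
        ncols = int(np.ceil(n / nrows))
        assert nrows * ncols >= n  # every projection gets a subplot
        print(n, '->', nrows, 'x', ncols)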
ibis-project/ibis
|
ibis/sql/sqlite/client.py
|
https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/sql/sqlite/client.py#L302-L311
|
def _register_aggregate(agg, con):
"""Register a Python class that performs aggregation in SQLite.
Parameters
----------
agg : type
con : sqlalchemy.Connection
"""
nargs = number_of_arguments(agg.step) - 1 # because self
con.connection.connection.create_aggregate(agg.__name__, nargs, agg)
|
[
"def",
"_register_aggregate",
"(",
"agg",
",",
"con",
")",
":",
"nargs",
"=",
"number_of_arguments",
"(",
"agg",
".",
"step",
")",
"-",
"1",
"# because self",
"con",
".",
"connection",
".",
"connection",
".",
"create_aggregate",
"(",
"agg",
".",
"__name__",
",",
"nargs",
",",
"agg",
")"
] |
Register a Python class that performs aggregation in SQLite.
Parameters
----------
agg : type
con : sqlalchemy.Connection
|
[
"Register",
"a",
"Python",
"class",
"that",
"performs",
"aggregation",
"in",
"SQLite",
"."
] |
python
|
train
|
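The aggregate classes that _register_aggregate wires up follow SQLite's step()/finalize() protocol. A self-contained sketch using the sqlite3 stdlib directly (the ibis/sqlalchemy connection plumbing is omitted; the Product class is hypothetical):

    import sqlite3

    class Product:
        def __init__(self):
            self.acc = 1
        def step(self, value):  # number_of_arguments(step) - 1 == one SQL argument
            self.acc *= value
        def finalize(self):
            return self.acc

    con = sqlite3.connect(':memory:')
    con.create_aggregate(Product.__name__, 1, Product)
    row = con.execute('SELECT Product(x) FROM (SELECT 2 AS x UNION ALL SELECT 3)').fetchone()
    print(row)  # (6,)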
LuminosoInsight/wordfreq
|
wordfreq/__init__.py
|
https://github.com/LuminosoInsight/wordfreq/blob/170e3c6536854b06dc63da8d873e8cc4f9ef6180/wordfreq/__init__.py#L357-L369
|
def random_ascii_words(lang='en', wordlist='best', nwords=5,
bits_per_word=12):
"""
Returns a string of random, space separated, ASCII words.
These words are of the given language and from the given wordlist.
There will be `nwords` words in the string.
`bits_per_word` determines the amount of entropy provided by each word;
when it's higher, this function will choose from a larger list of
words, some of which are more rare.
"""
return random_words(lang, wordlist, nwords, bits_per_word, ascii_only=True)
|
[
"def",
"random_ascii_words",
"(",
"lang",
"=",
"'en'",
",",
"wordlist",
"=",
"'best'",
",",
"nwords",
"=",
"5",
",",
"bits_per_word",
"=",
"12",
")",
":",
"return",
"random_words",
"(",
"lang",
",",
"wordlist",
",",
"nwords",
",",
"bits_per_word",
",",
"ascii_only",
"=",
"True",
")"
] |
Returns a string of random, space separated, ASCII words.
These words are of the given language and from the given wordlist.
There will be `nwords` words in the string.
`bits_per_word` determines the amount of entropy provided by each word;
when it's higher, this function will choose from a larger list of
words, some of which are more rare.
|
[
"Returns",
"a",
"string",
"of",
"random",
"space",
"separated",
"ASCII",
"words",
"."
] |
python
|
train
|
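Usage sketch for random_ascii_words above; the printed phrase varies per call and the example output is invented.

    from wordfreq import random_ascii_words

    print(random_ascii_words(lang='en', nwords=4, bits_per_word=11))
    # e.g. 'sample words appear here' (actual words depend on the wordlist and RNG)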
simonvh/norns
|
norns/cfg.py
|
https://github.com/simonvh/norns/blob/81db0004c558f91479176daf1918b8c9473b5ee2/norns/cfg.py#L60-L73
|
def load(self, path):
"""
Load yaml-formatted config file.
Parameters
----------
path : str
path to config file
"""
with open(path) as f:
self.config = full_load(f)
if self.config is None:
sys.stderr.write("Warning: config file is empty!\n")
self.config = {}
|
[
"def",
"load",
"(",
"self",
",",
"path",
")",
":",
"with",
"open",
"(",
"path",
")",
"as",
"f",
":",
"self",
".",
"config",
"=",
"full_load",
"(",
"f",
")",
"if",
"self",
".",
"config",
"is",
"None",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"Warning: config file is empty!\\n\"",
")",
"self",
".",
"config",
"=",
"{",
"}"
] |
Load yaml-formatted config file.
Parameters
----------
path : str
path to config file
|
[
"Load",
"yaml",
"-",
"formatted",
"config",
"file",
"."
] |
python
|
train
|
cihai/cihai
|
cihai/log.py
|
https://github.com/cihai/cihai/blob/43b0c2931da18c1ef1ff1cdd71e4b1c5eca24a41/cihai/log.py#L24-L63
|
def default_log_template(self, record):
"""Return the prefix for the log message. Template for Formatter.
:param record: :py:class:`logging.LogRecord` object. This is passed in
from inside the :py:meth:`logging.Formatter.format` record.
"""
reset = Style.RESET_ALL
levelname = [
LEVEL_COLORS.get(record.levelname),
Style.BRIGHT,
'(%(levelname)s)',
Style.RESET_ALL,
' ',
]
asctime = [
'[',
Fore.BLACK,
Style.DIM,
Style.BRIGHT,
'%(asctime)s',
Fore.RESET,
Style.RESET_ALL,
']',
]
name = [
' ',
Fore.WHITE,
Style.DIM,
Style.BRIGHT,
'%(name)s',
Fore.RESET,
Style.RESET_ALL,
' ',
]
tpl = "".join(reset + levelname + asctime + name + reset)
return tpl
|
[
"def",
"default_log_template",
"(",
"self",
",",
"record",
")",
":",
"reset",
"=",
"Style",
".",
"RESET_ALL",
"levelname",
"=",
"[",
"LEVEL_COLORS",
".",
"get",
"(",
"record",
".",
"levelname",
")",
",",
"Style",
".",
"BRIGHT",
",",
"'(%(levelname)s)'",
",",
"Style",
".",
"RESET_ALL",
",",
"' '",
",",
"]",
"asctime",
"=",
"[",
"'['",
",",
"Fore",
".",
"BLACK",
",",
"Style",
".",
"DIM",
",",
"Style",
".",
"BRIGHT",
",",
"'%(asctime)s'",
",",
"Fore",
".",
"RESET",
",",
"Style",
".",
"RESET_ALL",
",",
"']'",
",",
"]",
"name",
"=",
"[",
"' '",
",",
"Fore",
".",
"WHITE",
",",
"Style",
".",
"DIM",
",",
"Style",
".",
"BRIGHT",
",",
"'%(name)s'",
",",
"Fore",
".",
"RESET",
",",
"Style",
".",
"RESET_ALL",
",",
"' '",
",",
"]",
"tpl",
"=",
"\"\"",
".",
"join",
"(",
"reset",
"+",
"levelname",
"+",
"asctime",
"+",
"name",
"+",
"reset",
")",
"return",
"tpl"
] |
Return the prefix for the log message. Template for Formatter.
:param record: :py:class:`logging.LogRecord` object. This is passed in
from inside the :py:meth:`logging.Formatter.format` record.
|
[
"Return",
"the",
"prefix",
"for",
"the",
"log",
"message",
".",
"Template",
"for",
"Formatter",
"."
] |
python
|
train
|
cimm-kzn/CGRtools
|
CGRtools/algorithms/standardize.py
|
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/algorithms/standardize.py#L26-L60
|
def standardize(self):
"""
standardize functional groups
:return: number of found groups
"""
self.reset_query_marks()
seen = set()
total = 0
for n, atom in self.atoms():
if n in seen:
continue
for k, center in central.items():
if center != atom:
continue
shell = tuple((bond, self._node[m]) for m, bond in self._adj[n].items())
for shell_query, shell_patch, atom_patch in query_patch[k]:
if shell_query != shell:
continue
total += 1
for attr_name, attr_value in atom_patch.items():
setattr(atom, attr_name, attr_value)
for (bond_patch, atom_patch), (bond, atom) in zip(shell_patch, shell):
bond.update(bond_patch)
for attr_name, attr_value in atom_patch.items():
setattr(atom, attr_name, attr_value)
seen.add(n)
seen.update(self._adj[n])
break
else:
continue
break
if total:
self.flush_cache()
return total
|
[
"def",
"standardize",
"(",
"self",
")",
":",
"self",
".",
"reset_query_marks",
"(",
")",
"seen",
"=",
"set",
"(",
")",
"total",
"=",
"0",
"for",
"n",
",",
"atom",
"in",
"self",
".",
"atoms",
"(",
")",
":",
"if",
"n",
"in",
"seen",
":",
"continue",
"for",
"k",
",",
"center",
"in",
"central",
".",
"items",
"(",
")",
":",
"if",
"center",
"!=",
"atom",
":",
"continue",
"shell",
"=",
"tuple",
"(",
"(",
"bond",
",",
"self",
".",
"_node",
"[",
"m",
"]",
")",
"for",
"m",
",",
"bond",
"in",
"self",
".",
"_adj",
"[",
"n",
"]",
".",
"items",
"(",
")",
")",
"for",
"shell_query",
",",
"shell_patch",
",",
"atom_patch",
"in",
"query_patch",
"[",
"k",
"]",
":",
"if",
"shell_query",
"!=",
"shell",
":",
"continue",
"total",
"+=",
"1",
"for",
"attr_name",
",",
"attr_value",
"in",
"atom_patch",
".",
"items",
"(",
")",
":",
"setattr",
"(",
"atom",
",",
"attr_name",
",",
"attr_value",
")",
"for",
"(",
"bond_patch",
",",
"atom_patch",
")",
",",
"(",
"bond",
",",
"atom",
")",
"in",
"zip",
"(",
"shell_patch",
",",
"shell",
")",
":",
"bond",
".",
"update",
"(",
"bond_patch",
")",
"for",
"attr_name",
",",
"attr_value",
"in",
"atom_patch",
".",
"items",
"(",
")",
":",
"setattr",
"(",
"atom",
",",
"attr_name",
",",
"attr_value",
")",
"seen",
".",
"add",
"(",
"n",
")",
"seen",
".",
"update",
"(",
"self",
".",
"_adj",
"[",
"n",
"]",
")",
"break",
"else",
":",
"continue",
"break",
"if",
"total",
":",
"self",
".",
"flush_cache",
"(",
")",
"return",
"total"
] |
standardize functional groups
:return: number of found groups
|
[
"standardize",
"functional",
"groups"
] |
python
|
train
|
aliyun/aliyun-odps-python-sdk
|
odps/df/tools/lib/hll.py
|
https://github.com/aliyun/aliyun-odps-python-sdk/blob/4b0de18f5864386df6068f26f026e62f932c41e4/odps/df/tools/lib/hll.py#L135-L144
|
def merge(self, buffer, other_hyper_log_log):
"""
Merge the HyperLogLog
:param buffer: this HyperLogLog's register buffer, updated in place
:param other_hyper_log_log: the register buffer to merge in
:return: None; ``buffer`` is modified in place
"""
for i in range(len(buffer)):
buffer[i] = max(buffer[i], other_hyper_log_log[i])
|
[
"def",
"merge",
"(",
"self",
",",
"buffer",
",",
"other_hyper_log_log",
")",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"buffer",
")",
")",
":",
"buffer",
"[",
"i",
"]",
"=",
"max",
"(",
"buffer",
"[",
"i",
"]",
",",
"other_hyper_log_log",
"[",
"i",
"]",
")"
] |
Merge the HyperLogLog
:param buffer: this HyperLogLog's register buffer, updated in place
:param other_hyper_log_log: the register buffer to merge in
:return: None; ``buffer`` is modified in place
|
[
"Merge",
"the",
"HyperLogLog"
] |
python
|
train
|
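The register-wise maximum computed by merge above is what makes HyperLogLog unions lossless; a tiny standalone run on made-up register buffers:

    a = [0, 3, 1, 2]
    b = [1, 2, 5, 2]
    for i in range(len(a)):
        a[i] = max(a[i], b[i])
    print(a)  # [1, 3, 5, 2] -- equivalent to sketching the union of both input sets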
TAPPGuild/tapp-config
|
tapp_config.py
|
https://github.com/TAPPGuild/tapp-config/blob/20fdbe00e4763f38a90845ad2cfb63c94e13ca81/tapp_config.py#L20-L41
|
def get_config(name=__name__):
"""
Get a configuration parser for a given TAPP name.
Reads config.ini files only, not in-database configuration records.
:param name: The tapp name to get a configuration for.
:rtype: ConfigParser
:return: A config parser matching the given name
"""
cfg = ConfigParser()
path = os.environ.get('%s_CONFIG_FILE' % name.upper())
if path is None or path == "":
fname = '/etc/tapp/%s.ini' % name
if isfile(fname):
path = fname
elif isfile('cfg.ini'):
path = 'cfg.ini'
else:
raise ValueError("Unable to get configuration for tapp %s" % name)
cfg.read(path)
return cfg
|
[
"def",
"get_config",
"(",
"name",
"=",
"__name__",
")",
":",
"cfg",
"=",
"ConfigParser",
"(",
")",
"path",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'%s_CONFIG_FILE'",
"%",
"name",
".",
"upper",
"(",
")",
")",
"if",
"path",
"is",
"None",
"or",
"path",
"==",
"\"\"",
":",
"fname",
"=",
"'/etc/tapp/%s.ini'",
"%",
"name",
"if",
"isfile",
"(",
"fname",
")",
":",
"path",
"=",
"fname",
"elif",
"isfile",
"(",
"'cfg.ini'",
")",
":",
"path",
"=",
"'cfg.ini'",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unable to get configuration for tapp %s\"",
"%",
"name",
")",
"cfg",
".",
"read",
"(",
"path",
")",
"return",
"cfg"
] |
Get a configuration parser for a given TAPP name.
Reads config.ini files only, not in-database configuration records.
:param name: The tapp name to get a configuration for.
:rtype: ConfigParser
:return: A config parser matching the given name
|
[
"Get",
"a",
"configuration",
"parser",
"for",
"a",
"given",
"TAPP",
"name",
".",
"Reads",
"config",
".",
"ini",
"files",
"only",
"not",
"in",
"-",
"database",
"configuration",
"records",
"."
] |
python
|
train
|
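A usage sketch of the lookup order get_config implements; the tapp name 'mytapp' and the file contents are hypothetical.

    import os
    from tapp_config import get_config

    with open('/tmp/mytapp.ini', 'w') as f:
        f.write('[db]\nhost = localhost\n')
    os.environ['MYTAPP_CONFIG_FILE'] = '/tmp/mytapp.ini'  # checked before /etc/tapp/mytapp.ini and ./cfg.ini

    cfg = get_config('mytapp')
    print(cfg.get('db', 'host'))  # localhost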
jingw/pyhdfs
|
pyhdfs.py
|
https://github.com/jingw/pyhdfs/blob/b382b34f7cb28b41559f5be73102beb1732cd933/pyhdfs.py#L713-L719
|
def exists(self, path, **kwargs):
"""Return true if the given path exists"""
try:
self.get_file_status(path, **kwargs)
return True
except HdfsFileNotFoundException:
return False
|
[
"def",
"exists",
"(",
"self",
",",
"path",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"self",
".",
"get_file_status",
"(",
"path",
",",
"*",
"*",
"kwargs",
")",
"return",
"True",
"except",
"HdfsFileNotFoundException",
":",
"return",
"False"
] |
Return true if the given path exists
|
[
"Return",
"true",
"if",
"the",
"given",
"path",
"exists"
] |
python
|
train
|
ozgur/python-firebase
|
firebase/firebase.py
|
https://github.com/ozgur/python-firebase/blob/6b96b326f6d8f477503ca42fdfbd81bcbe1f9e0d/firebase/firebase.py#L188-L195
|
def get_user(self):
"""
Method that gets the authenticated user. The returned user has
the token, email and the provider data.
"""
token = self.authenticator.create_token(self.extra)
user_id = self.extra.get('id')
return FirebaseUser(self.email, token, self.provider, user_id)
|
[
"def",
"get_user",
"(",
"self",
")",
":",
"token",
"=",
"self",
".",
"authenticator",
".",
"create_token",
"(",
"self",
".",
"extra",
")",
"user_id",
"=",
"self",
".",
"extra",
".",
"get",
"(",
"'id'",
")",
"return",
"FirebaseUser",
"(",
"self",
".",
"email",
",",
"token",
",",
"self",
".",
"provider",
",",
"user_id",
")"
] |
Method that gets the authenticated user. The returned user has
the token, email and the provider data.
|
[
"Method",
"that",
"gets",
"the",
"authenticated",
"user",
".",
"The",
"returning",
"user",
"has",
"the",
"token",
"email",
"and",
"the",
"provider",
"data",
"."
] |
python
|
valid
|
gwastro/pycbc
|
pycbc/events/veto.py
|
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/events/veto.py#L70-L90
|
def indices_outside_times(times, start, end):
"""
Return an index array into times that lie outside the durations defined by the start/end arrays
Parameters
----------
times: numpy.ndarray
Array of times
start: numpy.ndarray
Array of duration start times
end: numpy.ndarray
Array of duration end times
Returns
-------
indices: numpy.ndarray
Array of indices into times
"""
exclude = indices_within_times(times, start, end)
indices = numpy.arange(0, len(times))
return numpy.delete(indices, exclude)
|
[
"def",
"indices_outside_times",
"(",
"times",
",",
"start",
",",
"end",
")",
":",
"exclude",
"=",
"indices_within_times",
"(",
"times",
",",
"start",
",",
"end",
")",
"indices",
"=",
"numpy",
".",
"arange",
"(",
"0",
",",
"len",
"(",
"times",
")",
")",
"return",
"numpy",
".",
"delete",
"(",
"indices",
",",
"exclude",
")"
] |
Return an index array into times that lie outside the durations defined by the start/end arrays
Parameters
----------
times: numpy.ndarray
Array of times
start: numpy.ndarray
Array of duration start times
end: numpy.ndarray
Array of duration end times
Returns
-------
indices: numpy.ndarray
Array of indices into times
|
[
"Return",
"an",
"index",
"array",
"into",
"times",
"that",
"like",
"outside",
"the",
"durations",
"defined",
"by",
"start",
"end",
"arrays"
] |
python
|
train
|
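A concrete run of indices_outside_times above; the times and duration arrays are made up, and boundary behaviour is left to indices_within_times (not shown here).

    import numpy as np
    from pycbc.events.veto import indices_outside_times

    times = np.array([0.5, 1.5, 2.5, 3.5])
    start = np.array([1.0, 3.0])
    end   = np.array([2.0, 4.0])
    print(indices_outside_times(times, start, end))  # [0 2]: only 0.5 and 2.5 fall outside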
briancappello/flask-unchained
|
flask_unchained/bundles/security/decorators/auth_required.py
|
https://github.com/briancappello/flask-unchained/blob/4d536cb90e2cc4829c1c05f2c74d3e22901a1399/flask_unchained/bundles/security/decorators/auth_required.py#L13-L54
|
def auth_required(decorated_fn=None, **role_rules):
"""
Decorator for requiring an authenticated user, optionally with roles.
Roles are passed as keyword arguments, like so::
@auth_required(role='REQUIRE_THIS_ONE_ROLE')
@auth_required(roles=['REQUIRE', 'ALL', 'OF', 'THESE', 'ROLES'])
@auth_required(one_of=['EITHER_THIS_ROLE', 'OR_THIS_ONE'])
One of role or roles kwargs can also be combined with one_of::
@auth_required(role='REQUIRED', one_of=['THIS', 'OR_THIS'])
Aborts with ``HTTP 401: Unauthorized`` if no user is logged in, or
``HTTP 403: Forbidden`` if any of the specified role checks fail.
"""
required_roles = []
one_of_roles = []
if not (decorated_fn and callable(decorated_fn)):
if 'role' in role_rules and 'roles' in role_rules:
raise RuntimeError('specify only one of `role` or `roles` kwargs')
elif 'role' in role_rules:
required_roles = [role_rules['role']]
elif 'roles' in role_rules:
required_roles = role_rules['roles']
if 'one_of' in role_rules:
one_of_roles = role_rules['one_of']
def wrapper(fn):
@wraps(fn)
@_auth_required()
@roles_required(*required_roles)
@roles_accepted(*one_of_roles)
def decorated(*args, **kwargs):
return fn(*args, **kwargs)
return decorated
if decorated_fn and callable(decorated_fn):
return wrapper(decorated_fn)
return wrapper
|
[
"def",
"auth_required",
"(",
"decorated_fn",
"=",
"None",
",",
"*",
"*",
"role_rules",
")",
":",
"required_roles",
"=",
"[",
"]",
"one_of_roles",
"=",
"[",
"]",
"if",
"not",
"(",
"decorated_fn",
"and",
"callable",
"(",
"decorated_fn",
")",
")",
":",
"if",
"'role'",
"in",
"role_rules",
"and",
"'roles'",
"in",
"role_rules",
":",
"raise",
"RuntimeError",
"(",
"'specify only one of `role` or `roles` kwargs'",
")",
"elif",
"'role'",
"in",
"role_rules",
":",
"required_roles",
"=",
"[",
"role_rules",
"[",
"'role'",
"]",
"]",
"elif",
"'roles'",
"in",
"role_rules",
":",
"required_roles",
"=",
"role_rules",
"[",
"'roles'",
"]",
"if",
"'one_of'",
"in",
"role_rules",
":",
"one_of_roles",
"=",
"role_rules",
"[",
"'one_of'",
"]",
"def",
"wrapper",
"(",
"fn",
")",
":",
"@",
"wraps",
"(",
"fn",
")",
"@",
"_auth_required",
"(",
")",
"@",
"roles_required",
"(",
"*",
"required_roles",
")",
"@",
"roles_accepted",
"(",
"*",
"one_of_roles",
")",
"def",
"decorated",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"fn",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"decorated",
"if",
"decorated_fn",
"and",
"callable",
"(",
"decorated_fn",
")",
":",
"return",
"wrapper",
"(",
"decorated_fn",
")",
"return",
"wrapper"
] |
Decorator for requiring an authenticated user, optionally with roles.
Roles are passed as keyword arguments, like so::
@auth_required(role='REQUIRE_THIS_ONE_ROLE')
@auth_required(roles=['REQUIRE', 'ALL', 'OF', 'THESE', 'ROLES'])
@auth_required(one_of=['EITHER_THIS_ROLE', 'OR_THIS_ONE'])
One of role or roles kwargs can also be combined with one_of::
@auth_required(role='REQUIRED', one_of=['THIS', 'OR_THIS'])
Aborts with ``HTTP 401: Unauthorized`` if no user is logged in, or
``HTTP 403: Forbidden`` if any of the specified role checks fail.
|
[
"Decorator",
"for",
"requiring",
"an",
"authenticated",
"user",
"optionally",
"with",
"roles",
"."
] |
python
|
train
|
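The decorator shapes described in the docstring above, applied to hypothetical view functions (the import path is assumed from the file location):

    from flask_unchained.bundles.security.decorators.auth_required import auth_required

    @auth_required
    def whoami():
        ...

    @auth_required(role='ADMIN', one_of=['SUPPORT', 'BILLING'])
    def sensitive_view():
        ...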
daviddrysdale/python-phonenumbers
|
python/phonenumbers/phonenumbermatcher.py
|
https://github.com/daviddrysdale/python-phonenumbers/blob/9cc5bb4ab5e661e70789b4c64bf7a9383c7bdc20/python/phonenumbers/phonenumbermatcher.py#L556-L582
|
def _extract_match(self, candidate, offset):
"""Attempts to extract a match from a candidate string.
Arguments:
candidate -- The candidate text that might contain a phone number.
offset -- The offset of candidate within self.text
Returns the match found, None if none can be found
"""
# Skip a match that is more likely a publication page reference or a
# date.
if (_SLASH_SEPARATED_DATES.search(candidate)):
return None
# Skip potential time-stamps.
if _TIME_STAMPS.search(candidate):
following_text = self.text[offset + len(candidate):]
if _TIME_STAMPS_SUFFIX.match(following_text):
return None
# Try to come up with a valid match given the entire candidate.
match = self._parse_and_verify(candidate, offset)
if match is not None:
return match
# If that failed, try to find an "inner match" -- there might be a
# phone number within this candidate.
return self._extract_inner_match(candidate, offset)
|
[
"def",
"_extract_match",
"(",
"self",
",",
"candidate",
",",
"offset",
")",
":",
"# Skip a match that is more likely a publication page reference or a",
"# date.",
"if",
"(",
"_SLASH_SEPARATED_DATES",
".",
"search",
"(",
"candidate",
")",
")",
":",
"return",
"None",
"# Skip potential time-stamps.",
"if",
"_TIME_STAMPS",
".",
"search",
"(",
"candidate",
")",
":",
"following_text",
"=",
"self",
".",
"text",
"[",
"offset",
"+",
"len",
"(",
"candidate",
")",
":",
"]",
"if",
"_TIME_STAMPS_SUFFIX",
".",
"match",
"(",
"following_text",
")",
":",
"return",
"None",
"# Try to come up with a valid match given the entire candidate.",
"match",
"=",
"self",
".",
"_parse_and_verify",
"(",
"candidate",
",",
"offset",
")",
"if",
"match",
"is",
"not",
"None",
":",
"return",
"match",
"# If that failed, try to find an \"inner match\" -- there might be a",
"# phone number within this candidate.",
"return",
"self",
".",
"_extract_inner_match",
"(",
"candidate",
",",
"offset",
")"
] |
Attempts to extract a match from a candidate string.
Arguments:
candidate -- The candidate text that might contain a phone number.
offset -- The offset of candidate within self.text
Returns the match found, None if none can be found
|
[
"Attempts",
"to",
"extract",
"a",
"match",
"from",
"a",
"candidate",
"string",
"."
] |
python
|
train
|
maartenbreddels/ipyvolume
|
ipyvolume/pylab.py
|
https://github.com/maartenbreddels/ipyvolume/blob/e68b72852b61276f8e6793bc8811f5b2432a155f/ipyvolume/pylab.py#L1350-L1440
|
def selector_default(output_widget=None):
"""Capture selection events from the current figure, and apply the selections to Scatter objects.
Example:
>>> import ipyvolume as ipv
>>> ipv.figure()
>>> ipv.examples.gaussian()
>>> ipv.selector_default()
>>> ipv.show()
Now hold the control key to do selections, type
* 'C' for circle
* 'R' for rectangle
* 'L' for lasso
* '=' for replace mode
* '&' for logically and mode
* '|' for logically or mode
* '-' for subtract mode
"""
fig = gcf()
if output_widget is None:
output_widget = ipywidgets.Output()
display(output_widget)
def lasso(data, other=None, fig=fig):
with output_widget:
inside = None
if data['device'] and data['type'] == 'lasso':
region = shapely.geometry.Polygon(data['device'])
@np.vectorize
def inside_polygon(x, y):
return region.contains(shapely.geometry.Point([x, y]))
inside = inside_polygon
if data['device'] and data['type'] == 'circle':
x1, y1 = data['device']['begin']
x2, y2 = data['device']['end']
dx = x2 - x1
dy = y2 - y1
r = (dx ** 2 + dy ** 2) ** 0.5
def inside_circle(x, y):
return ((x - x1) ** 2 + (y - y1) ** 2) < r ** 2
inside = inside_circle
if data['device'] and data['type'] == 'rectangle':
x1, y1 = data['device']['begin']
x2, y2 = data['device']['end']
x = [x1, x2]
y = [y1, y2]
xmin, xmax = min(x), max(x)
ymin, ymax = min(y), max(y)
def inside_rectangle(x, y):
return (x > xmin) & (x < xmax) & (y > ymin) & (y < ymax)
inside = inside_rectangle
def join(x, y, mode):
Nx = 0 if (x is None or len(x[0]) == 0) else np.max(x)
Ny = 0 if len(y[0]) == 0 else np.max(y)
N = max(Nx, Ny)
xmask = np.zeros(N + 1, np.bool)
ymask = np.zeros(N + 1, np.bool)
if x is not None:
xmask[x] = True
ymask[y] = True
if mode == "replace":
return np.where(ymask)
if mode == "and":
mask = xmask & ymask
return np.where(ymask if x is None else mask)
if mode == "or":
mask = xmask | ymask
return np.where(ymask if x is None else mask)
if mode == "subtract":
mask = xmask & ~ymask
return np.where(ymask if x is None else mask)
for scatter in fig.scatters:
x, y = fig.project(scatter.x, scatter.y, scatter.z)
mask = inside(x, y)
scatter.selected = join(scatter.selected, np.where(mask), fig.selection_mode)
fig.on_selection(lasso)
|
[
"def",
"selector_default",
"(",
"output_widget",
"=",
"None",
")",
":",
"fig",
"=",
"gcf",
"(",
")",
"if",
"output_widget",
"is",
"None",
":",
"output_widget",
"=",
"ipywidgets",
".",
"Output",
"(",
")",
"display",
"(",
"output_widget",
")",
"def",
"lasso",
"(",
"data",
",",
"other",
"=",
"None",
",",
"fig",
"=",
"fig",
")",
":",
"with",
"output_widget",
":",
"inside",
"=",
"None",
"if",
"data",
"[",
"'device'",
"]",
"and",
"data",
"[",
"'type'",
"]",
"==",
"'lasso'",
":",
"region",
"=",
"shapely",
".",
"geometry",
".",
"Polygon",
"(",
"data",
"[",
"'device'",
"]",
")",
"@",
"np",
".",
"vectorize",
"def",
"inside_polygon",
"(",
"x",
",",
"y",
")",
":",
"return",
"region",
".",
"contains",
"(",
"shapely",
".",
"geometry",
".",
"Point",
"(",
"[",
"x",
",",
"y",
"]",
")",
")",
"inside",
"=",
"inside_polygon",
"if",
"data",
"[",
"'device'",
"]",
"and",
"data",
"[",
"'type'",
"]",
"==",
"'circle'",
":",
"x1",
",",
"y1",
"=",
"data",
"[",
"'device'",
"]",
"[",
"'begin'",
"]",
"x2",
",",
"y2",
"=",
"data",
"[",
"'device'",
"]",
"[",
"'end'",
"]",
"dx",
"=",
"x2",
"-",
"x1",
"dy",
"=",
"y2",
"-",
"y1",
"r",
"=",
"(",
"dx",
"**",
"2",
"+",
"dy",
"**",
"2",
")",
"**",
"0.5",
"def",
"inside_circle",
"(",
"x",
",",
"y",
")",
":",
"return",
"(",
"(",
"x",
"-",
"x1",
")",
"**",
"2",
"+",
"(",
"y",
"-",
"y1",
")",
"**",
"2",
")",
"<",
"r",
"**",
"2",
"inside",
"=",
"inside_circle",
"if",
"data",
"[",
"'device'",
"]",
"and",
"data",
"[",
"'type'",
"]",
"==",
"'rectangle'",
":",
"x1",
",",
"y1",
"=",
"data",
"[",
"'device'",
"]",
"[",
"'begin'",
"]",
"x2",
",",
"y2",
"=",
"data",
"[",
"'device'",
"]",
"[",
"'end'",
"]",
"x",
"=",
"[",
"x1",
",",
"x2",
"]",
"y",
"=",
"[",
"y1",
",",
"y2",
"]",
"xmin",
",",
"xmax",
"=",
"min",
"(",
"x",
")",
",",
"max",
"(",
"x",
")",
"ymin",
",",
"ymax",
"=",
"min",
"(",
"y",
")",
",",
"max",
"(",
"y",
")",
"def",
"inside_rectangle",
"(",
"x",
",",
"y",
")",
":",
"return",
"(",
"x",
">",
"xmin",
")",
"&",
"(",
"x",
"<",
"xmax",
")",
"&",
"(",
"y",
">",
"ymin",
")",
"&",
"(",
"y",
"<",
"ymax",
")",
"inside",
"=",
"inside_rectangle",
"def",
"join",
"(",
"x",
",",
"y",
",",
"mode",
")",
":",
"Nx",
"=",
"0",
"if",
"(",
"x",
"is",
"None",
"or",
"len",
"(",
"x",
"[",
"0",
"]",
")",
"==",
"0",
")",
"else",
"np",
".",
"max",
"(",
"x",
")",
"Ny",
"=",
"0",
"if",
"len",
"(",
"y",
"[",
"0",
"]",
")",
"==",
"0",
"else",
"np",
".",
"max",
"(",
"y",
")",
"N",
"=",
"max",
"(",
"Nx",
",",
"Ny",
")",
"xmask",
"=",
"np",
".",
"zeros",
"(",
"N",
"+",
"1",
",",
"np",
".",
"bool",
")",
"ymask",
"=",
"np",
".",
"zeros",
"(",
"N",
"+",
"1",
",",
"np",
".",
"bool",
")",
"if",
"x",
"is",
"not",
"None",
":",
"xmask",
"[",
"x",
"]",
"=",
"True",
"ymask",
"[",
"y",
"]",
"=",
"True",
"if",
"mode",
"==",
"\"replace\"",
":",
"return",
"np",
".",
"where",
"(",
"ymask",
")",
"if",
"mode",
"==",
"\"and\"",
":",
"mask",
"=",
"xmask",
"&",
"ymask",
"return",
"np",
".",
"where",
"(",
"ymask",
"if",
"x",
"is",
"None",
"else",
"mask",
")",
"if",
"mode",
"==",
"\"or\"",
":",
"mask",
"=",
"xmask",
"|",
"ymask",
"return",
"np",
".",
"where",
"(",
"ymask",
"if",
"x",
"is",
"None",
"else",
"mask",
")",
"if",
"mode",
"==",
"\"subtract\"",
":",
"mask",
"=",
"xmask",
"&",
"~",
"ymask",
"return",
"np",
".",
"where",
"(",
"ymask",
"if",
"x",
"is",
"None",
"else",
"mask",
")",
"for",
"scatter",
"in",
"fig",
".",
"scatters",
":",
"x",
",",
"y",
"=",
"fig",
".",
"project",
"(",
"scatter",
".",
"x",
",",
"scatter",
".",
"y",
",",
"scatter",
".",
"z",
")",
"mask",
"=",
"inside",
"(",
"x",
",",
"y",
")",
"scatter",
".",
"selected",
"=",
"join",
"(",
"scatter",
".",
"selected",
",",
"np",
".",
"where",
"(",
"mask",
")",
",",
"fig",
".",
"selection_mode",
")",
"fig",
".",
"on_selection",
"(",
"lasso",
")"
] |
Capture selection events from the current figure, and apply the selections to Scatter objects.
Example:
>>> import ipyvolume as ipv
>>> ipv.figure()
>>> ipv.examples.gaussian()
>>> ipv.selector_default()
>>> ipv.show()
Now hold the control key to do selections, type
* 'C' for circle
* 'R' for rectangle
* 'L' for lasso
* '=' for replace mode
* '&' for logically and mode
* '|' for logically or mode
* '-' for subtract mode
|
[
"Capture",
"selection",
"events",
"from",
"the",
"current",
"figure",
"and",
"apply",
"the",
"selections",
"to",
"Scatter",
"objects",
"."
] |
python
|
train
|
singularityhub/singularity-cli
|
spython/main/parse/docker.py
|
https://github.com/singularityhub/singularity-cli/blob/cb36b4504812ca87e29c6a40b222a545d1865799/spython/main/parse/docker.py#L156-L185
|
def _add(self, lines):
'''Add can also handle https, and compressed files.
Parameters
==========
lines: the lines from the recipe file to parse for ADD
'''
lines = self._setup('ADD', lines)
for line in lines:
values = line.split(" ")
frompath = values.pop(0)
# Custom parsing for frompath
# If it's a web address, add to install routine to get
if frompath.startswith('http'):
for topath in values:
self._parse_http(frompath, topath)
# Add the file, and decompress in install
elif re.search("[.](gz|gzip|bz2|xz)$", frompath.strip()):
for topath in values:
self._parse_archive(frompath, topath)
# Just add the files
else:
for topath in values:
self._add_files(frompath, topath)
|
[
"def",
"_add",
"(",
"self",
",",
"lines",
")",
":",
"lines",
"=",
"self",
".",
"_setup",
"(",
"'ADD'",
",",
"lines",
")",
"for",
"line",
"in",
"lines",
":",
"values",
"=",
"line",
".",
"split",
"(",
"\" \"",
")",
"frompath",
"=",
"values",
".",
"pop",
"(",
"0",
")",
"# Custom parsing for frompath",
"# If it's a web address, add to install routine to get",
"if",
"frompath",
".",
"startswith",
"(",
"'http'",
")",
":",
"for",
"topath",
"in",
"values",
":",
"self",
".",
"_parse_http",
"(",
"frompath",
",",
"topath",
")",
"# Add the file, and decompress in install",
"elif",
"re",
".",
"search",
"(",
"\"[.](gz|gzip|bz2|xz)$\"",
",",
"frompath",
".",
"strip",
"(",
")",
")",
":",
"for",
"topath",
"in",
"values",
":",
"self",
".",
"_parse_archive",
"(",
"frompath",
",",
"topath",
")",
"# Just add the files",
"else",
":",
"for",
"topath",
"in",
"values",
":",
"self",
".",
"_add_files",
"(",
"frompath",
",",
"topath",
")"
] |
Add can also handle https, and compressed files.
Parameters
==========
lines: the lines from the recipe file to parse for ADD
|
[
"Add",
"can",
"also",
"handle",
"https",
"and",
"compressed",
"files",
"."
] |
python
|
train
|
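The three ADD forms the branches above dispatch on, sketched with the same split/pop parsing (paths are made up):

    add_lines = [
        'https://example.com/data.txt /opt/data.txt',  # web address -> _parse_http
        'archive.tar.gz /opt/',                        # compressed  -> _parse_archive
        'config.yml /etc/app/config.yml',              # plain file  -> _add_files
    ]
    for line in add_lines:
        values = line.split(" ")
        frompath = values.pop(0)
        print(frompath, '->', values)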
tanghaibao/jcvi
|
jcvi/variation/str.py
|
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/variation/str.py#L947-L996
|
def compilevcf(args):
"""
%prog compilevcf samples.csv
Compile vcf results into master spreadsheet.
"""
p = OptionParser(compilevcf.__doc__)
p.add_option("--db", default="hg38", help="Use these lobSTR db")
p.add_option("--nofilter", default=False, action="store_true",
help="Do not filter the variants")
p.set_home("lobstr")
p.set_cpus()
p.set_aws_opts(store="hli-mv-data-science/htang/str-data")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
samples, = args
workdir = opts.workdir
store = opts.output_path
cleanup = not opts.nocleanup
filtered = not opts.nofilter
dbs = opts.db.split(",")
cwd = os.getcwd()
mkdir(workdir)
os.chdir(workdir)
samples = op.join(cwd, samples)
stridsfile = "STR.ids"
if samples.endswith((".vcf", ".vcf.gz")):
vcffiles = [samples]
else:
vcffiles = [x.strip() for x in must_open(samples)]
if not op.exists(stridsfile):
ids = []
for db in dbs:
ids.extend(STRFile(opts.lobstr_home, db=db).ids)
uids = uniqify(ids)
logging.debug("Combined: {} Unique: {}".format(len(ids), len(uids)))
fw = open(stridsfile, "w")
print("\n".join(uids), file=fw)
fw.close()
run_args = [(x, filtered, cleanup, store) for x in vcffiles]
cpus = min(opts.cpus, len(run_args))
p = Pool(processes=cpus)
for res in p.map_async(run_compile, run_args).get():
continue
|
[
"def",
"compilevcf",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"compilevcf",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--db\"",
",",
"default",
"=",
"\"hg38\"",
",",
"help",
"=",
"\"Use these lobSTR db\"",
")",
"p",
".",
"add_option",
"(",
"\"--nofilter\"",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Do not filter the variants\"",
")",
"p",
".",
"set_home",
"(",
"\"lobstr\"",
")",
"p",
".",
"set_cpus",
"(",
")",
"p",
".",
"set_aws_opts",
"(",
"store",
"=",
"\"hli-mv-data-science/htang/str-data\"",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"1",
":",
"sys",
".",
"exit",
"(",
"not",
"p",
".",
"print_help",
"(",
")",
")",
"samples",
",",
"=",
"args",
"workdir",
"=",
"opts",
".",
"workdir",
"store",
"=",
"opts",
".",
"output_path",
"cleanup",
"=",
"not",
"opts",
".",
"nocleanup",
"filtered",
"=",
"not",
"opts",
".",
"nofilter",
"dbs",
"=",
"opts",
".",
"db",
".",
"split",
"(",
"\",\"",
")",
"cwd",
"=",
"os",
".",
"getcwd",
"(",
")",
"mkdir",
"(",
"workdir",
")",
"os",
".",
"chdir",
"(",
"workdir",
")",
"samples",
"=",
"op",
".",
"join",
"(",
"cwd",
",",
"samples",
")",
"stridsfile",
"=",
"\"STR.ids\"",
"if",
"samples",
".",
"endswith",
"(",
"(",
"\".vcf\"",
",",
"\".vcf.gz\"",
")",
")",
":",
"vcffiles",
"=",
"[",
"samples",
"]",
"else",
":",
"vcffiles",
"=",
"[",
"x",
".",
"strip",
"(",
")",
"for",
"x",
"in",
"must_open",
"(",
"samples",
")",
"]",
"if",
"not",
"op",
".",
"exists",
"(",
"stridsfile",
")",
":",
"ids",
"=",
"[",
"]",
"for",
"db",
"in",
"dbs",
":",
"ids",
".",
"extend",
"(",
"STRFile",
"(",
"opts",
".",
"lobstr_home",
",",
"db",
"=",
"db",
")",
".",
"ids",
")",
"uids",
"=",
"uniqify",
"(",
"ids",
")",
"logging",
".",
"debug",
"(",
"\"Combined: {} Unique: {}\"",
".",
"format",
"(",
"len",
"(",
"ids",
")",
",",
"len",
"(",
"uids",
")",
")",
")",
"fw",
"=",
"open",
"(",
"stridsfile",
",",
"\"w\"",
")",
"print",
"(",
"\"\\n\"",
".",
"join",
"(",
"uids",
")",
",",
"file",
"=",
"fw",
")",
"fw",
".",
"close",
"(",
")",
"run_args",
"=",
"[",
"(",
"x",
",",
"filtered",
",",
"cleanup",
",",
"store",
")",
"for",
"x",
"in",
"vcffiles",
"]",
"cpus",
"=",
"min",
"(",
"opts",
".",
"cpus",
",",
"len",
"(",
"run_args",
")",
")",
"p",
"=",
"Pool",
"(",
"processes",
"=",
"cpus",
")",
"for",
"res",
"in",
"p",
".",
"map_async",
"(",
"run_compile",
",",
"run_args",
")",
".",
"get",
"(",
")",
":",
"continue"
] |
%prog compilevcf samples.csv
Compile vcf results into master spreadsheet.
|
[
"%prog",
"compilevcf",
"samples",
".",
"csv"
] |
python
|
train
|
blockstack/blockstack-core
|
blockstack/lib/client.py
|
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/client.py#L3100-L3138
|
def get_name_history(name, hostport=None, proxy=None, history_page=None):
"""
Get the full history of a name
Returns {'status': True, 'history': ...} on success, where history is grouped by block
Returns {'error': ...} on error
"""
assert hostport or proxy, 'Need hostport or proxy'
if proxy is None:
proxy = connect_hostport(hostport)
hist = {}
indexing = None
lastblock = None
if history_page != None:
resp = get_name_history_page(name, history_page, proxy=proxy)
if 'error' in resp:
return resp
indexing = resp['indexing']
lastblock = resp['lastblock']
return {'status': True, 'history': resp['history'], 'indexing': indexing, 'lastblock': lastblock}
for i in range(0, 100000000): # this is obviously too big
resp = get_name_history_page(name, i, proxy=proxy)
if 'error' in resp:
return resp
indexing = resp['indexing']
lastblock = resp['lastblock']
if len(resp['history']) == 0:
# caught up
break
hist = name_history_merge(hist, resp['history'])
return {'status': True, 'history': hist, 'indexing': indexing, 'lastblock': lastblock}
|
[
"def",
"get_name_history",
"(",
"name",
",",
"hostport",
"=",
"None",
",",
"proxy",
"=",
"None",
",",
"history_page",
"=",
"None",
")",
":",
"assert",
"hostport",
"or",
"proxy",
",",
"'Need hostport or proxy'",
"if",
"proxy",
"is",
"None",
":",
"proxy",
"=",
"connect_hostport",
"(",
"hostport",
")",
"hist",
"=",
"{",
"}",
"indexing",
"=",
"None",
"lastblock",
"=",
"None",
"if",
"history_page",
"!=",
"None",
":",
"resp",
"=",
"get_name_history_page",
"(",
"name",
",",
"history_page",
",",
"proxy",
"=",
"proxy",
")",
"if",
"'error'",
"in",
"resp",
":",
"return",
"resp",
"indexing",
"=",
"resp",
"[",
"'indexing'",
"]",
"lastblock",
"=",
"resp",
"[",
"'lastblock'",
"]",
"return",
"{",
"'status'",
":",
"True",
",",
"'history'",
":",
"resp",
"[",
"'history'",
"]",
",",
"'indexing'",
":",
"indexing",
",",
"'lastblock'",
":",
"lastblock",
"}",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"100000000",
")",
":",
"# this is obviously too big",
"resp",
"=",
"get_name_history_page",
"(",
"name",
",",
"i",
",",
"proxy",
"=",
"proxy",
")",
"if",
"'error'",
"in",
"resp",
":",
"return",
"resp",
"indexing",
"=",
"resp",
"[",
"'indexing'",
"]",
"lastblock",
"=",
"resp",
"[",
"'lastblock'",
"]",
"if",
"len",
"(",
"resp",
"[",
"'history'",
"]",
")",
"==",
"0",
":",
"# caught up ",
"break",
"hist",
"=",
"name_history_merge",
"(",
"hist",
",",
"resp",
"[",
"'history'",
"]",
")",
"return",
"{",
"'status'",
":",
"True",
",",
"'history'",
":",
"hist",
",",
"'indexing'",
":",
"indexing",
",",
"'lastblock'",
":",
"lastblock",
"}"
] |
Get the full history of a name
Returns {'status': True, 'history': ...} on success, where history is grouped by block
Returns {'error': ...} on error
|
[
"Get",
"the",
"full",
"history",
"of",
"a",
"name",
"Returns",
"{",
"status",
":",
"True",
"history",
":",
"...",
"}",
"on",
"success",
"where",
"history",
"is",
"grouped",
"by",
"block",
"Returns",
"{",
"error",
":",
"...",
"}",
"on",
"error"
] |
python
|
train
|
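get_name_history above pages through the server until an empty history page comes back, merging pages grouped by block as it goes. A minimal sketch of that paginate-and-merge loop, with hypothetical fetch_page and merge helpers standing in for get_name_history_page and name_history_merge:

def fetch_page(name, page):
    # hypothetical stand-in for get_name_history_page; runs dry after two pages
    pages = {0: {"100": ["op-a"]}, 1: {"101": ["op-b"]}}
    return {"history": pages.get(page, {})}

def merge(hist, page_hist):
    # hypothetical stand-in for name_history_merge: group operations by block height
    for block, ops in page_hist.items():
        hist.setdefault(block, []).extend(ops)
    return hist

hist = {}
page = 0
while True:
    resp = fetch_page("demo.id", page)
    if len(resp["history"]) == 0:  # caught up, as in the row above
        break
    hist = merge(hist, resp["history"])
    page += 1
print(hist)  # {'100': ['op-a'], '101': ['op-b']}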
IdentityPython/oidcendpoint
|
src/oidcendpoint/token_handler.py
|
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/token_handler.py#L279-L300
|
def factory(ec, code=None, token=None, refresh=None, **kwargs):
"""
Create a token handler
:param code:
:param token:
:param refresh:
:return: TokenHandler instance
"""
TTYPE = {'code': 'A', 'token': 'T', 'refresh': 'R'}
args = {}
if code:
args['code_handler'] = init_token_handler(ec, code, TTYPE['code'])
if token:
args['access_token_handler'] = init_token_handler(ec, token, TTYPE['token'])
if refresh:
args['refresh_token_handler'] = init_token_handler(ec, token, TTYPE['refresh'])
return TokenHandler(**args)
|
[
"def",
"factory",
"(",
"ec",
",",
"code",
"=",
"None",
",",
"token",
"=",
"None",
",",
"refresh",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"TTYPE",
"=",
"{",
"'code'",
":",
"'A'",
",",
"'token'",
":",
"'T'",
",",
"'refresh'",
":",
"'R'",
"}",
"args",
"=",
"{",
"}",
"if",
"code",
":",
"args",
"[",
"'code_handler'",
"]",
"=",
"init_token_handler",
"(",
"ec",
",",
"code",
",",
"TTYPE",
"[",
"'code'",
"]",
")",
"if",
"token",
":",
"args",
"[",
"'access_token_handler'",
"]",
"=",
"init_token_handler",
"(",
"ec",
",",
"token",
",",
"TTYPE",
"[",
"'token'",
"]",
")",
"if",
"refresh",
":",
"args",
"[",
"'refresh_token_handler'",
"]",
"=",
"init_token_handler",
"(",
"ec",
",",
"token",
",",
"TTYPE",
"[",
"'refresh'",
"]",
")",
"return",
"TokenHandler",
"(",
"*",
"*",
"args",
")"
] |
Create a token handler
:param code:
:param token:
:param refresh:
:return: TokenHandler instance
|
[
"Create",
"a",
"token",
"handler"
] |
python
|
train
|
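One detail worth flagging in the factory row above: the refresh branch calls init_token_handler(ec, token, TTYPE['refresh']), reusing token where the other two branches each pass their own argument. Assuming that is an oversight rather than intent (the row itself does not say), a corrected sketch of the branching, with a stub in place of oidcendpoint's real init_token_handler:

TTYPE = {'code': 'A', 'token': 'T', 'refresh': 'R'}

def init_token_handler(ec, spec, typ):
    # stub for illustration only; the real helper builds a token handler from spec
    return (typ, spec)

def factory_sketch(ec, code=None, token=None, refresh=None, **kwargs):
    args = {}
    if code:
        args['code_handler'] = init_token_handler(ec, code, TTYPE['code'])
    if token:
        args['access_token_handler'] = init_token_handler(ec, token, TTYPE['token'])
    if refresh:
        # changed: pass refresh, not token, mirroring the other two branches
        args['refresh_token_handler'] = init_token_handler(ec, refresh, TTYPE['refresh'])
    return args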
openstack/python-monascaclient
|
monascaclient/common/monasca_manager.py
|
https://github.com/openstack/python-monascaclient/blob/03b07534145928eb2debad938da033c232dda105/monascaclient/common/monasca_manager.py#L37-L51
|
def _list(self, path, dim_key=None, **kwargs):
"""Get a list of metrics."""
url_str = self.base_url + path
if dim_key and dim_key in kwargs:
dim_str = self.get_dimensions_url_string(kwargs[dim_key])
kwargs[dim_key] = dim_str
if kwargs:
url_str += '?%s' % parse.urlencode(kwargs, True)
body = self.client.list(
path=url_str
)
return self._parse_body(body)
|
[
"def",
"_list",
"(",
"self",
",",
"path",
",",
"dim_key",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"url_str",
"=",
"self",
".",
"base_url",
"+",
"path",
"if",
"dim_key",
"and",
"dim_key",
"in",
"kwargs",
":",
"dim_str",
"=",
"self",
".",
"get_dimensions_url_string",
"(",
"kwargs",
"[",
"dim_key",
"]",
")",
"kwargs",
"[",
"dim_key",
"]",
"=",
"dim_str",
"if",
"kwargs",
":",
"url_str",
"+=",
"'?%s'",
"%",
"parse",
".",
"urlencode",
"(",
"kwargs",
",",
"True",
")",
"body",
"=",
"self",
".",
"client",
".",
"list",
"(",
"path",
"=",
"url_str",
")",
"return",
"self",
".",
"_parse_body",
"(",
"body",
")"
] |
Get a list of metrics.
|
[
"Get",
"a",
"list",
"of",
"metrics",
"."
] |
python
|
train
|
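The _list row builds its query string with parse.urlencode(kwargs, True); the second argument is doseq, so list values expand into repeated query parameters. A small self-contained sketch with a hypothetical endpoint:

from urllib import parse

base_url = "http://monasca.example/v2.0"  # hypothetical endpoint
kwargs = {"name": "cpu.idle_perc", "dimensions": ["host:a", "host:b"]}
url_str = base_url + "/metrics"
if kwargs:
    url_str += '?%s' % parse.urlencode(kwargs, True)  # True = doseq: lists become repeated params
print(url_str)
# http://monasca.example/v2.0/metrics?name=cpu.idle_perc&dimensions=host%3Aa&dimensions=host%3Ab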
user-cont/conu
|
conu/backend/k8s/backend.py
|
https://github.com/user-cont/conu/blob/08caae7bb6bdd265b55bb106c3da6a7946a5a352/conu/backend/k8s/backend.py#L169-L177
|
def delete_namespace(self, name):
"""
Delete namespace with specific name
:param name: str, namespace to delete
:return: None
"""
self.core_api.delete_namespace(name, client.V1DeleteOptions())
logger.info("Deleting namespace: %s", name)
|
[
"def",
"delete_namespace",
"(",
"self",
",",
"name",
")",
":",
"self",
".",
"core_api",
".",
"delete_namespace",
"(",
"name",
",",
"client",
".",
"V1DeleteOptions",
"(",
")",
")",
"logger",
".",
"info",
"(",
"\"Deleting namespace: %s\"",
",",
"name",
")"
] |
Delete namespace with specific name
:param name: str, namespace to delete
:return: None
|
[
"Delete",
"namespace",
"with",
"specific",
"name",
":",
"param",
"name",
":",
"str",
"namespace",
"to",
"delete",
":",
"return",
":",
"None"
] |
python
|
train
|
tanghaibao/goatools
|
goatools/gosubdag/go_tasks.py
|
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/gosubdag/go_tasks.py#L91-L96
|
def get_go2children_go2obj(go2obj):
"""Return go2children (set of child GO IDs) for all GO ID keys in go2obj."""
goobjs, altgo2goobj = get_goobjs_altgo2goobj(go2obj)
go2children = get_id2children(goobjs)
add_alt_goids(go2children, altgo2goobj)
return go2children
|
[
"def",
"get_go2children_go2obj",
"(",
"go2obj",
")",
":",
"goobjs",
",",
"altgo2goobj",
"=",
"get_goobjs_altgo2goobj",
"(",
"go2obj",
")",
"go2children",
"=",
"get_id2children",
"(",
"goobjs",
")",
"add_alt_goids",
"(",
"go2children",
",",
"altgo2goobj",
")",
"return",
"go2children"
] |
Return go2children (set of child GO IDs) for all GO ID keys in go2obj.
|
[
"Return",
"go2children",
"(",
"set",
"of",
"child",
"GO",
"IDs",
")",
"for",
"all",
"GO",
"ID",
"keys",
"in",
"go2obj",
"."
] |
python
|
train
|
waqasbhatti/astrobase
|
astrobase/lcfit/transits.py
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/lcfit/transits.py#L85-L401
|
def traptransit_fit_magseries(times, mags, errs,
transitparams,
sigclip=10.0,
plotfit=False,
magsarefluxes=False,
verbose=True):
'''This fits a trapezoid transit model to a magnitude time series.
Parameters
----------
times,mags,errs : np.array
The input mag/flux time-series to fit a trapezoid planet-transit model
to.
period : float
The period to use for the model fit.
transitparams : list of floats
These are initial parameters for the transit model fit. A list of the
following form is required::
transitparams = [transitperiod (time),
transitepoch (time),
transitdepth (flux or mags),
transitduration (phase),
ingressduration (phase)]
- for magnitudes -> `transitdepth` should be < 0
- for fluxes -> `transitdepth` should be > 0
If `transitepoch` is None, this function will do an initial spline fit
to find an approximate minimum of the phased light curve using the given
period.
The `transitdepth` provided is checked against the value of
`magsarefluxes`. if `magsarefluxes = True`, the `transitdepth` is forced
to be > 0; if `magsarefluxes` = False, the `transitdepth` is forced to
be < 0.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
magsarefluxes : bool
If True, will treat the input values of `mags` as fluxes for purposes of
plotting the fit and sig-clipping.
plotfit : str or False
If this is a string, this function will make a plot for the fit to the
mag/flux time-series and writes the plot to the path specified here.
ignoreinitfail : bool
If this is True, ignores the initial failure to find a set of optimized
Fourier parameters using the global optimization function and proceeds
to do a least-squares fit anyway.
verbose : bool
If True, will indicate progress and warn of any problems.
Returns
-------
dict
This function returns a dict containing the model fit parameters, the
minimized chi-sq value and the reduced chi-sq value. The form of this
dict is mostly standardized across all functions in this module::
{
'fittype':'traptransit',
'fitinfo':{
'initialparams':the initial transit params provided,
'finalparams':the final model fit transit params ,
'finalparamerrs':formal errors in the params,
'leastsqfit':the full tuple returned by scipy.leastsq,
'fitmags': the model fit mags,
'fitepoch': the epoch of minimum light for the fit,
'ntransitpoints': the number of LC points in transit phase
},
'fitchisq': the minimized value of the fit's chi-sq,
'fitredchisq':the reduced chi-sq value,
'fitplotfile': the output fit plot if fitplot is not None,
'magseries':{
'times':input times in phase order of the model,
'phase':the phases of the model mags,
'mags':input mags/fluxes in the phase order of the model,
'errs':errs in the phase order of the model,
'magsarefluxes':input value of magsarefluxes kwarg
}
}
'''
stimes, smags, serrs = sigclip_magseries(times, mags, errs,
sigclip=sigclip,
magsarefluxes=magsarefluxes)
# get rid of zero errs
nzind = np.nonzero(serrs)
stimes, smags, serrs = stimes[nzind], smags[nzind], serrs[nzind]
# check the transitparams
transitperiod, transitepoch, transitdepth = transitparams[0:3]
# check if we have a transitepoch to use
if transitepoch is None:
if verbose:
LOGWARNING('no transitepoch given in transitparams, '
'trying to figure it out automatically...')
# do a spline fit to figure out the approximate min of the LC
try:
spfit = spline_fit_magseries(times, mags, errs, transitperiod,
sigclip=sigclip,
magsarefluxes=magsarefluxes,
verbose=verbose)
transitepoch = spfit['fitinfo']['fitepoch']
# if the spline-fit fails, try a savgol fit instead
except Exception as e:
sgfit = savgol_fit_magseries(times, mags, errs, transitperiod,
sigclip=sigclip,
magsarefluxes=magsarefluxes,
verbose=verbose)
transitepoch = sgfit['fitinfo']['fitepoch']
# if everything failed, then bail out and ask for the transitepoch
finally:
if transitepoch is None:
LOGERROR("couldn't automatically figure out the transit epoch, "
"can't continue. please provide it in transitparams.")
# assemble the returndict
returndict = {
'fittype':'traptransit',
'fitinfo':{
'initialparams':transitparams,
'finalparams':None,
'leastsqfit':None,
'fitmags':None,
'fitepoch':None,
},
'fitchisq':np.nan,
'fitredchisq':np.nan,
'fitplotfile':None,
'magseries':{
'phase':None,
'times':None,
'mags':None,
'errs':None,
'magsarefluxes':magsarefluxes,
},
}
return returndict
else:
# check the case when there are more than one transitepochs
# returned
if transitepoch.size > 1:
if verbose:
LOGWARNING(
"could not auto-find a single minimum in LC for "
"transitepoch, using the first one returned"
)
transitparams[1] = transitepoch[0]
else:
if verbose:
LOGWARNING(
'using automatically determined transitepoch = %.5f'
% transitepoch
)
transitparams[1] = transitepoch.item()
# next, check the transitdepth and fix it to the form required
if magsarefluxes:
if transitdepth < 0.0:
transitparams[2] = -transitdepth
else:
if transitdepth > 0.0:
transitparams[2] = -transitdepth
# finally, do the fit
try:
leastsqfit = spleastsq(transits.trapezoid_transit_residual,
transitparams,
args=(stimes, smags, serrs),
full_output=True)
except Exception as e:
leastsqfit = None
# if the fit succeeded, then we can return the final parameters
if leastsqfit and leastsqfit[-1] in (1,2,3,4):
finalparams = leastsqfit[0]
covxmatrix = leastsqfit[1]
# calculate the chisq and reduced chisq
fitmags, phase, ptimes, pmags, perrs, n_transitpoints = (
transits.trapezoid_transit_func(
finalparams,
stimes, smags, serrs,
get_ntransitpoints=True
)
)
fitchisq = np.sum(
((fitmags - pmags)*(fitmags - pmags)) / (perrs*perrs)
)
fitredchisq = fitchisq/(len(pmags) - len(finalparams) - 1)
# get the residual variance and calculate the formal 1-sigma errs on the
# final parameters
residuals = leastsqfit[2]['fvec']
residualvariance = (
np.sum(residuals*residuals)/(pmags.size - finalparams.size)
)
if covxmatrix is not None:
covmatrix = residualvariance*covxmatrix
stderrs = np.sqrt(np.diag(covmatrix))
else:
LOGERROR('covxmatrix not available, fit probably failed!')
stderrs = None
if verbose:
LOGINFO(
'final fit done. chisq = %.5f, reduced chisq = %.5f' %
(fitchisq, fitredchisq)
)
# get the fit epoch
fperiod, fepoch = finalparams[:2]
# assemble the returndict
returndict = {
'fittype':'traptransit',
'fitinfo':{
'initialparams':transitparams,
'finalparams':finalparams,
'finalparamerrs':stderrs,
'leastsqfit':leastsqfit,
'fitmags':fitmags,
'fitepoch':fepoch,
'ntransitpoints':n_transitpoints
},
'fitchisq':fitchisq,
'fitredchisq':fitredchisq,
'fitplotfile':None,
'magseries':{
'phase':phase,
'times':ptimes,
'mags':pmags,
'errs':perrs,
'magsarefluxes':magsarefluxes,
},
}
# make the fit plot if required
if plotfit and isinstance(plotfit, str):
make_fit_plot(phase, pmags, perrs, fitmags,
fperiod, ptimes.min(), fepoch,
plotfit,
magsarefluxes=magsarefluxes)
returndict['fitplotfile'] = plotfit
return returndict
# if the leastsq fit failed, return nothing
else:
LOGERROR('trapezoid-fit: least-squared fit to the light curve failed!')
# assemble the returndict
returndict = {
'fittype':'traptransit',
'fitinfo':{
'initialparams':transitparams,
'finalparams':None,
'finalparamerrs':None,
'leastsqfit':leastsqfit,
'fitmags':None,
'fitepoch':None,
'ntransitpoints':0
},
'fitchisq':np.nan,
'fitredchisq':np.nan,
'fitplotfile':None,
'magseries':{
'phase':None,
'times':None,
'mags':None,
'errs':None,
'magsarefluxes':magsarefluxes,
},
}
return returndict
|
[
"def",
"traptransit_fit_magseries",
"(",
"times",
",",
"mags",
",",
"errs",
",",
"transitparams",
",",
"sigclip",
"=",
"10.0",
",",
"plotfit",
"=",
"False",
",",
"magsarefluxes",
"=",
"False",
",",
"verbose",
"=",
"True",
")",
":",
"stimes",
",",
"smags",
",",
"serrs",
"=",
"sigclip_magseries",
"(",
"times",
",",
"mags",
",",
"errs",
",",
"sigclip",
"=",
"sigclip",
",",
"magsarefluxes",
"=",
"magsarefluxes",
")",
"# get rid of zero errs",
"nzind",
"=",
"np",
".",
"nonzero",
"(",
"serrs",
")",
"stimes",
",",
"smags",
",",
"serrs",
"=",
"stimes",
"[",
"nzind",
"]",
",",
"smags",
"[",
"nzind",
"]",
",",
"serrs",
"[",
"nzind",
"]",
"# check the transitparams",
"transitperiod",
",",
"transitepoch",
",",
"transitdepth",
"=",
"transitparams",
"[",
"0",
":",
"3",
"]",
"# check if we have a transitepoch to use",
"if",
"transitepoch",
"is",
"None",
":",
"if",
"verbose",
":",
"LOGWARNING",
"(",
"'no transitepoch given in transitparams, '",
"'trying to figure it out automatically...'",
")",
"# do a spline fit to figure out the approximate min of the LC",
"try",
":",
"spfit",
"=",
"spline_fit_magseries",
"(",
"times",
",",
"mags",
",",
"errs",
",",
"transitperiod",
",",
"sigclip",
"=",
"sigclip",
",",
"magsarefluxes",
"=",
"magsarefluxes",
",",
"verbose",
"=",
"verbose",
")",
"transitepoch",
"=",
"spfit",
"[",
"'fitinfo'",
"]",
"[",
"'fitepoch'",
"]",
"# if the spline-fit fails, try a savgol fit instead",
"except",
"Exception",
"as",
"e",
":",
"sgfit",
"=",
"savgol_fit_magseries",
"(",
"times",
",",
"mags",
",",
"errs",
",",
"transitperiod",
",",
"sigclip",
"=",
"sigclip",
",",
"magsarefluxes",
"=",
"magsarefluxes",
",",
"verbose",
"=",
"verbose",
")",
"transitepoch",
"=",
"sgfit",
"[",
"'fitinfo'",
"]",
"[",
"'fitepoch'",
"]",
"# if everything failed, then bail out and ask for the transitepoch",
"finally",
":",
"if",
"transitepoch",
"is",
"None",
":",
"LOGERROR",
"(",
"\"couldn't automatically figure out the transit epoch, \"",
"\"can't continue. please provide it in transitparams.\"",
")",
"# assemble the returndict",
"returndict",
"=",
"{",
"'fittype'",
":",
"'traptransit'",
",",
"'fitinfo'",
":",
"{",
"'initialparams'",
":",
"transitparams",
",",
"'finalparams'",
":",
"None",
",",
"'leastsqfit'",
":",
"None",
",",
"'fitmags'",
":",
"None",
",",
"'fitepoch'",
":",
"None",
",",
"}",
",",
"'fitchisq'",
":",
"np",
".",
"nan",
",",
"'fitredchisq'",
":",
"np",
".",
"nan",
",",
"'fitplotfile'",
":",
"None",
",",
"'magseries'",
":",
"{",
"'phase'",
":",
"None",
",",
"'times'",
":",
"None",
",",
"'mags'",
":",
"None",
",",
"'errs'",
":",
"None",
",",
"'magsarefluxes'",
":",
"magsarefluxes",
",",
"}",
",",
"}",
"return",
"returndict",
"else",
":",
"# check the case when there are more than one transitepochs",
"# returned",
"if",
"transitepoch",
".",
"size",
">",
"1",
":",
"if",
"verbose",
":",
"LOGWARNING",
"(",
"\"could not auto-find a single minimum in LC for \"",
"\"transitepoch, using the first one returned\"",
")",
"transitparams",
"[",
"1",
"]",
"=",
"transitepoch",
"[",
"0",
"]",
"else",
":",
"if",
"verbose",
":",
"LOGWARNING",
"(",
"'using automatically determined transitepoch = %.5f'",
"%",
"transitepoch",
")",
"transitparams",
"[",
"1",
"]",
"=",
"transitepoch",
".",
"item",
"(",
")",
"# next, check the transitdepth and fix it to the form required",
"if",
"magsarefluxes",
":",
"if",
"transitdepth",
"<",
"0.0",
":",
"transitparams",
"[",
"2",
"]",
"=",
"-",
"transitdepth",
"else",
":",
"if",
"transitdepth",
">",
"0.0",
":",
"transitparams",
"[",
"2",
"]",
"=",
"-",
"transitdepth",
"# finally, do the fit",
"try",
":",
"leastsqfit",
"=",
"spleastsq",
"(",
"transits",
".",
"trapezoid_transit_residual",
",",
"transitparams",
",",
"args",
"=",
"(",
"stimes",
",",
"smags",
",",
"serrs",
")",
",",
"full_output",
"=",
"True",
")",
"except",
"Exception",
"as",
"e",
":",
"leastsqfit",
"=",
"None",
"# if the fit succeeded, then we can return the final parameters",
"if",
"leastsqfit",
"and",
"leastsqfit",
"[",
"-",
"1",
"]",
"in",
"(",
"1",
",",
"2",
",",
"3",
",",
"4",
")",
":",
"finalparams",
"=",
"leastsqfit",
"[",
"0",
"]",
"covxmatrix",
"=",
"leastsqfit",
"[",
"1",
"]",
"# calculate the chisq and reduced chisq",
"fitmags",
",",
"phase",
",",
"ptimes",
",",
"pmags",
",",
"perrs",
",",
"n_transitpoints",
"=",
"(",
"transits",
".",
"trapezoid_transit_func",
"(",
"finalparams",
",",
"stimes",
",",
"smags",
",",
"serrs",
",",
"get_ntransitpoints",
"=",
"True",
")",
")",
"fitchisq",
"=",
"np",
".",
"sum",
"(",
"(",
"(",
"fitmags",
"-",
"pmags",
")",
"*",
"(",
"fitmags",
"-",
"pmags",
")",
")",
"/",
"(",
"perrs",
"*",
"perrs",
")",
")",
"fitredchisq",
"=",
"fitchisq",
"/",
"(",
"len",
"(",
"pmags",
")",
"-",
"len",
"(",
"finalparams",
")",
"-",
"1",
")",
"# get the residual variance and calculate the formal 1-sigma errs on the",
"# final parameters",
"residuals",
"=",
"leastsqfit",
"[",
"2",
"]",
"[",
"'fvec'",
"]",
"residualvariance",
"=",
"(",
"np",
".",
"sum",
"(",
"residuals",
"*",
"residuals",
")",
"/",
"(",
"pmags",
".",
"size",
"-",
"finalparams",
".",
"size",
")",
")",
"if",
"covxmatrix",
"is",
"not",
"None",
":",
"covmatrix",
"=",
"residualvariance",
"*",
"covxmatrix",
"stderrs",
"=",
"np",
".",
"sqrt",
"(",
"np",
".",
"diag",
"(",
"covmatrix",
")",
")",
"else",
":",
"LOGERROR",
"(",
"'covxmatrix not available, fit probably failed!'",
")",
"stderrs",
"=",
"None",
"if",
"verbose",
":",
"LOGINFO",
"(",
"'final fit done. chisq = %.5f, reduced chisq = %.5f'",
"%",
"(",
"fitchisq",
",",
"fitredchisq",
")",
")",
"# get the fit epoch",
"fperiod",
",",
"fepoch",
"=",
"finalparams",
"[",
":",
"2",
"]",
"# assemble the returndict",
"returndict",
"=",
"{",
"'fittype'",
":",
"'traptransit'",
",",
"'fitinfo'",
":",
"{",
"'initialparams'",
":",
"transitparams",
",",
"'finalparams'",
":",
"finalparams",
",",
"'finalparamerrs'",
":",
"stderrs",
",",
"'leastsqfit'",
":",
"leastsqfit",
",",
"'fitmags'",
":",
"fitmags",
",",
"'fitepoch'",
":",
"fepoch",
",",
"'ntransitpoints'",
":",
"n_transitpoints",
"}",
",",
"'fitchisq'",
":",
"fitchisq",
",",
"'fitredchisq'",
":",
"fitredchisq",
",",
"'fitplotfile'",
":",
"None",
",",
"'magseries'",
":",
"{",
"'phase'",
":",
"phase",
",",
"'times'",
":",
"ptimes",
",",
"'mags'",
":",
"pmags",
",",
"'errs'",
":",
"perrs",
",",
"'magsarefluxes'",
":",
"magsarefluxes",
",",
"}",
",",
"}",
"# make the fit plot if required",
"if",
"plotfit",
"and",
"isinstance",
"(",
"plotfit",
",",
"str",
")",
":",
"make_fit_plot",
"(",
"phase",
",",
"pmags",
",",
"perrs",
",",
"fitmags",
",",
"fperiod",
",",
"ptimes",
".",
"min",
"(",
")",
",",
"fepoch",
",",
"plotfit",
",",
"magsarefluxes",
"=",
"magsarefluxes",
")",
"returndict",
"[",
"'fitplotfile'",
"]",
"=",
"plotfit",
"return",
"returndict",
"# if the leastsq fit failed, return nothing",
"else",
":",
"LOGERROR",
"(",
"'trapezoid-fit: least-squared fit to the light curve failed!'",
")",
"# assemble the returndict",
"returndict",
"=",
"{",
"'fittype'",
":",
"'traptransit'",
",",
"'fitinfo'",
":",
"{",
"'initialparams'",
":",
"transitparams",
",",
"'finalparams'",
":",
"None",
",",
"'finalparamerrs'",
":",
"None",
",",
"'leastsqfit'",
":",
"leastsqfit",
",",
"'fitmags'",
":",
"None",
",",
"'fitepoch'",
":",
"None",
",",
"'ntransitpoints'",
":",
"0",
"}",
",",
"'fitchisq'",
":",
"np",
".",
"nan",
",",
"'fitredchisq'",
":",
"np",
".",
"nan",
",",
"'fitplotfile'",
":",
"None",
",",
"'magseries'",
":",
"{",
"'phase'",
":",
"None",
",",
"'times'",
":",
"None",
",",
"'mags'",
":",
"None",
",",
"'errs'",
":",
"None",
",",
"'magsarefluxes'",
":",
"magsarefluxes",
",",
"}",
",",
"}",
"return",
"returndict"
] |
This fits a trapezoid transit model to a magnitude time series.
Parameters
----------
times,mags,errs : np.array
The input mag/flux time-series to fit a trapezoid planet-transit model
to.
period : float
The period to use for the model fit.
transitparams : list of floats
These are initial parameters for the transit model fit. A list of the
following form is required::
transitparams = [transitperiod (time),
transitepoch (time),
transitdepth (flux or mags),
transitduration (phase),
ingressduration (phase)]
- for magnitudes -> `transitdepth` should be < 0
- for fluxes -> `transitdepth` should be > 0
If `transitepoch` is None, this function will do an initial spline fit
to find an approximate minimum of the phased light curve using the given
period.
The `transitdepth` provided is checked against the value of
`magsarefluxes`. if `magsarefluxes = True`, the `transitdepth` is forced
to be > 0; if `magsarefluxes` = False, the `transitdepth` is forced to
be < 0.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
magsarefluxes : bool
If True, will treat the input values of `mags` as fluxes for purposes of
plotting the fit and sig-clipping.
plotfit : str or False
If this is a string, this function will make a plot for the fit to the
mag/flux time-series and writes the plot to the path specified here.
ignoreinitfail : bool
If this is True, ignores the initial failure to find a set of optimized
Fourier parameters using the global optimization function and proceeds
to do a least-squares fit anyway.
verbose : bool
If True, will indicate progress and warn of any problems.
Returns
-------
dict
This function returns a dict containing the model fit parameters, the
minimized chi-sq value and the reduced chi-sq value. The form of this
dict is mostly standardized across all functions in this module::
{
'fittype':'traptransit',
'fitinfo':{
'initialparams':the initial transit params provided,
'finalparams':the final model fit transit params ,
'finalparamerrs':formal errors in the params,
'leastsqfit':the full tuple returned by scipy.leastsq,
'fitmags': the model fit mags,
'fitepoch': the epoch of minimum light for the fit,
'ntransitpoints': the number of LC points in transit phase
},
'fitchisq': the minimized value of the fit's chi-sq,
'fitredchisq':the reduced chi-sq value,
'fitplotfile': the output fit plot if fitplot is not None,
'magseries':{
'times':input times in phase order of the model,
'phase':the phases of the model mags,
'mags':input mags/fluxes in the phase order of the model,
'errs':errs in the phase order of the model,
'magsarefluxes':input value of magsarefluxes kwarg
}
}
|
[
"This",
"fits",
"a",
"trapezoid",
"transit",
"model",
"to",
"a",
"magnitude",
"time",
"series",
"."
] |
python
|
valid
|
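Buried in the long astrobase row above is a compact goodness-of-fit step: chi-squared is summed as ((fitmags - pmags)^2)/(perrs^2) over the phased points and reduced by (npoints - nparams - 1) degrees of freedom. A minimal numpy sketch of just that step, with synthetic arrays in place of the trapezoid model output:

import numpy as np

rng = np.random.default_rng(0)
pmags = 10.0 + 0.01 * rng.standard_normal(50)  # synthetic observed mags
perrs = np.full(50, 0.01)                      # synthetic per-point errors
fitmags = np.full(50, 10.0)                    # synthetic model mags
nparams = 5  # trapezoid model: period, epoch, depth, duration, ingress duration

fitchisq = np.sum(((fitmags - pmags) * (fitmags - pmags)) / (perrs * perrs))
fitredchisq = fitchisq / (len(pmags) - nparams - 1)
print(fitchisq, fitredchisq)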
PmagPy/PmagPy
|
dialogs/pmag_er_magic_dialogs.py
|
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_er_magic_dialogs.py#L981-L993
|
def on_helpButton(self, event, page=None):
"""shows html help page"""
# for use on the command line:
path = find_pmag_dir.get_pmag_dir()
# for use with pyinstaller
#path = self.main_frame.resource_dir
help_page = os.path.join(path, 'dialogs', 'help_files', page)
# if using with py2app, the directory structure is flat,
# so check to see where the resource actually is
if not os.path.exists(help_page):
help_page = os.path.join(path, 'help_files', page)
html_frame = pw.HtmlFrame(self, page=help_page)
html_frame.Show()
|
[
"def",
"on_helpButton",
"(",
"self",
",",
"event",
",",
"page",
"=",
"None",
")",
":",
"# for use on the command line:",
"path",
"=",
"find_pmag_dir",
".",
"get_pmag_dir",
"(",
")",
"# for use with pyinstaller",
"#path = self.main_frame.resource_dir",
"help_page",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'dialogs'",
",",
"'help_files'",
",",
"page",
")",
"# if using with py2app, the directory structure is flat,",
"# so check to see where the resource actually is",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"help_page",
")",
":",
"help_page",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'help_files'",
",",
"page",
")",
"html_frame",
"=",
"pw",
".",
"HtmlFrame",
"(",
"self",
",",
"page",
"=",
"help_page",
")",
"html_frame",
".",
"Show",
"(",
")"
] |
shows html help page
|
[
"shows",
"html",
"help",
"page"
] |
python
|
train
|
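The on_helpButton row resolves its help page with a two-step fallback: the nested dialogs/help_files layout first, then the flattened layout used by py2app bundles. The same lookup isolated as a sketch, with hypothetical paths:

import os

def find_help_page(base, page):
    # try the nested layout first, then the flattened py2app layout
    candidate = os.path.join(base, "dialogs", "help_files", page)
    if not os.path.exists(candidate):
        candidate = os.path.join(base, "help_files", page)
    return candidate

print(find_help_page("/tmp/pmagpy", "intro.html"))  # hypothetical base directory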
palantir/typedjsonrpc
|
typedjsonrpc/server.py
|
https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/typedjsonrpc/server.py#L177-L194
|
def debug_application(self, environ, start_response):
"""Run the application and preserve the traceback frames.
:param environ: The environment which is passed into the wsgi application
:type environ: dict[str, object]
:param start_response: The start_response function of the wsgi application
:type start_response: (str, list[(str, str)]) -> None
:rtype: generator[str]
.. versionadded:: 0.1.0
"""
adapter = self._debug_map.bind_to_environ(environ)
if adapter.test():
_, args = adapter.match()
return self.handle_debug(environ, start_response, args["traceback_id"])
else:
return super(DebuggedJsonRpcApplication, self).debug_application(environ,
start_response)
|
[
"def",
"debug_application",
"(",
"self",
",",
"environ",
",",
"start_response",
")",
":",
"adapter",
"=",
"self",
".",
"_debug_map",
".",
"bind_to_environ",
"(",
"environ",
")",
"if",
"adapter",
".",
"test",
"(",
")",
":",
"_",
",",
"args",
"=",
"adapter",
".",
"match",
"(",
")",
"return",
"self",
".",
"handle_debug",
"(",
"environ",
",",
"start_response",
",",
"args",
"[",
"\"traceback_id\"",
"]",
")",
"else",
":",
"return",
"super",
"(",
"DebuggedJsonRpcApplication",
",",
"self",
")",
".",
"debug_application",
"(",
"environ",
",",
"start_response",
")"
] |
Run the application and preserve the traceback frames.
:param environ: The environment which is passed into the wsgi application
:type environ: dict[str, object]
:param start_response: The start_response function of the wsgi application
:type start_response: (str, list[(str, str)]) -> None
:rtype: generator[str]
.. versionadded:: 0.1.0
|
[
"Run",
"the",
"application",
"and",
"preserve",
"the",
"traceback",
"frames",
"."
] |
python
|
train
|
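debug_application above leans on werkzeug's routing: bind_to_environ builds a MapAdapter, test() says whether any rule matches the request, and match() returns the endpoint plus extracted arguments. A standalone sketch of that flow; the rule path here is made up, not typedjsonrpc's actual debug route:

from werkzeug.routing import Map, Rule

debug_map = Map([Rule("/debug/<int:traceback_id>", endpoint="debug")])  # hypothetical rule
environ = {"PATH_INFO": "/debug/7", "REQUEST_METHOD": "GET",
           "SERVER_NAME": "localhost", "SERVER_PORT": "80", "wsgi.url_scheme": "http"}
adapter = debug_map.bind_to_environ(environ)
if adapter.test():  # True only when some rule matches the environ's path
    _, args = adapter.match()
    print(args["traceback_id"])  # 7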
coin-or/GiMPy
|
src/gimpy/graph.py
|
https://github.com/coin-or/GiMPy/blob/51853122a50eb6019d06bbdedbfc396a833b5a22/src/gimpy/graph.py#L2349-L2403
|
def simplex_determine_leaving_arc(self, t, k, l):
'''
API:
simplex_determine_leaving_arc(self, t, k, l)
Description:
Determines and returns the leaving arc.
Input:
t: current spanning tree solution.
k: tail of the entering arc.
l: head of the entering arc.
Return:
Returns the tuple that represents leaving arc, capacity of the
cycle and cycle.
'''
# k,l are the first two elements of the cycle
cycle = self.simplex_identify_cycle(t, k, l)
flow_kl = self.get_edge_attr(k, l, 'flow')
capacity_kl = self.get_edge_attr(k, l, 'capacity')
min_capacity = capacity_kl
# check if k,l is in U or L
if flow_kl==capacity_kl:
# l,k will be the last two elements
cycle.reverse()
n = len(cycle)
index = 0
# determine last blocking arc
t.add_edge(k, l)
tel = t.get_edge_list()
while index < (n-1):
if (cycle[index], cycle[index+1]) in tel:
flow = self.edge_attr[(cycle[index], cycle[index+1])]['flow']
capacity = \
self.edge_attr[(cycle[index],cycle[index+1])]['capacity']
if min_capacity >= (capacity-flow):
candidate = (cycle[index], cycle[index+1])
min_capacity = capacity-flow
else:
flow = self.edge_attr[(cycle[index+1], cycle[index])]['flow']
if min_capacity >= flow:
candidate = (cycle[index+1], cycle[index])
min_capacity = flow
index += 1
# check arc (cycle[n-1], cycle[0])
if (cycle[n-1], cycle[0]) in tel:
flow = self.edge_attr[(cycle[n-1], cycle[0])]['flow']
capacity = self.edge_attr[(cycle[n-1], cycle[0])]['capacity']
if min_capacity >= (capacity-flow):
candidate = (cycle[n-1], cycle[0])
min_capacity = capacity-flow
else:
flow = self.edge_attr[(cycle[0], cycle[n-1])]['flow']
if min_capacity >= flow:
candidate = (cycle[0], cycle[n-1])
min_capacity = flow
return (candidate, min_capacity, cycle)
|
[
"def",
"simplex_determine_leaving_arc",
"(",
"self",
",",
"t",
",",
"k",
",",
"l",
")",
":",
"# k,l are the first two elements of the cycle",
"cycle",
"=",
"self",
".",
"simplex_identify_cycle",
"(",
"t",
",",
"k",
",",
"l",
")",
"flow_kl",
"=",
"self",
".",
"get_edge_attr",
"(",
"k",
",",
"l",
",",
"'flow'",
")",
"capacity_kl",
"=",
"self",
".",
"get_edge_attr",
"(",
"k",
",",
"l",
",",
"'capacity'",
")",
"min_capacity",
"=",
"capacity_kl",
"# check if k,l is in U or L",
"if",
"flow_kl",
"==",
"capacity_kl",
":",
"# l,k will be the last two elements",
"cycle",
".",
"reverse",
"(",
")",
"n",
"=",
"len",
"(",
"cycle",
")",
"index",
"=",
"0",
"# determine last blocking arc",
"t",
".",
"add_edge",
"(",
"k",
",",
"l",
")",
"tel",
"=",
"t",
".",
"get_edge_list",
"(",
")",
"while",
"index",
"<",
"(",
"n",
"-",
"1",
")",
":",
"if",
"(",
"cycle",
"[",
"index",
"]",
",",
"cycle",
"[",
"index",
"+",
"1",
"]",
")",
"in",
"tel",
":",
"flow",
"=",
"self",
".",
"edge_attr",
"[",
"(",
"cycle",
"[",
"index",
"]",
",",
"cycle",
"[",
"index",
"+",
"1",
"]",
")",
"]",
"[",
"'flow'",
"]",
"capacity",
"=",
"self",
".",
"edge_attr",
"[",
"(",
"cycle",
"[",
"index",
"]",
",",
"cycle",
"[",
"index",
"+",
"1",
"]",
")",
"]",
"[",
"'capacity'",
"]",
"if",
"min_capacity",
">=",
"(",
"capacity",
"-",
"flow",
")",
":",
"candidate",
"=",
"(",
"cycle",
"[",
"index",
"]",
",",
"cycle",
"[",
"index",
"+",
"1",
"]",
")",
"min_capacity",
"=",
"capacity",
"-",
"flow",
"else",
":",
"flow",
"=",
"self",
".",
"edge_attr",
"[",
"(",
"cycle",
"[",
"index",
"+",
"1",
"]",
",",
"cycle",
"[",
"index",
"]",
")",
"]",
"[",
"'flow'",
"]",
"if",
"min_capacity",
">=",
"flow",
":",
"candidate",
"=",
"(",
"cycle",
"[",
"index",
"+",
"1",
"]",
",",
"cycle",
"[",
"index",
"]",
")",
"min_capacity",
"=",
"flow",
"index",
"+=",
"1",
"# check arc (cycle[n-1], cycle[0])",
"if",
"(",
"cycle",
"[",
"n",
"-",
"1",
"]",
",",
"cycle",
"[",
"0",
"]",
")",
"in",
"tel",
":",
"flow",
"=",
"self",
".",
"edge_attr",
"[",
"(",
"cycle",
"[",
"n",
"-",
"1",
"]",
",",
"cycle",
"[",
"0",
"]",
")",
"]",
"[",
"'flow'",
"]",
"capacity",
"=",
"self",
".",
"edge_attr",
"[",
"(",
"cycle",
"[",
"n",
"-",
"1",
"]",
",",
"cycle",
"[",
"0",
"]",
")",
"]",
"[",
"'capacity'",
"]",
"if",
"min_capacity",
">=",
"(",
"capacity",
"-",
"flow",
")",
":",
"candidate",
"=",
"(",
"cycle",
"[",
"n",
"-",
"1",
"]",
",",
"cycle",
"[",
"0",
"]",
")",
"min_capacity",
"=",
"capacity",
"-",
"flow",
"else",
":",
"flow",
"=",
"self",
".",
"edge_attr",
"[",
"(",
"cycle",
"[",
"0",
"]",
",",
"cycle",
"[",
"n",
"-",
"1",
"]",
")",
"]",
"[",
"'flow'",
"]",
"if",
"min_capacity",
">=",
"flow",
":",
"candidate",
"=",
"(",
"cycle",
"[",
"0",
"]",
",",
"cycle",
"[",
"n",
"-",
"1",
"]",
")",
"min_capacity",
"=",
"flow",
"return",
"(",
"candidate",
",",
"min_capacity",
",",
"cycle",
")"
] |
API:
simplex_determine_leaving_arc(self, t, k, l)
Description:
Determines and returns the leaving arc.
Input:
t: current spanning tree solution.
k: tail of the entering arc.
l: head of the entering arc.
Return:
Returns the tuple that represents leaving arc, capacity of the
cycle and cycle.
|
[
"API",
":",
"simplex_determine_leaving_arc",
"(",
"self",
"t",
"k",
"l",
")",
"Description",
":",
"Determines",
"and",
"returns",
"the",
"leaving",
"arc",
".",
"Input",
":",
"t",
":",
"current",
"spanning",
"tree",
"solution",
".",
"k",
":",
"tail",
"of",
"the",
"entering",
"arc",
".",
"l",
":",
"head",
"of",
"the",
"entering",
"arc",
".",
"Return",
":",
"Returns",
"the",
"tuple",
"that",
"represents",
"leaving",
"arc",
"capacity",
"of",
"the",
"cycle",
"and",
"cycle",
"."
] |
python
|
train
|
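simplex_determine_leaving_arc above is the network-simplex ratio test: walking the cycle once, a forward arc can absorb capacity - flow more units while a backward arc can give up flow, and the arc attaining the minimum leaves the basis. The same test reduced to a plain list of arcs, with hypothetical bookkeeping tuples:

def leaving_arc(arcs):
    # arcs: (name, forward, flow, capacity); forward arcs block at capacity - flow,
    # backward arcs at flow, mirroring the row above
    candidate, min_capacity = None, float("inf")
    for name, forward, flow, capacity in arcs:
        room = (capacity - flow) if forward else flow
        if room <= min_capacity:  # ties keep the later arc, like the row's >= comparisons
            candidate, min_capacity = name, room
    return candidate, min_capacity

print(leaving_arc([("kl", True, 2, 5), ("ab", False, 1, 4), ("bc", True, 6, 6)]))
# ('bc', 0)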
ska-sa/katcp-python
|
katcp/client.py
|
https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/client.py#L821-L843
|
def start(self, timeout=None):
"""Start the client in a new thread.
Parameters
----------
timeout : float in seconds
Seconds to wait for client thread to start. Do not specify a
timeout if start() is being called from the same ioloop that this
client will be installed on, since it will block the ioloop without
progressing.
"""
if self._running.isSet():
raise RuntimeError("Device client already started.")
# Make sure we have an ioloop
self.ioloop = self._ioloop_manager.get_ioloop()
if timeout:
t0 = self.ioloop.time()
self._ioloop_manager.start(timeout)
self.ioloop.add_callback(self._install)
if timeout:
remaining_timeout = timeout - (self.ioloop.time() - t0)
self.wait_running(remaining_timeout)
|
[
"def",
"start",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"if",
"self",
".",
"_running",
".",
"isSet",
"(",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Device client already started.\"",
")",
"# Make sure we have an ioloop",
"self",
".",
"ioloop",
"=",
"self",
".",
"_ioloop_manager",
".",
"get_ioloop",
"(",
")",
"if",
"timeout",
":",
"t0",
"=",
"self",
".",
"ioloop",
".",
"time",
"(",
")",
"self",
".",
"_ioloop_manager",
".",
"start",
"(",
"timeout",
")",
"self",
".",
"ioloop",
".",
"add_callback",
"(",
"self",
".",
"_install",
")",
"if",
"timeout",
":",
"remaining_timeout",
"=",
"timeout",
"-",
"(",
"self",
".",
"ioloop",
".",
"time",
"(",
")",
"-",
"t0",
")",
"self",
".",
"wait_running",
"(",
"remaining_timeout",
")"
] |
Start the client in a new thread.
Parameters
----------
timeout : float in seconds
Seconds to wait for client thread to start. Do not specify a
timeout if start() is being called from the same ioloop that this
client will be installed on, since it will block the ioloop without
progressing.
|
[
"Start",
"the",
"client",
"in",
"a",
"new",
"thread",
"."
] |
python
|
train
|
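The katcp start row shows a small budgeting pattern that is easy to get wrong: one timeout covers two blocking steps, so it records t0 before the first and hands the second only the remainder. A sketch with time.monotonic standing in for the ioloop clock:

import time

def blocking_step(budget):
    time.sleep(min(0.05, max(budget, 0)))  # hypothetical blocking work

timeout = 1.0
t0 = time.monotonic()
blocking_step(timeout)                                # first wait spends part of the budget
remaining_timeout = timeout - (time.monotonic() - t0)
blocking_step(remaining_timeout)                      # second wait gets only what is left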