repo (string) | path (string) | url (string) | code (string) | code_tokens (list) | docstring (string) | docstring_tokens (list) | language (1 class) | partition (3 classes)
---|---|---|---|---|---|---|---|---|
wummel/linkchecker
|
linkcheck/fileutil.py
|
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/fileutil.py#L31-L57
|
def write_file (filename, content, backup=False, callback=None):
"""Overwrite a possibly existing file with new content. Do this
in a manner that does not leave truncated or broken files behind.
@param filename: name of file to write
@type filename: string
@param content: file content to write
@type content: string
@param backup: if backup file should be left
@type backup: bool
@param callback: non-default storage function
@type callback: None or function taking two parameters (fileobj, content)
"""
# first write in a temp file
f = file(filename+".tmp", 'wb')
if callback is None:
f.write(content)
else:
callback(f, content)
f.close()
# move orig file to backup
if os.path.exists(filename):
os.rename(filename, filename+".bak")
# move temp file to orig
os.rename(filename+".tmp", filename)
# remove backup
if not backup and os.path.exists(filename+".bak"):
os.remove(filename+".bak")
|
[
"def",
"write_file",
"(",
"filename",
",",
"content",
",",
"backup",
"=",
"False",
",",
"callback",
"=",
"None",
")",
":",
"# first write in a temp file",
"f",
"=",
"file",
"(",
"filename",
"+",
"\".tmp\"",
",",
"'wb'",
")",
"if",
"callback",
"is",
"None",
":",
"f",
".",
"write",
"(",
"content",
")",
"else",
":",
"callback",
"(",
"f",
",",
"content",
")",
"f",
".",
"close",
"(",
")",
"# move orig file to backup",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"os",
".",
"rename",
"(",
"filename",
",",
"filename",
"+",
"\".bak\"",
")",
"# move temp file to orig",
"os",
".",
"rename",
"(",
"filename",
"+",
"\".tmp\"",
",",
"filename",
")",
"# remove backup",
"if",
"not",
"backup",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
"+",
"\".bak\"",
")",
":",
"os",
".",
"remove",
"(",
"filename",
"+",
"\".bak\"",
")"
] |
Overwrite a possibly existing file with new content. Do this
in a manner that does not leave truncated or broken files behind.
@param filename: name of file to write
@type filename: string
@param content: file content to write
@type content: string
@param backup: if backup file should be left
@type backup: bool
@param callback: non-default storage function
@type callback: None or function taking two parameters (fileobj, content)
|
[
"Overwrite",
"a",
"possibly",
"existing",
"file",
"with",
"new",
"content",
".",
"Do",
"this",
"in",
"a",
"manner",
"that",
"does",
"not",
"leave",
"truncated",
"or",
"broken",
"files",
"behind",
"."
] |
python
|
train
|
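The `write_file` row above is Python 2 code: the `file()` builtin no longer exists in Python 3. A minimal sketch of the same write-to-temp-then-rename pattern for Python 3 follows; `os.replace` is used for the final move because it overwrites atomically on both POSIX and Windows.

```python
import os

def write_file_py3(filename, content, backup=False, callback=None):
    """Python 3 sketch of the same atomic-overwrite pattern."""
    tmp = filename + ".tmp"
    with open(tmp, "wb") as f:  # file() was removed in Python 3
        if callback is None:
            f.write(content)
        else:
            callback(f, content)
    if os.path.exists(filename):
        os.rename(filename, filename + ".bak")
    os.replace(tmp, filename)  # atomic replacement of the original name
    if not backup and os.path.exists(filename + ".bak"):
        os.remove(filename + ".bak")
```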
vpelletier/python-libusb1
|
usb1/__init__.py
|
https://github.com/vpelletier/python-libusb1/blob/740c9778e28523e4ec3543415d95f5400ae0fa24/usb1/__init__.py#L692-L710
|
def getISOBufferList(self):
"""
Get individual ISO transfer's buffer.
Returns a list with one item per ISO transfer, with their
individually-configured sizes.
Returned list is consistent with getISOSetupList return value.
Should not be called on a submitted transfer.
See also iterISO.
"""
transfer_p = self.__transfer
transfer = transfer_p.contents
# pylint: disable=undefined-variable
if transfer.type != TRANSFER_TYPE_ISOCHRONOUS:
# pylint: enable=undefined-variable
raise TypeError(
'This method cannot be called on non-iso transfers.'
)
return libusb1.get_iso_packet_buffer_list(transfer_p)
|
[
"def",
"getISOBufferList",
"(",
"self",
")",
":",
"transfer_p",
"=",
"self",
".",
"__transfer",
"transfer",
"=",
"transfer_p",
".",
"contents",
"# pylint: disable=undefined-variable",
"if",
"transfer",
".",
"type",
"!=",
"TRANSFER_TYPE_ISOCHRONOUS",
":",
"# pylint: enable=undefined-variable",
"raise",
"TypeError",
"(",
"'This method cannot be called on non-iso transfers.'",
")",
"return",
"libusb1",
".",
"get_iso_packet_buffer_list",
"(",
"transfer_p",
")"
] |
Get individual ISO transfer's buffer.
Returns a list with one item per ISO transfer, with their
individually-configured sizes.
Returned list is consistent with getISOSetupList return value.
Should not be called on a submitted transfer.
See also iterISO.
|
[
"Get",
"individual",
"ISO",
"transfer",
"s",
"buffer",
".",
"Returns",
"a",
"list",
"with",
"one",
"item",
"per",
"ISO",
"transfer",
"with",
"their",
"individually",
"-",
"configured",
"sizes",
".",
"Returned",
"list",
"is",
"consistent",
"with",
"getISOSetupList",
"return",
"value",
".",
"Should",
"not",
"be",
"called",
"on",
"a",
"submitted",
"transfer",
"."
] |
python
|
train
|
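A hedged usage sketch for `getISOBufferList` with python-libusb1; the vendor/product IDs and the IN endpoint address are placeholders for real hardware, and per the docstring the buffers are read before the transfer is submitted.

```python
import usb1  # python-libusb1

VENDOR_ID, PRODUCT_ID, ISO_ENDPOINT = 0x1234, 0x5678, 0x81  # placeholders

with usb1.USBContext() as context:
    handle = context.openByVendorIDAndProductID(VENDOR_ID, PRODUCT_ID)
    transfer = handle.getTransfer(iso_packets=8)
    transfer.setIsochronous(ISO_ENDPOINT, 8 * 512)  # 8 packets, 512 bytes each
    for packet_buffer in transfer.getISOBufferList():  # one buffer per packet
        print(len(packet_buffer))
```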
pytorch/ignite
|
ignite/contrib/handlers/param_scheduler.py
|
https://github.com/pytorch/ignite/blob/a96bd07cb58822cfb39fd81765135712f1db41ca/ignite/contrib/handlers/param_scheduler.py#L460-L481
|
def simulate_values(cls, num_events, lr_scheduler, **kwargs):
"""Method to simulate scheduled values during num_events events.
Args:
num_events (int): number of events during the simulation.
lr_scheduler (subclass of `torch.optim.lr_scheduler._LRScheduler`): lr_scheduler object to wrap.
Returns:
list of pairs: [event_index, value]
"""
# This scheduler uses `torch.optim.lr_scheduler._LRScheduler` which
# should be replicated in order to simulate LR values and
# not perturb original scheduler.
copy_lr_scheduler = LRScheduler._replicate_lr_scheduler(lr_scheduler)
values = []
scheduler = cls(save_history=False, lr_scheduler=copy_lr_scheduler)
for i in range(num_events):
scheduler(engine=None)
values.append([i, scheduler.optimizer_param_groups[0][scheduler.param_name]])
return values
|
[
"def",
"simulate_values",
"(",
"cls",
",",
"num_events",
",",
"lr_scheduler",
",",
"*",
"*",
"kwargs",
")",
":",
"# This scheduler uses `torch.optim.lr_scheduler._LRScheduler` which",
"# should be replicated in order to simulate LR values and",
"# not perturb original scheduler.",
"copy_lr_scheduler",
"=",
"LRScheduler",
".",
"_replicate_lr_scheduler",
"(",
"lr_scheduler",
")",
"values",
"=",
"[",
"]",
"scheduler",
"=",
"cls",
"(",
"save_history",
"=",
"False",
",",
"lr_scheduler",
"=",
"copy_lr_scheduler",
")",
"for",
"i",
"in",
"range",
"(",
"num_events",
")",
":",
"scheduler",
"(",
"engine",
"=",
"None",
")",
"values",
".",
"append",
"(",
"[",
"i",
",",
"scheduler",
".",
"optimizer_param_groups",
"[",
"0",
"]",
"[",
"scheduler",
".",
"param_name",
"]",
"]",
")",
"return",
"values"
] |
Method to simulate scheduled values during num_events events.
Args:
num_events (int): number of events during the simulation.
lr_scheduler (subclass of `torch.optim.lr_scheduler._LRScheduler`): lr_scheduler object to wrap.
Returns:
list of pairs: [event_index, value]
|
[
"Method",
"to",
"simulate",
"scheduled",
"values",
"during",
"num_events",
"events",
"."
] |
python
|
train
|
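A self-contained sketch of calling `simulate_values` on the `LRScheduler` wrapper shown above, assuming the contrib-handlers namespace from this ignite version:

```python
import torch
from torch.optim.lr_scheduler import StepLR
from ignite.contrib.handlers.param_scheduler import LRScheduler

param = torch.zeros(1, requires_grad=True)
optimizer = torch.optim.SGD([param], lr=0.1)
torch_scheduler = StepLR(optimizer, step_size=3, gamma=0.1)

# Preview the learning rate over 10 events without perturbing the live scheduler.
for event_index, lr in LRScheduler.simulate_values(num_events=10,
                                                   lr_scheduler=torch_scheduler):
    print(event_index, lr)
```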
MacHu-GWU/docfly-project
|
docfly/api_reference_doc.py
|
https://github.com/MacHu-GWU/docfly-project/blob/46da8a9793211301c3ebc12d195228dbf79fdfec/docfly/api_reference_doc.py#L144-L170
|
def generate_package_content(self, package):
"""Generate package.rst text content.
::
{{ package_name }}
==================
.. automodule:: {{ package_name }}
:members:
sub packages and modules
------------------------
.. toctree::
:maxdepth: 1
{{ sub_package_name1 }} <{{ sub_package_name1 }}/__init__>
{{ sub_package_name2 }} <{{ sub_package_name2 }}/__init__>
{{ sub_module_name1}} <{{ sub_module_name1}}>
{{ sub_module_name2}} <{{ sub_module_name2}}>
"""
if isinstance(package, Package):
return package.render(ignored_package=self.ignored_package)
else: # pragma: no cover
raise Exception("%r is not a Package object" % package)
|
[
"def",
"generate_package_content",
"(",
"self",
",",
"package",
")",
":",
"if",
"isinstance",
"(",
"package",
",",
"Package",
")",
":",
"return",
"package",
".",
"render",
"(",
"ignored_package",
"=",
"self",
".",
"ignored_package",
")",
"else",
":",
"# pragma: no cover",
"raise",
"Exception",
"(",
"\"%r is not a Package object\"",
"%",
"package",
")"
] |
Generate package.rst text content.
::
{{ package_name }}
==================
.. automodule:: {{ package_name }}
:members:
sub packages and modules
------------------------
.. toctree::
:maxdepth: 1
{{ sub_package_name1 }} <{{ sub_package_name1 }}/__init__>
{{ sub_package_name2 }} <{{ sub_package_name2 }}/__init__>
{{ sub_module_name1}} <{{ sub_module_name1}}>
{{ sub_module_name2}} <{{ sub_module_name2}}>
|
[
"Generate",
"package",
".",
"rst",
"text",
"content",
"."
] |
python
|
train
|
gabstopper/smc-python
|
smc/core/node.py
|
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/core/node.py#L532-L546
|
def change_ssh_pwd(self, pwd=None, comment=None):
"""
Executes a change SSH password operation on the specified node
:param str pwd: changed password value
:param str comment: optional comment for audit log
:raises NodeCommandFailed: cannot change ssh password
:return: None
"""
self.make_request(
NodeCommandFailed,
method='update',
resource='change_ssh_pwd',
params={'comment': comment},
json={'value': pwd})
|
[
"def",
"change_ssh_pwd",
"(",
"self",
",",
"pwd",
"=",
"None",
",",
"comment",
"=",
"None",
")",
":",
"self",
".",
"make_request",
"(",
"NodeCommandFailed",
",",
"method",
"=",
"'update'",
",",
"resource",
"=",
"'change_ssh_pwd'",
",",
"params",
"=",
"{",
"'comment'",
":",
"comment",
"}",
",",
"json",
"=",
"{",
"'value'",
":",
"pwd",
"}",
")"
] |
Executes a change SSH password operation on the specified node
:param str pwd: changed password value
:param str comment: optional comment for audit log
:raises NodeCommandFailed: cannot change ssh password
:return: None
|
[
"Executes",
"a",
"change",
"SSH",
"password",
"operation",
"on",
"the",
"specified",
"node"
] |
python
|
train
|
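A usage sketch for `change_ssh_pwd`; the SMC URL, API key, and engine name are placeholders, and the session setup follows the pattern documented for smc-python.

```python
from smc import session
from smc.core.engine import Engine

# Placeholder URL and key; supply real SMC connection details.
session.login(url='http://smc.example.com:8082', api_key='...')
try:
    for node in Engine('myfirewall').nodes:
        node.change_ssh_pwd(pwd='N3w-Passw0rd', comment='quarterly rotation')
finally:
    session.logout()
```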
pandas-dev/pandas
|
pandas/plotting/_misc.py
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_misc.py#L14-L133
|
def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False,
diagonal='hist', marker='.', density_kwds=None,
hist_kwds=None, range_padding=0.05, **kwds):
"""
Draw a matrix of scatter plots.
Parameters
----------
frame : DataFrame
alpha : float, optional
amount of transparency applied
figsize : (float,float), optional
a tuple (width, height) in inches
ax : Matplotlib axis object, optional
grid : bool, optional
setting this to True will show the grid
diagonal : {'hist', 'kde'}
pick between 'kde' and 'hist' for
either Kernel Density Estimation or Histogram
plot in the diagonal
marker : str, optional
Matplotlib marker type, default '.'
hist_kwds : other plotting keyword arguments
To be passed to hist function
density_kwds : other plotting keyword arguments
To be passed to kernel density estimate plot
range_padding : float, optional
relative extension of axis range in x and y
with respect to (x_max - x_min) or (y_max - y_min),
default 0.05
kwds : other plotting keyword arguments
To be passed to scatter function
Examples
--------
>>> df = pd.DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D'])
>>> scatter_matrix(df, alpha=0.2)
"""
df = frame._get_numeric_data()
n = df.columns.size
naxes = n * n
fig, axes = _subplots(naxes=naxes, figsize=figsize, ax=ax,
squeeze=False)
# no gaps between subplots
fig.subplots_adjust(wspace=0, hspace=0)
mask = notna(df)
marker = _get_marker_compat(marker)
hist_kwds = hist_kwds or {}
density_kwds = density_kwds or {}
# GH 14855
kwds.setdefault('edgecolors', 'none')
boundaries_list = []
for a in df.columns:
values = df[a].values[mask[a].values]
rmin_, rmax_ = np.min(values), np.max(values)
rdelta_ext = (rmax_ - rmin_) * range_padding / 2.
boundaries_list.append((rmin_ - rdelta_ext, rmax_ + rdelta_ext))
for i, a in zip(lrange(n), df.columns):
for j, b in zip(lrange(n), df.columns):
ax = axes[i, j]
if i == j:
values = df[a].values[mask[a].values]
# Deal with the diagonal by drawing a histogram there.
if diagonal == 'hist':
ax.hist(values, **hist_kwds)
elif diagonal in ('kde', 'density'):
from scipy.stats import gaussian_kde
y = values
gkde = gaussian_kde(y)
ind = np.linspace(y.min(), y.max(), 1000)
ax.plot(ind, gkde.evaluate(ind), **density_kwds)
ax.set_xlim(boundaries_list[i])
else:
common = (mask[a] & mask[b]).values
ax.scatter(df[b][common], df[a][common],
marker=marker, alpha=alpha, **kwds)
ax.set_xlim(boundaries_list[j])
ax.set_ylim(boundaries_list[i])
ax.set_xlabel(b)
ax.set_ylabel(a)
if j != 0:
ax.yaxis.set_visible(False)
if i != n - 1:
ax.xaxis.set_visible(False)
if len(df.columns) > 1:
lim1 = boundaries_list[0]
locs = axes[0][1].yaxis.get_majorticklocs()
locs = locs[(lim1[0] <= locs) & (locs <= lim1[1])]
adj = (locs - lim1[0]) / (lim1[1] - lim1[0])
lim0 = axes[0][0].get_ylim()
adj = adj * (lim0[1] - lim0[0]) + lim0[0]
axes[0][0].yaxis.set_ticks(adj)
if np.all(locs == locs.astype(int)):
# if all ticks are int
locs = locs.astype(int)
axes[0][0].yaxis.set_ticklabels(locs)
_set_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)
return axes
|
[
"def",
"scatter_matrix",
"(",
"frame",
",",
"alpha",
"=",
"0.5",
",",
"figsize",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"grid",
"=",
"False",
",",
"diagonal",
"=",
"'hist'",
",",
"marker",
"=",
"'.'",
",",
"density_kwds",
"=",
"None",
",",
"hist_kwds",
"=",
"None",
",",
"range_padding",
"=",
"0.05",
",",
"*",
"*",
"kwds",
")",
":",
"df",
"=",
"frame",
".",
"_get_numeric_data",
"(",
")",
"n",
"=",
"df",
".",
"columns",
".",
"size",
"naxes",
"=",
"n",
"*",
"n",
"fig",
",",
"axes",
"=",
"_subplots",
"(",
"naxes",
"=",
"naxes",
",",
"figsize",
"=",
"figsize",
",",
"ax",
"=",
"ax",
",",
"squeeze",
"=",
"False",
")",
"# no gaps between subplots",
"fig",
".",
"subplots_adjust",
"(",
"wspace",
"=",
"0",
",",
"hspace",
"=",
"0",
")",
"mask",
"=",
"notna",
"(",
"df",
")",
"marker",
"=",
"_get_marker_compat",
"(",
"marker",
")",
"hist_kwds",
"=",
"hist_kwds",
"or",
"{",
"}",
"density_kwds",
"=",
"density_kwds",
"or",
"{",
"}",
"# GH 14855",
"kwds",
".",
"setdefault",
"(",
"'edgecolors'",
",",
"'none'",
")",
"boundaries_list",
"=",
"[",
"]",
"for",
"a",
"in",
"df",
".",
"columns",
":",
"values",
"=",
"df",
"[",
"a",
"]",
".",
"values",
"[",
"mask",
"[",
"a",
"]",
".",
"values",
"]",
"rmin_",
",",
"rmax_",
"=",
"np",
".",
"min",
"(",
"values",
")",
",",
"np",
".",
"max",
"(",
"values",
")",
"rdelta_ext",
"=",
"(",
"rmax_",
"-",
"rmin_",
")",
"*",
"range_padding",
"/",
"2.",
"boundaries_list",
".",
"append",
"(",
"(",
"rmin_",
"-",
"rdelta_ext",
",",
"rmax_",
"+",
"rdelta_ext",
")",
")",
"for",
"i",
",",
"a",
"in",
"zip",
"(",
"lrange",
"(",
"n",
")",
",",
"df",
".",
"columns",
")",
":",
"for",
"j",
",",
"b",
"in",
"zip",
"(",
"lrange",
"(",
"n",
")",
",",
"df",
".",
"columns",
")",
":",
"ax",
"=",
"axes",
"[",
"i",
",",
"j",
"]",
"if",
"i",
"==",
"j",
":",
"values",
"=",
"df",
"[",
"a",
"]",
".",
"values",
"[",
"mask",
"[",
"a",
"]",
".",
"values",
"]",
"# Deal with the diagonal by drawing a histogram there.",
"if",
"diagonal",
"==",
"'hist'",
":",
"ax",
".",
"hist",
"(",
"values",
",",
"*",
"*",
"hist_kwds",
")",
"elif",
"diagonal",
"in",
"(",
"'kde'",
",",
"'density'",
")",
":",
"from",
"scipy",
".",
"stats",
"import",
"gaussian_kde",
"y",
"=",
"values",
"gkde",
"=",
"gaussian_kde",
"(",
"y",
")",
"ind",
"=",
"np",
".",
"linspace",
"(",
"y",
".",
"min",
"(",
")",
",",
"y",
".",
"max",
"(",
")",
",",
"1000",
")",
"ax",
".",
"plot",
"(",
"ind",
",",
"gkde",
".",
"evaluate",
"(",
"ind",
")",
",",
"*",
"*",
"density_kwds",
")",
"ax",
".",
"set_xlim",
"(",
"boundaries_list",
"[",
"i",
"]",
")",
"else",
":",
"common",
"=",
"(",
"mask",
"[",
"a",
"]",
"&",
"mask",
"[",
"b",
"]",
")",
".",
"values",
"ax",
".",
"scatter",
"(",
"df",
"[",
"b",
"]",
"[",
"common",
"]",
",",
"df",
"[",
"a",
"]",
"[",
"common",
"]",
",",
"marker",
"=",
"marker",
",",
"alpha",
"=",
"alpha",
",",
"*",
"*",
"kwds",
")",
"ax",
".",
"set_xlim",
"(",
"boundaries_list",
"[",
"j",
"]",
")",
"ax",
".",
"set_ylim",
"(",
"boundaries_list",
"[",
"i",
"]",
")",
"ax",
".",
"set_xlabel",
"(",
"b",
")",
"ax",
".",
"set_ylabel",
"(",
"a",
")",
"if",
"j",
"!=",
"0",
":",
"ax",
".",
"yaxis",
".",
"set_visible",
"(",
"False",
")",
"if",
"i",
"!=",
"n",
"-",
"1",
":",
"ax",
".",
"xaxis",
".",
"set_visible",
"(",
"False",
")",
"if",
"len",
"(",
"df",
".",
"columns",
")",
">",
"1",
":",
"lim1",
"=",
"boundaries_list",
"[",
"0",
"]",
"locs",
"=",
"axes",
"[",
"0",
"]",
"[",
"1",
"]",
".",
"yaxis",
".",
"get_majorticklocs",
"(",
")",
"locs",
"=",
"locs",
"[",
"(",
"lim1",
"[",
"0",
"]",
"<=",
"locs",
")",
"&",
"(",
"locs",
"<=",
"lim1",
"[",
"1",
"]",
")",
"]",
"adj",
"=",
"(",
"locs",
"-",
"lim1",
"[",
"0",
"]",
")",
"/",
"(",
"lim1",
"[",
"1",
"]",
"-",
"lim1",
"[",
"0",
"]",
")",
"lim0",
"=",
"axes",
"[",
"0",
"]",
"[",
"0",
"]",
".",
"get_ylim",
"(",
")",
"adj",
"=",
"adj",
"*",
"(",
"lim0",
"[",
"1",
"]",
"-",
"lim0",
"[",
"0",
"]",
")",
"+",
"lim0",
"[",
"0",
"]",
"axes",
"[",
"0",
"]",
"[",
"0",
"]",
".",
"yaxis",
".",
"set_ticks",
"(",
"adj",
")",
"if",
"np",
".",
"all",
"(",
"locs",
"==",
"locs",
".",
"astype",
"(",
"int",
")",
")",
":",
"# if all ticks are int",
"locs",
"=",
"locs",
".",
"astype",
"(",
"int",
")",
"axes",
"[",
"0",
"]",
"[",
"0",
"]",
".",
"yaxis",
".",
"set_ticklabels",
"(",
"locs",
")",
"_set_ticks_props",
"(",
"axes",
",",
"xlabelsize",
"=",
"8",
",",
"xrot",
"=",
"90",
",",
"ylabelsize",
"=",
"8",
",",
"yrot",
"=",
"0",
")",
"return",
"axes"
] |
Draw a matrix of scatter plots.
Parameters
----------
frame : DataFrame
alpha : float, optional
amount of transparency applied
figsize : (float,float), optional
a tuple (width, height) in inches
ax : Matplotlib axis object, optional
grid : bool, optional
setting this to True will show the grid
diagonal : {'hist', 'kde'}
pick between 'kde' and 'hist' for
either Kernel Density Estimation or Histogram
plot in the diagonal
marker : str, optional
Matplotlib marker type, default '.'
hist_kwds : other plotting keyword arguments
To be passed to hist function
density_kwds : other plotting keyword arguments
To be passed to kernel density estimate plot
range_padding : float, optional
relative extension of axis range in x and y
with respect to (x_max - x_min) or (y_max - y_min),
default 0.05
kwds : other plotting keyword arguments
To be passed to scatter function
Examples
--------
>>> df = pd.DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D'])
>>> scatter_matrix(df, alpha=0.2)
|
[
"Draw",
"a",
"matrix",
"of",
"scatter",
"plots",
"."
] |
python
|
train
|
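The docstring's own example above omits its imports; a self-contained variant using the public `pandas.plotting` entry point:

```python
import numpy as np
import pandas as pd
from pandas.plotting import scatter_matrix

df = pd.DataFrame(np.random.randn(1000, 4), columns=['A', 'B', 'C', 'D'])
# KDE curves on the diagonal, scatter plots everywhere else.
axes = scatter_matrix(df, alpha=0.2, figsize=(6, 6), diagonal='kde')
```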
mbanton/nose-mongoengine
|
nose_mongoengine/__init__.py
|
https://github.com/mbanton/nose-mongoengine/blob/3a06f52cd32f217b512af4a76d8ed4174185df59/nose_mongoengine/__init__.py#L125-L227
|
def configure(self, options, conf):
"""Parse the command line options and start an instance of mongodb
"""
# This option has to be specified on the command line, to enable the
# plugin.
if not options.mongoengine or options.mongodb_bin:
return
if not options.mongodb_bin:
self.mongodb_param['mongodb_bin'] = scan_path()
if self.mongodb_param['mongodb_bin'] is None:
raise AssertionError(
"Mongoengine plugin enabled, but no mongod on path, "
"please specify path to binary\n"
"ie. --mongoengine-mongodb=/path/to/mongod")
else:
self.mongodb_param['mongodb_bin'] = os.path.abspath(
os.path.expanduser(os.path.expandvars(options.mongodb_bin)))
if not os.path.exists(self.mongodb_param['mongodb_bin']):
raise AssertionError(
"Invalid mongodb binary %r" % \
self.mongodb_param['mongodb_bin'])
# Its necessary to enable in nose
self.enabled = True
db_log_path = os.path.expandvars(os.path.expanduser(
options.mongodb_logpath))
try:
db_file = open(db_log_path, "w")
db_file.close()
except Exception as exc:
raise AssertionError("Invalid log path %r" % exc)
if not options.mongodb_port:
self.mongodb_param['db_port'] = get_open_port()
else:
self.mongodb_param['db_port'] = options.mongodb_port
db_prealloc = options.mongodb_prealloc
db_scripting = options.mongodb_scripting
self.clear_context['module'] = options.mongoengine_clear_after_module
self.clear_context['class'] = options.mongoengine_clear_after_class
# generate random database name
self.database_name = str(uuid.uuid1())
#########################################
# Start a instance of mongo
#########################################
# Stores data here
self.mongodb_param['db_path'] = tempfile.mkdtemp()
if not os.path.exists(self.mongodb_param['db_path']):
os.mkdir(self.mongodb_param['db_path'])
args = [
self.mongodb_param['mongodb_bin'],
"--dbpath",
self.mongodb_param['db_path'],
"--port",
str(self.mongodb_param['db_port']),
# don't flood stdout, we're not reading it
"--quiet",
# save the port
"--nohttpinterface",
# disable unused.
"--nounixsocket",
# use a smaller default file size
"--smallfiles",
# journaling on by default in 2.0 and makes it to slow
# for tests, can causes failures in jenkins
"--nojournal",
# Default is /dev/null
"--logpath",
db_log_path,
"-vvvvv"
]
if not db_prealloc:
args.append("--noprealloc")
if not db_scripting:
args.append("--noscripting")
self.process = Popen(
args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
self._running = True
os.environ["TEST_MONGODB"] = "localhost:%s" % \
self.mongodb_param['db_port']
os.environ["TEST_MONGODB_DATABASE"] = self.database_name
# Give a moment for mongodb to finish coming up
time.sleep(0.3)
# Connecting using mongoengine
self.connection = connect(self.database_name, host="localhost",
port=self.mongodb_param['db_port'])
|
[
"def",
"configure",
"(",
"self",
",",
"options",
",",
"conf",
")",
":",
"# This option has to be specified on the command line, to enable the",
"# plugin.",
"if",
"not",
"options",
".",
"mongoengine",
"or",
"options",
".",
"mongodb_bin",
":",
"return",
"if",
"not",
"options",
".",
"mongodb_bin",
":",
"self",
".",
"mongodb_param",
"[",
"'mongodb_bin'",
"]",
"=",
"scan_path",
"(",
")",
"if",
"self",
".",
"mongodb_param",
"[",
"'mongodb_bin'",
"]",
"is",
"None",
":",
"raise",
"AssertionError",
"(",
"\"Mongoengine plugin enabled, but no mongod on path, \"",
"\"please specify path to binary\\n\"",
"\"ie. --mongoengine-mongodb=/path/to/mongod\"",
")",
"else",
":",
"self",
".",
"mongodb_param",
"[",
"'mongodb_bin'",
"]",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"os",
".",
"path",
".",
"expandvars",
"(",
"options",
".",
"mongodb_bin",
")",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"mongodb_param",
"[",
"'mongodb_bin'",
"]",
")",
":",
"raise",
"AssertionError",
"(",
"\"Invalid mongodb binary %r\"",
"%",
"self",
".",
"mongodb_param",
"[",
"'mongodb_bin'",
"]",
")",
"# Its necessary to enable in nose",
"self",
".",
"enabled",
"=",
"True",
"db_log_path",
"=",
"os",
".",
"path",
".",
"expandvars",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"options",
".",
"mongodb_logpath",
")",
")",
"try",
":",
"db_file",
"=",
"open",
"(",
"db_log_path",
",",
"\"w\"",
")",
"db_file",
".",
"close",
"(",
")",
"except",
"Exception",
"as",
"exc",
":",
"raise",
"AssertionError",
"(",
"\"Invalid log path %r\"",
"%",
"exc",
")",
"if",
"not",
"options",
".",
"mongodb_port",
":",
"self",
".",
"mongodb_param",
"[",
"'db_port'",
"]",
"=",
"get_open_port",
"(",
")",
"else",
":",
"self",
".",
"mongodb_param",
"[",
"'db_port'",
"]",
"=",
"options",
".",
"mongodb_port",
"db_prealloc",
"=",
"options",
".",
"mongodb_prealloc",
"db_scripting",
"=",
"options",
".",
"mongodb_scripting",
"self",
".",
"clear_context",
"[",
"'module'",
"]",
"=",
"options",
".",
"mongoengine_clear_after_module",
"self",
".",
"clear_context",
"[",
"'class'",
"]",
"=",
"options",
".",
"mongoengine_clear_after_class",
"# generate random database name",
"self",
".",
"database_name",
"=",
"str",
"(",
"uuid",
".",
"uuid1",
"(",
")",
")",
"#########################################",
"# Start a instance of mongo",
"#########################################",
"# Stores data here",
"self",
".",
"mongodb_param",
"[",
"'db_path'",
"]",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"mongodb_param",
"[",
"'db_path'",
"]",
")",
":",
"os",
".",
"mkdir",
"(",
"self",
".",
"mongodb_param",
"[",
"'db_path'",
"]",
")",
"args",
"=",
"[",
"self",
".",
"mongodb_param",
"[",
"'mongodb_bin'",
"]",
",",
"\"--dbpath\"",
",",
"self",
".",
"mongodb_param",
"[",
"'db_path'",
"]",
",",
"\"--port\"",
",",
"str",
"(",
"self",
".",
"mongodb_param",
"[",
"'db_port'",
"]",
")",
",",
"# don't flood stdout, we're not reading it",
"\"--quiet\"",
",",
"# save the port",
"\"--nohttpinterface\"",
",",
"# disable unused.",
"\"--nounixsocket\"",
",",
"# use a smaller default file size",
"\"--smallfiles\"",
",",
"# journaling on by default in 2.0 and makes it to slow",
"# for tests, can causes failures in jenkins",
"\"--nojournal\"",
",",
"# Default is /dev/null",
"\"--logpath\"",
",",
"db_log_path",
",",
"\"-vvvvv\"",
"]",
"if",
"not",
"db_prealloc",
":",
"args",
".",
"append",
"(",
"\"--noprealloc\"",
")",
"if",
"not",
"db_scripting",
":",
"args",
".",
"append",
"(",
"\"--noscripting\"",
")",
"self",
".",
"process",
"=",
"Popen",
"(",
"args",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
")",
"self",
".",
"_running",
"=",
"True",
"os",
".",
"environ",
"[",
"\"TEST_MONGODB\"",
"]",
"=",
"\"localhost:%s\"",
"%",
"self",
".",
"mongodb_param",
"[",
"'db_port'",
"]",
"os",
".",
"environ",
"[",
"\"TEST_MONGODB_DATABASE\"",
"]",
"=",
"self",
".",
"database_name",
"# Give a moment for mongodb to finish coming up",
"time",
".",
"sleep",
"(",
"0.3",
")",
"# Connecting using mongoengine",
"self",
".",
"connection",
"=",
"connect",
"(",
"self",
".",
"database_name",
",",
"host",
"=",
"\"localhost\"",
",",
"port",
"=",
"self",
".",
"mongodb_param",
"[",
"'db_port'",
"]",
")"
] |
Parse the command line options and start an instance of mongodb
|
[
"Parse",
"the",
"command",
"line",
"options",
"and",
"start",
"an",
"instance",
"of",
"mongodb"
] |
python
|
train
|
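Note the enable guard in the row above: `if not options.mongoengine or options.mongodb_bin: return` bails out whenever a mongod binary *is* supplied, which reads as inverted (the `else` branch that handles an explicit binary would then be unreachable). A small runnable sketch of the boolean mix-up and the likely intent:

```python
class Options:
    mongoengine = True                    # plugin enabled on the command line
    mongodb_bin = '/usr/bin/mongod'       # an explicit binary path was given

options = Options()
# The row's guard: truthy whenever a path is supplied, so setup is skipped
# even though the plugin is enabled.
print(not options.mongoengine or options.mongodb_bin)  # -> '/usr/bin/mongod' (truthy)
# The likely intended guard checks only the enable flag:
print(not options.mongoengine)  # -> False, so setup would proceed
```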
tgbugs/ontquery
|
ontquery/plugins/interlex_client.py
|
https://github.com/tgbugs/ontquery/blob/bcf4863cb2bf221afe2b093c5dc7da1377300041/ontquery/plugins/interlex_client.py#L403-L517
|
def update_entity(
self,
ilx_id: str,
label: str = None,
type: str = None,
definition: str = None,
comment: str = None,
superclass: str = None,
synonyms: list = None) -> dict:
""" Updates pre-existing entity as long as the api_key is from the account that created it
Args:
label: name of entity
type: entities type
Can be any of the following: term, cde, fde, pde, annotation, relationship
definition: entities definition
comment: a foot note regarding either the interpretation of the data or the data itself
superclass: entity is a sub-part of this entity
Example: Organ is a superclass to Brain
synonyms: entity synonyms
Returns:
Server response that is a nested dictionary format
"""
template_entity_input = {k:v for k, v in locals().copy().items() if k != 'self'}
if template_entity_input.get('superclass'):
template_entity_input['superclass'] = self.fix_ilx(template_entity_input['superclass'])
existing_entity = self.get_entity(ilx_id=ilx_id)
if not existing_entity['id']: # TODO: Need to make a proper ilx_id check error
raise self.EntityDoesNotExistError(
f'ilx_id provided {ilx_id} does not exist')
update_url = self.base_url + 'term/edit/{id}'.format(id=existing_entity['id'])
if label:
existing_entity['label'] = label
if type:
existing_entity['type'] = type
if definition:
existing_entity['definition'] = definition
if comment:
existing_entity['comment'] = comment
if superclass:
existing_entity['superclass'] = {'ilx_id': superclass}
existing_entity = self.process_superclass(existing_entity)
# If a match use old data, else append new synonym
if synonyms:
if existing_entity['synonyms']:
new_existing_synonyms = []
existing_synonyms = {syn['literal'].lower():syn for syn in existing_entity['synonyms']}
for synonym in synonyms:
existing_synonym = existing_synonyms.get(synonym.lower())
if not existing_synonym:
new_existing_synonyms.append({'literal': synonym})
else:
new_existing_synonyms.append(existing_synonym)
existing_entity['synonyms'] = new_existing_synonyms
# Just in case I need this...
# if synonyms_to_delete:
# if existing_entity['synonyms']:
# remaining_existing_synonyms = []
# existing_synonyms = {syn['literal'].lower():syn for syn in existing_entity['synonyms']}
# for synonym in synonyms:
# if existing_synonyms.get(synonym.lower()):
# existing_synonyms.pop(synonym.lower())
# else:
# print('WARNING: synonym you wanted to delete', synonym, 'does not exist')
# existing_entity['synonyms'] = list(existing_synonyms.values())
response = self.post(
url = update_url,
data = existing_entity,
)
# BUG: server response is bad and needs to actually search again to get proper format
raw_entity_outout = self.get_entity(response['ilx'])
entity_output = {}
ics = [(e['iri'], e['curie'])
for e in raw_entity_outout['existing_ids']]
entity_output['iri'], entity_output['curie'] = sorted((i, c)
for i, c in ics
if 'ilx_' in i)[0]
### FOR NEW BETA. Old can have 'ilx_' in the ids ###
if 'tmp' in raw_entity_outout['ilx']:
_id = raw_entity_outout['ilx'].split('_')[-1]
entity_output['iri'] = 'http://uri.interlex.org/base/tmp_' + _id
entity_output['curie'] = 'TMP:' + _id
print(template_entity_input)
for key, value in template_entity_input.items():
if key == 'superclass':
if raw_entity_outout.get('superclasses'):
entity_output[key] = raw_entity_outout['superclasses'][0]['ilx']
elif key == 'synonyms':
entity_output[key] = [syn['literal']
for syn in raw_entity_outout['synonyms']]
elif key == 'ilx_id':
pass
else:
entity_output[key] = str(raw_entity_outout[key])
if entity_output.get('superclass'):
entity_output['superclass'] = self.ilx_base_url + entity_output['superclass']
entity_output['ilx'] = self.ilx_base_url + raw_entity_outout['ilx']
return entity_output
|
[
"def",
"update_entity",
"(",
"self",
",",
"ilx_id",
":",
"str",
",",
"label",
":",
"str",
"=",
"None",
",",
"type",
":",
"str",
"=",
"None",
",",
"definition",
":",
"str",
"=",
"None",
",",
"comment",
":",
"str",
"=",
"None",
",",
"superclass",
":",
"str",
"=",
"None",
",",
"synonyms",
":",
"list",
"=",
"None",
")",
"->",
"dict",
":",
"template_entity_input",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"locals",
"(",
")",
".",
"copy",
"(",
")",
".",
"items",
"(",
")",
"if",
"k",
"!=",
"'self'",
"}",
"if",
"template_entity_input",
".",
"get",
"(",
"'superclass'",
")",
":",
"template_entity_input",
"[",
"'superclass'",
"]",
"=",
"self",
".",
"fix_ilx",
"(",
"template_entity_input",
"[",
"'superclass'",
"]",
")",
"existing_entity",
"=",
"self",
".",
"get_entity",
"(",
"ilx_id",
"=",
"ilx_id",
")",
"if",
"not",
"existing_entity",
"[",
"'id'",
"]",
":",
"# TODO: Need to make a proper ilx_id check error",
"raise",
"self",
".",
"EntityDoesNotExistError",
"(",
"f'ilx_id provided {ilx_id} does not exist'",
")",
"update_url",
"=",
"self",
".",
"base_url",
"+",
"'term/edit/{id}'",
".",
"format",
"(",
"id",
"=",
"existing_entity",
"[",
"'id'",
"]",
")",
"if",
"label",
":",
"existing_entity",
"[",
"'label'",
"]",
"=",
"label",
"if",
"type",
":",
"existing_entity",
"[",
"'type'",
"]",
"=",
"type",
"if",
"definition",
":",
"existing_entity",
"[",
"'definition'",
"]",
"=",
"definition",
"if",
"comment",
":",
"existing_entity",
"[",
"'comment'",
"]",
"=",
"comment",
"if",
"superclass",
":",
"existing_entity",
"[",
"'superclass'",
"]",
"=",
"{",
"'ilx_id'",
":",
"superclass",
"}",
"existing_entity",
"=",
"self",
".",
"process_superclass",
"(",
"existing_entity",
")",
"# If a match use old data, else append new synonym",
"if",
"synonyms",
":",
"if",
"existing_entity",
"[",
"'synonyms'",
"]",
":",
"new_existing_synonyms",
"=",
"[",
"]",
"existing_synonyms",
"=",
"{",
"syn",
"[",
"'literal'",
"]",
".",
"lower",
"(",
")",
":",
"syn",
"for",
"syn",
"in",
"existing_entity",
"[",
"'synonyms'",
"]",
"}",
"for",
"synonym",
"in",
"synonyms",
":",
"existing_synonym",
"=",
"existing_synonyms",
".",
"get",
"(",
"synonym",
".",
"lower",
"(",
")",
")",
"if",
"not",
"existing_synonym",
":",
"new_existing_synonyms",
".",
"append",
"(",
"{",
"'literal'",
":",
"synonym",
"}",
")",
"else",
":",
"new_existing_synonyms",
".",
"append",
"(",
"existing_synonym",
")",
"existing_entity",
"[",
"'synonyms'",
"]",
"=",
"new_existing_synonyms",
"# Just in case I need this...",
"# if synonyms_to_delete:",
"# if existing_entity['synonyms']:",
"# remaining_existing_synonyms = []",
"# existing_synonyms = {syn['literal'].lower():syn for syn in existing_entity['synonyms']}",
"# for synonym in synonyms:",
"# if existing_synonyms.get(synonym.lower()):",
"# existing_synonyms.pop(synonym.lower())",
"# else:",
"# print('WARNING: synonym you wanted to delete', synonym, 'does not exist')",
"# existing_entity['synonyms'] = list(existing_synonyms.values())",
"response",
"=",
"self",
".",
"post",
"(",
"url",
"=",
"update_url",
",",
"data",
"=",
"existing_entity",
",",
")",
"# BUG: server response is bad and needs to actually search again to get proper format",
"raw_entity_outout",
"=",
"self",
".",
"get_entity",
"(",
"response",
"[",
"'ilx'",
"]",
")",
"entity_output",
"=",
"{",
"}",
"ics",
"=",
"[",
"(",
"e",
"[",
"'iri'",
"]",
",",
"e",
"[",
"'curie'",
"]",
")",
"for",
"e",
"in",
"raw_entity_outout",
"[",
"'existing_ids'",
"]",
"]",
"entity_output",
"[",
"'iri'",
"]",
",",
"entity_output",
"[",
"'curie'",
"]",
"=",
"sorted",
"(",
"(",
"i",
",",
"c",
")",
"for",
"i",
",",
"c",
"in",
"ics",
"if",
"'ilx_'",
"in",
"i",
")",
"[",
"0",
"]",
"### FOR NEW BETA. Old can have 'ilx_' in the ids ###",
"if",
"'tmp'",
"in",
"raw_entity_outout",
"[",
"'ilx'",
"]",
":",
"_id",
"=",
"raw_entity_outout",
"[",
"'ilx'",
"]",
".",
"split",
"(",
"'_'",
")",
"[",
"-",
"1",
"]",
"entity_output",
"[",
"'iri'",
"]",
"=",
"'http://uri.interlex.org/base/tmp_'",
"+",
"_id",
"entity_output",
"[",
"'curie'",
"]",
"=",
"'TMP:'",
"+",
"_id",
"print",
"(",
"template_entity_input",
")",
"for",
"key",
",",
"value",
"in",
"template_entity_input",
".",
"items",
"(",
")",
":",
"if",
"key",
"==",
"'superclass'",
":",
"if",
"raw_entity_outout",
".",
"get",
"(",
"'superclasses'",
")",
":",
"entity_output",
"[",
"key",
"]",
"=",
"raw_entity_outout",
"[",
"'superclasses'",
"]",
"[",
"0",
"]",
"[",
"'ilx'",
"]",
"elif",
"key",
"==",
"'synonyms'",
":",
"entity_output",
"[",
"key",
"]",
"=",
"[",
"syn",
"[",
"'literal'",
"]",
"for",
"syn",
"in",
"raw_entity_outout",
"[",
"'synonyms'",
"]",
"]",
"elif",
"key",
"==",
"'ilx_id'",
":",
"pass",
"else",
":",
"entity_output",
"[",
"key",
"]",
"=",
"str",
"(",
"raw_entity_outout",
"[",
"key",
"]",
")",
"if",
"entity_output",
".",
"get",
"(",
"'superclass'",
")",
":",
"entity_output",
"[",
"'superclass'",
"]",
"=",
"self",
".",
"ilx_base_url",
"+",
"entity_output",
"[",
"'superclass'",
"]",
"entity_output",
"[",
"'ilx'",
"]",
"=",
"self",
".",
"ilx_base_url",
"+",
"raw_entity_outout",
"[",
"'ilx'",
"]",
"return",
"entity_output"
] |
Updates pre-existing entity as long as the api_key is from the account that created it
Args:
label: name of entity
type: entities type
Can be any of the following: term, cde, fde, pde, annotation, relationship
definition: entities definition
comment: a foot note regarding either the interpretation of the data or the data itself
superclass: entity is a sub-part of this entity
Example: Organ is a superclass to Brain
synonyms: entity synonyms
Returns:
Server response that is a nested dictionary format
|
[
"Updates",
"pre",
"-",
"existing",
"entity",
"as",
"long",
"as",
"the",
"api_key",
"is",
"from",
"the",
"account",
"that",
"created",
"it"
] |
python
|
train
|
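A call sketch for `update_entity`; the client construction is an assumption (consult ontquery for the real `InterLexClient` setup) and the ilx id is illustrative.

```python
from ontquery.plugins.interlex_client import InterLexClient

# Constructor arguments are assumptions; see ontquery docs for real setup.
client = InterLexClient(api_key='...', base_url='https://test3.scicrunch.org/api/1/')
entity = client.update_entity(
    ilx_id='ilx_0101431',                  # illustrative id
    definition='Updated definition text.',
    synonyms=['encephalon'],               # merged with any existing synonyms
)
print(entity['curie'], entity['iri'])
```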
SpriteLink/NIPAP
|
pynipap/pynipap.py
|
https://github.com/SpriteLink/NIPAP/blob/f96069f11ab952d80b13cab06e0528f2d24b3de9/pynipap/pynipap.py#L1077-L1111
|
def smart_search(cls, query_string, search_options=None, extra_query = None):
""" Perform a smart prefix search.
Maps to the function
:py:func:`nipap.backend.Nipap.smart_search_prefix` in the backend.
Please see the documentation for the backend function for
information regarding input arguments and return values.
"""
if search_options is None:
search_options = {}
xmlrpc = XMLRPCConnection()
try:
smart_result = xmlrpc.connection.smart_search_prefix(
{
'query_string': query_string,
'search_options': search_options,
'auth': AuthOptions().options,
'extra_query': extra_query
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
result = dict()
result['interpretation'] = smart_result['interpretation']
result['search_options'] = smart_result['search_options']
result['error'] = smart_result['error']
if 'error_message' in smart_result:
result['error_message'] = smart_result['error_message']
result['result'] = list()
for prefix in smart_result['result']:
p = Prefix.from_dict(prefix)
result['result'].append(p)
return result
|
[
"def",
"smart_search",
"(",
"cls",
",",
"query_string",
",",
"search_options",
"=",
"None",
",",
"extra_query",
"=",
"None",
")",
":",
"if",
"search_options",
"is",
"None",
":",
"search_options",
"=",
"{",
"}",
"xmlrpc",
"=",
"XMLRPCConnection",
"(",
")",
"try",
":",
"smart_result",
"=",
"xmlrpc",
".",
"connection",
".",
"smart_search_prefix",
"(",
"{",
"'query_string'",
":",
"query_string",
",",
"'search_options'",
":",
"search_options",
",",
"'auth'",
":",
"AuthOptions",
"(",
")",
".",
"options",
",",
"'extra_query'",
":",
"extra_query",
"}",
")",
"except",
"xmlrpclib",
".",
"Fault",
"as",
"xml_fault",
":",
"raise",
"_fault_to_exception",
"(",
"xml_fault",
")",
"result",
"=",
"dict",
"(",
")",
"result",
"[",
"'interpretation'",
"]",
"=",
"smart_result",
"[",
"'interpretation'",
"]",
"result",
"[",
"'search_options'",
"]",
"=",
"smart_result",
"[",
"'search_options'",
"]",
"result",
"[",
"'error'",
"]",
"=",
"smart_result",
"[",
"'error'",
"]",
"if",
"'error_message'",
"in",
"smart_result",
":",
"result",
"[",
"'error_message'",
"]",
"=",
"smart_result",
"[",
"'error_message'",
"]",
"result",
"[",
"'result'",
"]",
"=",
"list",
"(",
")",
"for",
"prefix",
"in",
"smart_result",
"[",
"'result'",
"]",
":",
"p",
"=",
"Prefix",
".",
"from_dict",
"(",
"prefix",
")",
"result",
"[",
"'result'",
"]",
".",
"append",
"(",
"p",
")",
"return",
"result"
] |
Perform a smart prefix search.
Maps to the function
:py:func:`nipap.backend.Nipap.smart_search_prefix` in the backend.
Please see the documentation for the backend function for
information regarding input arguments and return values.
|
[
"Perform",
"a",
"smart",
"prefix",
"search",
"."
] |
python
|
train
|
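A usage sketch for `Prefix.smart_search`, following the connection pattern from the pynipap documentation; the XML-RPC URI is a placeholder.

```python
import pynipap
from pynipap import AuthOptions, Prefix

pynipap.xmlrpc_uri = 'http://user:pass@nipap.example.com:1337/XMLRPC'  # placeholder
AuthOptions({'authoritative_source': 'my_script'})

result = Prefix.smart_search('192.0.2.0/24', search_options={'max_result': 10})
if result['error']:
    print(result.get('error_message'))
for prefix in result['result']:
    print(prefix.prefix)
```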
openego/eDisGo
|
edisgo/data/import_data.py
|
https://github.com/openego/eDisGo/blob/e6245bdaf236f9c49dbda5a18c1c458290f41e2b/edisgo/data/import_data.py#L118-L250
|
def _build_lv_grid(ding0_grid, network):
"""
Build eDisGo LV grid from Ding0 data
Parameters
----------
ding0_grid: ding0.MVGridDing0
Ding0 MV grid object
Returns
-------
list of LVGrid
LV grids
dict
Dictionary containing a mapping of LV stations in Ding0 to newly
created eDisGo LV stations. This mapping is used to use the same
instances of LV stations in the MV grid graph.
"""
lv_station_mapping = {}
lv_grids = []
lv_grid_mapping = {}
for la in ding0_grid.grid_district._lv_load_areas:
for lvgd in la._lv_grid_districts:
ding0_lv_grid = lvgd.lv_grid
if not ding0_lv_grid.grid_district.lv_load_area.is_aggregated:
# Create LV grid instance
lv_grid = LVGrid(
id=ding0_lv_grid.id_db,
geom=ding0_lv_grid.grid_district.geo_data,
grid_district={
'geom': ding0_lv_grid.grid_district.geo_data,
'population': ding0_lv_grid.grid_district.population},
voltage_nom=ding0_lv_grid.v_level / 1e3,
network=network)
station = {repr(_): _ for _ in
network.mv_grid.graph.nodes_by_attribute(
'lv_station')}['LVStation_' + str(
ding0_lv_grid._station.id_db)]
station.grid = lv_grid
for t in station.transformers:
t.grid = lv_grid
lv_grid.graph.add_node(station, type='lv_station')
lv_station_mapping.update({ding0_lv_grid._station: station})
# Create list of load instances and add these to grid's graph
loads = {_: Load(
id=_.id_db,
geom=_.geo_data,
grid=lv_grid,
consumption=_.consumption) for _ in ding0_lv_grid.loads()}
lv_grid.graph.add_nodes_from(loads.values(), type='load')
# Create list of generator instances and add these to grid's
# graph
generators = {_: (GeneratorFluctuating(
id=_.id_db,
geom=_.geo_data,
nominal_capacity=_.capacity,
type=_.type,
subtype=_.subtype,
grid=lv_grid,
weather_cell_id=_.weather_cell_id,
v_level=_.v_level) if _.type in ['wind', 'solar'] else
Generator(
id=_.id_db,
geom=_.geo_data,
nominal_capacity=_.capacity,
type=_.type,
subtype=_.subtype,
grid=lv_grid,
v_level=_.v_level))
for _ in ding0_lv_grid.generators()}
lv_grid.graph.add_nodes_from(generators.values(),
type='generator')
# Create list of branch tee instances and add these to grid's
# graph
branch_tees = {
_: BranchTee(id=_.id_db,
geom=_.geo_data,
grid=lv_grid,
in_building=_.in_building)
for _ in ding0_lv_grid._cable_distributors}
lv_grid.graph.add_nodes_from(branch_tees.values(),
type='branch_tee')
# Merge node above defined above to a single dict
nodes = {**loads,
**generators,
**branch_tees,
**{ding0_lv_grid._station: station}}
edges = []
edges_raw = list(nx.get_edge_attributes(
ding0_lv_grid._graph, 'branch').items())
for edge in edges_raw:
edges.append({'adj_nodes': edge[0], 'branch': edge[1]})
# Create list of line instances and add these to grid's graph
lines = [(nodes[_['adj_nodes'][0]], nodes[_['adj_nodes'][1]],
{'line': Line(
id=_['branch'].id_db,
type=_['branch'].type,
length=_['branch'].length / 1e3,
kind=_['branch'].kind,
grid=lv_grid)
})
for _ in edges]
# convert voltage from V to kV
for line in lines:
# ToDo: remove work around once it's fixed in ding0
if line[2]['line'].type['U_n'] >= 400:
line[2]['line'].type['U_n'] = \
line[2]['line'].type['U_n'] / 1e3
lv_grid.graph.add_edges_from(lines, type='line')
# Add LV station as association to LV grid
lv_grid._station = station
# Add to lv grid mapping
lv_grid_mapping.update({lv_grid: ding0_lv_grid})
# Put all LV grid to a list of LV grids
lv_grids.append(lv_grid)
# ToDo: don't forget to adapt lv stations creation in MV grid
return lv_grids, lv_station_mapping, lv_grid_mapping
|
[
"def",
"_build_lv_grid",
"(",
"ding0_grid",
",",
"network",
")",
":",
"lv_station_mapping",
"=",
"{",
"}",
"lv_grids",
"=",
"[",
"]",
"lv_grid_mapping",
"=",
"{",
"}",
"for",
"la",
"in",
"ding0_grid",
".",
"grid_district",
".",
"_lv_load_areas",
":",
"for",
"lvgd",
"in",
"la",
".",
"_lv_grid_districts",
":",
"ding0_lv_grid",
"=",
"lvgd",
".",
"lv_grid",
"if",
"not",
"ding0_lv_grid",
".",
"grid_district",
".",
"lv_load_area",
".",
"is_aggregated",
":",
"# Create LV grid instance",
"lv_grid",
"=",
"LVGrid",
"(",
"id",
"=",
"ding0_lv_grid",
".",
"id_db",
",",
"geom",
"=",
"ding0_lv_grid",
".",
"grid_district",
".",
"geo_data",
",",
"grid_district",
"=",
"{",
"'geom'",
":",
"ding0_lv_grid",
".",
"grid_district",
".",
"geo_data",
",",
"'population'",
":",
"ding0_lv_grid",
".",
"grid_district",
".",
"population",
"}",
",",
"voltage_nom",
"=",
"ding0_lv_grid",
".",
"v_level",
"/",
"1e3",
",",
"network",
"=",
"network",
")",
"station",
"=",
"{",
"repr",
"(",
"_",
")",
":",
"_",
"for",
"_",
"in",
"network",
".",
"mv_grid",
".",
"graph",
".",
"nodes_by_attribute",
"(",
"'lv_station'",
")",
"}",
"[",
"'LVStation_'",
"+",
"str",
"(",
"ding0_lv_grid",
".",
"_station",
".",
"id_db",
")",
"]",
"station",
".",
"grid",
"=",
"lv_grid",
"for",
"t",
"in",
"station",
".",
"transformers",
":",
"t",
".",
"grid",
"=",
"lv_grid",
"lv_grid",
".",
"graph",
".",
"add_node",
"(",
"station",
",",
"type",
"=",
"'lv_station'",
")",
"lv_station_mapping",
".",
"update",
"(",
"{",
"ding0_lv_grid",
".",
"_station",
":",
"station",
"}",
")",
"# Create list of load instances and add these to grid's graph",
"loads",
"=",
"{",
"_",
":",
"Load",
"(",
"id",
"=",
"_",
".",
"id_db",
",",
"geom",
"=",
"_",
".",
"geo_data",
",",
"grid",
"=",
"lv_grid",
",",
"consumption",
"=",
"_",
".",
"consumption",
")",
"for",
"_",
"in",
"ding0_lv_grid",
".",
"loads",
"(",
")",
"}",
"lv_grid",
".",
"graph",
".",
"add_nodes_from",
"(",
"loads",
".",
"values",
"(",
")",
",",
"type",
"=",
"'load'",
")",
"# Create list of generator instances and add these to grid's",
"# graph",
"generators",
"=",
"{",
"_",
":",
"(",
"GeneratorFluctuating",
"(",
"id",
"=",
"_",
".",
"id_db",
",",
"geom",
"=",
"_",
".",
"geo_data",
",",
"nominal_capacity",
"=",
"_",
".",
"capacity",
",",
"type",
"=",
"_",
".",
"type",
",",
"subtype",
"=",
"_",
".",
"subtype",
",",
"grid",
"=",
"lv_grid",
",",
"weather_cell_id",
"=",
"_",
".",
"weather_cell_id",
",",
"v_level",
"=",
"_",
".",
"v_level",
")",
"if",
"_",
".",
"type",
"in",
"[",
"'wind'",
",",
"'solar'",
"]",
"else",
"Generator",
"(",
"id",
"=",
"_",
".",
"id_db",
",",
"geom",
"=",
"_",
".",
"geo_data",
",",
"nominal_capacity",
"=",
"_",
".",
"capacity",
",",
"type",
"=",
"_",
".",
"type",
",",
"subtype",
"=",
"_",
".",
"subtype",
",",
"grid",
"=",
"lv_grid",
",",
"v_level",
"=",
"_",
".",
"v_level",
")",
")",
"for",
"_",
"in",
"ding0_lv_grid",
".",
"generators",
"(",
")",
"}",
"lv_grid",
".",
"graph",
".",
"add_nodes_from",
"(",
"generators",
".",
"values",
"(",
")",
",",
"type",
"=",
"'generator'",
")",
"# Create list of branch tee instances and add these to grid's",
"# graph",
"branch_tees",
"=",
"{",
"_",
":",
"BranchTee",
"(",
"id",
"=",
"_",
".",
"id_db",
",",
"geom",
"=",
"_",
".",
"geo_data",
",",
"grid",
"=",
"lv_grid",
",",
"in_building",
"=",
"_",
".",
"in_building",
")",
"for",
"_",
"in",
"ding0_lv_grid",
".",
"_cable_distributors",
"}",
"lv_grid",
".",
"graph",
".",
"add_nodes_from",
"(",
"branch_tees",
".",
"values",
"(",
")",
",",
"type",
"=",
"'branch_tee'",
")",
"# Merge node above defined above to a single dict",
"nodes",
"=",
"{",
"*",
"*",
"loads",
",",
"*",
"*",
"generators",
",",
"*",
"*",
"branch_tees",
",",
"*",
"*",
"{",
"ding0_lv_grid",
".",
"_station",
":",
"station",
"}",
"}",
"edges",
"=",
"[",
"]",
"edges_raw",
"=",
"list",
"(",
"nx",
".",
"get_edge_attributes",
"(",
"ding0_lv_grid",
".",
"_graph",
",",
"'branch'",
")",
".",
"items",
"(",
")",
")",
"for",
"edge",
"in",
"edges_raw",
":",
"edges",
".",
"append",
"(",
"{",
"'adj_nodes'",
":",
"edge",
"[",
"0",
"]",
",",
"'branch'",
":",
"edge",
"[",
"1",
"]",
"}",
")",
"# Create list of line instances and add these to grid's graph",
"lines",
"=",
"[",
"(",
"nodes",
"[",
"_",
"[",
"'adj_nodes'",
"]",
"[",
"0",
"]",
"]",
",",
"nodes",
"[",
"_",
"[",
"'adj_nodes'",
"]",
"[",
"1",
"]",
"]",
",",
"{",
"'line'",
":",
"Line",
"(",
"id",
"=",
"_",
"[",
"'branch'",
"]",
".",
"id_db",
",",
"type",
"=",
"_",
"[",
"'branch'",
"]",
".",
"type",
",",
"length",
"=",
"_",
"[",
"'branch'",
"]",
".",
"length",
"/",
"1e3",
",",
"kind",
"=",
"_",
"[",
"'branch'",
"]",
".",
"kind",
",",
"grid",
"=",
"lv_grid",
")",
"}",
")",
"for",
"_",
"in",
"edges",
"]",
"# convert voltage from V to kV",
"for",
"line",
"in",
"lines",
":",
"# ToDo: remove work around once it's fixed in ding0",
"if",
"line",
"[",
"2",
"]",
"[",
"'line'",
"]",
".",
"type",
"[",
"'U_n'",
"]",
">=",
"400",
":",
"line",
"[",
"2",
"]",
"[",
"'line'",
"]",
".",
"type",
"[",
"'U_n'",
"]",
"=",
"line",
"[",
"2",
"]",
"[",
"'line'",
"]",
".",
"type",
"[",
"'U_n'",
"]",
"/",
"1e3",
"lv_grid",
".",
"graph",
".",
"add_edges_from",
"(",
"lines",
",",
"type",
"=",
"'line'",
")",
"# Add LV station as association to LV grid",
"lv_grid",
".",
"_station",
"=",
"station",
"# Add to lv grid mapping",
"lv_grid_mapping",
".",
"update",
"(",
"{",
"lv_grid",
":",
"ding0_lv_grid",
"}",
")",
"# Put all LV grid to a list of LV grids",
"lv_grids",
".",
"append",
"(",
"lv_grid",
")",
"# ToDo: don't forget to adapt lv stations creation in MV grid",
"return",
"lv_grids",
",",
"lv_station_mapping",
",",
"lv_grid_mapping"
] |
Build eDisGo LV grid from Ding0 data
Parameters
----------
ding0_grid: ding0.MVGridDing0
Ding0 MV grid object
Returns
-------
list of LVGrid
LV grids
dict
Dictionary containing a mapping of LV stations in Ding0 to newly
created eDisGo LV stations. This mapping is used to use the same
instances of LV stations in the MV grid graph.
|
[
"Build",
"eDisGo",
"LV",
"grid",
"from",
"Ding0",
"data"
] |
python
|
train
|
yinkaisheng/Python-UIAutomation-for-Windows
|
uiautomation/uiautomation.py
|
https://github.com/yinkaisheng/Python-UIAutomation-for-Windows/blob/2cc91060982cc8b777152e698d677cc2989bf263/uiautomation/uiautomation.py#L2187-L2199
|
def SetWindowPos(handle: int, hWndInsertAfter: int, x: int, y: int, width: int, height: int, flags: int) -> bool:
"""
SetWindowPos from Win32.
handle: int, the handle of a native window.
hWndInsertAfter: int, a value whose name starts with 'HWND' in class SWP.
x: int.
y: int.
width: int.
height: int.
flags: int, values whose name starts with 'SWP' in class `SWP`.
Return bool, True if succeed otherwise False.
"""
return ctypes.windll.user32.SetWindowPos(ctypes.c_void_p(handle), ctypes.c_void_p(hWndInsertAfter), x, y, width, height, flags)
|
[
"def",
"SetWindowPos",
"(",
"handle",
":",
"int",
",",
"hWndInsertAfter",
":",
"int",
",",
"x",
":",
"int",
",",
"y",
":",
"int",
",",
"width",
":",
"int",
",",
"height",
":",
"int",
",",
"flags",
":",
"int",
")",
"->",
"bool",
":",
"return",
"ctypes",
".",
"windll",
".",
"user32",
".",
"SetWindowPos",
"(",
"ctypes",
".",
"c_void_p",
"(",
"handle",
")",
",",
"ctypes",
".",
"c_void_p",
"(",
"hWndInsertAfter",
")",
",",
"x",
",",
"y",
",",
"width",
",",
"height",
",",
"flags",
")"
] |
SetWindowPos from Win32.
handle: int, the handle of a native window.
hWndInsertAfter: int, a value whose name starts with 'HWND' in class SWP.
x: int.
y: int.
width: int.
height: int.
flags: int, values whose name starts with 'SWP' in class `SWP`.
Return bool, True if succeed otherwise False.
|
[
"SetWindowPos",
"from",
"Win32",
".",
"handle",
":",
"int",
"the",
"handle",
"of",
"a",
"native",
"window",
".",
"hWndInsertAfter",
":",
"int",
"a",
"value",
"whose",
"name",
"starts",
"with",
"HWND",
"in",
"class",
"SWP",
".",
"x",
":",
"int",
".",
"y",
":",
"int",
".",
"width",
":",
"int",
".",
"height",
":",
"int",
".",
"flags",
":",
"int",
"values",
"whose",
"name",
"starts",
"with",
"SWP",
"in",
"class",
"SWP",
".",
"Return",
"bool",
"True",
"if",
"succeed",
"otherwise",
"False",
"."
] |
python
|
valid
|
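A sketch of calling the `SetWindowPos` wrapper above; raw Win32 constant values from WinUser.h are used here (the library's `SWP` class exposes the same values by name), and `GetForegroundControl` is assumed available in this uiautomation version.

```python
import uiautomation as auto

HWND_TOPMOST = -1      # Win32: place the window above all non-topmost windows
SWP_NOSIZE = 0x0001    # Win32: keep the current size
SWP_NOMOVE = 0x0002    # Win32: keep the current position

ctrl = auto.GetForegroundControl()
ok = auto.SetWindowPos(ctrl.NativeWindowHandle, HWND_TOPMOST,
                       0, 0, 0, 0, SWP_NOSIZE | SWP_NOMOVE)
print('pinned on top:', ok)
```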
tensorpack/tensorpack
|
tensorpack/tfutils/varreplace.py
|
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/tfutils/varreplace.py#L59-L97
|
def freeze_variables(stop_gradient=True, skip_collection=False):
"""
Return a context to freeze variables,
by wrapping ``tf.get_variable`` with a custom getter.
It works by either applying ``tf.stop_gradient`` on the variables,
or by keeping them out of the ``TRAINABLE_VARIABLES`` collection, or
both.
Example:
.. code-block:: python
with varreplace.freeze_variable(stop_gradient=False, skip_collection=True):
x = FullyConnected('fc', x, 1000) # fc/* will not be trained
Args:
stop_gradient (bool): if True, variables returned from `get_variable`
will be wrapped with `tf.stop_gradient` and therefore has no
gradient when used later.
Note that the created variables may still have gradient when accessed
by other approaches (e.g. by name, or by collection).
Also note that this makes `tf.get_variable` returns a Tensor instead of a Variable,
which may break existing code.
Therefore, it's recommended to use the `skip_collection` option instead.
skip_collection (bool): if True, do not add the variable to
``TRAINABLE_VARIABLES`` collection, but to ``MODEL_VARIABLES``
collection. As a result they will not be trained by default.
"""
def custom_getter(getter, *args, **kwargs):
trainable = kwargs.get('trainable', True)
name = args[0] if len(args) else kwargs.get('name')
if skip_collection:
kwargs['trainable'] = False
v = getter(*args, **kwargs)
if skip_collection:
tf.add_to_collection(tf.GraphKeys.MODEL_VARIABLES, v)
if trainable and stop_gradient:
v = tf.stop_gradient(v, name='freezed_' + name)
return v
return custom_getter_scope(custom_getter)
|
[
"def",
"freeze_variables",
"(",
"stop_gradient",
"=",
"True",
",",
"skip_collection",
"=",
"False",
")",
":",
"def",
"custom_getter",
"(",
"getter",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"trainable",
"=",
"kwargs",
".",
"get",
"(",
"'trainable'",
",",
"True",
")",
"name",
"=",
"args",
"[",
"0",
"]",
"if",
"len",
"(",
"args",
")",
"else",
"kwargs",
".",
"get",
"(",
"'name'",
")",
"if",
"skip_collection",
":",
"kwargs",
"[",
"'trainable'",
"]",
"=",
"False",
"v",
"=",
"getter",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"skip_collection",
":",
"tf",
".",
"add_to_collection",
"(",
"tf",
".",
"GraphKeys",
".",
"MODEL_VARIABLES",
",",
"v",
")",
"if",
"trainable",
"and",
"stop_gradient",
":",
"v",
"=",
"tf",
".",
"stop_gradient",
"(",
"v",
",",
"name",
"=",
"'freezed_'",
"+",
"name",
")",
"return",
"v",
"return",
"custom_getter_scope",
"(",
"custom_getter",
")"
] |
Return a context to freeze variables,
by wrapping ``tf.get_variable`` with a custom getter.
It works by either applying ``tf.stop_gradient`` on the variables,
or by keeping them out of the ``TRAINABLE_VARIABLES`` collection, or
both.
Example:
.. code-block:: python
with varreplace.freeze_variable(stop_gradient=False, skip_collection=True):
x = FullyConnected('fc', x, 1000) # fc/* will not be trained
Args:
stop_gradient (bool): if True, variables returned from `get_variable`
will be wrapped with `tf.stop_gradient` and therefore has no
gradient when used later.
Note that the created variables may still have gradient when accessed
by other approaches (e.g. by name, or by collection).
Also note that this makes `tf.get_variable` returns a Tensor instead of a Variable,
which may break existing code.
Therefore, it's recommended to use the `skip_collection` option instead.
skip_collection (bool): if True, do not add the variable to
``TRAINABLE_VARIABLES`` collection, but to ``MODEL_VARIABLES``
collection. As a result they will not be trained by default.
|
[
"Return",
"a",
"context",
"to",
"freeze",
"variables",
"by",
"wrapping",
"tf",
".",
"get_variable",
"with",
"a",
"custom",
"getter",
".",
"It",
"works",
"by",
"either",
"applying",
"tf",
".",
"stop_gradient",
"on",
"the",
"variables",
"or",
"by",
"keeping",
"them",
"out",
"of",
"the",
"TRAINABLE_VARIABLES",
"collection",
"or",
"both",
"."
] |
python
|
train
|
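A standalone sketch of the custom-getter mechanism `freeze_variables` builds on, assuming the TF 1.x API (`tf.variable_scope` accepts a `custom_getter`):

```python
import tensorflow as tf  # TF 1.x API

def stop_gradient_getter(getter, *args, **kwargs):
    # Same trick as freeze_variables(stop_gradient=True): every variable
    # created in the scope comes back wrapped in tf.stop_gradient.
    return tf.stop_gradient(getter(*args, **kwargs))

with tf.variable_scope('frozen', custom_getter=stop_gradient_getter):
    w = tf.get_variable('w', shape=[3, 3])  # gradients through w are blocked
```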
openstax/cnx-publishing
|
cnxpublishing/subscribers.py
|
https://github.com/openstax/cnx-publishing/blob/f55b4a2c45d8618737288f1b74b4139d5ac74154/cnxpublishing/subscribers.py#L32-L64
|
def post_publication_processing(event, cursor):
"""Process post-publication events coming out of the database."""
module_ident, ident_hash = event.module_ident, event.ident_hash
celery_app = get_current_registry().celery_app
# Check baking is not already queued.
cursor.execute('SELECT result_id::text '
'FROM document_baking_result_associations '
'WHERE module_ident = %s', (module_ident,))
for result in cursor.fetchall():
state = celery_app.AsyncResult(result[0]).state
if state in ('QUEUED', 'STARTED', 'RETRY'):
logger.debug('Already queued module_ident={} ident_hash={}'.format(
module_ident, ident_hash))
return
logger.debug('Queued for processing module_ident={} ident_hash={}'.format(
module_ident, ident_hash))
recipe_ids = _get_recipe_ids(module_ident, cursor)
update_module_state(cursor, module_ident, 'processing', recipe_ids[0])
# Commit the state change before preceding.
cursor.connection.commit()
# Start of task
# FIXME Looking up the task isn't the most clear usage here.
task_name = 'cnxpublishing.subscribers.baking_processor'
baking_processor = celery_app.tasks[task_name]
result = baking_processor.delay(module_ident, ident_hash)
baking_processor.backend.store_result(result.id, None, 'QUEUED')
# Save the mapping between a celery task and this event.
track_baking_proc_state(result, module_ident, cursor)
|
[
"def",
"post_publication_processing",
"(",
"event",
",",
"cursor",
")",
":",
"module_ident",
",",
"ident_hash",
"=",
"event",
".",
"module_ident",
",",
"event",
".",
"ident_hash",
"celery_app",
"=",
"get_current_registry",
"(",
")",
".",
"celery_app",
"# Check baking is not already queued.",
"cursor",
".",
"execute",
"(",
"'SELECT result_id::text '",
"'FROM document_baking_result_associations '",
"'WHERE module_ident = %s'",
",",
"(",
"module_ident",
",",
")",
")",
"for",
"result",
"in",
"cursor",
".",
"fetchall",
"(",
")",
":",
"state",
"=",
"celery_app",
".",
"AsyncResult",
"(",
"result",
"[",
"0",
"]",
")",
".",
"state",
"if",
"state",
"in",
"(",
"'QUEUED'",
",",
"'STARTED'",
",",
"'RETRY'",
")",
":",
"logger",
".",
"debug",
"(",
"'Already queued module_ident={} ident_hash={}'",
".",
"format",
"(",
"module_ident",
",",
"ident_hash",
")",
")",
"return",
"logger",
".",
"debug",
"(",
"'Queued for processing module_ident={} ident_hash={}'",
".",
"format",
"(",
"module_ident",
",",
"ident_hash",
")",
")",
"recipe_ids",
"=",
"_get_recipe_ids",
"(",
"module_ident",
",",
"cursor",
")",
"update_module_state",
"(",
"cursor",
",",
"module_ident",
",",
"'processing'",
",",
"recipe_ids",
"[",
"0",
"]",
")",
"# Commit the state change before preceding.",
"cursor",
".",
"connection",
".",
"commit",
"(",
")",
"# Start of task",
"# FIXME Looking up the task isn't the most clear usage here.",
"task_name",
"=",
"'cnxpublishing.subscribers.baking_processor'",
"baking_processor",
"=",
"celery_app",
".",
"tasks",
"[",
"task_name",
"]",
"result",
"=",
"baking_processor",
".",
"delay",
"(",
"module_ident",
",",
"ident_hash",
")",
"baking_processor",
".",
"backend",
".",
"store_result",
"(",
"result",
".",
"id",
",",
"None",
",",
"'QUEUED'",
")",
"# Save the mapping between a celery task and this event.",
"track_baking_proc_state",
"(",
"result",
",",
"module_ident",
",",
"cursor",
")"
] |
Process post-publication events coming out of the database.
|
[
"Process",
"post",
"-",
"publication",
"events",
"coming",
"out",
"of",
"the",
"database",
"."
] |
python
|
valid
|
SBRG/ssbio
|
ssbio/protein/structure/homology/itasser/itasserprop.py
|
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/homology/itasser/itasserprop.py#L240-L286
|
def get_dict(self, only_attributes=None, exclude_attributes=None, df_format=False):
"""Summarize the I-TASSER run in a dictionary containing modeling results and top predictions from COACH
Args:
only_attributes (str, list): Attributes that should be returned. If not provided, all are returned.
exclude_attributes (str, list): Attributes that should be excluded.
        df_format (bool): Whether dictionary values should be formatted for a dataframe
(everything possible is transformed into strings, int, or float -
if something can't be transformed it is excluded)
Returns:
dict: Dictionary of attributes
"""
to_exclude = ['coach_bsites', 'coach_ec', 'coach_go_mf', 'coach_go_bp', 'coach_go_cc']
if not exclude_attributes:
excluder = to_exclude
else:
excluder = ssbio.utils.force_list(exclude_attributes)
excluder.extend(to_exclude)
summary_dict = StructProp.get_dict(self, only_attributes=only_attributes,
exclude_attributes=excluder,
df_format=df_format)
if self.coach_bsites:
tmp = {'top_bsite_' + k:v for k, v in self.coach_bsites[0].items()}
summary_dict.update(tmp)
if self.coach_ec:
tmp = {'top_ec_' + k: v for k, v in self.coach_ec[0].items()}
summary_dict.update(tmp)
if self.coach_go_mf:
tmp = {'top_go_mf_' + k: v for k, v in self.coach_go_mf[0].items()}
summary_dict.update(tmp)
if self.coach_go_bp:
tmp = {'top_go_bp_' + k: v for k, v in self.coach_go_bp[0].items()}
summary_dict.update(tmp)
if self.coach_go_cc:
tmp = {'top_go_cc_' + k: v for k, v in self.coach_go_cc[0].items()}
summary_dict.update(tmp)
return summary_dict
|
[
"def",
"get_dict",
"(",
"self",
",",
"only_attributes",
"=",
"None",
",",
"exclude_attributes",
"=",
"None",
",",
"df_format",
"=",
"False",
")",
":",
"to_exclude",
"=",
"[",
"'coach_bsites'",
",",
"'coach_ec'",
",",
"'coach_go_mf'",
",",
"'coach_go_bp'",
",",
"'coach_go_cc'",
"]",
"if",
"not",
"exclude_attributes",
":",
"excluder",
"=",
"to_exclude",
"else",
":",
"excluder",
"=",
"ssbio",
".",
"utils",
".",
"force_list",
"(",
"exclude_attributes",
")",
"excluder",
".",
"extend",
"(",
"to_exclude",
")",
"summary_dict",
"=",
"StructProp",
".",
"get_dict",
"(",
"self",
",",
"only_attributes",
"=",
"only_attributes",
",",
"exclude_attributes",
"=",
"excluder",
",",
"df_format",
"=",
"df_format",
")",
"if",
"self",
".",
"coach_bsites",
":",
"tmp",
"=",
"{",
"'top_bsite_'",
"+",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"self",
".",
"coach_bsites",
"[",
"0",
"]",
".",
"items",
"(",
")",
"}",
"summary_dict",
".",
"update",
"(",
"tmp",
")",
"if",
"self",
".",
"coach_ec",
":",
"tmp",
"=",
"{",
"'top_ec_'",
"+",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"self",
".",
"coach_ec",
"[",
"0",
"]",
".",
"items",
"(",
")",
"}",
"summary_dict",
".",
"update",
"(",
"tmp",
")",
"if",
"self",
".",
"coach_go_mf",
":",
"tmp",
"=",
"{",
"'top_go_mf_'",
"+",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"self",
".",
"coach_go_mf",
"[",
"0",
"]",
".",
"items",
"(",
")",
"}",
"summary_dict",
".",
"update",
"(",
"tmp",
")",
"if",
"self",
".",
"coach_go_bp",
":",
"tmp",
"=",
"{",
"'top_go_bp_'",
"+",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"self",
".",
"coach_go_bp",
"[",
"0",
"]",
".",
"items",
"(",
")",
"}",
"summary_dict",
".",
"update",
"(",
"tmp",
")",
"if",
"self",
".",
"coach_go_cc",
":",
"tmp",
"=",
"{",
"'top_go_cc_'",
"+",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"self",
".",
"coach_go_cc",
"[",
"0",
"]",
".",
"items",
"(",
")",
"}",
"summary_dict",
".",
"update",
"(",
"tmp",
")",
"return",
"summary_dict"
] |
Summarize the I-TASSER run in a dictionary containing modeling results and top predictions from COACH
Args:
only_attributes (str, list): Attributes that should be returned. If not provided, all are returned.
exclude_attributes (str, list): Attributes that should be excluded.
df_format (bool): Whether dictionary values should be formatted for a dataframe
(everything possible is transformed into strings, int, or float -
if something can't be transformed it is excluded)
Returns:
dict: Dictionary of attributes
|
[
"Summarize",
"the",
"I",
"-",
"TASSER",
"run",
"in",
"a",
"dictionary",
"containing",
"modeling",
"results",
"and",
"top",
"predictions",
"from",
"COACH"
] |
python
|
train
|
dossier/dossier.models
|
dossier/models/pairwise.py
|
https://github.com/dossier/dossier.models/blob/c9e282f690eab72963926329efe1600709e48b13/dossier/models/pairwise.py#L309-L339
|
def classify(self, feature_names, classifier, transformer, candidates,
query_fc=None):
'''Returns ``[probability]`` in correspondence with
``candidates``.
Where each ``probability`` corresponds to the probability that
the corresponding candidate is classified with a positive label
given the training data.
The list returned is in correspondence with the list of
candidates given.
N.B. The contract of this method should be simplified by
bundling ``feature_names``, ``classifier`` and ``transformer``
into one thing known as "the model." ---AG
'''
if query_fc is None:
query_fc = self.query_fc
dis = {}
for name in feature_names:
vec = dict_vector()
query = vec.fit_transform([get_feat(query_fc, name)])
cans = vec.transform(get_feat(fc, name) for _, fc in candidates)
dis[name] = 1 - pairwise_distances(
cans, query, metric='cosine', n_jobs=1)[:,0]
# in correspondence with `candidates`
phi_dicts = transformer.transform(
[dict([(name, dis[name][i]) for name in feature_names])
for i in xrange(len(candidates))])
return classifier.predict_proba(phi_dicts)[:,1]
|
[
"def",
"classify",
"(",
"self",
",",
"feature_names",
",",
"classifier",
",",
"transformer",
",",
"candidates",
",",
"query_fc",
"=",
"None",
")",
":",
"if",
"query_fc",
"is",
"None",
":",
"query_fc",
"=",
"self",
".",
"query_fc",
"dis",
"=",
"{",
"}",
"for",
"name",
"in",
"feature_names",
":",
"vec",
"=",
"dict_vector",
"(",
")",
"query",
"=",
"vec",
".",
"fit_transform",
"(",
"[",
"get_feat",
"(",
"query_fc",
",",
"name",
")",
"]",
")",
"cans",
"=",
"vec",
".",
"transform",
"(",
"get_feat",
"(",
"fc",
",",
"name",
")",
"for",
"_",
",",
"fc",
"in",
"candidates",
")",
"dis",
"[",
"name",
"]",
"=",
"1",
"-",
"pairwise_distances",
"(",
"cans",
",",
"query",
",",
"metric",
"=",
"'cosine'",
",",
"n_jobs",
"=",
"1",
")",
"[",
":",
",",
"0",
"]",
"# in correspondence with `candidates`",
"phi_dicts",
"=",
"transformer",
".",
"transform",
"(",
"[",
"dict",
"(",
"[",
"(",
"name",
",",
"dis",
"[",
"name",
"]",
"[",
"i",
"]",
")",
"for",
"name",
"in",
"feature_names",
"]",
")",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"candidates",
")",
")",
"]",
")",
"return",
"classifier",
".",
"predict_proba",
"(",
"phi_dicts",
")",
"[",
":",
",",
"1",
"]"
] |
Returns ``[probability]`` in correspondence with
``candidates``.
Where each ``probability`` corresponds to the probability that
the corresponding candidate is classified with a positive label
given the training data.
The list returned is in correspondence with the list of
candidates given.
N.B. The contract of this method should be simplified by
bundling ``feature_names``, ``classifier`` and ``transformer``
into one thing known as "the model." ---AG
|
[
"Returns",
"[",
"probability",
"]",
"in",
"correspondence",
"with",
"candidates",
"."
] |
python
|
train
|
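A short sketch of the per-feature similarity computed in `classify` above, assuming `dict_vector` wraps scikit-learn's `DictVectorizer` (an assumption; only `pairwise_distances` is visibly scikit-learn):

from sklearn.feature_extraction import DictVectorizer
from sklearn.metrics.pairwise import pairwise_distances

vec = DictVectorizer()
query = vec.fit_transform([{"alice": 2, "bob": 1}])           # query feature counts
cans = vec.transform([{"alice": 1}, {"bob": 3, "carol": 1}])  # one row per candidate
# Cosine similarity = 1 - cosine distance; one score per candidate.
sims = 1 - pairwise_distances(cans, query, metric="cosine", n_jobs=1)[:, 0]
print(sims)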
INM-6/hybridLFPy
|
examples/Hagen_et_al_2016_cercor/analysis_params.py
|
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/Hagen_et_al_2016_cercor/analysis_params.py#L92-L96
|
def set_default_fig_style(self):
'''default figure size'''
plt.rcParams.update({
'figure.figsize' : [self.frontierswidth/self.inchpercm, self.frontierswidth/self.inchpercm],
})
|
[
"def",
"set_default_fig_style",
"(",
"self",
")",
":",
"plt",
".",
"rcParams",
".",
"update",
"(",
"{",
"'figure.figsize'",
":",
"[",
"self",
".",
"frontierswidth",
"/",
"self",
".",
"inchpercm",
",",
"self",
".",
"frontierswidth",
"/",
"self",
".",
"inchpercm",
"]",
",",
"}",
")"
] |
default figure size
|
[
"default",
"figure",
"size"
] |
python
|
train
|
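A standalone sketch of the rcParams update above; `frontierswidth` and `inchpercm` are assumed to be a target column width in centimetres and the centimetres-per-inch factor:

import matplotlib.pyplot as plt

frontierswidth = 8.5  # assumed column width in cm
inchpercm = 2.54      # matplotlib expresses figure sizes in inches
plt.rcParams.update({
    'figure.figsize': [frontierswidth / inchpercm, frontierswidth / inchpercm],
})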
yaml/pyyaml
|
lib/yaml/__init__.py
|
https://github.com/yaml/pyyaml/blob/e471e86bf6dabdad45a1438c20a4a5c033eb9034/lib/yaml/__init__.py#L69-L78
|
def parse(stream, Loader=Loader):
"""
Parse a YAML stream and produce parsing events.
"""
loader = Loader(stream)
try:
while loader.check_event():
yield loader.get_event()
finally:
loader.dispose()
|
[
"def",
"parse",
"(",
"stream",
",",
"Loader",
"=",
"Loader",
")",
":",
"loader",
"=",
"Loader",
"(",
"stream",
")",
"try",
":",
"while",
"loader",
".",
"check_event",
"(",
")",
":",
"yield",
"loader",
".",
"get_event",
"(",
")",
"finally",
":",
"loader",
".",
"dispose",
"(",
")"
] |
Parse a YAML stream and produce parsing events.
|
[
"Parse",
"a",
"YAML",
"stream",
"and",
"produce",
"parsing",
"events",
"."
] |
python
|
train
|
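A usage sketch for `parse` above (PyYAML); it consumes the generator, which lazily yields one event per `get_event` call:

import yaml

for event in yaml.parse("a: 1\nb: [2, 3]\n"):
    print(type(event).__name__)  # StreamStartEvent, DocumentStartEvent, ...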
tensorflow/tensorboard
|
tensorboard/plugins/hparams/get_experiment.py
|
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/hparams/get_experiment.py#L36-L52
|
def run(self):
"""Handles the request specified on construction.
Returns:
An Experiment object.
"""
experiment = self._context.experiment()
if experiment is None:
raise error.HParamsError(
"Can't find an HParams-plugin experiment data in"
" the log directory. Note that it takes some time to"
" scan the log directory; if you just started"
" Tensorboard it could be that we haven't finished"
" scanning it yet. Consider trying again in a"
" few seconds.")
return experiment
|
[
"def",
"run",
"(",
"self",
")",
":",
"experiment",
"=",
"self",
".",
"_context",
".",
"experiment",
"(",
")",
"if",
"experiment",
"is",
"None",
":",
"raise",
"error",
".",
"HParamsError",
"(",
"\"Can't find an HParams-plugin experiment data in\"",
"\" the log directory. Note that it takes some time to\"",
"\" scan the log directory; if you just started\"",
"\" Tensorboard it could be that we haven't finished\"",
"\" scanning it yet. Consider trying again in a\"",
"\" few seconds.\"",
")",
"return",
"experiment"
] |
Handles the request specified on construction.
Returns:
An Experiment object.
|
[
"Handles",
"the",
"request",
"specified",
"on",
"construction",
"."
] |
python
|
train
|
sepandhaghighi/pycm
|
pycm/pycm_obj.py
|
https://github.com/sepandhaghighi/pycm/blob/cb03258afd6a821d10acba73c965aaac174bedcd/pycm/pycm_obj.py#L269-L315
|
def save_csv(
self,
name,
address=True,
class_param=None,
class_name=None,
matrix_save=True,
normalize=False):
"""
Save ConfusionMatrix in CSV file.
:param name: filename
:type name : str
:param address: flag for address return
:type address : bool
:param class_param : class parameters list for save, Example : ["TPR","TNR","AUC"]
:type class_param : list
    :param class_name : class name (subset of classes), Example :[1,2,3]
:type class_name : list
:param matrix_save : save matrix flag
:type matrix_save : bool
:param normalize : save normalize matrix flag
:type normalize : bool
:return: saving Status as dict {"Status":bool , "Message":str}
"""
try:
message = None
classes = class_filter(self.classes, class_name)
csv_file = open(name + ".csv", "w")
csv_data = csv_print(
classes,
self.class_stat,
self.digit,
class_param)
csv_file.write(csv_data)
if matrix_save:
matrix = self.table
if normalize:
matrix = self.normalized_table
csv_matrix_file = open(name + "_matrix" + ".csv", "w")
csv_matrix_data = csv_matrix_print(self.classes, matrix)
csv_matrix_file.write(csv_matrix_data)
if address:
message = os.path.join(os.getcwd(), name + ".csv")
return {"Status": True, "Message": message}
except Exception as e:
return {"Status": False, "Message": str(e)}
|
[
"def",
"save_csv",
"(",
"self",
",",
"name",
",",
"address",
"=",
"True",
",",
"class_param",
"=",
"None",
",",
"class_name",
"=",
"None",
",",
"matrix_save",
"=",
"True",
",",
"normalize",
"=",
"False",
")",
":",
"try",
":",
"message",
"=",
"None",
"classes",
"=",
"class_filter",
"(",
"self",
".",
"classes",
",",
"class_name",
")",
"csv_file",
"=",
"open",
"(",
"name",
"+",
"\".csv\"",
",",
"\"w\"",
")",
"csv_data",
"=",
"csv_print",
"(",
"classes",
",",
"self",
".",
"class_stat",
",",
"self",
".",
"digit",
",",
"class_param",
")",
"csv_file",
".",
"write",
"(",
"csv_data",
")",
"if",
"matrix_save",
":",
"matrix",
"=",
"self",
".",
"table",
"if",
"normalize",
":",
"matrix",
"=",
"self",
".",
"normalized_table",
"csv_matrix_file",
"=",
"open",
"(",
"name",
"+",
"\"_matrix\"",
"+",
"\".csv\"",
",",
"\"w\"",
")",
"csv_matrix_data",
"=",
"csv_matrix_print",
"(",
"self",
".",
"classes",
",",
"matrix",
")",
"csv_matrix_file",
".",
"write",
"(",
"csv_matrix_data",
")",
"if",
"address",
":",
"message",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"name",
"+",
"\".csv\"",
")",
"return",
"{",
"\"Status\"",
":",
"True",
",",
"\"Message\"",
":",
"message",
"}",
"except",
"Exception",
"as",
"e",
":",
"return",
"{",
"\"Status\"",
":",
"False",
",",
"\"Message\"",
":",
"str",
"(",
"e",
")",
"}"
] |
Save ConfusionMatrix in CSV file.
:param name: filename
:type name : str
:param address: flag for address return
:type address : bool
:param class_param : class parameters list for save, Example : ["TPR","TNR","AUC"]
:type class_param : list
:param class_name : class name (subset of classes), Example :[1,2,3]
:type class_name : list
:param matrix_save : save matrix flag
:type matrix_save : bool
:param normalize : save normalize matrix flag
:type normalize : bool
:return: saving Status as dict {"Status":bool , "Message":str}
|
[
"Save",
"ConfusionMatrix",
"in",
"CSV",
"file",
"."
] |
python
|
train
|
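A usage sketch for `save_csv` above, assuming the documented pycm entry point (a ConfusionMatrix built from actual/predicted vectors):

from pycm import ConfusionMatrix

cm = ConfusionMatrix(actual_vector=[0, 1, 1, 0], predict_vector=[0, 1, 0, 0])
result = cm.save_csv("report")  # also writes report_matrix.csv (matrix_save=True)
print(result)                   # {"Status": True, "Message": "/abs/path/report.csv"}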
meejah/txtorcon
|
txtorcon/onion.py
|
https://github.com/meejah/txtorcon/blob/14053b95adf0b4bd9dd9c317bece912a26578a93/txtorcon/onion.py#L1053-L1071
|
def _compute_permanent_id(private_key):
"""
Internal helper. Return an authenticated service's permanent ID
given an RSA private key object.
    The permanent ID is the base32 encoding of the first 10 bytes
    (80 bits) of the SHA1 hash of the public key.
"""
pub = private_key.public_key()
p = pub.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.PKCS1
)
z = ''.join(p.decode('ascii').strip().split('\n')[1:-1])
b = base64.b64decode(z)
h1 = hashlib.new('sha1')
h1.update(b)
permanent_id = h1.digest()[:10]
return base64.b32encode(permanent_id).lower().decode('ascii')
|
[
"def",
"_compute_permanent_id",
"(",
"private_key",
")",
":",
"pub",
"=",
"private_key",
".",
"public_key",
"(",
")",
"p",
"=",
"pub",
".",
"public_bytes",
"(",
"encoding",
"=",
"serialization",
".",
"Encoding",
".",
"PEM",
",",
"format",
"=",
"serialization",
".",
"PublicFormat",
".",
"PKCS1",
")",
"z",
"=",
"''",
".",
"join",
"(",
"p",
".",
"decode",
"(",
"'ascii'",
")",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'\\n'",
")",
"[",
"1",
":",
"-",
"1",
"]",
")",
"b",
"=",
"base64",
".",
"b64decode",
"(",
"z",
")",
"h1",
"=",
"hashlib",
".",
"new",
"(",
"'sha1'",
")",
"h1",
".",
"update",
"(",
"b",
")",
"permanent_id",
"=",
"h1",
".",
"digest",
"(",
")",
"[",
":",
"10",
"]",
"return",
"base64",
".",
"b32encode",
"(",
"permanent_id",
")",
".",
"lower",
"(",
")",
".",
"decode",
"(",
"'ascii'",
")"
] |
Internal helper. Return an authenticated service's permanent ID
given an RSA private key object.
The permanent ID is the base32 encoding of the first 10 bytes
(80 bits) of the SHA1 hash of the public key.
|
[
"Internal",
"helper",
".",
"Return",
"an",
"authenticated",
"service",
"s",
"permanent",
"ID",
"given",
"an",
"RSA",
"private",
"key",
"object",
"."
] |
python
|
train
|
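The hashing steps above can be reproduced with the standard library alone; `pub_der_bytes` is a hypothetical stand-in for the base64-decoded PKCS1 public key:

import base64
import hashlib

pub_der_bytes = b"0\x82\x01\n..."  # hypothetical decoded public-key bytes
digest = hashlib.sha1(pub_der_bytes).digest()
permanent_id = digest[:10]  # keep the first 10 bytes (80 bits) of the hash
print(base64.b32encode(permanent_id).lower().decode("ascii"))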
jbm950/pygame_toolbox
|
pygame_toolbox/tilegame_tools/__init__.py
|
https://github.com/jbm950/pygame_toolbox/blob/3fe32145fc149e4dd0963c30a2b6a4dddd4fac0e/pygame_toolbox/tilegame_tools/__init__.py#L234-L260
|
def set_offset(self, offset, mid=None):
"""This method will allow the menu to be placed anywhere in the open
window instead of just the upper left corner.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Inputs:
offset - This is the x,y tuple of the position that you want to
move the screen to.
mid - The offset will be treated as the value passed in instead of
the top left pixel.
'x' (the x point in offset will be treated as the middle of the
menu image)
'y' (the y point in offset will be treated as the middle of the
menu image)
'c' (the offset will be treated as the center of the menu image)
(doc string updated ver 0.1)
"""
ptg.BaseScreen.set_offset(self, offset, mid)
for i in self.tilelist:
for j in i:
j.rect[0] += offset[0]
j.rect[1] += offset[1]
|
[
"def",
"set_offset",
"(",
"self",
",",
"offset",
",",
"mid",
"=",
"None",
")",
":",
"ptg",
".",
"BaseScreen",
".",
"set_offset",
"(",
"self",
",",
"offset",
",",
"mid",
")",
"for",
"i",
"in",
"self",
".",
"tilelist",
":",
"for",
"j",
"in",
"i",
":",
"j",
".",
"rect",
"[",
"0",
"]",
"+=",
"offset",
"[",
"0",
"]",
"j",
".",
"rect",
"[",
"1",
"]",
"+=",
"offset",
"[",
"1",
"]"
] |
This method will allow the menu to be placed anywhere in the open
window instead of just the upper left corner.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Inputs:
offset - This is the x,y tuple of the position that you want to
move the screen to.
mid - The offset will be treated as the value passed in instead of
the top left pixel.
'x' (the x point in offset will be treated as the middle of the
menu image)
'y' (the y point in offset will be treated as the middle of the
menu image)
'c' (the offset will be treated as the center of the menu image)
(doc string updated ver 0.1)
|
[
"This",
"method",
"will",
"allow",
"the",
"menu",
"to",
"be",
"placed",
"anywhere",
"in",
"the",
"open",
"window",
"instead",
"of",
"just",
"the",
"upper",
"left",
"corner",
".",
"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~",
"Inputs",
":",
"offset",
"-",
"This",
"is",
"the",
"x",
"y",
"tuple",
"of",
"the",
"position",
"that",
"you",
"want",
"to",
"move",
"the",
"screen",
"to",
"."
] |
python
|
train
|
secdev/scapy
|
scapy/layers/tls/crypto/cipher_block.py
|
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/layers/tls/crypto/cipher_block.py#L77-L87
|
def encrypt(self, data):
"""
Encrypt the data. Also, update the cipher iv. This is needed for SSLv3
and TLS 1.0. For TLS 1.1/1.2, it is overwritten in TLS.post_build().
"""
if False in six.itervalues(self.ready):
raise CipherError(data)
encryptor = self._cipher.encryptor()
tmp = encryptor.update(data) + encryptor.finalize()
self.iv = tmp[-self.block_size:]
return tmp
|
[
"def",
"encrypt",
"(",
"self",
",",
"data",
")",
":",
"if",
"False",
"in",
"six",
".",
"itervalues",
"(",
"self",
".",
"ready",
")",
":",
"raise",
"CipherError",
"(",
"data",
")",
"encryptor",
"=",
"self",
".",
"_cipher",
".",
"encryptor",
"(",
")",
"tmp",
"=",
"encryptor",
".",
"update",
"(",
"data",
")",
"+",
"encryptor",
".",
"finalize",
"(",
")",
"self",
".",
"iv",
"=",
"tmp",
"[",
"-",
"self",
".",
"block_size",
":",
"]",
"return",
"tmp"
] |
Encrypt the data. Also, update the cipher iv. This is needed for SSLv3
and TLS 1.0. For TLS 1.1/1.2, it is overwritten in TLS.post_build().
|
[
"Encrypt",
"the",
"data",
".",
"Also",
"update",
"the",
"cipher",
"iv",
".",
"This",
"is",
"needed",
"for",
"SSLv3",
"and",
"TLS",
"1",
".",
"0",
".",
"For",
"TLS",
"1",
".",
"1",
"/",
"1",
".",
"2",
"it",
"is",
"overwritten",
"in",
"TLS",
".",
"post_build",
"()",
"."
] |
python
|
train
|
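The IV bookkeeping above (the "CBC residue" scheme: the last ciphertext block seeds the next record's IV) can be sketched with the `cryptography` package; key, IV and message below are dummy values:

from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

key = bytes(16)              # dummy AES-128 key
iv = bytes(16)               # dummy initial IV
block_size = 16
data = b"sixteen byte msg"   # already a multiple of the block size

encryptor = Cipher(algorithms.AES(key), modes.CBC(iv)).encryptor()
tmp = encryptor.update(data) + encryptor.finalize()
iv = tmp[-block_size:]       # next record chains from the last ciphertext block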
pvlib/pvlib-python
|
pvlib/pvsystem.py
|
https://github.com/pvlib/pvlib-python/blob/2e844a595b820b43d1170269781fa66bd0ccc8a3/pvlib/pvsystem.py#L1539-L1645
|
def retrieve_sam(name=None, path=None):
'''
Retrieve latest module and inverter info from a local file or the
SAM website.
This function will retrieve either:
* CEC module database
* Sandia Module database
* CEC Inverter database
* Anton Driesse Inverter database
and return it as a pandas DataFrame.
Parameters
----------
name : None or string, default None
Name can be one of:
* 'CECMod' - returns the CEC module database
* 'CECInverter' - returns the CEC Inverter database
* 'SandiaInverter' - returns the CEC Inverter database
(CEC is only current inverter db available; tag kept for
backwards compatibility)
* 'SandiaMod' - returns the Sandia Module database
* 'ADRInverter' - returns the ADR Inverter database
path : None or string, default None
Path to the SAM file. May also be a URL.
If both name and path are None, a dialogue will open allowing the
user to select a file.
Returns
-------
samfile : DataFrame
A DataFrame containing all the elements of the desired database.
Each column represents a module or inverter, and a specific
dataset can be retrieved by the command
Notes
-----
Files available at https://sam.nrel.gov/sites/default/files/
Examples
--------
>>> from pvlib import pvsystem
>>> invdb = pvsystem.retrieve_sam('CECInverter')
>>> inverter = invdb.AE_Solar_Energy__AE6_0__277V__277V__CEC_2012_
>>> inverter
Vac 277.000000
Paco 6000.000000
Pdco 6165.670000
Vdco 361.123000
Pso 36.792300
C0 -0.000002
C1 -0.000047
C2 -0.001861
C3 0.000721
Pnt 0.070000
Vdcmax 600.000000
Idcmax 32.000000
Mppt_low 200.000000
Mppt_high 500.000000
Name: AE_Solar_Energy__AE6_0__277V__277V__CEC_2012_, dtype: float64
'''
if name is not None:
name = name.lower()
data_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'data')
if name == 'cecmod':
csvdata = os.path.join(
data_path, 'sam-library-cec-modules-2017-6-5.csv')
elif name == 'sandiamod':
csvdata = os.path.join(
data_path, 'sam-library-sandia-modules-2015-6-30.csv')
elif name == 'adrinverter':
csvdata = os.path.join(data_path, 'adr-library-2013-10-01.csv')
elif name in ['cecinverter', 'sandiainverter']:
# Allowing either, to provide for old code,
# while aligning with current expectations
csvdata = os.path.join(
data_path, 'sam-library-cec-inverters-2018-3-18.csv')
else:
raise ValueError('invalid name {}'.format(name))
elif path is not None:
if path.startswith('http'):
response = urlopen(path)
csvdata = io.StringIO(response.read().decode(errors='ignore'))
else:
csvdata = path
elif name is None and path is None:
try:
# python 2
import Tkinter as tkinter
from tkFileDialog import askopenfilename
except ImportError:
# python 3
import tkinter
from tkinter.filedialog import askopenfilename
tkinter.Tk().withdraw()
csvdata = askopenfilename()
return _parse_raw_sam_df(csvdata)
|
[
"def",
"retrieve_sam",
"(",
"name",
"=",
"None",
",",
"path",
"=",
"None",
")",
":",
"if",
"name",
"is",
"not",
"None",
":",
"name",
"=",
"name",
".",
"lower",
"(",
")",
"data_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"__file__",
")",
")",
",",
"'data'",
")",
"if",
"name",
"==",
"'cecmod'",
":",
"csvdata",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data_path",
",",
"'sam-library-cec-modules-2017-6-5.csv'",
")",
"elif",
"name",
"==",
"'sandiamod'",
":",
"csvdata",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data_path",
",",
"'sam-library-sandia-modules-2015-6-30.csv'",
")",
"elif",
"name",
"==",
"'adrinverter'",
":",
"csvdata",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data_path",
",",
"'adr-library-2013-10-01.csv'",
")",
"elif",
"name",
"in",
"[",
"'cecinverter'",
",",
"'sandiainverter'",
"]",
":",
"# Allowing either, to provide for old code,",
"# while aligning with current expectations",
"csvdata",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data_path",
",",
"'sam-library-cec-inverters-2018-3-18.csv'",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'invalid name {}'",
".",
"format",
"(",
"name",
")",
")",
"elif",
"path",
"is",
"not",
"None",
":",
"if",
"path",
".",
"startswith",
"(",
"'http'",
")",
":",
"response",
"=",
"urlopen",
"(",
"path",
")",
"csvdata",
"=",
"io",
".",
"StringIO",
"(",
"response",
".",
"read",
"(",
")",
".",
"decode",
"(",
"errors",
"=",
"'ignore'",
")",
")",
"else",
":",
"csvdata",
"=",
"path",
"elif",
"name",
"is",
"None",
"and",
"path",
"is",
"None",
":",
"try",
":",
"# python 2",
"import",
"Tkinter",
"as",
"tkinter",
"from",
"tkFileDialog",
"import",
"askopenfilename",
"except",
"ImportError",
":",
"# python 3",
"import",
"tkinter",
"from",
"tkinter",
".",
"filedialog",
"import",
"askopenfilename",
"tkinter",
".",
"Tk",
"(",
")",
".",
"withdraw",
"(",
")",
"csvdata",
"=",
"askopenfilename",
"(",
")",
"return",
"_parse_raw_sam_df",
"(",
"csvdata",
")"
] |
Retrieve latest module and inverter info from a local file or the
SAM website.
This function will retrieve either:
* CEC module database
* Sandia Module database
* CEC Inverter database
* Anton Driesse Inverter database
and return it as a pandas DataFrame.
Parameters
----------
name : None or string, default None
Name can be one of:
* 'CECMod' - returns the CEC module database
* 'CECInverter' - returns the CEC Inverter database
* 'SandiaInverter' - returns the CEC Inverter database
(CEC is only current inverter db available; tag kept for
backwards compatibility)
* 'SandiaMod' - returns the Sandia Module database
* 'ADRInverter' - returns the ADR Inverter database
path : None or string, default None
Path to the SAM file. May also be a URL.
If both name and path are None, a dialogue will open allowing the
user to select a file.
Returns
-------
samfile : DataFrame
A DataFrame containing all the elements of the desired database.
Each column represents a module or inverter, and a specific
dataset can be retrieved by the command
Notes
-----
Files available at https://sam.nrel.gov/sites/default/files/
Examples
--------
>>> from pvlib import pvsystem
>>> invdb = pvsystem.retrieve_sam('CECInverter')
>>> inverter = invdb.AE_Solar_Energy__AE6_0__277V__277V__CEC_2012_
>>> inverter
Vac 277.000000
Paco 6000.000000
Pdco 6165.670000
Vdco 361.123000
Pso 36.792300
C0 -0.000002
C1 -0.000047
C2 -0.001861
C3 0.000721
Pnt 0.070000
Vdcmax 600.000000
Idcmax 32.000000
Mppt_low 200.000000
Mppt_high 500.000000
Name: AE_Solar_Energy__AE6_0__277V__277V__CEC_2012_, dtype: float64
|
[
"Retrieve",
"latest",
"module",
"and",
"inverter",
"info",
"from",
"a",
"local",
"file",
"or",
"the",
"SAM",
"website",
"."
] |
python
|
train
|
titusjan/argos
|
argos/repo/rtiplugins/hdf5.py
|
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/repo/rtiplugins/hdf5.py#L306-L318
|
def unit(self):
""" Returns the unit of the RTI by calling dataSetUnit on the underlying dataset
"""
unit = dataSetUnit(self._h5Dataset)
fieldNames = self._h5Dataset.dtype.names
# If the missing value attribute is a list with the same length as the number of fields,
# return the missing value for field that equals the self.nodeName.
if hasattr(unit, '__len__') and len(unit) == len(fieldNames):
idx = fieldNames.index(self.nodeName)
return unit[idx]
else:
return unit
|
[
"def",
"unit",
"(",
"self",
")",
":",
"unit",
"=",
"dataSetUnit",
"(",
"self",
".",
"_h5Dataset",
")",
"fieldNames",
"=",
"self",
".",
"_h5Dataset",
".",
"dtype",
".",
"names",
"# If the missing value attribute is a list with the same length as the number of fields,",
"# return the missing value for field that equals the self.nodeName.",
"if",
"hasattr",
"(",
"unit",
",",
"'__len__'",
")",
"and",
"len",
"(",
"unit",
")",
"==",
"len",
"(",
"fieldNames",
")",
":",
"idx",
"=",
"fieldNames",
".",
"index",
"(",
"self",
".",
"nodeName",
")",
"return",
"unit",
"[",
"idx",
"]",
"else",
":",
"return",
"unit"
] |
Returns the unit of the RTI by calling dataSetUnit on the underlying dataset
|
[
"Returns",
"the",
"unit",
"of",
"the",
"RTI",
"by",
"calling",
"dataSetUnit",
"on",
"the",
"underlying",
"dataset"
] |
python
|
train
|
elastic/elasticsearch-dsl-py
|
elasticsearch_dsl/search.py
|
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/elasticsearch_dsl/search.py#L474-L517
|
def source(self, fields=None, **kwargs):
"""
Selectively control how the _source field is returned.
:arg fields: wildcard string, array of wildcards, or dictionary of includes and excludes
If ``fields`` is None, the entire document will be returned for
each hit. If fields is a dictionary with keys of 'include' and/or
'exclude' the fields will be either included or excluded appropriately.
Calling this multiple times with the same named parameter will override the
previous values with the new ones.
Example::
s = Search()
s = s.source(include=['obj1.*'], exclude=["*.description"])
s = Search()
s = s.source(include=['obj1.*']).source(exclude=["*.description"])
"""
s = self._clone()
if fields and kwargs:
raise ValueError("You cannot specify fields and kwargs at the same time.")
if fields is not None:
s._source = fields
return s
if kwargs and not isinstance(s._source, dict):
s._source = {}
for key, value in kwargs.items():
if value is None:
try:
del s._source[key]
except KeyError:
pass
else:
s._source[key] = value
return s
|
[
"def",
"source",
"(",
"self",
",",
"fields",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"s",
"=",
"self",
".",
"_clone",
"(",
")",
"if",
"fields",
"and",
"kwargs",
":",
"raise",
"ValueError",
"(",
"\"You cannot specify fields and kwargs at the same time.\"",
")",
"if",
"fields",
"is",
"not",
"None",
":",
"s",
".",
"_source",
"=",
"fields",
"return",
"s",
"if",
"kwargs",
"and",
"not",
"isinstance",
"(",
"s",
".",
"_source",
",",
"dict",
")",
":",
"s",
".",
"_source",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"if",
"value",
"is",
"None",
":",
"try",
":",
"del",
"s",
".",
"_source",
"[",
"key",
"]",
"except",
"KeyError",
":",
"pass",
"else",
":",
"s",
".",
"_source",
"[",
"key",
"]",
"=",
"value",
"return",
"s"
] |
Selectively control how the _source field is returned.
:arg fields: wildcard string, array of wildcards, or dictionary of includes and excludes
If ``fields`` is None, the entire document will be returned for
each hit. If fields is a dictionary with keys of 'include' and/or
'exclude' the fields will be either included or excluded appropriately.
Calling this multiple times with the same named parameter will override the
previous values with the new ones.
Example::
s = Search()
s = s.source(include=['obj1.*'], exclude=["*.description"])
s = Search()
s = s.source(include=['obj1.*']).source(exclude=["*.description"])
|
[
"Selectively",
"control",
"how",
"the",
"_source",
"field",
"is",
"returned",
"."
] |
python
|
train
|
cathalgarvey/deadlock
|
deadlock/core.py
|
https://github.com/cathalgarvey/deadlock/blob/30099b476ff767611ce617150a0c574fc03fdf79/deadlock/core.py#L136-L166
|
def main_encrypt(A):
"Encrypt to recipient list using primary key OR prompted key. Recipients may be IDs or petnames."
profile = get_profile(A)
localKeys = profile.get('local keys', [])
if not localKeys:
localKeys = [make_lock_securely(warn_only = A.ignore_entropy)]
else:
localKeys = [crypto.UserLock.private_from_b64(k['private_key']) for k in localKeys]
# First key is considered "main"
userKey = localKeys[0]
print("User ID:", userKey.userID)
if not os.path.exists(A.path):
error_out("File or directory '{}' does not exist.".format(A.path))
# Create, fetch or error out for recipient list:
recipients = resolve_recipients(profile, A.recipient)
recipients.append(userKey)
print("Recipients:", *set(k.userID if isinstance(k, crypto.UserLock) else k for k in recipients))
# Do files OR folders
if os.path.isfile(A.path):
crypted = encrypt_file(A.path, userKey, recipients)
elif os.path.isdir(A.path):
crypted = encrypt_folder(A.path, userKey, recipients)
else:
error_out("Specified path '{}' is neither a file nor a folder.".format(A.path))
if A.base64:
crypted = crypto.b64encode(crypted)
if not A.output:
A.output = hex(int.from_bytes(os.urandom(6),'big'))[2:] + ".minilock"
print("Saving output to", A.output)
with open(A.output, "wb") as O:
O.write(crypted)
|
[
"def",
"main_encrypt",
"(",
"A",
")",
":",
"profile",
"=",
"get_profile",
"(",
"A",
")",
"localKeys",
"=",
"profile",
".",
"get",
"(",
"'local keys'",
",",
"[",
"]",
")",
"if",
"not",
"localKeys",
":",
"localKeys",
"=",
"[",
"make_lock_securely",
"(",
"warn_only",
"=",
"A",
".",
"ignore_entropy",
")",
"]",
"else",
":",
"localKeys",
"=",
"[",
"crypto",
".",
"UserLock",
".",
"private_from_b64",
"(",
"k",
"[",
"'private_key'",
"]",
")",
"for",
"k",
"in",
"localKeys",
"]",
"# First key is considered \"main\"",
"userKey",
"=",
"localKeys",
"[",
"0",
"]",
"print",
"(",
"\"User ID:\"",
",",
"userKey",
".",
"userID",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"A",
".",
"path",
")",
":",
"error_out",
"(",
"\"File or directory '{}' does not exist.\"",
".",
"format",
"(",
"A",
".",
"path",
")",
")",
"# Create, fetch or error out for recipient list:",
"recipients",
"=",
"resolve_recipients",
"(",
"profile",
",",
"A",
".",
"recipient",
")",
"recipients",
".",
"append",
"(",
"userKey",
")",
"print",
"(",
"\"Recipients:\"",
",",
"*",
"set",
"(",
"k",
".",
"userID",
"if",
"isinstance",
"(",
"k",
",",
"crypto",
".",
"UserLock",
")",
"else",
"k",
"for",
"k",
"in",
"recipients",
")",
")",
"# Do files OR folders",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"A",
".",
"path",
")",
":",
"crypted",
"=",
"encrypt_file",
"(",
"A",
".",
"path",
",",
"userKey",
",",
"recipients",
")",
"elif",
"os",
".",
"path",
".",
"isdir",
"(",
"A",
".",
"path",
")",
":",
"crypted",
"=",
"encrypt_folder",
"(",
"A",
".",
"path",
",",
"userKey",
",",
"recipients",
")",
"else",
":",
"error_out",
"(",
"\"Specified path '{}' is neither a file nor a folder.\"",
".",
"format",
"(",
"A",
".",
"path",
")",
")",
"if",
"A",
".",
"base64",
":",
"crypted",
"=",
"crypto",
".",
"b64encode",
"(",
"crypted",
")",
"if",
"not",
"A",
".",
"output",
":",
"A",
".",
"output",
"=",
"hex",
"(",
"int",
".",
"from_bytes",
"(",
"os",
".",
"urandom",
"(",
"6",
")",
",",
"'big'",
")",
")",
"[",
"2",
":",
"]",
"+",
"\".minilock\"",
"print",
"(",
"\"Saving output to\"",
",",
"A",
".",
"output",
")",
"with",
"open",
"(",
"A",
".",
"output",
",",
"\"wb\"",
")",
"as",
"O",
":",
"O",
".",
"write",
"(",
"crypted",
")"
] |
Encrypt to recipient list using primary key OR prompted key. Recipients may be IDs or petnames.
|
[
"Encrypt",
"to",
"recipient",
"list",
"using",
"primary",
"key",
"OR",
"prompted",
"key",
".",
"Recipients",
"may",
"be",
"IDs",
"or",
"petnames",
"."
] |
python
|
train
|
yinkaisheng/Python-UIAutomation-for-Windows
|
uiautomation/uiautomation.py
|
https://github.com/yinkaisheng/Python-UIAutomation-for-Windows/blob/2cc91060982cc8b777152e698d677cc2989bf263/uiautomation/uiautomation.py#L6416-L6464
|
def Select(self, itemName: str = '', condition: Callable = None, waitTime: float = OPERATION_WAIT_TIME) -> bool:
"""
        Show combobox's popup menu and select an item by name.
itemName: str.
condition: Callable function(comboBoxItemName: str)->bool, if condition is valid, ignore itemName.
waitTime: float.
        Some comboboxes don't support SelectionPattern; here is a workaround.
        This method tries to add selection support.
        It may not work for some comboboxes, such as comboboxes in older Qt versions.
        If it doesn't work, you should write your own version of Select, or it doesn't support selection at all.
"""
expandCollapsePattern = self.GetExpandCollapsePattern()
if expandCollapsePattern:
expandCollapsePattern.Expand()
else:
#Windows Form's ComboBoxControl doesn't support ExpandCollapsePattern
self.Click(x=-10, ratioY=0.5, simulateMove=False)
find = False
if condition:
listItemControl = self.ListItemControl(Compare=lambda c, d: condition(c.Name))
else:
listItemControl = self.ListItemControl(Name=itemName)
if listItemControl.Exists(1):
scrollItemPattern = listItemControl.GetScrollItemPattern()
if scrollItemPattern:
scrollItemPattern.ScrollIntoView(waitTime=0.1)
listItemControl.Click(waitTime=waitTime)
find = True
else:
#ComboBox's popup window is a child of root control
listControl = ListControl(searchDepth= 1)
if listControl.Exists(1):
if condition:
listItemControl = self.ListItemControl(Compare=lambda c, d: condition(c.Name))
else:
listItemControl = self.ListItemControl(Name=itemName)
if listItemControl.Exists(0, 0):
scrollItemPattern = listItemControl.GetScrollItemPattern()
if scrollItemPattern:
scrollItemPattern.ScrollIntoView(waitTime=0.1)
listItemControl.Click(waitTime=waitTime)
find = True
if not find:
Logger.ColorfullyWriteLine('Can\'t find <Color=Cyan>{}</Color> in ComboBoxControl or it does not support selection.'.format(itemName), ConsoleColor.Yellow)
if expandCollapsePattern:
expandCollapsePattern.Collapse(waitTime)
else:
self.Click(x=-10, ratioY=0.5, simulateMove=False, waitTime=waitTime)
return find
|
[
"def",
"Select",
"(",
"self",
",",
"itemName",
":",
"str",
"=",
"''",
",",
"condition",
":",
"Callable",
"=",
"None",
",",
"waitTime",
":",
"float",
"=",
"OPERATION_WAIT_TIME",
")",
"->",
"bool",
":",
"expandCollapsePattern",
"=",
"self",
".",
"GetExpandCollapsePattern",
"(",
")",
"if",
"expandCollapsePattern",
":",
"expandCollapsePattern",
".",
"Expand",
"(",
")",
"else",
":",
"#Windows Form's ComboBoxControl doesn't support ExpandCollapsePattern",
"self",
".",
"Click",
"(",
"x",
"=",
"-",
"10",
",",
"ratioY",
"=",
"0.5",
",",
"simulateMove",
"=",
"False",
")",
"find",
"=",
"False",
"if",
"condition",
":",
"listItemControl",
"=",
"self",
".",
"ListItemControl",
"(",
"Compare",
"=",
"lambda",
"c",
",",
"d",
":",
"condition",
"(",
"c",
".",
"Name",
")",
")",
"else",
":",
"listItemControl",
"=",
"self",
".",
"ListItemControl",
"(",
"Name",
"=",
"itemName",
")",
"if",
"listItemControl",
".",
"Exists",
"(",
"1",
")",
":",
"scrollItemPattern",
"=",
"listItemControl",
".",
"GetScrollItemPattern",
"(",
")",
"if",
"scrollItemPattern",
":",
"scrollItemPattern",
".",
"ScrollIntoView",
"(",
"waitTime",
"=",
"0.1",
")",
"listItemControl",
".",
"Click",
"(",
"waitTime",
"=",
"waitTime",
")",
"find",
"=",
"True",
"else",
":",
"#ComboBox's popup window is a child of root control",
"listControl",
"=",
"ListControl",
"(",
"searchDepth",
"=",
"1",
")",
"if",
"listControl",
".",
"Exists",
"(",
"1",
")",
":",
"if",
"condition",
":",
"listItemControl",
"=",
"self",
".",
"ListItemControl",
"(",
"Compare",
"=",
"lambda",
"c",
",",
"d",
":",
"condition",
"(",
"c",
".",
"Name",
")",
")",
"else",
":",
"listItemControl",
"=",
"self",
".",
"ListItemControl",
"(",
"Name",
"=",
"itemName",
")",
"if",
"listItemControl",
".",
"Exists",
"(",
"0",
",",
"0",
")",
":",
"scrollItemPattern",
"=",
"listItemControl",
".",
"GetScrollItemPattern",
"(",
")",
"if",
"scrollItemPattern",
":",
"scrollItemPattern",
".",
"ScrollIntoView",
"(",
"waitTime",
"=",
"0.1",
")",
"listItemControl",
".",
"Click",
"(",
"waitTime",
"=",
"waitTime",
")",
"find",
"=",
"True",
"if",
"not",
"find",
":",
"Logger",
".",
"ColorfullyWriteLine",
"(",
"'Can\\'t find <Color=Cyan>{}</Color> in ComboBoxControl or it does not support selection.'",
".",
"format",
"(",
"itemName",
")",
",",
"ConsoleColor",
".",
"Yellow",
")",
"if",
"expandCollapsePattern",
":",
"expandCollapsePattern",
".",
"Collapse",
"(",
"waitTime",
")",
"else",
":",
"self",
".",
"Click",
"(",
"x",
"=",
"-",
"10",
",",
"ratioY",
"=",
"0.5",
",",
"simulateMove",
"=",
"False",
",",
"waitTime",
"=",
"waitTime",
")",
"return",
"find"
] |
Show combobox's popup menu and select an item by name.
itemName: str.
condition: Callable function(comboBoxItemName: str)->bool, if condition is valid, ignore itemName.
waitTime: float.
Some comboboxes don't support SelectionPattern; here is a workaround.
This method tries to add selection support.
It may not work for some comboboxes, such as comboboxes in older Qt versions.
If it doesn't work, you should write your own version of Select, or it doesn't support selection at all.
|
[
"Show",
"combobox",
"s",
"popup",
"menu",
"and",
"select",
"a",
"item",
"by",
"name",
".",
"itemName",
":",
"str",
".",
"condition",
":",
"Callable",
"function",
"(",
"comboBoxItemName",
":",
"str",
")",
"-",
">",
"bool",
"if",
"condition",
"is",
"valid",
"ignore",
"itemName",
".",
"waitTime",
":",
"float",
".",
"Some",
"comboboxs",
"doesn",
"t",
"support",
"SelectionPattern",
"here",
"is",
"a",
"workaround",
".",
"This",
"method",
"tries",
"to",
"and",
"selection",
"support",
".",
"It",
"may",
"not",
"work",
"for",
"some",
"comboboxes",
"such",
"as",
"comboboxes",
"in",
"older",
"Qt",
"version",
".",
"If",
"it",
"doesn",
"t",
"work",
"you",
"should",
"write",
"your",
"own",
"version",
"Select",
"or",
"it",
"doesn",
"t",
"support",
"selection",
"at",
"all",
"."
] |
python
|
valid
|
openstack/horizon
|
horizon/tables/actions.py
|
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/horizon/tables/actions.py#L388-L413
|
def get_link_url(self, datum=None):
"""Returns the final URL based on the value of ``url``.
If ``url`` is callable it will call the function.
If not, it will then try to call ``reverse`` on ``url``.
Failing that, it will simply return the value of ``url`` as-is.
When called for a row action, the current row data object will be
passed as the first parameter.
"""
if not self.url:
raise NotImplementedError('A LinkAction class must have a '
'url attribute or define its own '
'get_link_url method.')
if callable(self.url):
return self.url(datum, **self.kwargs)
try:
if datum:
obj_id = self.table.get_object_id(datum)
return urls.reverse(self.url, args=(obj_id,))
else:
return urls.reverse(self.url)
except urls.NoReverseMatch as ex:
LOG.info('No reverse found for "%(url)s": %(exception)s',
{'url': self.url, 'exception': ex})
return self.url
|
[
"def",
"get_link_url",
"(",
"self",
",",
"datum",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"url",
":",
"raise",
"NotImplementedError",
"(",
"'A LinkAction class must have a '",
"'url attribute or define its own '",
"'get_link_url method.'",
")",
"if",
"callable",
"(",
"self",
".",
"url",
")",
":",
"return",
"self",
".",
"url",
"(",
"datum",
",",
"*",
"*",
"self",
".",
"kwargs",
")",
"try",
":",
"if",
"datum",
":",
"obj_id",
"=",
"self",
".",
"table",
".",
"get_object_id",
"(",
"datum",
")",
"return",
"urls",
".",
"reverse",
"(",
"self",
".",
"url",
",",
"args",
"=",
"(",
"obj_id",
",",
")",
")",
"else",
":",
"return",
"urls",
".",
"reverse",
"(",
"self",
".",
"url",
")",
"except",
"urls",
".",
"NoReverseMatch",
"as",
"ex",
":",
"LOG",
".",
"info",
"(",
"'No reverse found for \"%(url)s\": %(exception)s'",
",",
"{",
"'url'",
":",
"self",
".",
"url",
",",
"'exception'",
":",
"ex",
"}",
")",
"return",
"self",
".",
"url"
] |
Returns the final URL based on the value of ``url``.
If ``url`` is callable it will call the function.
If not, it will then try to call ``reverse`` on ``url``.
Failing that, it will simply return the value of ``url`` as-is.
When called for a row action, the current row data object will be
passed as the first parameter.
|
[
"Returns",
"the",
"final",
"URL",
"based",
"on",
"the",
"value",
"of",
"url",
"."
] |
python
|
train
|
stephenmcd/gunicorn-console
|
gunicorn_console.py
|
https://github.com/stephenmcd/gunicorn-console/blob/f5c9b9a69ea1f2ca00aac3565cb99491684d868a/gunicorn_console.py#L265-L295
|
def main():
"""
Main entry point for gunicorn_console.
"""
# Set up curses.
stdscr = curses.initscr()
curses.start_color()
curses.init_pair(1, foreground_colour, background_colour)
curses.noecho()
stdscr.keypad(True)
stdscr.nodelay(True)
try:
curses.curs_set(False)
except:
pass
try:
# Run main event loop until quit.
while True:
try:
update_gunicorns()
handle_keypress(stdscr)
display_output(stdscr)
curses.napms(int(screen_delay * 1000))
except KeyboardInterrupt:
break
finally:
# Tear down curses.
curses.nocbreak()
stdscr.keypad(False)
curses.echo()
curses.endwin()
|
[
"def",
"main",
"(",
")",
":",
"# Set up curses.",
"stdscr",
"=",
"curses",
".",
"initscr",
"(",
")",
"curses",
".",
"start_color",
"(",
")",
"curses",
".",
"init_pair",
"(",
"1",
",",
"foreground_colour",
",",
"background_colour",
")",
"curses",
".",
"noecho",
"(",
")",
"stdscr",
".",
"keypad",
"(",
"True",
")",
"stdscr",
".",
"nodelay",
"(",
"True",
")",
"try",
":",
"curses",
".",
"curs_set",
"(",
"False",
")",
"except",
":",
"pass",
"try",
":",
"# Run main event loop until quit.",
"while",
"True",
":",
"try",
":",
"update_gunicorns",
"(",
")",
"handle_keypress",
"(",
"stdscr",
")",
"display_output",
"(",
"stdscr",
")",
"curses",
".",
"napms",
"(",
"int",
"(",
"screen_delay",
"*",
"1000",
")",
")",
"except",
"KeyboardInterrupt",
":",
"break",
"finally",
":",
"# Tear down curses.",
"curses",
".",
"nocbreak",
"(",
")",
"stdscr",
".",
"keypad",
"(",
"False",
")",
"curses",
".",
"echo",
"(",
")",
"curses",
".",
"endwin",
"(",
")"
] |
Main entry point for gunicorn_console.
|
[
"Main",
"entry",
"point",
"for",
"gunicorn_console",
"."
] |
python
|
train
|
DataBiosphere/toil
|
src/toil/cwl/cwltoil.py
|
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/cwl/cwltoil.py#L1031-L1037
|
def cleanTempDirs(job):
"""Remove temporarly created directories."""
    if isinstance(job, CWLJob) and job._succeeded: # Only CWLJobs have this attribute.
for tempDir in job.openTempDirs:
if os.path.exists(tempDir):
shutil.rmtree(tempDir)
job.openTempDirs = []
|
[
"def",
"cleanTempDirs",
"(",
"job",
")",
":",
"if",
"job",
"is",
"CWLJob",
"and",
"job",
".",
"_succeeded",
":",
"# Only CWLJobs have this attribute.",
"for",
"tempDir",
"in",
"job",
".",
"openTempDirs",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"tempDir",
")",
":",
"shutil",
".",
"rmtree",
"(",
"tempDir",
")",
"job",
".",
"openTempDirs",
"=",
"[",
"]"
] |
Remove temporarily created directories.
|
[
"Remove",
"temporarly",
"created",
"directories",
"."
] |
python
|
train
|
halcy/Mastodon.py
|
mastodon/Mastodon.py
|
https://github.com/halcy/Mastodon.py/blob/35c43562dd3d34d6ebf7a0f757c09e8fcccc957c/mastodon/Mastodon.py#L635-L642
|
def timeline_local(self, max_id=None, min_id=None, since_id=None, limit=None):
"""
Fetches the local / instance-wide timeline, not including replies.
Returns a list of `toot dicts`_.
"""
return self.timeline('local', max_id=max_id, min_id=min_id,
since_id=since_id, limit=limit)
|
[
"def",
"timeline_local",
"(",
"self",
",",
"max_id",
"=",
"None",
",",
"min_id",
"=",
"None",
",",
"since_id",
"=",
"None",
",",
"limit",
"=",
"None",
")",
":",
"return",
"self",
".",
"timeline",
"(",
"'local'",
",",
"max_id",
"=",
"max_id",
",",
"min_id",
"=",
"min_id",
",",
"since_id",
"=",
"since_id",
",",
"limit",
"=",
"limit",
")"
] |
Fetches the local / instance-wide timeline, not including replies.
Returns a list of `toot dicts`_.
|
[
"Fetches",
"the",
"local",
"/",
"instance",
"-",
"wide",
"timeline",
"not",
"including",
"replies",
"."
] |
python
|
train
|
QualiSystems/vCenterShell
|
package/cloudshell/cp/vcenter/common/vcenter/deployment_details_factory.py
|
https://github.com/QualiSystems/vCenterShell/blob/e2e24cd938a92a68f4a8e6a860810d3ef72aae6d/package/cloudshell/cp/vcenter/common/vcenter/deployment_details_factory.py#L6-L25
|
def create_deployment_details(vcenter_resource_model, vm_cluster, vm_storage, vm_resource_pool, vm_location):
"""
:type vcenter_resource_model: VMwarevCenterResourceModel
:type vm_cluster: str
:type vm_storage: str
:type vm_resource_pool: str
:type vm_location: str
:rtype: DeploymentDetails
"""
vm_cluster = vm_cluster or vcenter_resource_model.vm_cluster
vm_storage = vm_storage or vcenter_resource_model.vm_storage
vm_resource_pool = vm_resource_pool or vcenter_resource_model.vm_resource_pool
vm_location = vm_location or vcenter_resource_model.vm_location
return DeploymentDetails(
vm_cluster=vm_cluster,
vm_storage=vm_storage,
vm_resource_pool=vm_resource_pool,
vm_location=vm_location
)
|
[
"def",
"create_deployment_details",
"(",
"vcenter_resource_model",
",",
"vm_cluster",
",",
"vm_storage",
",",
"vm_resource_pool",
",",
"vm_location",
")",
":",
"vm_cluster",
"=",
"vm_cluster",
"or",
"vcenter_resource_model",
".",
"vm_cluster",
"vm_storage",
"=",
"vm_storage",
"or",
"vcenter_resource_model",
".",
"vm_storage",
"vm_resource_pool",
"=",
"vm_resource_pool",
"or",
"vcenter_resource_model",
".",
"vm_resource_pool",
"vm_location",
"=",
"vm_location",
"or",
"vcenter_resource_model",
".",
"vm_location",
"return",
"DeploymentDetails",
"(",
"vm_cluster",
"=",
"vm_cluster",
",",
"vm_storage",
"=",
"vm_storage",
",",
"vm_resource_pool",
"=",
"vm_resource_pool",
",",
"vm_location",
"=",
"vm_location",
")"
] |
:type vcenter_resource_model: VMwarevCenterResourceModel
:type vm_cluster: str
:type vm_storage: str
:type vm_resource_pool: str
:type vm_location: str
:rtype: DeploymentDetails
|
[
":",
"type",
"vcenter_resource_model",
":",
"VMwarevCenterResourceModel",
":",
"type",
"vm_cluster",
":",
"str",
":",
"type",
"vm_storage",
":",
"str",
":",
"type",
"vm_resource_pool",
":",
"str",
":",
"type",
"vm_location",
":",
"str",
":",
"rtype",
":",
"DeploymentDetails"
] |
python
|
train
|
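A tiny sketch of the fallback pattern used above: an explicit argument wins, otherwise the resource model's default applies; note `or` tests truthiness, so empty strings also fall through:

def pick(value, default):
    # '' and None both fall back to the default (truthiness, not identity)
    return value or default

print(pick(None, "DefaultCluster"))  # DefaultCluster
print(pick("", "DefaultStorage"))    # DefaultStorage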
tensorflow/tensor2tensor
|
tensor2tensor/models/research/vqa_self_attention.py
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/vqa_self_attention.py#L654-L678
|
def iterative_encoder_decoder(encoder_input,
encoder_self_attention_bias,
encoder_decoder_attention_bias,
query,
hparams):
"""Iterative encoder decoder."""
for _ in range(hparams.num_rec_steps):
with tf.variable_scope("step", reuse=tf.AUTO_REUSE):
encoder_output = image_question_encoder(
encoder_input,
encoder_self_attention_bias,
hparams,
query)
decoder_output = decoder(
query,
encoder_output,
None,
encoder_decoder_attention_bias,
hparams)
encoder_input = encoder_output
query = decoder_output
return decoder_output
|
[
"def",
"iterative_encoder_decoder",
"(",
"encoder_input",
",",
"encoder_self_attention_bias",
",",
"encoder_decoder_attention_bias",
",",
"query",
",",
"hparams",
")",
":",
"for",
"_",
"in",
"range",
"(",
"hparams",
".",
"num_rec_steps",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"\"step\"",
",",
"reuse",
"=",
"tf",
".",
"AUTO_REUSE",
")",
":",
"encoder_output",
"=",
"image_question_encoder",
"(",
"encoder_input",
",",
"encoder_self_attention_bias",
",",
"hparams",
",",
"query",
")",
"decoder_output",
"=",
"decoder",
"(",
"query",
",",
"encoder_output",
",",
"None",
",",
"encoder_decoder_attention_bias",
",",
"hparams",
")",
"encoder_input",
"=",
"encoder_output",
"query",
"=",
"decoder_output",
"return",
"decoder_output"
] |
Iterative encoder decoder.
|
[
"Iterative",
"encoder",
"decoder",
"."
] |
python
|
train
|
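A minimal sketch of the weight sharing behind the loop above, using the TensorFlow 1.x API the function itself relies on; `step_fn` is a hypothetical stand-in for the encoder/decoder pair:

import tensorflow as tf

def step_fn(x):
    return tf.layers.dense(x, 8, name="dense")  # one refinement step

x = tf.placeholder(tf.float32, [None, 8])
for _ in range(3):
    with tf.variable_scope("step", reuse=tf.AUTO_REUSE):
        x = step_fn(x)  # every pass reuses the same "step/dense" weights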
buzzfeed/caliendo
|
caliendo/expected_value.py
|
https://github.com/buzzfeed/caliendo/blob/1628a10f7782ad67c0422b5cbc9bf4979ac40abc/caliendo/expected_value.py#L131-L142
|
def save( self ):
"""
Save method for the ExpectedValue of a call.
"""
packets = self.__enumerate_packets()
delete_expected_value(self.call_hash)
for packet in packets:
packet['call_hash'] = self.call_hash
insert_expected_value(packet)
return self
|
[
"def",
"save",
"(",
"self",
")",
":",
"packets",
"=",
"self",
".",
"__enumerate_packets",
"(",
")",
"delete_expected_value",
"(",
"self",
".",
"call_hash",
")",
"for",
"packet",
"in",
"packets",
":",
"packet",
"[",
"'call_hash'",
"]",
"=",
"self",
".",
"call_hash",
"insert_expected_value",
"(",
"packet",
")",
"return",
"self"
] |
Save method for the ExpectedValue of a call.
|
[
"Save",
"method",
"for",
"the",
"ExpectedValue",
"of",
"a",
"call",
"."
] |
python
|
train
|
matthewdeanmartin/jiggle_version
|
sample_projects/ver_in_weird_file/setup_helpers.py
|
https://github.com/matthewdeanmartin/jiggle_version/blob/963656a0a47b7162780a5f6c8f4b8bbbebc148f5/sample_projects/ver_in_weird_file/setup_helpers.py#L125-L134
|
def long_description(*filenames):
"""Provide a long description."""
res = ['']
for filename in filenames:
with open(filename) as fp:
for line in fp:
res.append(' ' + line)
res.append('')
res.append('\n')
return EMPTYSTRING.join(res)
|
[
"def",
"long_description",
"(",
"*",
"filenames",
")",
":",
"res",
"=",
"[",
"''",
"]",
"for",
"filename",
"in",
"filenames",
":",
"with",
"open",
"(",
"filename",
")",
"as",
"fp",
":",
"for",
"line",
"in",
"fp",
":",
"res",
".",
"append",
"(",
"' '",
"+",
"line",
")",
"res",
".",
"append",
"(",
"''",
")",
"res",
".",
"append",
"(",
"'\\n'",
")",
"return",
"EMPTYSTRING",
".",
"join",
"(",
"res",
")"
] |
Provide a long description.
|
[
"Provide",
"a",
"long",
"description",
"."
] |
python
|
train
|
openid/python-openid
|
openid/consumer/consumer.py
|
https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/consumer/consumer.py#L989-L1035
|
def _verifyDiscoverySingle(self, endpoint, to_match):
"""Verify that the given endpoint matches the information
extracted from the OpenID assertion, and raise an exception if
there is a mismatch.
@type endpoint: openid.consumer.discover.OpenIDServiceEndpoint
@type to_match: openid.consumer.discover.OpenIDServiceEndpoint
@rtype: NoneType
@raises ProtocolError: when the endpoint does not match the
discovered information.
"""
# Every type URI that's in the to_match endpoint has to be
# present in the discovered endpoint.
for type_uri in to_match.type_uris:
if not endpoint.usesExtension(type_uri):
raise TypeURIMismatch(type_uri, endpoint)
# Fragments do not influence discovery, so we can't compare a
# claimed identifier with a fragment to discovered information.
defragged_claimed_id, _ = urldefrag(to_match.claimed_id)
if defragged_claimed_id != endpoint.claimed_id:
raise ProtocolError(
'Claimed ID does not match (different subjects!), '
'Expected %s, got %s' %
(defragged_claimed_id, endpoint.claimed_id))
if to_match.getLocalID() != endpoint.getLocalID():
raise ProtocolError('local_id mismatch. Expected %s, got %s' %
(to_match.getLocalID(), endpoint.getLocalID()))
# If the server URL is None, this must be an OpenID 1
# response, because op_endpoint is a required parameter in
# OpenID 2. In that case, we don't actually care what the
# discovered server_url is, because signature checking or
# check_auth should take care of that check for us.
if to_match.server_url is None:
assert to_match.preferredNamespace() == OPENID1_NS, (
"""The code calling this must ensure that OpenID 2
responses have a non-none `openid.op_endpoint' and
that it is set as the `server_url' attribute of the
`to_match' endpoint.""")
elif to_match.server_url != endpoint.server_url:
raise ProtocolError('OP Endpoint mismatch. Expected %s, got %s' %
(to_match.server_url, endpoint.server_url))
|
[
"def",
"_verifyDiscoverySingle",
"(",
"self",
",",
"endpoint",
",",
"to_match",
")",
":",
"# Every type URI that's in the to_match endpoint has to be",
"# present in the discovered endpoint.",
"for",
"type_uri",
"in",
"to_match",
".",
"type_uris",
":",
"if",
"not",
"endpoint",
".",
"usesExtension",
"(",
"type_uri",
")",
":",
"raise",
"TypeURIMismatch",
"(",
"type_uri",
",",
"endpoint",
")",
"# Fragments do not influence discovery, so we can't compare a",
"# claimed identifier with a fragment to discovered information.",
"defragged_claimed_id",
",",
"_",
"=",
"urldefrag",
"(",
"to_match",
".",
"claimed_id",
")",
"if",
"defragged_claimed_id",
"!=",
"endpoint",
".",
"claimed_id",
":",
"raise",
"ProtocolError",
"(",
"'Claimed ID does not match (different subjects!), '",
"'Expected %s, got %s'",
"%",
"(",
"defragged_claimed_id",
",",
"endpoint",
".",
"claimed_id",
")",
")",
"if",
"to_match",
".",
"getLocalID",
"(",
")",
"!=",
"endpoint",
".",
"getLocalID",
"(",
")",
":",
"raise",
"ProtocolError",
"(",
"'local_id mismatch. Expected %s, got %s'",
"%",
"(",
"to_match",
".",
"getLocalID",
"(",
")",
",",
"endpoint",
".",
"getLocalID",
"(",
")",
")",
")",
"# If the server URL is None, this must be an OpenID 1",
"# response, because op_endpoint is a required parameter in",
"# OpenID 2. In that case, we don't actually care what the",
"# discovered server_url is, because signature checking or",
"# check_auth should take care of that check for us.",
"if",
"to_match",
".",
"server_url",
"is",
"None",
":",
"assert",
"to_match",
".",
"preferredNamespace",
"(",
")",
"==",
"OPENID1_NS",
",",
"(",
"\"\"\"The code calling this must ensure that OpenID 2\n responses have a non-none `openid.op_endpoint' and\n that it is set as the `server_url' attribute of the\n `to_match' endpoint.\"\"\"",
")",
"elif",
"to_match",
".",
"server_url",
"!=",
"endpoint",
".",
"server_url",
":",
"raise",
"ProtocolError",
"(",
"'OP Endpoint mismatch. Expected %s, got %s'",
"%",
"(",
"to_match",
".",
"server_url",
",",
"endpoint",
".",
"server_url",
")",
")"
] |
Verify that the given endpoint matches the information
extracted from the OpenID assertion, and raise an exception if
there is a mismatch.
@type endpoint: openid.consumer.discover.OpenIDServiceEndpoint
@type to_match: openid.consumer.discover.OpenIDServiceEndpoint
@rtype: NoneType
@raises ProtocolError: when the endpoint does not match the
discovered information.
|
[
"Verify",
"that",
"the",
"given",
"endpoint",
"matches",
"the",
"information",
"extracted",
"from",
"the",
"OpenID",
"assertion",
"and",
"raise",
"an",
"exception",
"if",
"there",
"is",
"a",
"mismatch",
"."
] |
python
|
train
|
google/grr
|
grr/core/grr_response_core/lib/util/random.py
|
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/util/random.py#L29-L38
|
def UInt32():
"""Returns a pseudo-random 32-bit unsigned integer."""
with _mutex:
try:
return _random_buffer.pop()
except IndexError:
data = os.urandom(struct.calcsize("=L") * _random_buffer_size)
_random_buffer.extend(
struct.unpack("=" + "L" * _random_buffer_size, data))
return _random_buffer.pop()
|
[
"def",
"UInt32",
"(",
")",
":",
"with",
"_mutex",
":",
"try",
":",
"return",
"_random_buffer",
".",
"pop",
"(",
")",
"except",
"IndexError",
":",
"data",
"=",
"os",
".",
"urandom",
"(",
"struct",
".",
"calcsize",
"(",
"\"=L\"",
")",
"*",
"_random_buffer_size",
")",
"_random_buffer",
".",
"extend",
"(",
"struct",
".",
"unpack",
"(",
"\"=\"",
"+",
"\"L\"",
"*",
"_random_buffer_size",
",",
"data",
")",
")",
"return",
"_random_buffer",
".",
"pop",
"(",
")"
] |
Returns a pseudo-random 32-bit unsigned integer.
|
[
"Returns",
"a",
"pseudo",
"-",
"random",
"32",
"-",
"bit",
"unsigned",
"integer",
"."
] |
python
|
train
|
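A standalone sketch of the unpack step used above, for reference (the buffer size here is illustrative, not grr's actual _random_buffer_size):

import os
import struct

n = 4  # hypothetical buffer size
data = os.urandom(struct.calcsize("=L") * n)   # n * 4 random bytes
values = struct.unpack("=" + "L" * n, data)    # tuple of n native-endian uint32s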
lpantano/seqcluster
|
seqcluster/seqbuster/snps.py
|
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/seqbuster/snps.py#L80-L92
|
def print_vcf(data):
"""Print vcf line following rules."""
id_name = "."
qual = "."
chrom = data['chrom']
pos = data['pre_pos']
nt_ref = data['nt'][1]
nt_snp = data['nt'][0]
flt = "PASS"
info = "ID=%s" % data['mature']
frmt = "GT:NR:NS"
gntp = "%s:%s:%s" % (_genotype(data), data["counts"], data["diff"])
print("\t".join(map(str, [chrom, pos, id_name, nt_ref, nt_snp, qual, flt, info, frmt, gntp])), file=STDOUT, end="")
|
[
"def",
"print_vcf",
"(",
"data",
")",
":",
"id_name",
"=",
"\".\"",
"qual",
"=",
"\".\"",
"chrom",
"=",
"data",
"[",
"'chrom'",
"]",
"pos",
"=",
"data",
"[",
"'pre_pos'",
"]",
"nt_ref",
"=",
"data",
"[",
"'nt'",
"]",
"[",
"1",
"]",
"nt_snp",
"=",
"data",
"[",
"'nt'",
"]",
"[",
"0",
"]",
"flt",
"=",
"\"PASS\"",
"info",
"=",
"\"ID=%s\"",
"%",
"data",
"[",
"'mature'",
"]",
"frmt",
"=",
"\"GT:NR:NS\"",
"gntp",
"=",
"\"%s:%s:%s\"",
"%",
"(",
"_genotype",
"(",
"data",
")",
",",
"data",
"[",
"\"counts\"",
"]",
",",
"data",
"[",
"\"diff\"",
"]",
")",
"print",
"(",
"\"\\t\"",
".",
"join",
"(",
"map",
"(",
"str",
",",
"[",
"chrom",
",",
"pos",
",",
"id_name",
",",
"nt_ref",
",",
"nt_snp",
",",
"qual",
",",
"flt",
",",
"info",
",",
"frmt",
",",
"gntp",
"]",
")",
")",
",",
"file",
"=",
"STDOUT",
",",
"end",
"=",
"\"\"",
")"
] |
Print vcf line following rules.
|
[
"Print",
"vcf",
"line",
"following",
"rules",
"."
] |
python
|
train
|
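A hedged usage sketch for print_vcf; the shape of `data` is inferred from the field accesses above, and _genotype/STDOUT are assumed from the surrounding module:

data = {'chrom': 'chr1', 'pre_pos': 12345, 'nt': ['G', 'A'],
        'mature': 'hsa-miR-21-5p', 'counts': 15, 'diff': 3}
print_vcf(data)  # writes one tab-separated VCF record to STDOUT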
imbolc/aiohttp-login
|
aiohttp_login/sql.py
|
https://github.com/imbolc/aiohttp-login/blob/43b30d8630ca5c14d4b75c398eb5f6a27ddf0a52/aiohttp_login/sql.py#L60-L71
|
def update_sql(table, filter, updates):
'''
>>> update_sql('tbl', {'foo': 'a', 'bar': 1}, {'bar': 2, 'baz': 'b'})
('UPDATE tbl SET bar=$1, baz=$2 WHERE bar=$3 AND foo=$4', [2, 'b', 1, 'a'])
'''
where_keys, where_vals = _split_dict(filter)
up_keys, up_vals = _split_dict(updates)
changes = _pairs(up_keys, sep=', ')
where = _pairs(where_keys, start=len(up_keys) + 1)
sql = 'UPDATE {} SET {} WHERE {}'.format(
table, changes, where)
return sql, up_vals + where_vals
|
[
"def",
"update_sql",
"(",
"table",
",",
"filter",
",",
"updates",
")",
":",
"where_keys",
",",
"where_vals",
"=",
"_split_dict",
"(",
"filter",
")",
"up_keys",
",",
"up_vals",
"=",
"_split_dict",
"(",
"updates",
")",
"changes",
"=",
"_pairs",
"(",
"up_keys",
",",
"sep",
"=",
"', '",
")",
"where",
"=",
"_pairs",
"(",
"where_keys",
",",
"start",
"=",
"len",
"(",
"up_keys",
")",
"+",
"1",
")",
"sql",
"=",
"'UPDATE {} SET {} WHERE {}'",
".",
"format",
"(",
"table",
",",
"changes",
",",
"where",
")",
"return",
"sql",
",",
"up_vals",
"+",
"where_vals"
] |
>>> update_sql('tbl', {'foo': 'a', 'bar': 1}, {'bar': 2, 'baz': 'b'})
('UPDATE tbl SET bar=$1, baz=$2 WHERE bar=$3 AND foo=$4', [2, 'b', 1, 'a'])
|
[
">>>",
"update_sql",
"(",
"tbl",
"{",
"foo",
":",
"a",
"bar",
":",
"1",
"}",
"{",
"bar",
":",
"2",
"baz",
":",
"b",
"}",
")",
"(",
"UPDATE",
"tbl",
"SET",
"bar",
"=",
"$1",
"baz",
"=",
"$2",
"WHERE",
"bar",
"=",
"$3",
"AND",
"foo",
"=",
"$4",
"[",
"2",
"b",
"1",
"a",
"]",
")"
] |
python
|
train
|
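Another worked call in the same pattern as the doctest (hypothetical table and column names); _split_dict sorts keys, so single-key dicts behave predictably:

sql, args = update_sql('users', {'id': 7}, {'name': 'Ann'})
# sql  == 'UPDATE users SET name=$1 WHERE id=$2'
# args == ['Ann', 7]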
yyuu/botornado
|
boto/ec2/keypair.py
|
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/ec2/keypair.py#L60-L89
|
def save(self, directory_path):
"""
Save the material (the unencrypted PEM encoded RSA private key)
of a newly created KeyPair to a local file.
:type directory_path: string
:param directory_path: The fully qualified path to the directory
in which the keypair will be saved. The
keypair file will be named using the name
of the keypair as the base name and .pem
for the file extension. If a file of that
name already exists in the directory, an
exception will be raised and the old file
will not be overwritten.
:rtype: bool
:return: True if successful.
"""
if self.material:
directory_path = os.path.expanduser(directory_path)
file_path = os.path.join(directory_path, '%s.pem' % self.name)
if os.path.exists(file_path):
raise BotoClientError('%s already exists, it will not be overwritten' % file_path)
fp = open(file_path, 'wb')
fp.write(self.material)
fp.close()
os.chmod(file_path, 0600)
return True
else:
raise BotoClientError('KeyPair contains no material')
|
[
"def",
"save",
"(",
"self",
",",
"directory_path",
")",
":",
"if",
"self",
".",
"material",
":",
"directory_path",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"directory_path",
")",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory_path",
",",
"'%s.pem'",
"%",
"self",
".",
"name",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"file_path",
")",
":",
"raise",
"BotoClientError",
"(",
"'%s already exists, it will not be overwritten'",
"%",
"file_path",
")",
"fp",
"=",
"open",
"(",
"file_path",
",",
"'wb'",
")",
"fp",
".",
"write",
"(",
"self",
".",
"material",
")",
"fp",
".",
"close",
"(",
")",
"os",
".",
"chmod",
"(",
"file_path",
",",
"0600",
")",
"return",
"True",
"else",
":",
"raise",
"BotoClientError",
"(",
"'KeyPair contains no material'",
")"
] |
Save the material (the unencrypted PEM encoded RSA private key)
of a newly created KeyPair to a local file.
:type directory_path: string
:param directory_path: The fully qualified path to the directory
in which the keypair will be saved. The
keypair file will be named using the name
of the keypair as the base name and .pem
for the file extension. If a file of that
name already exists in the directory, an
exception will be raised and the old file
will not be overwritten.
:rtype: bool
:return: True if successful.
|
[
"Save",
"the",
"material",
"(",
"the",
"unencrypted",
"PEM",
"encoded",
"RSA",
"private",
"key",
")",
"of",
"a",
"newly",
"created",
"KeyPair",
"to",
"a",
"local",
"file",
".",
":",
"type",
"directory_path",
":",
"string",
":",
"param",
"directory_path",
":",
"The",
"fully",
"qualified",
"path",
"to",
"the",
"directory",
"in",
"which",
"the",
"keypair",
"will",
"be",
"saved",
".",
"The",
"keypair",
"file",
"will",
"be",
"named",
"using",
"the",
"name",
"of",
"the",
"keypair",
"as",
"the",
"base",
"name",
"and",
".",
"pem",
"for",
"the",
"file",
"extension",
".",
"If",
"a",
"file",
"of",
"that",
"name",
"already",
"exists",
"in",
"the",
"directory",
"an",
"exception",
"will",
"be",
"raised",
"and",
"the",
"old",
"file",
"will",
"not",
"be",
"overwritten",
".",
":",
"rtype",
":",
"bool",
":",
"return",
":",
"True",
"if",
"successful",
"."
] |
python
|
train
|
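A hedged usage sketch (`conn` is a hypothetical boto EC2 connection; the key name is illustrative):

kp = conn.create_key_pair('my-key')  # returns a KeyPair whose .material is set
kp.save('~/.ssh')                    # writes ~/.ssh/my-key.pem with mode 0600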
rueckstiess/mtools
|
mtools/util/logevent.py
|
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/logevent.py#L844-L940
|
def _parse_document(self):
"""Parse system.profile doc, copy all values to member variables."""
self._reset()
doc = self._profile_doc
self._split_tokens_calculated = True
self._split_tokens = None
self._duration_calculated = True
self._duration = doc[u'millis']
self._datetime_calculated = True
self._datetime = doc[u'ts']
if self._datetime.tzinfo is None:
self._datetime = self._datetime.replace(tzinfo=tzutc())
self._datetime_format = None
self._reformat_timestamp('ctime', force=True)
self._thread_calculated = True
self._thread = doc['thread']
self._operation_calculated = True
self._operation = doc[u'op']
self._namespace = doc[u'ns']
self._command_calculated = True
if self.operation == 'command':
self._command = doc[u'command'].keys()[0]
# query pattern for system.profile events, all three cases.
# See SERVER-13245
if 'query' in doc:
if 'query' in doc['query'] and isinstance(doc['query']['query'],
dict):
self._pattern = str(doc['query']['query']).replace("'", '"')
elif '$query' in doc['query']:
self._pattern = str(doc['query']['$query']).replace("'", '"')
else:
self._pattern = str(doc['query']).replace("'", '"')
# sort pattern
if ('orderby' in doc['query'] and
isinstance(doc['query']['orderby'], dict)):
self._sort_pattern = str(doc['query']
['orderby']).replace("'", '"')
elif '$orderby' in doc['query']:
self._sort_pattern = str(doc['query']
['$orderby']).replace("'", '"')
else:
self._sort_pattern = None
self._counters_calculated = True
self._nscanned = doc[u'nscanned'] if 'nscanned' in doc else None
self._ntoreturn = doc[u'ntoreturn'] if 'ntoreturn' in doc else None
self._nupdated = doc[u'nupdated'] if 'nupdated' in doc else None
self._nreturned = doc[u'nreturned'] if 'nreturned' in doc else None
self._ninserted = doc[u'ninserted'] if 'ninserted' in doc else None
self._ndeleted = doc[u'ndeleted'] if 'ndeleted' in doc else None
self._numYields = doc[u'numYield'] if 'numYield' in doc else None
if u'lockStats' in doc:
self._r = doc[u'lockStats'][u'timeLockedMicros'][u'r']
self._w = doc[u'lockStats'][u'timeLockedMicros'][u'w']
self._r_acquiring = doc[u'lockStats']['timeAcquiringMicros'][u'r']
self._w_acquiring = doc[u'lockStats']['timeAcquiringMicros'][u'w']
locks = 'w:%i' % self.w if self.w is not None else 'r:%i' % self.r
elif u'locks' in doc:
locks = json.dumps(doc[u'locks'])
else:
locks = ''
# build a fake line_str
payload = ''
if 'query' in doc:
payload += ('query: %s' % str(doc[u'query'])
.replace("u'", "'").replace("'", '"'))
if 'command' in doc:
payload += ('command: %s' % str(doc[u'command'])
.replace("u'", "'").replace("'", '"'))
if 'updateobj' in doc:
payload += (' update: %s' % str(doc[u'updateobj'])
.replace("u'", "'").replace("'", '"'))
scanned = 'nscanned:%i' % self._nscanned if 'nscanned' in doc else ''
yields = 'numYields:%i' % self._numYields if 'numYield' in doc else ''
duration = '%ims' % self.duration if self.duration is not None else ''
self._line_str = ("[{thread}] {operation} {namespace} {payload} "
"{scanned} {yields} locks(micros) {locks} "
"{duration}".format(datetime=self.datetime,
thread=self.thread,
operation=self.operation,
namespace=self.namespace,
payload=payload, scanned=scanned,
yields=yields, locks=locks,
duration=duration))
|
[
"def",
"_parse_document",
"(",
"self",
")",
":",
"self",
".",
"_reset",
"(",
")",
"doc",
"=",
"self",
".",
"_profile_doc",
"self",
".",
"_split_tokens_calculated",
"=",
"True",
"self",
".",
"_split_tokens",
"=",
"None",
"self",
".",
"_duration_calculated",
"=",
"True",
"self",
".",
"_duration",
"=",
"doc",
"[",
"u'millis'",
"]",
"self",
".",
"_datetime_calculated",
"=",
"True",
"self",
".",
"_datetime",
"=",
"doc",
"[",
"u'ts'",
"]",
"if",
"self",
".",
"_datetime",
".",
"tzinfo",
"is",
"None",
":",
"self",
".",
"_datetime",
"=",
"self",
".",
"_datetime",
".",
"replace",
"(",
"tzinfo",
"=",
"tzutc",
"(",
")",
")",
"self",
".",
"_datetime_format",
"=",
"None",
"self",
".",
"_reformat_timestamp",
"(",
"'ctime'",
",",
"force",
"=",
"True",
")",
"self",
".",
"_thread_calculated",
"=",
"True",
"self",
".",
"_thread",
"=",
"doc",
"[",
"'thread'",
"]",
"self",
".",
"_operation_calculated",
"=",
"True",
"self",
".",
"_operation",
"=",
"doc",
"[",
"u'op'",
"]",
"self",
".",
"_namespace",
"=",
"doc",
"[",
"u'ns'",
"]",
"self",
".",
"_command_calculated",
"=",
"True",
"if",
"self",
".",
"operation",
"==",
"'command'",
":",
"self",
".",
"_command",
"=",
"doc",
"[",
"u'command'",
"]",
".",
"keys",
"(",
")",
"[",
"0",
"]",
"# query pattern for system.profile events, all three cases.",
"# See SERVER-13245",
"if",
"'query'",
"in",
"doc",
":",
"if",
"'query'",
"in",
"doc",
"[",
"'query'",
"]",
"and",
"isinstance",
"(",
"doc",
"[",
"'query'",
"]",
"[",
"'query'",
"]",
",",
"dict",
")",
":",
"self",
".",
"_pattern",
"=",
"str",
"(",
"doc",
"[",
"'query'",
"]",
"[",
"'query'",
"]",
")",
".",
"replace",
"(",
"\"'\"",
",",
"'\"'",
")",
"elif",
"'$query'",
"in",
"doc",
"[",
"'query'",
"]",
":",
"self",
".",
"_pattern",
"=",
"str",
"(",
"doc",
"[",
"'query'",
"]",
"[",
"'$query'",
"]",
")",
".",
"replace",
"(",
"\"'\"",
",",
"'\"'",
")",
"else",
":",
"self",
".",
"_pattern",
"=",
"str",
"(",
"doc",
"[",
"'query'",
"]",
")",
".",
"replace",
"(",
"\"'\"",
",",
"'\"'",
")",
"# sort pattern",
"if",
"(",
"'orderby'",
"in",
"doc",
"[",
"'query'",
"]",
"and",
"isinstance",
"(",
"doc",
"[",
"'query'",
"]",
"[",
"'orderby'",
"]",
",",
"dict",
")",
")",
":",
"self",
".",
"_sort_pattern",
"=",
"str",
"(",
"doc",
"[",
"'query'",
"]",
"[",
"'orderby'",
"]",
")",
".",
"replace",
"(",
"\"'\"",
",",
"'\"'",
")",
"elif",
"'$orderby'",
"in",
"doc",
"[",
"'query'",
"]",
":",
"self",
".",
"_sort_pattern",
"=",
"str",
"(",
"doc",
"[",
"'query'",
"]",
"[",
"'$orderby'",
"]",
")",
".",
"replace",
"(",
"\"'\"",
",",
"'\"'",
")",
"else",
":",
"self",
".",
"_sort_pattern",
"=",
"None",
"self",
".",
"_counters_calculated",
"=",
"True",
"self",
".",
"_nscanned",
"=",
"doc",
"[",
"u'nscanned'",
"]",
"if",
"'nscanned'",
"in",
"doc",
"else",
"None",
"self",
".",
"_ntoreturn",
"=",
"doc",
"[",
"u'ntoreturn'",
"]",
"if",
"'ntoreturn'",
"in",
"doc",
"else",
"None",
"self",
".",
"_nupdated",
"=",
"doc",
"[",
"u'nupdated'",
"]",
"if",
"'nupdated'",
"in",
"doc",
"else",
"None",
"self",
".",
"_nreturned",
"=",
"doc",
"[",
"u'nreturned'",
"]",
"if",
"'nreturned'",
"in",
"doc",
"else",
"None",
"self",
".",
"_ninserted",
"=",
"doc",
"[",
"u'ninserted'",
"]",
"if",
"'ninserted'",
"in",
"doc",
"else",
"None",
"self",
".",
"_ndeleted",
"=",
"doc",
"[",
"u'ndeleted'",
"]",
"if",
"'ndeleted'",
"in",
"doc",
"else",
"None",
"self",
".",
"_numYields",
"=",
"doc",
"[",
"u'numYield'",
"]",
"if",
"'numYield'",
"in",
"doc",
"else",
"None",
"if",
"u'lockStats'",
"in",
"doc",
":",
"self",
".",
"_r",
"=",
"doc",
"[",
"u'lockStats'",
"]",
"[",
"u'timeLockedMicros'",
"]",
"[",
"u'r'",
"]",
"self",
".",
"_w",
"=",
"doc",
"[",
"u'lockStats'",
"]",
"[",
"u'timeLockedMicros'",
"]",
"[",
"u'w'",
"]",
"self",
".",
"_r_acquiring",
"=",
"doc",
"[",
"u'lockStats'",
"]",
"[",
"'timeAcquiringMicros'",
"]",
"[",
"u'r'",
"]",
"self",
".",
"_w_acquiring",
"=",
"doc",
"[",
"u'lockStats'",
"]",
"[",
"'timeAcquiringMicros'",
"]",
"[",
"u'w'",
"]",
"locks",
"=",
"'w:%i'",
"%",
"self",
".",
"w",
"if",
"self",
".",
"w",
"is",
"not",
"None",
"else",
"'r:%i'",
"%",
"self",
".",
"r",
"elif",
"u'locks'",
"in",
"doc",
":",
"locks",
"=",
"json",
".",
"dumps",
"(",
"doc",
"[",
"u'locks'",
"]",
")",
"else",
":",
"locks",
"=",
"''",
"# build a fake line_str",
"payload",
"=",
"''",
"if",
"'query'",
"in",
"doc",
":",
"payload",
"+=",
"(",
"'query: %s'",
"%",
"str",
"(",
"doc",
"[",
"u'query'",
"]",
")",
".",
"replace",
"(",
"\"u'\"",
",",
"\"'\"",
")",
".",
"replace",
"(",
"\"'\"",
",",
"'\"'",
")",
")",
"if",
"'command'",
"in",
"doc",
":",
"payload",
"+=",
"(",
"'command: %s'",
"%",
"str",
"(",
"doc",
"[",
"u'command'",
"]",
")",
".",
"replace",
"(",
"\"u'\"",
",",
"\"'\"",
")",
".",
"replace",
"(",
"\"'\"",
",",
"'\"'",
")",
")",
"if",
"'updateobj'",
"in",
"doc",
":",
"payload",
"+=",
"(",
"' update: %s'",
"%",
"str",
"(",
"doc",
"[",
"u'updateobj'",
"]",
")",
".",
"replace",
"(",
"\"u'\"",
",",
"\"'\"",
")",
".",
"replace",
"(",
"\"'\"",
",",
"'\"'",
")",
")",
"scanned",
"=",
"'nscanned:%i'",
"%",
"self",
".",
"_nscanned",
"if",
"'nscanned'",
"in",
"doc",
"else",
"''",
"yields",
"=",
"'numYields:%i'",
"%",
"self",
".",
"_numYields",
"if",
"'numYield'",
"in",
"doc",
"else",
"''",
"duration",
"=",
"'%ims'",
"%",
"self",
".",
"duration",
"if",
"self",
".",
"duration",
"is",
"not",
"None",
"else",
"''",
"self",
".",
"_line_str",
"=",
"(",
"\"[{thread}] {operation} {namespace} {payload} \"",
"\"{scanned} {yields} locks(micros) {locks} \"",
"\"{duration}\"",
".",
"format",
"(",
"datetime",
"=",
"self",
".",
"datetime",
",",
"thread",
"=",
"self",
".",
"thread",
",",
"operation",
"=",
"self",
".",
"operation",
",",
"namespace",
"=",
"self",
".",
"namespace",
",",
"payload",
"=",
"payload",
",",
"scanned",
"=",
"scanned",
",",
"yields",
"=",
"yields",
",",
"locks",
"=",
"locks",
",",
"duration",
"=",
"duration",
")",
")"
] |
Parse system.profile doc, copy all values to member variables.
|
[
"Parse",
"system",
".",
"profile",
"doc",
"copy",
"all",
"values",
"to",
"member",
"variables",
"."
] |
python
|
train
|
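An illustrative system.profile document this parser accepts; field names are inferred from the accesses above and the values are made up:

from datetime import datetime
doc = {'ts': datetime.utcnow(), 'millis': 25, 'thread': 'conn42',
       'op': 'query', 'ns': 'test.coll',
       'query': {'$query': {'a': 1}, '$orderby': {'a': 1}},
       'nscanned': 100, 'nreturned': 10, 'numYield': 0,
       'locks': {'Global': 'r'}}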
rocky/python3-trepan
|
trepan/lib/sighandler.py
|
https://github.com/rocky/python3-trepan/blob/14e91bc0acce090d67be145b1ac040cab92ac5f3/trepan/lib/sighandler.py#L428-L435
|
def handle_print(self, signame, set_print):
"""Set whether we print or not when this signal is caught."""
if set_print:
self.sigs[signame].print_method = self.dbgr.intf[-1].msg
else:
self.sigs[signame].print_method = None
pass
return set_print
|
[
"def",
"handle_print",
"(",
"self",
",",
"signame",
",",
"set_print",
")",
":",
"if",
"set_print",
":",
"self",
".",
"sigs",
"[",
"signame",
"]",
".",
"print_method",
"=",
"self",
".",
"dbgr",
".",
"intf",
"[",
"-",
"1",
"]",
".",
"msg",
"else",
":",
"self",
".",
"sigs",
"[",
"signame",
"]",
".",
"print_method",
"=",
"None",
"pass",
"return",
"set_print"
] |
Set whether we print or not when this signal is caught.
|
[
"Set",
"whether",
"we",
"print",
"or",
"not",
"when",
"this",
"signal",
"is",
"caught",
"."
] |
python
|
test
|
gem/oq-engine
|
openquake/hazardlib/gsim/megawati_2003.py
|
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/megawati_2003.py#L94-L98
|
def _get_distance_scaling(self, coe, rhypo):
"""
Returns the distance scaling term
"""
return coe["a3"] * np.log(rhypo) + coe["a4"] * rhypo
|
[
"def",
"_get_distance_scaling",
"(",
"self",
",",
"coe",
",",
"rhypo",
")",
":",
"return",
"coe",
"[",
"\"a3\"",
"]",
"*",
"np",
".",
"log",
"(",
"rhypo",
")",
"+",
"coe",
"[",
"\"a4\"",
"]",
"*",
"rhypo"
] |
Returns the distance scaling term
|
[
"Returns",
"the",
"distance",
"scaling",
"term"
] |
python
|
train
|
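The scaling term evaluated standalone, with made-up coefficients (not the published Megawati (2003) values):

import numpy as np
coe = {'a3': -1.0, 'a4': -0.005}
rhypo = np.array([10.0, 50.0, 100.0])  # hypocentral distances in km
term = coe['a3'] * np.log(rhypo) + coe['a4'] * rhypo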
apple/turicreate
|
deps/src/boost_1_68_0/tools/build/src/build/virtual_target.py
|
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/virtual_target.py#L437-L444
|
def root (self, set = None):
""" Sets/gets the 'root' flag. Target is root is it directly correspods to some
variant of a main target.
"""
assert isinstance(set, (int, bool, type(None)))
if set:
self.root_ = True
return self.root_
|
[
"def",
"root",
"(",
"self",
",",
"set",
"=",
"None",
")",
":",
"assert",
"isinstance",
"(",
"set",
",",
"(",
"int",
",",
"bool",
",",
"type",
"(",
"None",
")",
")",
")",
"if",
"set",
":",
"self",
".",
"root_",
"=",
"True",
"return",
"self",
".",
"root_"
] |
Sets/gets the 'root' flag. Target is root if it directly corresponds to some
variant of a main target.
|
[
"Sets",
"/",
"gets",
"the",
"root",
"flag",
".",
"Target",
"is",
"root",
"is",
"it",
"directly",
"correspods",
"to",
"some",
"variant",
"of",
"a",
"main",
"target",
"."
] |
python
|
train
|
ibm-watson-iot/iot-python
|
tmp/src/things/things.py
|
https://github.com/ibm-watson-iot/iot-python/blob/195f05adce3fba4ec997017e41e02ebd85c0c4cc/tmp/src/things/things.py#L751-L768
|
def createEvent(self, physicalInterfaceId, eventTypeId, eventId):
"""
Create an event mapping for a physical interface.
Parameters:
physicalInterfaceId (string) - value returned by the platform when creating the physical interface
eventTypeId (string) - value returned by the platform when creating the event type
eventId (string) - matches the event id used by the device in the MQTT topic
Throws APIException on failure.
"""
req = ApiClient.allEventsUrl % (self.host, "/draft", physicalInterfaceId)
body = {"eventId" : eventId, "eventTypeId" : eventTypeId}
resp = requests.post(req, auth=self.credentials, headers={"Content-Type":"application/json"}, data=json.dumps(body),
verify=self.verify)
if resp.status_code == 201:
self.logger.debug("Event mapping created")
else:
raise ibmiotf.APIException(resp.status_code, "HTTP error creating event mapping", resp)
return resp.json()
|
[
"def",
"createEvent",
"(",
"self",
",",
"physicalInterfaceId",
",",
"eventTypeId",
",",
"eventId",
")",
":",
"req",
"=",
"ApiClient",
".",
"allEventsUrl",
"%",
"(",
"self",
".",
"host",
",",
"\"/draft\"",
",",
"physicalInterfaceId",
")",
"body",
"=",
"{",
"\"eventId\"",
":",
"eventId",
",",
"\"eventTypeId\"",
":",
"eventTypeId",
"}",
"resp",
"=",
"requests",
".",
"post",
"(",
"req",
",",
"auth",
"=",
"self",
".",
"credentials",
",",
"headers",
"=",
"{",
"\"Content-Type\"",
":",
"\"application/json\"",
"}",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"body",
")",
",",
"verify",
"=",
"self",
".",
"verify",
")",
"if",
"resp",
".",
"status_code",
"==",
"201",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Event mapping created\"",
")",
"else",
":",
"raise",
"ibmiotf",
".",
"APIException",
"(",
"resp",
".",
"status_code",
",",
"\"HTTP error creating event mapping\"",
",",
"resp",
")",
"return",
"resp",
".",
"json",
"(",
")"
] |
Create an event mapping for a physical interface.
Parameters:
physicalInterfaceId (string) - value returned by the platform when creating the physical interface
eventTypeId (string) - value returned by the platform when creating the event type
eventId (string) - matches the event id used by the device in the MQTT topic
Throws APIException on failure.
|
[
"Create",
"an",
"event",
"mapping",
"for",
"a",
"physical",
"interface",
".",
"Parameters",
":",
"physicalInterfaceId",
"(",
"string",
")",
"-",
"value",
"returned",
"by",
"the",
"platform",
"when",
"creating",
"the",
"physical",
"interface",
"eventTypeId",
"(",
"string",
")",
"-",
"value",
"returned",
"by",
"the",
"platform",
"when",
"creating",
"the",
"event",
"type",
"eventId",
"(",
"string",
")",
"-",
"matches",
"the",
"event",
"id",
"used",
"by",
"the",
"device",
"in",
"the",
"MQTT",
"topic",
"Throws",
"APIException",
"on",
"failure",
"."
] |
python
|
test
|
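A hedged usage sketch; the client construction and all identifiers are hypothetical:

api = ApiClient(options)  # an authenticated ibmiotf API client
api.createEvent(physicalInterfaceId='pi1', eventTypeId='et1', eventId='status')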
ClimateImpactLab/DataFS
|
datafs/datafs.py
|
https://github.com/ClimateImpactLab/DataFS/blob/0d32c2b4e18d300a11b748a552f6adbc3dd8f59d/datafs/datafs.py#L130-L167
|
def configure(ctx, helper, edit):
'''
Update configuration
'''
ctx.obj.config = ConfigFile(ctx.obj.config_file)
if edit:
ctx.obj.config.edit_config_file()
return
if os.path.isfile(ctx.obj.config.config_file):
ctx.obj.config.read_config()
if ctx.obj.profile is None:
ctx.obj.profile = ctx.obj.config.default_profile
args, kwargs = _parse_args_and_kwargs(ctx.args)
assert len(args) == 0, 'Unrecognized arguments: "{}"'.format(args)
if ctx.obj.profile not in ctx.obj.config.config['profiles']:
ctx.obj.config.config['profiles'][ctx.obj.profile] = {
'api': {'user_config': {}}, 'manager': {}, 'authorities': {}}
profile_config = ctx.obj.config.config['profiles'][ctx.obj.profile]
profile_config['api']['user_config'].update(kwargs)
ctx.obj.config.write_config(ctx.obj.config_file)
_generate_api(ctx)
if ctx.obj.api.manager is not None:
check_requirements(
to_populate=profile_config['api']['user_config'],
prompts=ctx.obj.api.manager.required_user_config,
helper=helper)
ctx.obj.config.write_config(ctx.obj.config_file)
|
[
"def",
"configure",
"(",
"ctx",
",",
"helper",
",",
"edit",
")",
":",
"ctx",
".",
"obj",
".",
"config",
"=",
"ConfigFile",
"(",
"ctx",
".",
"obj",
".",
"config_file",
")",
"if",
"edit",
":",
"ctx",
".",
"obj",
".",
"config",
".",
"edit_config_file",
"(",
")",
"return",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"ctx",
".",
"obj",
".",
"config",
".",
"config_file",
")",
":",
"ctx",
".",
"obj",
".",
"config",
".",
"read_config",
"(",
")",
"if",
"ctx",
".",
"obj",
".",
"profile",
"is",
"None",
":",
"ctx",
".",
"obj",
".",
"profile",
"=",
"ctx",
".",
"obj",
".",
"config",
".",
"default_profile",
"args",
",",
"kwargs",
"=",
"_parse_args_and_kwargs",
"(",
"ctx",
".",
"args",
")",
"assert",
"len",
"(",
"args",
")",
"==",
"0",
",",
"'Unrecognized arguments: \"{}\"'",
".",
"format",
"(",
"args",
")",
"if",
"ctx",
".",
"obj",
".",
"profile",
"not",
"in",
"ctx",
".",
"obj",
".",
"config",
".",
"config",
"[",
"'profiles'",
"]",
":",
"ctx",
".",
"obj",
".",
"config",
".",
"config",
"[",
"'profiles'",
"]",
"[",
"ctx",
".",
"obj",
".",
"profile",
"]",
"=",
"{",
"'api'",
":",
"{",
"'user_config'",
":",
"{",
"}",
"}",
",",
"'manager'",
":",
"{",
"}",
",",
"'authorities'",
":",
"{",
"}",
"}",
"profile_config",
"=",
"ctx",
".",
"obj",
".",
"config",
".",
"config",
"[",
"'profiles'",
"]",
"[",
"ctx",
".",
"obj",
".",
"profile",
"]",
"profile_config",
"[",
"'api'",
"]",
"[",
"'user_config'",
"]",
".",
"update",
"(",
"kwargs",
")",
"ctx",
".",
"obj",
".",
"config",
".",
"write_config",
"(",
"ctx",
".",
"obj",
".",
"config_file",
")",
"_generate_api",
"(",
"ctx",
")",
"if",
"ctx",
".",
"obj",
".",
"api",
".",
"manager",
"is",
"not",
"None",
":",
"check_requirements",
"(",
"to_populate",
"=",
"profile_config",
"[",
"'api'",
"]",
"[",
"'user_config'",
"]",
",",
"prompts",
"=",
"ctx",
".",
"obj",
".",
"api",
".",
"manager",
".",
"required_user_config",
",",
"helper",
"=",
"helper",
")",
"ctx",
".",
"obj",
".",
"config",
".",
"write_config",
"(",
"ctx",
".",
"obj",
".",
"config_file",
")"
] |
Update configuration
|
[
"Update",
"configuration"
] |
python
|
train
|
saltstack/salt
|
salt/states/file.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/file.py#L1131-L1189
|
def _get_template_texts(source_list=None,
template='jinja',
defaults=None,
context=None,
**kwargs):
'''
Iterate a list of sources and process them as templates.
Returns a list of 'chunks' containing the rendered templates.
'''
ret = {'name': '_get_template_texts',
'changes': {},
'result': True,
'comment': '',
'data': []}
if source_list is None:
return _error(ret,
'_get_template_texts called with empty source_list')
txtl = []
for (source, source_hash) in source_list:
context_dict = defaults if defaults else {}
if context:
context_dict = salt.utils.dictupdate.merge(context_dict, context)
rndrd_templ_fn = __salt__['cp.get_template'](
source,
'',
template=template,
saltenv=__env__,
context=context_dict,
**kwargs
)
log.debug('cp.get_template returned %s (Called with: %s)',
rndrd_templ_fn, source)
if rndrd_templ_fn:
tmplines = None
with salt.utils.files.fopen(rndrd_templ_fn, 'rb') as fp_:
tmplines = fp_.read()
tmplines = salt.utils.stringutils.to_unicode(tmplines)
tmplines = tmplines.splitlines(True)
if not tmplines:
msg = 'Failed to read rendered template file {0} ({1})'.format(
rndrd_templ_fn, source
)
log.debug(msg)
ret['name'] = source
return _error(ret, msg)
txtl.append(''.join(tmplines))
else:
msg = 'Failed to load template file {0}'.format(source)
log.debug(msg)
ret['name'] = source
return _error(ret, msg)
ret['data'] = txtl
return ret
|
[
"def",
"_get_template_texts",
"(",
"source_list",
"=",
"None",
",",
"template",
"=",
"'jinja'",
",",
"defaults",
"=",
"None",
",",
"context",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"'_get_template_texts'",
",",
"'changes'",
":",
"{",
"}",
",",
"'result'",
":",
"True",
",",
"'comment'",
":",
"''",
",",
"'data'",
":",
"[",
"]",
"}",
"if",
"source_list",
"is",
"None",
":",
"return",
"_error",
"(",
"ret",
",",
"'_get_template_texts called with empty source_list'",
")",
"txtl",
"=",
"[",
"]",
"for",
"(",
"source",
",",
"source_hash",
")",
"in",
"source_list",
":",
"context_dict",
"=",
"defaults",
"if",
"defaults",
"else",
"{",
"}",
"if",
"context",
":",
"context_dict",
"=",
"salt",
".",
"utils",
".",
"dictupdate",
".",
"merge",
"(",
"context_dict",
",",
"context",
")",
"rndrd_templ_fn",
"=",
"__salt__",
"[",
"'cp.get_template'",
"]",
"(",
"source",
",",
"''",
",",
"template",
"=",
"template",
",",
"saltenv",
"=",
"__env__",
",",
"context",
"=",
"context_dict",
",",
"*",
"*",
"kwargs",
")",
"log",
".",
"debug",
"(",
"'cp.get_template returned %s (Called with: %s)'",
",",
"rndrd_templ_fn",
",",
"source",
")",
"if",
"rndrd_templ_fn",
":",
"tmplines",
"=",
"None",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"rndrd_templ_fn",
",",
"'rb'",
")",
"as",
"fp_",
":",
"tmplines",
"=",
"fp_",
".",
"read",
"(",
")",
"tmplines",
"=",
"salt",
".",
"utils",
".",
"stringutils",
".",
"to_unicode",
"(",
"tmplines",
")",
"tmplines",
"=",
"tmplines",
".",
"splitlines",
"(",
"True",
")",
"if",
"not",
"tmplines",
":",
"msg",
"=",
"'Failed to read rendered template file {0} ({1})'",
".",
"format",
"(",
"rndrd_templ_fn",
",",
"source",
")",
"log",
".",
"debug",
"(",
"msg",
")",
"ret",
"[",
"'name'",
"]",
"=",
"source",
"return",
"_error",
"(",
"ret",
",",
"msg",
")",
"txtl",
".",
"append",
"(",
"''",
".",
"join",
"(",
"tmplines",
")",
")",
"else",
":",
"msg",
"=",
"'Failed to load template file {0}'",
".",
"format",
"(",
"source",
")",
"log",
".",
"debug",
"(",
"msg",
")",
"ret",
"[",
"'name'",
"]",
"=",
"source",
"return",
"_error",
"(",
"ret",
",",
"msg",
")",
"ret",
"[",
"'data'",
"]",
"=",
"txtl",
"return",
"ret"
] |
Iterate a list of sources and process them as templates.
Returns a list of 'chunks' containing the rendered templates.
|
[
"Iterate",
"a",
"list",
"of",
"sources",
"and",
"process",
"them",
"as",
"templates",
".",
"Returns",
"a",
"list",
"of",
"chunks",
"containing",
"the",
"rendered",
"templates",
"."
] |
python
|
train
|
bwhite/hadoopy
|
hadoopy/thirdparty/pyinstaller/PyInstaller/hooks/hookutils.py
|
https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/hooks/hookutils.py#L115-L124
|
def qt4_plugins_binaries(plugin_type):
"""Return list of dynamic libraries formated for mod.binaries."""
binaries = []
pdir = qt4_plugins_dir()
files = misc.dlls_in_dir(os.path.join(pdir, plugin_type))
for f in files:
binaries.append((
os.path.join('qt4_plugins', plugin_type, os.path.basename(f)),
f, 'BINARY'))
return binaries
|
[
"def",
"qt4_plugins_binaries",
"(",
"plugin_type",
")",
":",
"binaries",
"=",
"[",
"]",
"pdir",
"=",
"qt4_plugins_dir",
"(",
")",
"files",
"=",
"misc",
".",
"dlls_in_dir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"pdir",
",",
"plugin_type",
")",
")",
"for",
"f",
"in",
"files",
":",
"binaries",
".",
"append",
"(",
"(",
"os",
".",
"path",
".",
"join",
"(",
"'qt4_plugins'",
",",
"plugin_type",
",",
"os",
".",
"path",
".",
"basename",
"(",
"f",
")",
")",
",",
"f",
",",
"'BINARY'",
")",
")",
"return",
"binaries"
] |
Return list of dynamic libraries formatted for mod.binaries.
|
[
"Return",
"list",
"of",
"dynamic",
"libraries",
"formated",
"for",
"mod",
".",
"binaries",
"."
] |
python
|
train
|
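The triples returned are shaped for PyInstaller's mod.binaries; an illustrative result (paths are made up):

binaries = qt4_plugins_binaries('imageformats')
# e.g. [('qt4_plugins/imageformats/libqjpeg.so',
#        '/usr/lib/qt4/plugins/imageformats/libqjpeg.so', 'BINARY'), ...]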
datadotworld/data.world-py
|
datadotworld/client/api.py
|
https://github.com/datadotworld/data.world-py/blob/ffaeb115f358731ab0b805b0c43b7ff2e3cf0a77/datadotworld/client/api.py#L282-L321
|
def add_files_via_url(self, dataset_key, files={}):
"""Add or update dataset files linked to source URLs
:param dataset_key: Dataset identifier, in the form of owner/id
:type dataset_key: str
:param files: Dict containing the name of files and metadata
Uses file name as a dict containing File description, labels and
source URLs to add or update (Default value = {})
*description and labels are optional.*
:type files: dict
:raises RestApiException: If a server error occurs
Examples
--------
>>> import datadotworld as dw
>>> url = 'http://www.acme.inc/example.csv'
>>> api_client = dw.api_client()
>>> api_client.add_files_via_url(
... 'username/test-dataset',
... {'example.csv': {
... 'url': url,
... 'labels': ['raw data'],
... 'description': 'file description'}}) # doctest: +SKIP
"""
file_requests = [_swagger.FileCreateOrUpdateRequest(
name=file_name,
source=_swagger.FileSourceCreateOrUpdateRequest(
url=file_info['url'],
expand_archive=file_info.get('expand_archive',
False)),
description=file_info.get('description'),
labels=file_info.get('labels'),
) for file_name, file_info in files.items()]
owner_id, dataset_id = parse_dataset_key(dataset_key)
try:
self._datasets_api.add_files_by_source(
owner_id, dataset_id,
_swagger.FileBatchUpdateRequest(files=file_requests))
except _swagger.rest.ApiException as e:
raise RestApiError(cause=e)
|
[
"def",
"add_files_via_url",
"(",
"self",
",",
"dataset_key",
",",
"files",
"=",
"{",
"}",
")",
":",
"file_requests",
"=",
"[",
"_swagger",
".",
"FileCreateOrUpdateRequest",
"(",
"name",
"=",
"file_name",
",",
"source",
"=",
"_swagger",
".",
"FileSourceCreateOrUpdateRequest",
"(",
"url",
"=",
"file_info",
"[",
"'url'",
"]",
",",
"expand_archive",
"=",
"file_info",
".",
"get",
"(",
"'expand_archive'",
",",
"False",
")",
")",
",",
"description",
"=",
"file_info",
".",
"get",
"(",
"'description'",
")",
",",
"labels",
"=",
"file_info",
".",
"get",
"(",
"'labels'",
")",
",",
")",
"for",
"file_name",
",",
"file_info",
"in",
"files",
".",
"items",
"(",
")",
"]",
"owner_id",
",",
"dataset_id",
"=",
"parse_dataset_key",
"(",
"dataset_key",
")",
"try",
":",
"self",
".",
"_datasets_api",
".",
"add_files_by_source",
"(",
"owner_id",
",",
"dataset_id",
",",
"_swagger",
".",
"FileBatchUpdateRequest",
"(",
"files",
"=",
"file_requests",
")",
")",
"except",
"_swagger",
".",
"rest",
".",
"ApiException",
"as",
"e",
":",
"raise",
"RestApiError",
"(",
"cause",
"=",
"e",
")"
] |
Add or update dataset files linked to source URLs
:param dataset_key: Dataset identifier, in the form of owner/id
:type dataset_key: str
:param files: Dict containing the name of files and metadata
Uses file name as a dict containing File description, labels and
source URLs to add or update (Default value = {})
*description and labels are optional.*
:type files: dict
:raises RestApiException: If a server error occurs
Examples
--------
>>> import datadotworld as dw
>>> url = 'http://www.acme.inc/example.csv'
>>> api_client = dw.api_client()
>>> api_client.add_files_via_url(
... 'username/test-dataset',
... {'example.csv': {
... 'url': url,
... 'labels': ['raw data'],
... 'description': 'file description'}}) # doctest: +SKIP
|
[
"Add",
"or",
"update",
"dataset",
"files",
"linked",
"to",
"source",
"URLs"
] |
python
|
train
|
quantopian/pgcontents
|
pgcontents/pgmanager.py
|
https://github.com/quantopian/pgcontents/blob/ed36268b7917332d16868208e1e565742a8753e1/pgcontents/pgmanager.py#L264-L280
|
def _file_model_from_db(self, record, content, format):
"""
Build a file model from database record.
"""
# TODO: Most of this is shared with _notebook_model_from_db.
path = to_api_path(record['parent_name'] + record['name'])
model = base_model(path)
model['type'] = 'file'
model['last_modified'] = model['created'] = record['created_at']
if content:
bcontent = record['content']
model['content'], model['format'], model['mimetype'] = from_b64(
path,
bcontent,
format,
)
return model
|
[
"def",
"_file_model_from_db",
"(",
"self",
",",
"record",
",",
"content",
",",
"format",
")",
":",
"# TODO: Most of this is shared with _notebook_model_from_db.",
"path",
"=",
"to_api_path",
"(",
"record",
"[",
"'parent_name'",
"]",
"+",
"record",
"[",
"'name'",
"]",
")",
"model",
"=",
"base_model",
"(",
"path",
")",
"model",
"[",
"'type'",
"]",
"=",
"'file'",
"model",
"[",
"'last_modified'",
"]",
"=",
"model",
"[",
"'created'",
"]",
"=",
"record",
"[",
"'created_at'",
"]",
"if",
"content",
":",
"bcontent",
"=",
"record",
"[",
"'content'",
"]",
"model",
"[",
"'content'",
"]",
",",
"model",
"[",
"'format'",
"]",
",",
"model",
"[",
"'mimetype'",
"]",
"=",
"from_b64",
"(",
"path",
",",
"bcontent",
",",
"format",
",",
")",
"return",
"model"
] |
Build a file model from database record.
|
[
"Build",
"a",
"file",
"model",
"from",
"database",
"record",
"."
] |
python
|
test
|
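A hedged sketch of the database record this expects (`mgr` stands for a hypothetical PostgresContentsManager; field names are inferred from the accesses above):

from datetime import datetime
record = {'parent_name': '/reports/', 'name': 'data.csv',
          'created_at': datetime.utcnow(), 'content': b'YSxiLGM='}  # base64 of 'a,b,c'
model = mgr._file_model_from_db(record, content=True, format='text')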
square/pylink
|
pylink/threads.py
|
https://github.com/square/pylink/blob/81dda0a191d923a8b2627c52cb778aba24d279d7/pylink/threads.py#L41-L56
|
def run(self):
"""Runs the thread.
Args:
self (ThreadReturn): the ``ThreadReturn`` instance
Returns:
``None``
"""
target = getattr(self, '_Thread__target', getattr(self, '_target', None))
args = getattr(self, '_Thread__args', getattr(self, '_args', None))
kwargs = getattr(self, '_Thread__kwargs', getattr(self, '_kwargs', None))
if target is not None:
self._return = target(*args, **kwargs)
return None
|
[
"def",
"run",
"(",
"self",
")",
":",
"target",
"=",
"getattr",
"(",
"self",
",",
"'_Thread__target'",
",",
"getattr",
"(",
"self",
",",
"'_target'",
",",
"None",
")",
")",
"args",
"=",
"getattr",
"(",
"self",
",",
"'_Thread__args'",
",",
"getattr",
"(",
"self",
",",
"'_args'",
",",
"None",
")",
")",
"kwargs",
"=",
"getattr",
"(",
"self",
",",
"'_Thread__kwargs'",
",",
"getattr",
"(",
"self",
",",
"'_kwargs'",
",",
"None",
")",
")",
"if",
"target",
"is",
"not",
"None",
":",
"self",
".",
"_return",
"=",
"target",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"None"
] |
Runs the thread.
Args:
self (ThreadReturn): the ``ThreadReturn`` instance
Returns:
``None``
|
[
"Runs",
"the",
"thread",
"."
] |
python
|
train
|
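A usage sketch showing why the return value is captured (ThreadReturn is assumed to subclass threading.Thread, as the getattr fallbacks above imply):

t = ThreadReturn(target=pow, args=(2, 10))
t.start()
t.join()
# the target's return value, 1024, is now held in t._return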
pymacaron/pymacaron
|
pymacaron/utils.py
|
https://github.com/pymacaron/pymacaron/blob/af244f203f8216108b39d374d46bf8e1813f13d5/pymacaron/utils.py#L17-L41
|
def is_ec2_instance():
"""Try fetching instance metadata at 'curl http://169.254.169.254/latest/meta-data/'
to see if host is on an ec2 instance"""
# Note: this code assumes that docker containers running on ec2 instances
# inherit instances metadata, which they do as of 2016-08-25
global IS_EC2_INSTANCE
if IS_EC2_INSTANCE != -1:
        # Return the cached value
return IS_EC2_INSTANCE
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(0.2)
try:
s.connect(("169.254.169.254", 80))
IS_EC2_INSTANCE = 1
return True
except socket.timeout:
IS_EC2_INSTANCE = 0
return False
except socket.error:
IS_EC2_INSTANCE = 0
return False
|
[
"def",
"is_ec2_instance",
"(",
")",
":",
"# Note: this code assumes that docker containers running on ec2 instances",
"# inherit instances metadata, which they do as of 2016-08-25",
"global",
"IS_EC2_INSTANCE",
"if",
"IS_EC2_INSTANCE",
"!=",
"-",
"1",
":",
"# Returned the cached value",
"return",
"IS_EC2_INSTANCE",
"s",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_STREAM",
")",
"s",
".",
"settimeout",
"(",
"0.2",
")",
"try",
":",
"s",
".",
"connect",
"(",
"(",
"\"169.254.169.254\"",
",",
"80",
")",
")",
"IS_EC2_INSTANCE",
"=",
"1",
"return",
"True",
"except",
"socket",
".",
"timeout",
":",
"IS_EC2_INSTANCE",
"=",
"0",
"return",
"False",
"except",
"socket",
".",
"error",
":",
"IS_EC2_INSTANCE",
"=",
"0",
"return",
"False"
] |
Try fetching instance metadata at 'curl http://169.254.169.254/latest/meta-data/'
to see if host is on an ec2 instance
|
[
"Try",
"fetching",
"instance",
"metadata",
"at",
"curl",
"http",
":",
"//",
"169",
".",
"254",
".",
"169",
".",
"254",
"/",
"latest",
"/",
"meta",
"-",
"data",
"/",
"to",
"see",
"if",
"host",
"is",
"on",
"an",
"ec2",
"instance"
] |
python
|
train
|
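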
maxzheng/bumper-lib
|
bumper/cars.py
|
https://github.com/maxzheng/bumper-lib/blob/32a9dec5448673825bb2d7d92fa68882b597f794/bumper/cars.py#L252-L257
|
def as_requirement(self):
""" Convert back to a :class:`pkg_resources.Requirement` instance """
if self.new_version:
return pkg_resources.Requirement.parse(self.name + ''.join(self.new_version))
else:
return pkg_resources.Requirement.parse(self.name)
|
[
"def",
"as_requirement",
"(",
"self",
")",
":",
"if",
"self",
".",
"new_version",
":",
"return",
"pkg_resources",
".",
"Requirement",
".",
"parse",
"(",
"self",
".",
"name",
"+",
"''",
".",
"join",
"(",
"self",
".",
"new_version",
")",
")",
"else",
":",
"return",
"pkg_resources",
".",
"Requirement",
".",
"parse",
"(",
"self",
".",
"name",
")"
] |
Convert back to a :class:`pkg_resources.Requirement` instance
|
[
"Convert",
"back",
"to",
"a",
":",
"class",
":",
"pkg_resources",
".",
"Requirement",
"instance"
] |
python
|
valid
|
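An illustrative round-trip with hypothetical attribute values: with self.name == 'requests' and self.new_version == ['>=', '2.0'], the call reduces to

import pkg_resources
req = pkg_resources.Requirement.parse('requests' + ''.join(['>=', '2.0']))  # 'requests>=2.0'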
jobovy/galpy
|
galpy/potential/KuzminKutuzovStaeckelPotential.py
|
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/KuzminKutuzovStaeckelPotential.py#L258-L274
|
def _l2deriv(self,l,n):
"""
NAME:
_l2deriv
PURPOSE:
evaluate the second derivative w.r.t. lambda for this potential
INPUT:
l - prolate spheroidal coordinate lambda
n - prolate spheroidal coordinate nu
OUTPUT:
second derivative w.r.t. lambda
HISTORY:
2015-02-15 - Written - Trick (MPIA)
"""
numer = -3.*nu.sqrt(l) - nu.sqrt(n)
denom = 4. * l**1.5 * (nu.sqrt(l)+nu.sqrt(n))**3
return numer / denom
|
[
"def",
"_l2deriv",
"(",
"self",
",",
"l",
",",
"n",
")",
":",
"numer",
"=",
"-",
"3.",
"*",
"nu",
".",
"sqrt",
"(",
"l",
")",
"-",
"nu",
".",
"sqrt",
"(",
"n",
")",
"denom",
"=",
"4.",
"*",
"l",
"**",
"1.5",
"*",
"(",
"nu",
".",
"sqrt",
"(",
"l",
")",
"+",
"nu",
".",
"sqrt",
"(",
"n",
")",
")",
"**",
"3",
"return",
"numer",
"/",
"denom"
] |
NAME:
_l2deriv
PURPOSE:
evaluate the second derivative w.r.t. lambda for this potential
INPUT:
l - prolate spheroidal coordinate lambda
n - prolate spheroidal coordinate nu
OUTPUT:
second derivative w.r.t. lambda
HISTORY:
2015-02-15 - Written - Trick (MPIA)
|
[
"NAME",
":",
"_l2deriv",
"PURPOSE",
":",
"evaluate",
"the",
"second",
"derivative",
"w",
".",
"r",
".",
"t",
".",
"lambda",
"for",
"this",
"potential",
"INPUT",
":",
"l",
"-",
"prolate",
"spheroidal",
"coordinate",
"lambda",
"n",
"-",
"prolate",
"spheroidal",
"coordinate",
"nu",
"OUTPUT",
":",
"second",
"derivative",
"w",
".",
"r",
".",
"t",
".",
"lambda",
"HISTORY",
":",
"2015",
"-",
"02",
"-",
"15",
"-",
"Written",
"-",
"Trick",
"(",
"MPIA",
")"
] |
python
|
train
|
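Note that `n` here is the coordinate ν, while `nu` is evidently the numpy module (galpy's alias), so the returned expression is, in LaTeX (our notation, not from the source):

\[
\frac{-3\sqrt{\lambda} - \sqrt{\nu}}{4\,\lambda^{3/2}\,\left(\sqrt{\lambda} + \sqrt{\nu}\right)^{3}}
\]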
datacratic/pymldb
|
pymldb/__init__.py
|
https://github.com/datacratic/pymldb/blob/e41f3c37138e9fd4a82ef3db685899cdafa4125e/pymldb/__init__.py#L131-L165
|
def post_and_track(self, url, payload, refresh_rate_sec=1):
"""
Post and track progress, displaying progress bars.
May display the wrong progress if 2 things post/put on the same
procedure name at the same time.
"""
if not url.startswith('/v1/procedures'):
raise Exception("The only supported route is /v1/procedures")
if url.endswith('/runs'):
raise Exception(
"Posting and tracking run is unsupported at the moment")
if len(url.split('/')) != 3:
raise Exception("You must POST a procedure")
if 'params' not in payload:
payload['params'] = {}
payload['params']['runOnCreation'] = False
res = self.post('/v1/procedures', payload).json()
proc_id = res['id']
pm = ProgressMonitor(self, refresh_rate_sec, proc_id,
notebook=self.notebook)
t = threading.Thread(target=pm.monitor_progress)
t.start()
try:
return self.post('/v1/procedures/{}/runs'.format(proc_id), {})
except Exception as e:
print(e)
finally:
pm.event.set()
t.join()
|
[
"def",
"post_and_track",
"(",
"self",
",",
"url",
",",
"payload",
",",
"refresh_rate_sec",
"=",
"1",
")",
":",
"if",
"not",
"url",
".",
"startswith",
"(",
"'/v1/procedures'",
")",
":",
"raise",
"Exception",
"(",
"\"The only supported route is /v1/procedures\"",
")",
"if",
"url",
".",
"endswith",
"(",
"'/runs'",
")",
":",
"raise",
"Exception",
"(",
"\"Posting and tracking run is unsupported at the moment\"",
")",
"if",
"len",
"(",
"url",
".",
"split",
"(",
"'/'",
")",
")",
"!=",
"3",
":",
"raise",
"Exception",
"(",
"\"You must POST a procedure\"",
")",
"if",
"'params'",
"not",
"in",
"payload",
":",
"payload",
"[",
"'params'",
"]",
"=",
"{",
"}",
"payload",
"[",
"'params'",
"]",
"[",
"'runOnCreation'",
"]",
"=",
"False",
"res",
"=",
"self",
".",
"post",
"(",
"'/v1/procedures'",
",",
"payload",
")",
".",
"json",
"(",
")",
"proc_id",
"=",
"res",
"[",
"'id'",
"]",
"pm",
"=",
"ProgressMonitor",
"(",
"self",
",",
"refresh_rate_sec",
",",
"proc_id",
",",
"notebook",
"=",
"self",
".",
"notebook",
")",
"t",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"pm",
".",
"monitor_progress",
")",
"t",
".",
"start",
"(",
")",
"try",
":",
"return",
"self",
".",
"post",
"(",
"'/v1/procedures/{}/runs'",
".",
"format",
"(",
"proc_id",
")",
",",
"{",
"}",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"e",
")",
"finally",
":",
"pm",
".",
"event",
".",
"set",
"(",
")",
"t",
".",
"join",
"(",
")"
] |
Post and track progress, displaying progress bars.
May display the wrong progress if 2 things post/put on the same
procedure name at the same time.
|
[
"Post",
"and",
"track",
"progress",
"displaying",
"progress",
"bars",
"."
] |
python
|
train
|
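A hedged usage sketch; the connection URL and procedure payload are illustrative, not a definitive MLDB config:

from pymldb import Connection
c = Connection('http://localhost:8080')
c.post_and_track('/v1/procedures',
                 {'type': 'transform',
                  'params': {'inputData': 'SELECT 1 AS x',
                             'outputDataset': 'out'}})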
rochacbruno/dynaconf
|
dynaconf/loaders/toml_loader.py
|
https://github.com/rochacbruno/dynaconf/blob/5a7cc8f8252251cbdf4f4112965801f9dfe2831d/dynaconf/loaders/toml_loader.py#L15-L38
|
def load(obj, env=None, silent=True, key=None, filename=None):
"""
Reads and loads in to "obj" a single key or all keys from source file.
:param obj: the settings instance
:param env: settings current env default='development'
:param silent: if errors should raise
:param key: if defined load a single key, else load all in env
:param filename: Optional custom filename to load
:return: None
"""
if toml is None: # pragma: no cover
BaseLoader.warn_not_installed(obj, "toml")
return
loader = BaseLoader(
obj=obj,
env=env,
identifier="toml",
extensions=TOML_EXTENSIONS,
file_reader=toml.load,
string_reader=toml.loads,
)
loader.load(filename=filename, key=key, silent=silent)
|
[
"def",
"load",
"(",
"obj",
",",
"env",
"=",
"None",
",",
"silent",
"=",
"True",
",",
"key",
"=",
"None",
",",
"filename",
"=",
"None",
")",
":",
"if",
"toml",
"is",
"None",
":",
"# pragma: no cover",
"BaseLoader",
".",
"warn_not_installed",
"(",
"obj",
",",
"\"toml\"",
")",
"return",
"loader",
"=",
"BaseLoader",
"(",
"obj",
"=",
"obj",
",",
"env",
"=",
"env",
",",
"identifier",
"=",
"\"toml\"",
",",
"extensions",
"=",
"TOML_EXTENSIONS",
",",
"file_reader",
"=",
"toml",
".",
"load",
",",
"string_reader",
"=",
"toml",
".",
"loads",
",",
")",
"loader",
".",
"load",
"(",
"filename",
"=",
"filename",
",",
"key",
"=",
"key",
",",
"silent",
"=",
"silent",
")"
] |
Reads and loads in to "obj" a single key or all keys from source file.
:param obj: the settings instance
:param env: settings current env default='development'
:param silent: if errors should raise
:param key: if defined load a single key, else load all in env
:param filename: Optional custom filename to load
:return: None
|
[
"Reads",
"and",
"loads",
"in",
"to",
"obj",
"a",
"single",
"key",
"or",
"all",
"keys",
"from",
"source",
"file",
"."
] |
python
|
train
|
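An illustrative direct call (dynaconf normally drives this loader itself; `settings` stands for a hypothetical Dynaconf settings instance):

load(settings, env='development', filename='settings.toml')  # all keys for the env
load(settings, key='DEBUG', filename='settings.toml')        # a single key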
maxzheng/bumper-lib
|
bumper/utils.py
|
https://github.com/maxzheng/bumper-lib/blob/32a9dec5448673825bb2d7d92fa68882b597f794/bumper/utils.py#L60-L63
|
def all_package_versions(package):
""" All versions for package """
info = PyPI.package_info(package)
return info and sorted(info['releases'].keys(), key=lambda x: x.split(), reverse=True) or []
|
[
"def",
"all_package_versions",
"(",
"package",
")",
":",
"info",
"=",
"PyPI",
".",
"package_info",
"(",
"package",
")",
"return",
"info",
"and",
"sorted",
"(",
"info",
"[",
"'releases'",
"]",
".",
"keys",
"(",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
".",
"split",
"(",
")",
",",
"reverse",
"=",
"True",
")",
"or",
"[",
"]"
] |
All versions for package
|
[
"All",
"versions",
"for",
"package"
] |
python
|
valid
|
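Note the sort key: x.split() splits on whitespace, which version strings never contain, so the ordering is plain lexicographic rather than semantic:

sorted(['1.10', '1.2', '1.9'], key=lambda x: x.split(), reverse=True)
# -> ['1.9', '1.2', '1.10']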
openstack/networking-cisco
|
networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py
|
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py#L890-L901
|
def _create_service_nwk(self, tenant_id, tenant_name, direc):
"""Function to create the service in network in DCNM. """
net_dict = self.retrieve_dcnm_net_info(tenant_id, direc)
net = utils.Dict2Obj(net_dict)
subnet_dict = self.retrieve_dcnm_subnet_info(tenant_id, direc)
subnet = utils.Dict2Obj(subnet_dict)
try:
self.dcnm_obj.create_service_network(tenant_name, net, subnet)
except dexc.DfaClientRequestFailed:
LOG.error("Failed to create network in DCNM %s", direc)
return False
return True
|
[
"def",
"_create_service_nwk",
"(",
"self",
",",
"tenant_id",
",",
"tenant_name",
",",
"direc",
")",
":",
"net_dict",
"=",
"self",
".",
"retrieve_dcnm_net_info",
"(",
"tenant_id",
",",
"direc",
")",
"net",
"=",
"utils",
".",
"Dict2Obj",
"(",
"net_dict",
")",
"subnet_dict",
"=",
"self",
".",
"retrieve_dcnm_subnet_info",
"(",
"tenant_id",
",",
"direc",
")",
"subnet",
"=",
"utils",
".",
"Dict2Obj",
"(",
"subnet_dict",
")",
"try",
":",
"self",
".",
"dcnm_obj",
".",
"create_service_network",
"(",
"tenant_name",
",",
"net",
",",
"subnet",
")",
"except",
"dexc",
".",
"DfaClientRequestFailed",
":",
"LOG",
".",
"error",
"(",
"\"Failed to create network in DCNM %s\"",
",",
"direc",
")",
"return",
"False",
"return",
"True"
] |
Function to create the service in network in DCNM.
|
[
"Function",
"to",
"create",
"the",
"service",
"in",
"network",
"in",
"DCNM",
"."
] |
python
|
train
|
vmlaker/mpipe
|
src/UnorderedWorker.py
|
https://github.com/vmlaker/mpipe/blob/5a1804cf64271931f0cd3e4fff3e2b38291212dd/src/UnorderedWorker.py#L17-L34
|
def init2(
self,
input_tube, # Read task from the input tube.
output_tubes, # Send result on all the output tubes.
num_workers, # Total number of workers in the stage.
disable_result, # Whether to override any result with None.
do_stop_task, # Whether to call doTask() on "stop" request.
):
"""Create *num_workers* worker objects with *input_tube* and
an iterable of *output_tubes*. The worker reads a task from *input_tube*
and writes the result to *output_tubes*."""
super(UnorderedWorker, self).__init__()
self._tube_task_input = input_tube
self._tubes_result_output = output_tubes
self._num_workers = num_workers
self._disable_result = disable_result
self._do_stop_task = do_stop_task
|
[
"def",
"init2",
"(",
"self",
",",
"input_tube",
",",
"# Read task from the input tube.",
"output_tubes",
",",
"# Send result on all the output tubes.",
"num_workers",
",",
"# Total number of workers in the stage.",
"disable_result",
",",
"# Whether to override any result with None.",
"do_stop_task",
",",
"# Whether to call doTask() on \"stop\" request.",
")",
":",
"super",
"(",
"UnorderedWorker",
",",
"self",
")",
".",
"__init__",
"(",
")",
"self",
".",
"_tube_task_input",
"=",
"input_tube",
"self",
".",
"_tubes_result_output",
"=",
"output_tubes",
"self",
".",
"_num_workers",
"=",
"num_workers",
"self",
".",
"_disable_result",
"=",
"disable_result",
"self",
".",
"_do_stop_task",
"=",
"do_stop_task"
] |
Create *num_workers* worker objects with *input_tube* and
an iterable of *output_tubes*. The worker reads a task from *input_tube*
and writes the result to *output_tubes*.
|
[
"Create",
"*",
"num_workers",
"*",
"worker",
"objects",
"with",
"*",
"input_tube",
"*",
"and",
"an",
"iterable",
"of",
"*",
"output_tubes",
"*",
".",
"The",
"worker",
"reads",
"a",
"task",
"from",
"*",
"input_tube",
"*",
"and",
"writes",
"the",
"result",
"to",
"*",
"output_tubes",
"*",
"."
] |
python
|
train
|
chrislit/abydos
|
abydos/phonetic/_beider_morse.py
|
https://github.com/chrislit/abydos/blob/165466b3ff6afd8024a4c8660421b0c4e7773db9/abydos/phonetic/_beider_morse.py#L590-L611
|
def _phonetic_numbers(self, phonetic):
"""Prepare & join phonetic numbers.
Split phonetic value on '-', run through _pnums_with_leading_space,
and join with ' '
Parameters
----------
phonetic : str
A Beider-Morse phonetic encoding
Returns
-------
str
A Beider-Morse phonetic code
"""
phonetic_array = phonetic.split('-') # for names with spaces in them
result = ' '.join(
[self._pnums_with_leading_space(i)[1:] for i in phonetic_array]
)
return result
|
[
"def",
"_phonetic_numbers",
"(",
"self",
",",
"phonetic",
")",
":",
"phonetic_array",
"=",
"phonetic",
".",
"split",
"(",
"'-'",
")",
"# for names with spaces in them",
"result",
"=",
"' '",
".",
"join",
"(",
"[",
"self",
".",
"_pnums_with_leading_space",
"(",
"i",
")",
"[",
"1",
":",
"]",
"for",
"i",
"in",
"phonetic_array",
"]",
")",
"return",
"result"
] |
Prepare & join phonetic numbers.
Split phonetic value on '-', run through _pnums_with_leading_space,
and join with ' '
Parameters
----------
phonetic : str
A Beider-Morse phonetic encoding
Returns
-------
str
A Beider-Morse phonetic code
|
[
"Prepare",
"&",
"join",
"phonetic",
"numbers",
"."
] |
python
|
valid
|
pyviz/holoviews
|
holoviews/plotting/plotly/plot.py
|
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/plotly/plot.py#L33-L39
|
def _trigger_refresh(self, key):
"Triggers update to a plot on a refresh event"
if self.top_level:
self.update(key)
else:
self.current_key = None
self.current_frame = None
|
[
"def",
"_trigger_refresh",
"(",
"self",
",",
"key",
")",
":",
"if",
"self",
".",
"top_level",
":",
"self",
".",
"update",
"(",
"key",
")",
"else",
":",
"self",
".",
"current_key",
"=",
"None",
"self",
".",
"current_frame",
"=",
"None"
] |
Triggers update to a plot on a refresh event
|
[
"Triggers",
"update",
"to",
"a",
"plot",
"on",
"a",
"refresh",
"event"
] |
python
|
train
|
great-expectations/great_expectations
|
great_expectations/data_asset/file_data_asset.py
|
https://github.com/great-expectations/great_expectations/blob/08385c40529d4f14a1c46916788aecc47f33ee9d/great_expectations/data_asset/file_data_asset.py#L587-L643
|
def expect_file_to_be_valid_json(self, schema=None, result_format=None,
include_config=False, catch_exceptions=None,
meta=None):
"""
schema : string
optional JSON schema file on which JSON data file is validated against
result_format (str or None):
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean):
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None):
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None):
A JSON-serializable dictionary (nesting allowed) that will
be included in the output without modification. \
For more detail, see :ref:`meta`.
Returns:
A JSON-serializable expectation result object.
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
"""
success = False
if schema is None:
try:
with open(self._path, 'r') as f:
json.load(f)
success = True
except ValueError:
success = False
else:
try:
with open(schema, 'r') as s:
schema_data = s.read()
sdata = json.loads(schema_data)
with open(self._path, 'r') as f:
json_data = f.read()
jdata = json.loads(json_data)
jsonschema.validate(jdata, sdata)
success = True
except jsonschema.ValidationError:
success = False
except jsonschema.SchemaError:
raise
except:
raise
return {"success":success}
|
[
"def",
"expect_file_to_be_valid_json",
"(",
"self",
",",
"schema",
"=",
"None",
",",
"result_format",
"=",
"None",
",",
"include_config",
"=",
"False",
",",
"catch_exceptions",
"=",
"None",
",",
"meta",
"=",
"None",
")",
":",
"success",
"=",
"False",
"if",
"schema",
"is",
"None",
":",
"try",
":",
"with",
"open",
"(",
"self",
".",
"_path",
",",
"'r'",
")",
"as",
"f",
":",
"json",
".",
"load",
"(",
"f",
")",
"success",
"=",
"True",
"except",
"ValueError",
":",
"success",
"=",
"False",
"else",
":",
"try",
":",
"with",
"open",
"(",
"schema",
",",
"'r'",
")",
"as",
"s",
":",
"schema_data",
"=",
"s",
".",
"read",
"(",
")",
"sdata",
"=",
"json",
".",
"loads",
"(",
"schema_data",
")",
"with",
"open",
"(",
"self",
".",
"_path",
",",
"'r'",
")",
"as",
"f",
":",
"json_data",
"=",
"f",
".",
"read",
"(",
")",
"jdata",
"=",
"json",
".",
"loads",
"(",
"json_data",
")",
"jsonschema",
".",
"validate",
"(",
"jdata",
",",
"sdata",
")",
"success",
"=",
"True",
"except",
"jsonschema",
".",
"ValidationError",
":",
"success",
"=",
"False",
"except",
"jsonschema",
".",
"SchemaError",
":",
"raise",
"except",
":",
"raise",
"return",
"{",
"\"success\"",
":",
"success",
"}"
] |
schema : string
    optional JSON schema file against which the JSON data file is validated
result_format (str or None):
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean):
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None):
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None):
A JSON-serializable dictionary (nesting allowed) that will
be included in the output without modification. \
For more detail, see :ref:`meta`.
Returns:
A JSON-serializable expectation result object.
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
|
[
"schema",
":",
"string",
"optional",
"JSON",
"schema",
"file",
"on",
"which",
"JSON",
"data",
"file",
"is",
"validated",
"against"
] |
python
|
train
|
mokelly/wabbit_wappa
|
wabbit_wappa/__init__.py
|
https://github.com/mokelly/wabbit_wappa/blob/dfe5bf6d6036079e473c4148335cd6f339d0299b/wabbit_wappa/__init__.py#L263-L274
|
def _get_response(self, parse_result=True):
"""If 'parse_result' is False, ignore the received output and return None."""
    # expect_exact is faster than plain expect(), and fine for our purpose
# (http://pexpect.readthedocs.org/en/latest/api/pexpect.html#pexpect.spawn.expect_exact)
# searchwindowsize and other attributes may also affect efficiency
self.vw_process.expect_exact('\r\n', searchwindowsize=-1) # Wait until process outputs a complete line
if parse_result:
output = self.vw_process.before
result_struct = VWResult(output, active_mode=self.active_mode)
else:
result_struct = None
return result_struct
|
[
"def",
"_get_response",
"(",
"self",
",",
"parse_result",
"=",
"True",
")",
":",
"# expect_exact is faster than just exact, and fine for our purpose",
"# (http://pexpect.readthedocs.org/en/latest/api/pexpect.html#pexpect.spawn.expect_exact)",
"# searchwindowsize and other attributes may also affect efficiency",
"self",
".",
"vw_process",
".",
"expect_exact",
"(",
"'\\r\\n'",
",",
"searchwindowsize",
"=",
"-",
"1",
")",
"# Wait until process outputs a complete line",
"if",
"parse_result",
":",
"output",
"=",
"self",
".",
"vw_process",
".",
"before",
"result_struct",
"=",
"VWResult",
"(",
"output",
",",
"active_mode",
"=",
"self",
".",
"active_mode",
")",
"else",
":",
"result_struct",
"=",
"None",
"return",
"result_struct"
] |
If 'parse_result' is False, ignore the received output and return None.
|
[
"If",
"parse_result",
"is",
"False",
"ignore",
"the",
"received",
"output",
"and",
"return",
"None",
"."
] |
python
|
train
|
LonamiWebs/Telethon
|
telethon/client/chats.py
|
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/client/chats.py#L277-L331
|
def iter_participants(
self, entity, limit=None, *, search='',
filter=None, aggressive=False
):
"""
Iterator over the participants belonging to the specified chat.
Args:
entity (`entity`):
The entity from which to retrieve the participants list.
limit (`int`):
Limits amount of participants fetched.
search (`str`, optional):
Look for participants with this string in name/username.
If ``aggressive is True``, the symbols from this string will
be used.
filter (:tl:`ChannelParticipantsFilter`, optional):
The filter to be used, if you want e.g. only admins
Note that you might not have permissions for some filter.
This has no effect for normal chats or users.
.. note::
The filter :tl:`ChannelParticipantsBanned` will return
*restricted* users. If you want *banned* users you should
use :tl:`ChannelParticipantsKicked` instead.
aggressive (`bool`, optional):
Aggressively looks for all participants in the chat.
                This is useful for channels, since on 20 July 2018
                Telegram added a server-side limit where only the
                first 200 members can be retrieved. With this flag
                set, more than 200 will often be retrieved.
This has no effect if a ``filter`` is given.
Yields:
The :tl:`User` objects returned by :tl:`GetParticipantsRequest`
with an additional ``.participant`` attribute which is the
matched :tl:`ChannelParticipant` type for channels/megagroups
or :tl:`ChatParticipants` for normal chats.
"""
return _ParticipantsIter(
self,
limit,
entity=entity,
filter=filter,
search=search,
aggressive=aggressive
)
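# Hedged usage sketch for iter_participants(): 'client' is assumed to be a
# connected TelegramClient and 'chat' any entity it can access; in the older
# synchronous Telethon API the iterator can be consumed directly.
# for user in client.iter_participants(chat, limit=50, search='alice'):
#     print(user.id, user.first_name, user.participant)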
|
[
"def",
"iter_participants",
"(",
"self",
",",
"entity",
",",
"limit",
"=",
"None",
",",
"*",
",",
"search",
"=",
"''",
",",
"filter",
"=",
"None",
",",
"aggressive",
"=",
"False",
")",
":",
"return",
"_ParticipantsIter",
"(",
"self",
",",
"limit",
",",
"entity",
"=",
"entity",
",",
"filter",
"=",
"filter",
",",
"search",
"=",
"search",
",",
"aggressive",
"=",
"aggressive",
")"
] |
Iterator over the participants belonging to the specified chat.
Args:
entity (`entity`):
The entity from which to retrieve the participants list.
limit (`int`):
Limits amount of participants fetched.
search (`str`, optional):
Look for participants with this string in name/username.
If ``aggressive is True``, the symbols from this string will
be used.
filter (:tl:`ChannelParticipantsFilter`, optional):
The filter to be used, if you want e.g. only admins
Note that you might not have permissions for some filter.
This has no effect for normal chats or users.
.. note::
The filter :tl:`ChannelParticipantsBanned` will return
*restricted* users. If you want *banned* users you should
use :tl:`ChannelParticipantsKicked` instead.
aggressive (`bool`, optional):
Aggressively looks for all participants in the chat.
        This is useful for channels, since on 20 July 2018
        Telegram added a server-side limit where only the
        first 200 members can be retrieved. With this flag
        set, more than 200 will often be retrieved.
This has no effect if a ``filter`` is given.
Yields:
The :tl:`User` objects returned by :tl:`GetParticipantsRequest`
with an additional ``.participant`` attribute which is the
matched :tl:`ChannelParticipant` type for channels/megagroups
or :tl:`ChatParticipants` for normal chats.
|
[
"Iterator",
"over",
"the",
"participants",
"belonging",
"to",
"the",
"specified",
"chat",
"."
] |
python
|
train
|
Rackspace-DOT/flask_keystone
|
flask_keystone/__init__.py
|
https://github.com/Rackspace-DOT/flask_keystone/blob/6f6d630e9e66a3beca6607b0b786510ec2a79747/flask_keystone/__init__.py#L205-L239
|
def _make_user_model(self):
"""
Dynamically generate a User class for use with FlaskKeystone.
:returns: a generated User class, inherited from
:class:`flask_keystone.UserBase`.
:rtype: class
This User model is intended to work somewhat similarly to the User
        class that is created for Flask-Login; however, it is dynamically
generated based on configuration values in `oslo.config.cfg`, and
is populated automatically from the request headers added by
:mod:`keystonemiddleware`.
This User class has the concept of "roles", which are defined in
        oslo.config, and generates helper functions to quickly determine
whether these roles apply to a particular instance.
"""
class User(UserBase):
"""
A User as defined by the response from Keystone.
Note: This class is dynamically generated by :class:`FlaskKeystone`
from the :class:`flask_keystone.UserBase` class.
:param request: The incoming `flask.Request` object, after being
handled by the :mod:`keystonemiddleware`
:returns: :class:`flask_keystone.UserBase`
"""
pass
User.generate_has_role_function(self.roles)
User.generate_is_role_functions(self.roles)
return User
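# Minimal standalone sketch of the dynamic-class pattern used above: build
# a subclass at runtime and attach one generated helper per configured role.
# 'base' and 'roles' are stand-ins for UserBase and the oslo.config roles.
def _make_model(base, roles):
    class Model(base):
        pass
    for role in roles:
        # one is_<role>(self) predicate per role name
        setattr(Model, 'is_' + role,
                lambda self, _r=role: _r in getattr(self, 'roles', ()))
    return Model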
|
[
"def",
"_make_user_model",
"(",
"self",
")",
":",
"class",
"User",
"(",
"UserBase",
")",
":",
"\"\"\"\n A User as defined by the response from Keystone.\n\n Note: This class is dynamically generated by :class:`FlaskKeystone`\n from the :class:`flask_keystone.UserBase` class.\n\n :param request: The incoming `flask.Request` object, after being\n handled by the :mod:`keystonemiddleware`\n :returns: :class:`flask_keystone.UserBase`\n \"\"\"",
"pass",
"User",
".",
"generate_has_role_function",
"(",
"self",
".",
"roles",
")",
"User",
".",
"generate_is_role_functions",
"(",
"self",
".",
"roles",
")",
"return",
"User"
] |
Dynamically generate a User class for use with FlaskKeystone.
:returns: a generated User class, inherited from
:class:`flask_keystone.UserBase`.
:rtype: class
This User model is intended to work somewhat similarly to the User
class that is created for Flask-Login; however, it is dynamically
generated based on configuration values in `oslo.config.cfg`, and
is populated automatically from the request headers added by
:mod:`keystonemiddleware`.
This User class has the concept of "roles", which are defined in
oslo.config, and generates helper functions to quickly determine
whether these roles apply to a particular instance.
|
[
"Dynamically",
"generate",
"a",
"User",
"class",
"for",
"use",
"with",
"FlaskKeystone",
"."
] |
python
|
train
|
rvswift/EB
|
EB/builder/utilities/ensemble_storage.py
|
https://github.com/rvswift/EB/blob/341880b79faf8147dc9fa6e90438531cd09fabcc/EB/builder/utilities/ensemble_storage.py#L17-L35
|
def set_prop(self, prop, value, ef=None):
"""
set attributes values
:param prop:
:param value:
:param ef:
:return:
"""
if ef:
# prop should be restricted to n_decoys, an int, the no. of decoys corresponding to a given FPF.
# value is restricted to the corresponding enrichment factor and should be a float
self.ef[prop] = value
else:
if prop == 'ensemble':
# value is a tuple of strings that gives the ensemble composition
self.ensemble = value
elif prop == 'auc':
# value is a float that gives the auc value
self.auc = value
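# Hedged usage sketch for set_prop(); 'es' is a hypothetical instance of the
# storage class above, assumed to expose .ensemble, .auc and .ef.
# es.set_prop('ensemble', ('mol1', 'mol2'))   # tuple -> es.ensemble
# es.set_prop('auc', 0.87)                    # float -> es.auc
# es.set_prop(25, 4.2, ef=True)               # n_decoys -> EF: es.ef[25] = 4.2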
|
[
"def",
"set_prop",
"(",
"self",
",",
"prop",
",",
"value",
",",
"ef",
"=",
"None",
")",
":",
"if",
"ef",
":",
"# prop should be restricted to n_decoys, an int, the no. of decoys corresponding to a given FPF.",
"# value is restricted to the corresponding enrichment factor and should be a float",
"self",
".",
"ef",
"[",
"prop",
"]",
"=",
"value",
"else",
":",
"if",
"prop",
"==",
"'ensemble'",
":",
"# value is a tuple of strings that gives the ensemble composition",
"self",
".",
"ensemble",
"=",
"value",
"elif",
"prop",
"==",
"'auc'",
":",
"# value is a float that gives the auc value",
"self",
".",
"auc",
"=",
"value"
] |
set attributes values
:param prop:
:param value:
:param ef:
:return:
|
[
"set",
"attributes",
"values",
":",
"param",
"prop",
":",
":",
"param",
"value",
":",
":",
"param",
"ef",
":",
":",
"return",
":"
] |
python
|
train
|
SwissDataScienceCenter/renku-python
|
renku/cli/doctor.py
|
https://github.com/SwissDataScienceCenter/renku-python/blob/691644d695b055a01e0ca22b2620e55bbd928c0d/renku/cli/doctor.py#L36-L49
|
def doctor(ctx, client):
"""Check your system and repository for potential problems."""
click.secho('\n'.join(textwrap.wrap(DOCTOR_INFO)) + '\n', bold=True)
from . import _checks
is_ok = True
for attr in _checks.__all__:
is_ok &= getattr(_checks, attr)(client)
if is_ok:
click.secho('Everything seems to be ok.', fg='green')
ctx.exit(0 if is_ok else 1)
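# Standalone sketch of the aggregation pattern above: '&=' runs every check
# (no short-circuiting), and the combined boolean picks the exit code.
def _run_checks(checks, client):
    is_ok = True
    for check in checks:
        is_ok &= check(client)     # each check still executes and reports
    return 0 if is_ok else 1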
|
[
"def",
"doctor",
"(",
"ctx",
",",
"client",
")",
":",
"click",
".",
"secho",
"(",
"'\\n'",
".",
"join",
"(",
"textwrap",
".",
"wrap",
"(",
"DOCTOR_INFO",
")",
")",
"+",
"'\\n'",
",",
"bold",
"=",
"True",
")",
"from",
".",
"import",
"_checks",
"is_ok",
"=",
"True",
"for",
"attr",
"in",
"_checks",
".",
"__all__",
":",
"is_ok",
"&=",
"getattr",
"(",
"_checks",
",",
"attr",
")",
"(",
"client",
")",
"if",
"is_ok",
":",
"click",
".",
"secho",
"(",
"'Everything seems to be ok.'",
",",
"fg",
"=",
"'green'",
")",
"ctx",
".",
"exit",
"(",
"0",
"if",
"is_ok",
"else",
"1",
")"
] |
Check your system and repository for potential problems.
|
[
"Check",
"your",
"system",
"and",
"repository",
"for",
"potential",
"problems",
"."
] |
python
|
train
|
firstprayer/monsql
|
monsql/query.py
|
https://github.com/firstprayer/monsql/blob/6285c15b574c8664046eae2edfeb548c7b173efd/monsql/query.py#L85-L193
|
def to_sql(self):
"""
        This function builds a sql condition string (those used in the 'WHERE' clause) based on the given condition
Supported match pattern:
{a: 1} -> a == 1
{a: {$gt: 1}} -> a > 1
{a: {$gte: 1}} -> a >= 1
{a: {$lt: 1}} -> a < 1
{a: {$lte: 1}} -> a <= 1
{a: {$eq: 1}} -> a == 1
        {a: {$in: [1, 2]}} -> a in (1, 2)
{a: {$contains: '123'}} -> a like %123%
And complex combination
{$not: condition} -> NOT (condition)
{$and: [condition1, condition2]} -> condition1 and condition2
{$or: [condition1, condition2]} -> condition1 or condition2
"""
condition = self.condition
if condition:
# If the condition is not None nor empty
if len(condition.keys()) > 1:
# If in the form of {'a': 1, 'b': 2}, simplify to {'$and': [{'a': 1, 'b': 2}]}
split_conditions = []
for key in condition.keys():
split_conditions.append({key: condition[key]})
return QueryCondition({'$and': split_conditions}).to_sql()
else:
query_field, query_value = condition.items()[0]
if query_field in QueryCondition.COMPLEX_QUERY_INDICATOR:
# This is a composite query
if u'$not' == query_field:
not_condition = QueryCondition(query_value).to_sql()
if not_condition is not None:
return 'NOT (%s)' %(not_condition)
else:
return None
if query_field in (u'$or', u'$and', ):
conditions = query_value
if not isinstance(conditions, list) or len(conditions) < 2:
raise MonSQLException('QUERY VALUE FOR KEY %s MUST BE LIST WITH LENGTH BEING AT LEAST 2' %(query_field))
# compute sub conditions recursively
conditions = map(lambda c: QueryCondition(c).to_sql(), conditions)
conditions = filter(lambda c: c is not None, conditions)
# join them together
if len(conditions) > 0:
if query_field == u'$or':
return ' OR '.join(conditions)
elif query_field == u'$and':
return ' AND '.join(conditions)
else:
return None
else:
                    raise MonSQLException('Unsupported query_field')
else:
# This is a one-field query like {'id': ...}
if query_field in QueryCondition.MYSQL_RESERVE_WORDS:
query_field = "`%s`" %(query_field)
if not type(query_value) is types.DictType:
# transform {'id': 1} to {'id': {'$eq': 1}} for convenience
query_value = {'$eq': query_value}
if len(query_value.keys()) > 1:
# Deal with situation like a: {'$gt': 1, '$lt': 10}
# Split into {$and: [a: {'$gt': 1}, a: {'$lt': 10}]}
split_conditions = []
for key in query_value.keys():
split_conditions.append(QueryCondition({query_field: {key: query_value[key]}}))
return QueryCondition({'$and': split_conditions}).to_sql()
else:
# The simple case of {a: {$complex_operator: 1}}
complex_operator = query_value.keys()[0] # the complex operator
target_value = query_value[complex_operator]
query_str = None
if u"$contains" == complex_operator:
query_str = u"LIKE " + value_to_sql_str('%' + target_value + '%')
elif complex_operator in ('$eq', '$gte', '$gt', '$lt', '$lte'):
map_dic = {'$eq': '=', '$gte': '>=', '$gt': '>', '$lt': '<', '$lte': '<='}
query_str = map_dic[complex_operator] + value_to_sql_str(target_value)
elif u'$in' == complex_operator:
if len(target_value) == 0:
query_str = u"IN (null) "
else:
query_str = u"IN (" + u','.join([str(_v_) for _v_ in target_value]) + u") "
else:
raise MonSQLException(u"Unsupport complex query: %s" %(complex_operator))
return query_field + ' ' + query_str
else:
return None
# For testing
assert False
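# Hedged worked examples for the mapping documented above; the exact
# whitespace of the generated SQL is an assumption.
# QueryCondition({'age': {'$gte': 18}}).to_sql()
#     -> "age >=18"
# QueryCondition({'$or': [{'a': 1}, {'b': 2}]}).to_sql()
#     -> "a =1 OR b =2"
# QueryCondition({'name': {'$contains': 'bo'}}).to_sql()
#     -> "name LIKE '%bo%'"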
|
[
"def",
"to_sql",
"(",
"self",
")",
":",
"condition",
"=",
"self",
".",
"condition",
"if",
"condition",
":",
"# If the condition is not None nor empty",
"if",
"len",
"(",
"condition",
".",
"keys",
"(",
")",
")",
">",
"1",
":",
"# If in the form of {'a': 1, 'b': 2}, simplify to {'$and': [{'a': 1, 'b': 2}]}",
"split_conditions",
"=",
"[",
"]",
"for",
"key",
"in",
"condition",
".",
"keys",
"(",
")",
":",
"split_conditions",
".",
"append",
"(",
"{",
"key",
":",
"condition",
"[",
"key",
"]",
"}",
")",
"return",
"QueryCondition",
"(",
"{",
"'$and'",
":",
"split_conditions",
"}",
")",
".",
"to_sql",
"(",
")",
"else",
":",
"query_field",
",",
"query_value",
"=",
"condition",
".",
"items",
"(",
")",
"[",
"0",
"]",
"if",
"query_field",
"in",
"QueryCondition",
".",
"COMPLEX_QUERY_INDICATOR",
":",
"# This is a composite query",
"if",
"u'$not'",
"==",
"query_field",
":",
"not_condition",
"=",
"QueryCondition",
"(",
"query_value",
")",
".",
"to_sql",
"(",
")",
"if",
"not_condition",
"is",
"not",
"None",
":",
"return",
"'NOT (%s)'",
"%",
"(",
"not_condition",
")",
"else",
":",
"return",
"None",
"if",
"query_field",
"in",
"(",
"u'$or'",
",",
"u'$and'",
",",
")",
":",
"conditions",
"=",
"query_value",
"if",
"not",
"isinstance",
"(",
"conditions",
",",
"list",
")",
"or",
"len",
"(",
"conditions",
")",
"<",
"2",
":",
"raise",
"MonSQLException",
"(",
"'QUERY VALUE FOR KEY %s MUST BE LIST WITH LENGTH BEING AT LEAST 2'",
"%",
"(",
"query_field",
")",
")",
"# compute sub conditions recursively",
"conditions",
"=",
"map",
"(",
"lambda",
"c",
":",
"QueryCondition",
"(",
"c",
")",
".",
"to_sql",
"(",
")",
",",
"conditions",
")",
"conditions",
"=",
"filter",
"(",
"lambda",
"c",
":",
"c",
"is",
"not",
"None",
",",
"conditions",
")",
"# join them together",
"if",
"len",
"(",
"conditions",
")",
">",
"0",
":",
"if",
"query_field",
"==",
"u'$or'",
":",
"return",
"' OR '",
".",
"join",
"(",
"conditions",
")",
"elif",
"query_field",
"==",
"u'$and'",
":",
"return",
"' AND '",
".",
"join",
"(",
"conditions",
")",
"else",
":",
"return",
"None",
"else",
":",
"raise",
"MonSQLException",
"(",
"'Unsupport query_field'",
")",
"else",
":",
"# This is a one-field query like {'id': ...}",
"if",
"query_field",
"in",
"QueryCondition",
".",
"MYSQL_RESERVE_WORDS",
":",
"query_field",
"=",
"\"`%s`\"",
"%",
"(",
"query_field",
")",
"if",
"not",
"type",
"(",
"query_value",
")",
"is",
"types",
".",
"DictType",
":",
"# transform {'id': 1} to {'id': {'$eq': 1}} for convenience",
"query_value",
"=",
"{",
"'$eq'",
":",
"query_value",
"}",
"if",
"len",
"(",
"query_value",
".",
"keys",
"(",
")",
")",
">",
"1",
":",
"# Deal with situation like a: {'$gt': 1, '$lt': 10}",
"# Split into {$and: [a: {'$gt': 1}, a: {'$lt': 10}]}",
"split_conditions",
"=",
"[",
"]",
"for",
"key",
"in",
"query_value",
".",
"keys",
"(",
")",
":",
"split_conditions",
".",
"append",
"(",
"QueryCondition",
"(",
"{",
"query_field",
":",
"{",
"key",
":",
"query_value",
"[",
"key",
"]",
"}",
"}",
")",
")",
"return",
"QueryCondition",
"(",
"{",
"'$and'",
":",
"split_conditions",
"}",
")",
".",
"to_sql",
"(",
")",
"else",
":",
"# The simple case of {a: {$complex_operator: 1}}",
"complex_operator",
"=",
"query_value",
".",
"keys",
"(",
")",
"[",
"0",
"]",
"# the complex operator",
"target_value",
"=",
"query_value",
"[",
"complex_operator",
"]",
"query_str",
"=",
"None",
"if",
"u\"$contains\"",
"==",
"complex_operator",
":",
"query_str",
"=",
"u\"LIKE \"",
"+",
"value_to_sql_str",
"(",
"'%'",
"+",
"target_value",
"+",
"'%'",
")",
"elif",
"complex_operator",
"in",
"(",
"'$eq'",
",",
"'$gte'",
",",
"'$gt'",
",",
"'$lt'",
",",
"'$lte'",
")",
":",
"map_dic",
"=",
"{",
"'$eq'",
":",
"'='",
",",
"'$gte'",
":",
"'>='",
",",
"'$gt'",
":",
"'>'",
",",
"'$lt'",
":",
"'<'",
",",
"'$lte'",
":",
"'<='",
"}",
"query_str",
"=",
"map_dic",
"[",
"complex_operator",
"]",
"+",
"value_to_sql_str",
"(",
"target_value",
")",
"elif",
"u'$in'",
"==",
"complex_operator",
":",
"if",
"len",
"(",
"target_value",
")",
"==",
"0",
":",
"query_str",
"=",
"u\"IN (null) \"",
"else",
":",
"query_str",
"=",
"u\"IN (\"",
"+",
"u','",
".",
"join",
"(",
"[",
"str",
"(",
"_v_",
")",
"for",
"_v_",
"in",
"target_value",
"]",
")",
"+",
"u\") \"",
"else",
":",
"raise",
"MonSQLException",
"(",
"u\"Unsupport complex query: %s\"",
"%",
"(",
"complex_operator",
")",
")",
"return",
"query_field",
"+",
"' '",
"+",
"query_str",
"else",
":",
"return",
"None",
"# For testing",
"assert",
"False"
] |
This function builds a sql condition string (those used in the 'WHERE' clause) based on the given condition
Supported match pattern:
{a: 1} -> a == 1
{a: {$gt: 1}} -> a > 1
{a: {$gte: 1}} -> a >= 1
{a: {$lt: 1}} -> a < 1
{a: {$lte: 1}} -> a <= 1
{a: {$eq: 1}} -> a == 1
{a: {$in: [1, 2]}} -> a in (1, 2)
{a: {$contains: '123'}} -> a like %123%
And complex combination
{$not: condition} -> NOT (condition)
{$and: [condition1, condition2]} -> condition1 and condition2
{$or: [condition1, condition2]} -> condition1 or condition2
|
[
"This",
"function",
"build",
"a",
"sql",
"condition",
"string",
"(",
"those",
"used",
"in",
"the",
"WHERE",
"clause",
")",
"based",
"on",
"given",
"condition",
"Supported",
"match",
"pattern",
":"
] |
python
|
train
|
pandas-dev/pandas
|
pandas/core/algorithms.py
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/algorithms.py#L966-L1043
|
def quantile(x, q, interpolation_method='fraction'):
"""
Compute sample quantile or quantiles of the input array. For example, q=0.5
computes the median.
The `interpolation_method` parameter supports three values, namely
    `fraction` (default), `lower` and `higher`. Interpolation is done only
if the desired quantile lies between two data points `i` and `j`. For
`fraction`, the result is an interpolated value between `i` and `j`;
for `lower`, the result is `i`, for `higher` the result is `j`.
Parameters
----------
x : ndarray
Values from which to extract score.
q : scalar or array
Percentile at which to extract score.
interpolation_method : {'fraction', 'lower', 'higher'}, optional
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
- fraction: `i + (j - i)*fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
        - lower: `i`.
- higher: `j`.
Returns
-------
score : float
Score at percentile.
Examples
--------
>>> from scipy import stats
>>> a = np.arange(100)
>>> stats.scoreatpercentile(a, 50)
49.5
"""
x = np.asarray(x)
mask = isna(x)
x = x[~mask]
values = np.sort(x)
def _interpolate(a, b, fraction):
"""Returns the point at the given fraction between a and b, where
'fraction' must be between 0 and 1.
"""
return a + (b - a) * fraction
def _get_score(at):
if len(values) == 0:
return np.nan
idx = at * (len(values) - 1)
if idx % 1 == 0:
score = values[int(idx)]
else:
if interpolation_method == 'fraction':
score = _interpolate(values[int(idx)], values[int(idx) + 1],
idx % 1)
elif interpolation_method == 'lower':
score = values[np.floor(idx)]
elif interpolation_method == 'higher':
score = values[np.ceil(idx)]
else:
raise ValueError("interpolation_method can only be 'fraction' "
", 'lower' or 'higher'")
return score
if is_scalar(q):
return _get_score(q)
else:
q = np.asarray(q, np.float64)
return algos.arrmap_float64(q, _get_score)
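# Worked example for quantile() above: with sorted values [1, 2, 3, 4] and
# q=0.25, idx = 0.25 * (4 - 1) = 0.75, so the default 'fraction' method
# interpolates 1 + (2 - 1) * 0.75 = 1.75; numpy's percentile agrees.
import numpy as np
assert np.percentile(np.array([1.0, 2.0, 3.0, 4.0]), 25) == 1.75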
|
[
"def",
"quantile",
"(",
"x",
",",
"q",
",",
"interpolation_method",
"=",
"'fraction'",
")",
":",
"x",
"=",
"np",
".",
"asarray",
"(",
"x",
")",
"mask",
"=",
"isna",
"(",
"x",
")",
"x",
"=",
"x",
"[",
"~",
"mask",
"]",
"values",
"=",
"np",
".",
"sort",
"(",
"x",
")",
"def",
"_interpolate",
"(",
"a",
",",
"b",
",",
"fraction",
")",
":",
"\"\"\"Returns the point at the given fraction between a and b, where\n 'fraction' must be between 0 and 1.\n \"\"\"",
"return",
"a",
"+",
"(",
"b",
"-",
"a",
")",
"*",
"fraction",
"def",
"_get_score",
"(",
"at",
")",
":",
"if",
"len",
"(",
"values",
")",
"==",
"0",
":",
"return",
"np",
".",
"nan",
"idx",
"=",
"at",
"*",
"(",
"len",
"(",
"values",
")",
"-",
"1",
")",
"if",
"idx",
"%",
"1",
"==",
"0",
":",
"score",
"=",
"values",
"[",
"int",
"(",
"idx",
")",
"]",
"else",
":",
"if",
"interpolation_method",
"==",
"'fraction'",
":",
"score",
"=",
"_interpolate",
"(",
"values",
"[",
"int",
"(",
"idx",
")",
"]",
",",
"values",
"[",
"int",
"(",
"idx",
")",
"+",
"1",
"]",
",",
"idx",
"%",
"1",
")",
"elif",
"interpolation_method",
"==",
"'lower'",
":",
"score",
"=",
"values",
"[",
"np",
".",
"floor",
"(",
"idx",
")",
"]",
"elif",
"interpolation_method",
"==",
"'higher'",
":",
"score",
"=",
"values",
"[",
"np",
".",
"ceil",
"(",
"idx",
")",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"\"interpolation_method can only be 'fraction' \"",
"\", 'lower' or 'higher'\"",
")",
"return",
"score",
"if",
"is_scalar",
"(",
"q",
")",
":",
"return",
"_get_score",
"(",
"q",
")",
"else",
":",
"q",
"=",
"np",
".",
"asarray",
"(",
"q",
",",
"np",
".",
"float64",
")",
"return",
"algos",
".",
"arrmap_float64",
"(",
"q",
",",
"_get_score",
")"
] |
Compute sample quantile or quantiles of the input array. For example, q=0.5
computes the median.
The `interpolation_method` parameter supports three values, namely
`fraction` (default), `lower` and `higher`. Interpolation is done only
if the desired quantile lies between two data points `i` and `j`. For
`fraction`, the result is an interpolated value between `i` and `j`;
for `lower`, the result is `i`, for `higher` the result is `j`.
Parameters
----------
x : ndarray
Values from which to extract score.
q : scalar or array
Percentile at which to extract score.
interpolation_method : {'fraction', 'lower', 'higher'}, optional
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
- fraction: `i + (j - i)*fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
    - lower: `i`.
- higher: `j`.
Returns
-------
score : float
Score at percentile.
Examples
--------
>>> from scipy import stats
>>> a = np.arange(100)
>>> stats.scoreatpercentile(a, 50)
49.5
|
[
"Compute",
"sample",
"quantile",
"or",
"quantiles",
"of",
"the",
"input",
"array",
".",
"For",
"example",
"q",
"=",
"0",
".",
"5",
"computes",
"the",
"median",
"."
] |
python
|
train
|
UpCloudLtd/upcloud-python-api
|
upcloud_api/cloud_manager/firewall_mixin.py
|
https://github.com/UpCloudLtd/upcloud-python-api/blob/954b0ad7c4b932b2be31a95d88975f6b0eeac8ed/upcloud_api/cloud_manager/firewall_mixin.py#L42-L55
|
def create_firewall_rule(self, server, firewall_rule_body):
"""
Create a new firewall rule for a given server uuid.
    The rule can be given as a dict or with FirewallRule.prepare_post_body().
Returns a FirewallRule object.
"""
server_uuid, server_instance = uuid_and_instance(server)
url = '/server/{0}/firewall_rule'.format(server_uuid)
body = {'firewall_rule': firewall_rule_body}
res = self.post_request(url, body)
return FirewallRule(server=server_instance, **res['firewall_rule'])
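# Hedged usage sketch; 'manager' is assumed to be a configured CloudManager
# and the rule body below is illustrative only.
# rule = manager.create_firewall_rule(server_uuid, {
#     'position': '1', 'direction': 'in', 'family': 'IPv4',
#     'protocol': 'tcp', 'destination_port_start': '22',
#     'destination_port_end': '22', 'action': 'accept',
# })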
|
[
"def",
"create_firewall_rule",
"(",
"self",
",",
"server",
",",
"firewall_rule_body",
")",
":",
"server_uuid",
",",
"server_instance",
"=",
"uuid_and_instance",
"(",
"server",
")",
"url",
"=",
"'/server/{0}/firewall_rule'",
".",
"format",
"(",
"server_uuid",
")",
"body",
"=",
"{",
"'firewall_rule'",
":",
"firewall_rule_body",
"}",
"res",
"=",
"self",
".",
"post_request",
"(",
"url",
",",
"body",
")",
"return",
"FirewallRule",
"(",
"server",
"=",
"server_instance",
",",
"*",
"*",
"res",
"[",
"'firewall_rule'",
"]",
")"
] |
Create a new firewall rule for a given server uuid.
The rule can be given as a dict or with FirewallRule.prepare_post_body().
Returns a FirewallRule object.
|
[
"Create",
"a",
"new",
"firewall",
"rule",
"for",
"a",
"given",
"server",
"uuid",
"."
] |
python
|
train
|
pycontribs/pyrax
|
pyrax/queueing.py
|
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/queueing.py#L504-L516
|
def set_metadata(self, queue, metadata, clear=False):
"""
Accepts a dictionary and adds that to the specified queue's metadata.
If the 'clear' argument is passed as True, any existing metadata is
replaced with the new metadata.
"""
uri = "/%s/%s/metadata" % (self.uri_base, utils.get_id(queue))
if clear:
curr = {}
else:
curr = self.get_metadata(queue)
curr.update(metadata)
resp, resp_body = self.api.method_put(uri, body=curr)
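# The read-merge-write pattern above, standalone: existing metadata is kept
# and updated unless 'clear' starts from an empty dict.
def _merged_metadata(current, new, clear=False):
    base = {} if clear else dict(current)
    base.update(new)
    return base

assert _merged_metadata({'a': 1}, {'b': 2}) == {'a': 1, 'b': 2}
assert _merged_metadata({'a': 1}, {'b': 2}, clear=True) == {'b': 2}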
|
[
"def",
"set_metadata",
"(",
"self",
",",
"queue",
",",
"metadata",
",",
"clear",
"=",
"False",
")",
":",
"uri",
"=",
"\"/%s/%s/metadata\"",
"%",
"(",
"self",
".",
"uri_base",
",",
"utils",
".",
"get_id",
"(",
"queue",
")",
")",
"if",
"clear",
":",
"curr",
"=",
"{",
"}",
"else",
":",
"curr",
"=",
"self",
".",
"get_metadata",
"(",
"queue",
")",
"curr",
".",
"update",
"(",
"metadata",
")",
"resp",
",",
"resp_body",
"=",
"self",
".",
"api",
".",
"method_put",
"(",
"uri",
",",
"body",
"=",
"curr",
")"
] |
Accepts a dictionary and adds that to the specified queue's metadata.
If the 'clear' argument is passed as True, any existing metadata is
replaced with the new metadata.
|
[
"Accepts",
"a",
"dictionary",
"and",
"adds",
"that",
"to",
"the",
"specified",
"queue",
"s",
"metadata",
".",
"If",
"the",
"clear",
"argument",
"is",
"passed",
"as",
"True",
"any",
"existing",
"metadata",
"is",
"replaced",
"with",
"the",
"new",
"metadata",
"."
] |
python
|
train
|
fastai/fastai
|
fastai/layers.py
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/layers.py#L281-L283
|
def MSELossFlat(*args, axis:int=-1, floatify:bool=True, **kwargs):
"Same as `nn.MSELoss`, but flattens input and target."
return FlattenedLoss(nn.MSELoss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs)
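# Hedged usage sketch (PyTorch assumed installed): both tensors are
# flattened before being handed to nn.MSELoss.
# import torch
# loss_func = MSELossFlat()
# loss = loss_func(torch.randn(4, 3), torch.randn(4, 3))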
|
[
"def",
"MSELossFlat",
"(",
"*",
"args",
",",
"axis",
":",
"int",
"=",
"-",
"1",
",",
"floatify",
":",
"bool",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"FlattenedLoss",
"(",
"nn",
".",
"MSELoss",
",",
"*",
"args",
",",
"axis",
"=",
"axis",
",",
"floatify",
"=",
"floatify",
",",
"is_2d",
"=",
"False",
",",
"*",
"*",
"kwargs",
")"
] |
Same as `nn.MSELoss`, but flattens input and target.
|
[
"Same",
"as",
"nn",
".",
"MSELoss",
"but",
"flattens",
"input",
"and",
"target",
"."
] |
python
|
train
|
google/grumpy
|
third_party/stdlib/_abcoll.py
|
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/_abcoll.py#L312-L320
|
def pop(self):
"""Return the popped value. Raise KeyError if empty."""
it = iter(self)
try:
value = next(it)
except StopIteration:
raise KeyError
self.discard(value)
return value
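# Usage sketch: the built-in set shares these semantics with the
# MutableSet mixin above.
s = {1, 2, 3}
v = s.pop()              # removes and returns an arbitrary element
assert v not in s
# set().pop()            # would raise KeyError on an empty set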
|
[
"def",
"pop",
"(",
"self",
")",
":",
"it",
"=",
"iter",
"(",
"self",
")",
"try",
":",
"value",
"=",
"next",
"(",
"it",
")",
"except",
"StopIteration",
":",
"raise",
"KeyError",
"self",
".",
"discard",
"(",
"value",
")",
"return",
"value"
] |
Return the popped value. Raise KeyError if empty.
|
[
"Return",
"the",
"popped",
"value",
".",
"Raise",
"KeyError",
"if",
"empty",
"."
] |
python
|
valid
|
pycontribs/pyrax
|
pyrax/resource.py
|
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/resource.py#L66-L76
|
def _add_details(self, info):
"""
Takes the dict returned by the API call and sets the
corresponding attributes on the object.
"""
for (key, val) in six.iteritems(info):
if isinstance(key, six.text_type) and six.PY2:
key = key.encode(pyrax.get_encoding())
elif isinstance(key, bytes):
key = key.decode("utf-8")
setattr(self, key, val)
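# Standalone sketch of the pattern above, minus the six/py2 handling:
# copy API-response keys onto an object as plain attributes.
class _Resource(object):
    def _add_details(self, info):
        for key, val in info.items():
            if isinstance(key, bytes):
                key = key.decode("utf-8")
            setattr(self, key, val)

r = _Resource()
r._add_details({"id": 42, "name": "web1"})
assert (r.id, r.name) == (42, "web1")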
|
[
"def",
"_add_details",
"(",
"self",
",",
"info",
")",
":",
"for",
"(",
"key",
",",
"val",
")",
"in",
"six",
".",
"iteritems",
"(",
"info",
")",
":",
"if",
"isinstance",
"(",
"key",
",",
"six",
".",
"text_type",
")",
"and",
"six",
".",
"PY2",
":",
"key",
"=",
"key",
".",
"encode",
"(",
"pyrax",
".",
"get_encoding",
"(",
")",
")",
"elif",
"isinstance",
"(",
"key",
",",
"bytes",
")",
":",
"key",
"=",
"key",
".",
"decode",
"(",
"\"utf-8\"",
")",
"setattr",
"(",
"self",
",",
"key",
",",
"val",
")"
] |
Takes the dict returned by the API call and sets the
corresponding attributes on the object.
|
[
"Takes",
"the",
"dict",
"returned",
"by",
"the",
"API",
"call",
"and",
"sets",
"the",
"corresponding",
"attributes",
"on",
"the",
"object",
"."
] |
python
|
train
|
ekzhu/datasketch
|
datasketch/minhash.py
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/minhash.py#L137-L154
|
def jaccard(self, other):
'''Estimate the `Jaccard similarity`_ (resemblance) between the sets
represented by this MinHash and the other.
Args:
other (datasketch.MinHash): The other MinHash.
Returns:
float: The Jaccard similarity, which is between 0.0 and 1.0.
'''
if other.seed != self.seed:
raise ValueError("Cannot compute Jaccard given MinHash with\
different seeds")
if len(self) != len(other):
raise ValueError("Cannot compute Jaccard given MinHash with\
different numbers of permutation functions")
return np.float(np.count_nonzero(self.hashvalues==other.hashvalues)) /\
np.float(len(self))
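# Hedged worked example (datasketch assumed installed): the estimate is the
# fraction of matching hash positions, close to the true Jaccard of
# {a, b, c} and {a, b, d}, which is 2/4 = 0.5.
# from datasketch import MinHash
# m1, m2 = MinHash(), MinHash()
# for w in ("a", "b", "c"): m1.update(w.encode("utf8"))
# for w in ("a", "b", "d"): m2.update(w.encode("utf8"))
# m1.jaccard(m2)   # approximately 0.5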
|
[
"def",
"jaccard",
"(",
"self",
",",
"other",
")",
":",
"if",
"other",
".",
"seed",
"!=",
"self",
".",
"seed",
":",
"raise",
"ValueError",
"(",
"\"Cannot compute Jaccard given MinHash with\\\n different seeds\"",
")",
"if",
"len",
"(",
"self",
")",
"!=",
"len",
"(",
"other",
")",
":",
"raise",
"ValueError",
"(",
"\"Cannot compute Jaccard given MinHash with\\\n different numbers of permutation functions\"",
")",
"return",
"np",
".",
"float",
"(",
"np",
".",
"count_nonzero",
"(",
"self",
".",
"hashvalues",
"==",
"other",
".",
"hashvalues",
")",
")",
"/",
"np",
".",
"float",
"(",
"len",
"(",
"self",
")",
")"
] |
Estimate the `Jaccard similarity`_ (resemblance) between the sets
represented by this MinHash and the other.
Args:
other (datasketch.MinHash): The other MinHash.
Returns:
float: The Jaccard similarity, which is between 0.0 and 1.0.
|
[
"Estimate",
"the",
"Jaccard",
"similarity",
"_",
"(",
"resemblance",
")",
"between",
"the",
"sets",
"represented",
"by",
"this",
"MinHash",
"and",
"the",
"other",
"."
] |
python
|
test
|
GoogleCloudPlatform/appengine-mapreduce
|
python/demo/main.py
|
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/demo/main.py#L181-L185
|
def split_into_sentences(s):
"""Split text into list of sentences."""
s = re.sub(r"\s+", " ", s)
s = re.sub(r"[\\.\\?\\!]", "\n", s)
return s.split("\n")
|
[
"def",
"split_into_sentences",
"(",
"s",
")",
":",
"s",
"=",
"re",
".",
"sub",
"(",
"r\"\\s+\"",
",",
"\" \"",
",",
"s",
")",
"s",
"=",
"re",
".",
"sub",
"(",
"r\"[\\\\.\\\\?\\\\!]\"",
",",
"\"\\n\"",
",",
"s",
")",
"return",
"s",
".",
"split",
"(",
"\"\\n\"",
")"
] |
Split text into list of sentences.
|
[
"Split",
"text",
"into",
"list",
"of",
"sentences",
"."
] |
python
|
train
|
saltstack/salt
|
salt/utils/dns.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/dns.py#L546-L654
|
def lookup(
name,
rdtype,
method=None,
servers=None,
timeout=None,
walk=False,
walk_tld=False,
secure=None
):
'''
Lookup DNS records and return their data
:param name: name to lookup
:param rdtype: DNS record type
:param method: gai (getaddrinfo()), dnspython, dig, drill, host, nslookup or auto (default)
:param servers: (list of) server(s) to try in-order
:param timeout: query timeout or a valiant approximation of that
:param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'.
:param walk_tld: Include the final domain in the walk
:param secure: return only DNSSEC secured responses
:return: [] of record data
'''
# opts = __opts__.get('dns', {})
opts = {}
method = method or opts.get('method', 'auto')
secure = secure or opts.get('secure', None)
servers = servers or opts.get('servers', None)
timeout = timeout or opts.get('timeout', False)
rdtype = rdtype.upper()
# pylint: disable=bad-whitespace,multiple-spaces-before-keyword
query_methods = (
('gai', _lookup_gai, not any((rdtype not in ('A', 'AAAA'), servers, secure))),
('dnspython', _lookup_dnspython, HAS_DNSPYTHON),
('dig', _lookup_dig, HAS_DIG),
('drill', _lookup_drill, HAS_DRILL),
('host', _lookup_host, HAS_HOST and not secure),
('nslookup', _lookup_nslookup, HAS_NSLOOKUP and not secure),
)
# pylint: enable=bad-whitespace,multiple-spaces-before-keyword
try:
if method == 'auto':
# The first one not to bork on the conditions becomes the function
method, resolver = next(((rname, rcb) for rname, rcb, rtest in query_methods if rtest))
else:
# The first one not to bork on the conditions becomes the function. And the name must match.
resolver = next((rcb for rname, rcb, rtest in query_methods if rname == method and rtest))
except StopIteration:
log.error(
'Unable to lookup %s/%s: Resolver method %s invalid, unsupported '
            'or unable to perform query', name, rdtype, method
)
return False
res_kwargs = {
'rdtype': rdtype,
}
if servers:
if not isinstance(servers, (list, tuple)):
servers = [servers]
if method in ('dnspython', 'dig', 'drill'):
res_kwargs['servers'] = servers
else:
if timeout:
timeout /= len(servers)
# Inject a wrapper for multi-server behaviour
def _multi_srvr(resolv_func):
@functools.wraps(resolv_func)
def _wrapper(**res_kwargs):
for server in servers:
s_res = resolv_func(server=server, **res_kwargs)
if s_res:
return s_res
return _wrapper
resolver = _multi_srvr(resolver)
if not walk:
name = [name]
else:
idx = 0
if rdtype in ('SRV', 'TLSA'): # The only RRs I know that have 2 name components
idx = name.find('.') + 1
idx = name.find('.', idx) + 1
domain = name[idx:]
rname = name[0:idx]
name = _tree(domain, walk_tld)
if walk == 'name':
name = [rname + domain for domain in name]
if timeout:
timeout /= len(name)
if secure:
res_kwargs['secure'] = secure
if timeout:
res_kwargs['timeout'] = timeout
for rname in name:
res = resolver(name=rname, **res_kwargs)
if res:
return res
return res
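# Standalone sketch of the resolver-selection pattern above: take the first
# (name, callback) pair whose availability test passes.
def _fake_dig(**kwargs): return ['192.0.2.1']
def _fake_host(**kwargs): return []
_methods = (('dig', _fake_dig, True), ('host', _fake_host, True))
method, resolver = next(((n, cb) for n, cb, ok in _methods if ok))
assert method == 'dig' and resolver() == ['192.0.2.1']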
|
[
"def",
"lookup",
"(",
"name",
",",
"rdtype",
",",
"method",
"=",
"None",
",",
"servers",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"walk",
"=",
"False",
",",
"walk_tld",
"=",
"False",
",",
"secure",
"=",
"None",
")",
":",
"# opts = __opts__.get('dns', {})",
"opts",
"=",
"{",
"}",
"method",
"=",
"method",
"or",
"opts",
".",
"get",
"(",
"'method'",
",",
"'auto'",
")",
"secure",
"=",
"secure",
"or",
"opts",
".",
"get",
"(",
"'secure'",
",",
"None",
")",
"servers",
"=",
"servers",
"or",
"opts",
".",
"get",
"(",
"'servers'",
",",
"None",
")",
"timeout",
"=",
"timeout",
"or",
"opts",
".",
"get",
"(",
"'timeout'",
",",
"False",
")",
"rdtype",
"=",
"rdtype",
".",
"upper",
"(",
")",
"# pylint: disable=bad-whitespace,multiple-spaces-before-keyword",
"query_methods",
"=",
"(",
"(",
"'gai'",
",",
"_lookup_gai",
",",
"not",
"any",
"(",
"(",
"rdtype",
"not",
"in",
"(",
"'A'",
",",
"'AAAA'",
")",
",",
"servers",
",",
"secure",
")",
")",
")",
",",
"(",
"'dnspython'",
",",
"_lookup_dnspython",
",",
"HAS_DNSPYTHON",
")",
",",
"(",
"'dig'",
",",
"_lookup_dig",
",",
"HAS_DIG",
")",
",",
"(",
"'drill'",
",",
"_lookup_drill",
",",
"HAS_DRILL",
")",
",",
"(",
"'host'",
",",
"_lookup_host",
",",
"HAS_HOST",
"and",
"not",
"secure",
")",
",",
"(",
"'nslookup'",
",",
"_lookup_nslookup",
",",
"HAS_NSLOOKUP",
"and",
"not",
"secure",
")",
",",
")",
"# pylint: enable=bad-whitespace,multiple-spaces-before-keyword",
"try",
":",
"if",
"method",
"==",
"'auto'",
":",
"# The first one not to bork on the conditions becomes the function",
"method",
",",
"resolver",
"=",
"next",
"(",
"(",
"(",
"rname",
",",
"rcb",
")",
"for",
"rname",
",",
"rcb",
",",
"rtest",
"in",
"query_methods",
"if",
"rtest",
")",
")",
"else",
":",
"# The first one not to bork on the conditions becomes the function. And the name must match.",
"resolver",
"=",
"next",
"(",
"(",
"rcb",
"for",
"rname",
",",
"rcb",
",",
"rtest",
"in",
"query_methods",
"if",
"rname",
"==",
"method",
"and",
"rtest",
")",
")",
"except",
"StopIteration",
":",
"log",
".",
"error",
"(",
"'Unable to lookup %s/%s: Resolver method %s invalid, unsupported '",
"'or unable to perform query'",
",",
"method",
",",
"rdtype",
",",
"name",
")",
"return",
"False",
"res_kwargs",
"=",
"{",
"'rdtype'",
":",
"rdtype",
",",
"}",
"if",
"servers",
":",
"if",
"not",
"isinstance",
"(",
"servers",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"servers",
"=",
"[",
"servers",
"]",
"if",
"method",
"in",
"(",
"'dnspython'",
",",
"'dig'",
",",
"'drill'",
")",
":",
"res_kwargs",
"[",
"'servers'",
"]",
"=",
"servers",
"else",
":",
"if",
"timeout",
":",
"timeout",
"/=",
"len",
"(",
"servers",
")",
"# Inject a wrapper for multi-server behaviour",
"def",
"_multi_srvr",
"(",
"resolv_func",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"resolv_func",
")",
"def",
"_wrapper",
"(",
"*",
"*",
"res_kwargs",
")",
":",
"for",
"server",
"in",
"servers",
":",
"s_res",
"=",
"resolv_func",
"(",
"server",
"=",
"server",
",",
"*",
"*",
"res_kwargs",
")",
"if",
"s_res",
":",
"return",
"s_res",
"return",
"_wrapper",
"resolver",
"=",
"_multi_srvr",
"(",
"resolver",
")",
"if",
"not",
"walk",
":",
"name",
"=",
"[",
"name",
"]",
"else",
":",
"idx",
"=",
"0",
"if",
"rdtype",
"in",
"(",
"'SRV'",
",",
"'TLSA'",
")",
":",
"# The only RRs I know that have 2 name components",
"idx",
"=",
"name",
".",
"find",
"(",
"'.'",
")",
"+",
"1",
"idx",
"=",
"name",
".",
"find",
"(",
"'.'",
",",
"idx",
")",
"+",
"1",
"domain",
"=",
"name",
"[",
"idx",
":",
"]",
"rname",
"=",
"name",
"[",
"0",
":",
"idx",
"]",
"name",
"=",
"_tree",
"(",
"domain",
",",
"walk_tld",
")",
"if",
"walk",
"==",
"'name'",
":",
"name",
"=",
"[",
"rname",
"+",
"domain",
"for",
"domain",
"in",
"name",
"]",
"if",
"timeout",
":",
"timeout",
"/=",
"len",
"(",
"name",
")",
"if",
"secure",
":",
"res_kwargs",
"[",
"'secure'",
"]",
"=",
"secure",
"if",
"timeout",
":",
"res_kwargs",
"[",
"'timeout'",
"]",
"=",
"timeout",
"for",
"rname",
"in",
"name",
":",
"res",
"=",
"resolver",
"(",
"name",
"=",
"rname",
",",
"*",
"*",
"res_kwargs",
")",
"if",
"res",
":",
"return",
"res",
"return",
"res"
] |
Lookup DNS records and return their data
:param name: name to lookup
:param rdtype: DNS record type
:param method: gai (getaddrinfo()), dnspython, dig, drill, host, nslookup or auto (default)
:param servers: (list of) server(s) to try in-order
:param timeout: query timeout or a valiant approximation of that
:param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'.
:param walk_tld: Include the final domain in the walk
:param secure: return only DNSSEC secured responses
:return: [] of record data
|
[
"Lookup",
"DNS",
"records",
"and",
"return",
"their",
"data"
] |
python
|
train
|
wandb/client
|
wandb/vendor/prompt_toolkit/buffer.py
|
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/buffer.py#L655-L666
|
def join_next_line(self, separator=' '):
"""
Join the next line to the current one by deleting the line ending after
the current line.
"""
if not self.document.on_last_line:
self.cursor_position += self.document.get_end_of_line_position()
self.delete()
# Remove spaces.
self.text = (self.document.text_before_cursor + separator +
self.document.text_after_cursor.lstrip(' '))
|
[
"def",
"join_next_line",
"(",
"self",
",",
"separator",
"=",
"' '",
")",
":",
"if",
"not",
"self",
".",
"document",
".",
"on_last_line",
":",
"self",
".",
"cursor_position",
"+=",
"self",
".",
"document",
".",
"get_end_of_line_position",
"(",
")",
"self",
".",
"delete",
"(",
")",
"# Remove spaces.",
"self",
".",
"text",
"=",
"(",
"self",
".",
"document",
".",
"text_before_cursor",
"+",
"separator",
"+",
"self",
".",
"document",
".",
"text_after_cursor",
".",
"lstrip",
"(",
"' '",
")",
")"
] |
Join the next line to the current one by deleting the line ending after
the current line.
|
[
"Join",
"the",
"next",
"line",
"to",
"the",
"current",
"one",
"by",
"deleting",
"the",
"line",
"ending",
"after",
"the",
"current",
"line",
"."
] |
python
|
train
|
klahnakoski/pyLibrary
|
jx_python/jx.py
|
https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/jx_python/jx.py#L914-L941
|
def wrap_function(func):
"""
RETURN A THREE-PARAMETER WINDOW FUNCTION TO MATCH
"""
if is_text(func):
return compile_expression(func)
numarg = func.__code__.co_argcount
if numarg == 0:
def temp(row, rownum, rows):
return func()
return temp
elif numarg == 1:
def temp(row, rownum, rows):
return func(row)
return temp
elif numarg == 2:
def temp(row, rownum, rows):
return func(row, rownum)
return temp
elif numarg == 3:
return func
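# Usage sketch for wrap_function(): callables of zero to three arguments are
# all adapted to the uniform (row, rownum, rows) window signature (is_text
# and compile_expression come from the surrounding module).
# f = wrap_function(lambda row: row * 2)
# f(21, 0, [])    # -> 42; the extra window arguments are ignored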
|
[
"def",
"wrap_function",
"(",
"func",
")",
":",
"if",
"is_text",
"(",
"func",
")",
":",
"return",
"compile_expression",
"(",
"func",
")",
"numarg",
"=",
"func",
".",
"__code__",
".",
"co_argcount",
"if",
"numarg",
"==",
"0",
":",
"def",
"temp",
"(",
"row",
",",
"rownum",
",",
"rows",
")",
":",
"return",
"func",
"(",
")",
"return",
"temp",
"elif",
"numarg",
"==",
"1",
":",
"def",
"temp",
"(",
"row",
",",
"rownum",
",",
"rows",
")",
":",
"return",
"func",
"(",
"row",
")",
"return",
"temp",
"elif",
"numarg",
"==",
"2",
":",
"def",
"temp",
"(",
"row",
",",
"rownum",
",",
"rows",
")",
":",
"return",
"func",
"(",
"row",
",",
"rownum",
")",
"return",
"temp",
"elif",
"numarg",
"==",
"3",
":",
"return",
"func"
] |
RETURN A THREE-PARAMETER WINDOW FUNCTION TO MATCH
|
[
"RETURN",
"A",
"THREE",
"-",
"PARAMETER",
"WINDOW",
"FUNCTION",
"TO",
"MATCH"
] |
python
|
train
|
bcbio/bcbio-nextgen
|
bcbio/pipeline/config_utils.py
|
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/config_utils.py#L221-L233
|
def _get_program_cmd(name, pconfig, config, default):
"""Retrieve commandline of a program.
"""
if pconfig is None:
return name
elif isinstance(pconfig, six.string_types):
return pconfig
elif "cmd" in pconfig:
return pconfig["cmd"]
elif default is not None:
return default
else:
return name
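# Worked examples of the fallback chain above (hypothetical configs):
# _get_program_cmd("bwa", None, {}, None)                -> "bwa"
# _get_program_cmd("bwa", "/opt/bin/bwa", {}, None)      -> "/opt/bin/bwa"
# _get_program_cmd("bwa", {"cmd": "bwa-mem2"}, {}, None) -> "bwa-mem2"
# _get_program_cmd("bwa", {}, {}, "bwa2")                -> "bwa2"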
|
[
"def",
"_get_program_cmd",
"(",
"name",
",",
"pconfig",
",",
"config",
",",
"default",
")",
":",
"if",
"pconfig",
"is",
"None",
":",
"return",
"name",
"elif",
"isinstance",
"(",
"pconfig",
",",
"six",
".",
"string_types",
")",
":",
"return",
"pconfig",
"elif",
"\"cmd\"",
"in",
"pconfig",
":",
"return",
"pconfig",
"[",
"\"cmd\"",
"]",
"elif",
"default",
"is",
"not",
"None",
":",
"return",
"default",
"else",
":",
"return",
"name"
] |
Retrieve commandline of a program.
|
[
"Retrieve",
"commandline",
"of",
"a",
"program",
"."
] |
python
|
train
|
EasyPost/pystalk
|
pystalk/client.py
|
https://github.com/EasyPost/pystalk/blob/96759ad1fda264b9897ee5346eef7926892a3a4c/pystalk/client.py#L572-L587
|
def pause_tube(self, tube, delay=3600):
"""Pause a tube for some number of seconds, preventing it from issuing jobs.
:param delay: Time to pause for, in seconds
:type delay: int
There is no way to permanently pause a tube; passing 0 for delay actually un-pauses the tube.
.. seealso::
:func:`unpause_tube()`
"""
with self._sock_ctx() as socket:
delay = int(delay)
self._send_message('pause-tube {0} {1}'.format(tube, delay), socket)
return self._receive_word(socket, b'PAUSED')
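# Hedged usage sketch ('client' assumed to be a connected BeanstalkClient):
# client.pause_tube("emails", delay=60)   # hold job dispatch for a minute
# client.pause_tube("emails", delay=0)    # delay 0 un-pauses the tube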
|
[
"def",
"pause_tube",
"(",
"self",
",",
"tube",
",",
"delay",
"=",
"3600",
")",
":",
"with",
"self",
".",
"_sock_ctx",
"(",
")",
"as",
"socket",
":",
"delay",
"=",
"int",
"(",
"delay",
")",
"self",
".",
"_send_message",
"(",
"'pause-tube {0} {1}'",
".",
"format",
"(",
"tube",
",",
"delay",
")",
",",
"socket",
")",
"return",
"self",
".",
"_receive_word",
"(",
"socket",
",",
"b'PAUSED'",
")"
] |
Pause a tube for some number of seconds, preventing it from issuing jobs.
:param delay: Time to pause for, in seconds
:type delay: int
There is no way to permanently pause a tube; passing 0 for delay actually un-pauses the tube.
.. seealso::
:func:`unpause_tube()`
|
[
"Pause",
"a",
"tube",
"for",
"some",
"number",
"of",
"seconds",
"preventing",
"it",
"from",
"issuing",
"jobs",
"."
] |
python
|
train
|
ktbyers/netmiko
|
netmiko/base_connection.py
|
https://github.com/ktbyers/netmiko/blob/54e6116c0b4664de2123081937e0a9a27bdfdfea/netmiko/base_connection.py#L1147-L1158
|
def strip_prompt(self, a_string):
"""Strip the trailing router prompt from the output.
:param a_string: Returned string from device
:type a_string: str
"""
response_list = a_string.split(self.RESPONSE_RETURN)
last_line = response_list[-1]
if self.base_prompt in last_line:
return self.RESPONSE_RETURN.join(response_list[:-1])
else:
return a_string
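# Worked sketch for strip_prompt(), assuming RESPONSE_RETURN is "\n" and
# base_prompt is "router1#": the final prompt-bearing line is dropped,
# while output without a trailing prompt is returned unchanged.
# "show run\nhostname r1\nrouter1#"  ->  "show run\nhostname r1"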
|
[
"def",
"strip_prompt",
"(",
"self",
",",
"a_string",
")",
":",
"response_list",
"=",
"a_string",
".",
"split",
"(",
"self",
".",
"RESPONSE_RETURN",
")",
"last_line",
"=",
"response_list",
"[",
"-",
"1",
"]",
"if",
"self",
".",
"base_prompt",
"in",
"last_line",
":",
"return",
"self",
".",
"RESPONSE_RETURN",
".",
"join",
"(",
"response_list",
"[",
":",
"-",
"1",
"]",
")",
"else",
":",
"return",
"a_string"
] |
Strip the trailing router prompt from the output.
:param a_string: Returned string from device
:type a_string: str
|
[
"Strip",
"the",
"trailing",
"router",
"prompt",
"from",
"the",
"output",
"."
] |
python
|
train
|
SiLab-Bonn/basil
|
basil/HL/FEI4AdapterCard.py
|
https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/FEI4AdapterCard.py#L336-L366
|
def read_eeprom_calibration(self, temperature=False): # use default values for temperature, EEPROM values are usually not calibrated and random
'''Reading EEPROM calibration for power regulators and temperature
'''
header = self.get_format()
if header == self.HEADER_V1:
data = self._read_eeprom(self.CAL_DATA_ADDR, size=calcsize(self.CAL_DATA_V1_FORMAT))
for idx, channel in enumerate(self._ch_cal.iterkeys()):
ch_data = data[idx * calcsize(self.CAL_DATA_CH_V1_FORMAT):(idx + 1) * calcsize(self.CAL_DATA_CH_V1_FORMAT)]
values = unpack_from(self.CAL_DATA_CH_V1_FORMAT, ch_data)
self._ch_cal[channel]['name'] = "".join([c for c in values[0] if (c in string.printable)]) # values[0].strip()
self._ch_cal[channel]['default'] = values[1]
self._ch_cal[channel]['ADCI']['gain'] = values[2]
self._ch_cal[channel]['ADCI']['offset'] = values[3]
self._ch_cal[channel]['ADCI']['iq_gain'] = values[4]
self._ch_cal[channel]['ADCI']['iq_offset'] = values[5]
self._ch_cal[channel]['ADCV']['gain'] = values[6]
self._ch_cal[channel]['ADCV']['offset'] = values[7]
self._ch_cal[channel]['DACV']['gain'] = values[8]
self._ch_cal[channel]['DACV']['offset'] = values[9]
const_data = data[-calcsize(self.CAL_DATA_CONST_V1_FORMAT):]
values = unpack_from(self.CAL_DATA_CONST_V1_FORMAT, const_data)
if temperature:
for channel in self._ch_cal.keys():
self._ch_cal[channel]['VNTC']['B_NTC'] = values[0]
self._ch_cal[channel]['VNTC']['R1'] = values[1]
self._ch_cal[channel]['VNTC']['R2'] = values[2]
self._ch_cal[channel]['VNTC']['R4'] = values[3]
self._ch_cal[channel]['VNTC']['R_NTC_25'] = values[4]
self._ch_cal[channel]['VNTC']['VREF'] = values[5]
else:
raise ValueError('EEPROM data format not supported (header: %s)' % header)
|
[
"def",
"read_eeprom_calibration",
"(",
"self",
",",
"temperature",
"=",
"False",
")",
":",
"# use default values for temperature, EEPROM values are usually not calibrated and random",
"header",
"=",
"self",
".",
"get_format",
"(",
")",
"if",
"header",
"==",
"self",
".",
"HEADER_V1",
":",
"data",
"=",
"self",
".",
"_read_eeprom",
"(",
"self",
".",
"CAL_DATA_ADDR",
",",
"size",
"=",
"calcsize",
"(",
"self",
".",
"CAL_DATA_V1_FORMAT",
")",
")",
"for",
"idx",
",",
"channel",
"in",
"enumerate",
"(",
"self",
".",
"_ch_cal",
".",
"iterkeys",
"(",
")",
")",
":",
"ch_data",
"=",
"data",
"[",
"idx",
"*",
"calcsize",
"(",
"self",
".",
"CAL_DATA_CH_V1_FORMAT",
")",
":",
"(",
"idx",
"+",
"1",
")",
"*",
"calcsize",
"(",
"self",
".",
"CAL_DATA_CH_V1_FORMAT",
")",
"]",
"values",
"=",
"unpack_from",
"(",
"self",
".",
"CAL_DATA_CH_V1_FORMAT",
",",
"ch_data",
")",
"self",
".",
"_ch_cal",
"[",
"channel",
"]",
"[",
"'name'",
"]",
"=",
"\"\"",
".",
"join",
"(",
"[",
"c",
"for",
"c",
"in",
"values",
"[",
"0",
"]",
"if",
"(",
"c",
"in",
"string",
".",
"printable",
")",
"]",
")",
"# values[0].strip()",
"self",
".",
"_ch_cal",
"[",
"channel",
"]",
"[",
"'default'",
"]",
"=",
"values",
"[",
"1",
"]",
"self",
".",
"_ch_cal",
"[",
"channel",
"]",
"[",
"'ADCI'",
"]",
"[",
"'gain'",
"]",
"=",
"values",
"[",
"2",
"]",
"self",
".",
"_ch_cal",
"[",
"channel",
"]",
"[",
"'ADCI'",
"]",
"[",
"'offset'",
"]",
"=",
"values",
"[",
"3",
"]",
"self",
".",
"_ch_cal",
"[",
"channel",
"]",
"[",
"'ADCI'",
"]",
"[",
"'iq_gain'",
"]",
"=",
"values",
"[",
"4",
"]",
"self",
".",
"_ch_cal",
"[",
"channel",
"]",
"[",
"'ADCI'",
"]",
"[",
"'iq_offset'",
"]",
"=",
"values",
"[",
"5",
"]",
"self",
".",
"_ch_cal",
"[",
"channel",
"]",
"[",
"'ADCV'",
"]",
"[",
"'gain'",
"]",
"=",
"values",
"[",
"6",
"]",
"self",
".",
"_ch_cal",
"[",
"channel",
"]",
"[",
"'ADCV'",
"]",
"[",
"'offset'",
"]",
"=",
"values",
"[",
"7",
"]",
"self",
".",
"_ch_cal",
"[",
"channel",
"]",
"[",
"'DACV'",
"]",
"[",
"'gain'",
"]",
"=",
"values",
"[",
"8",
"]",
"self",
".",
"_ch_cal",
"[",
"channel",
"]",
"[",
"'DACV'",
"]",
"[",
"'offset'",
"]",
"=",
"values",
"[",
"9",
"]",
"const_data",
"=",
"data",
"[",
"-",
"calcsize",
"(",
"self",
".",
"CAL_DATA_CONST_V1_FORMAT",
")",
":",
"]",
"values",
"=",
"unpack_from",
"(",
"self",
".",
"CAL_DATA_CONST_V1_FORMAT",
",",
"const_data",
")",
"if",
"temperature",
":",
"for",
"channel",
"in",
"self",
".",
"_ch_cal",
".",
"keys",
"(",
")",
":",
"self",
".",
"_ch_cal",
"[",
"channel",
"]",
"[",
"'VNTC'",
"]",
"[",
"'B_NTC'",
"]",
"=",
"values",
"[",
"0",
"]",
"self",
".",
"_ch_cal",
"[",
"channel",
"]",
"[",
"'VNTC'",
"]",
"[",
"'R1'",
"]",
"=",
"values",
"[",
"1",
"]",
"self",
".",
"_ch_cal",
"[",
"channel",
"]",
"[",
"'VNTC'",
"]",
"[",
"'R2'",
"]",
"=",
"values",
"[",
"2",
"]",
"self",
".",
"_ch_cal",
"[",
"channel",
"]",
"[",
"'VNTC'",
"]",
"[",
"'R4'",
"]",
"=",
"values",
"[",
"3",
"]",
"self",
".",
"_ch_cal",
"[",
"channel",
"]",
"[",
"'VNTC'",
"]",
"[",
"'R_NTC_25'",
"]",
"=",
"values",
"[",
"4",
"]",
"self",
".",
"_ch_cal",
"[",
"channel",
"]",
"[",
"'VNTC'",
"]",
"[",
"'VREF'",
"]",
"=",
"values",
"[",
"5",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"'EEPROM data format not supported (header: %s)'",
"%",
"header",
")"
] |
Reading EEPROM calibration for power regulators and temperature
|
[
"Reading",
"EEPROM",
"calibration",
"for",
"power",
"regulators",
"and",
"temperature"
] |
python
|
train
|
danpaquin/coinbasepro-python
|
cbpro/authenticated_client.py
|
https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/authenticated_client.py#L91-L129
|
def get_account_history(self, account_id, **kwargs):
""" List account activity. Account activity either increases or
decreases your account balance.
Entry type indicates the reason for the account change.
        * transfer: Funds moved between Coinbase and cbpro
* match: Funds moved as a result of a trade
* fee: Fee as a result of a trade
* rebate: Fee rebate as per our fee schedule
If an entry is the result of a trade (match, fee), the details
field will contain additional information about the trade.
Args:
account_id (str): Account id to get history of.
kwargs (dict): Additional HTTP request parameters.
Returns:
list: History information for the account. Example::
[
{
"id": "100",
"created_at": "2014-11-07T08:19:27.028459Z",
"amount": "0.001",
"balance": "239.669",
"type": "fee",
"details": {
"order_id": "d50ec984-77a8-460a-b958-66f114b0de9b",
"trade_id": "74",
"product_id": "BTC-USD"
}
},
{
...
}
]
"""
endpoint = '/accounts/{}/ledger'.format(account_id)
return self._send_paginated_message(endpoint, params=kwargs)
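# Hedged usage sketch ('auth_client' assumed to be a configured
# AuthenticatedClient; the account id is a placeholder):
# for entry in auth_client.get_account_history("<account-id>"):
#     print(entry["type"], entry["amount"], entry["balance"])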
|
[
"def",
"get_account_history",
"(",
"self",
",",
"account_id",
",",
"*",
"*",
"kwargs",
")",
":",
"endpoint",
"=",
"'/accounts/{}/ledger'",
".",
"format",
"(",
"account_id",
")",
"return",
"self",
".",
"_send_paginated_message",
"(",
"endpoint",
",",
"params",
"=",
"kwargs",
")"
] |
List account activity. Account activity either increases or
decreases your account balance.
Entry type indicates the reason for the account change.
* transfer: Funds moved between Coinbase and cbpro
* match: Funds moved as a result of a trade
* fee: Fee as a result of a trade
* rebate: Fee rebate as per our fee schedule
If an entry is the result of a trade (match, fee), the details
field will contain additional information about the trade.
Args:
account_id (str): Account id to get history of.
kwargs (dict): Additional HTTP request parameters.
Returns:
list: History information for the account. Example::
[
{
"id": "100",
"created_at": "2014-11-07T08:19:27.028459Z",
"amount": "0.001",
"balance": "239.669",
"type": "fee",
"details": {
"order_id": "d50ec984-77a8-460a-b958-66f114b0de9b",
"trade_id": "74",
"product_id": "BTC-USD"
}
},
{
...
}
]
|
[
"List",
"account",
"activity",
".",
"Account",
"activity",
"either",
"increases",
"or",
"decreases",
"your",
"account",
"balance",
"."
] |
python
|
train
|
AndrewAnnex/SpiceyPy
|
spiceypy/spiceypy.py
|
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L5089-L5118
|
def eul2m(angle3, angle2, angle1, axis3, axis2, axis1):
"""
Construct a rotation matrix from a set of Euler angles.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/eul2m_c.html
:param angle3: Rotation angle about third rotation axis (radians).
:type angle3: float
:param angle2: Rotation angle about second rotation axis (radians).
:type angle2: float
:param angle1: Rotation angle about first rotation axis (radians).
:type angle1: float
:param axis3: Axis number of third rotation axis.
:type axis3: int
:param axis2: Axis number of second rotation axis.
:type axis2: int
    :param axis1: Axis number of first rotation axis.
:type axis1: int
:return: Product of the 3 rotations.
:rtype: 3x3-Element Array of floats
"""
angle3 = ctypes.c_double(angle3)
angle2 = ctypes.c_double(angle2)
angle1 = ctypes.c_double(angle1)
axis3 = ctypes.c_int(axis3)
axis2 = ctypes.c_int(axis2)
axis1 = ctypes.c_int(axis1)
r = stypes.emptyDoubleMatrix()
libspice.eul2m_c(angle3, angle2, angle1, axis3, axis2, axis1, r)
return stypes.cMatrixToNumpy(r)
|
[
"def",
"eul2m",
"(",
"angle3",
",",
"angle2",
",",
"angle1",
",",
"axis3",
",",
"axis2",
",",
"axis1",
")",
":",
"angle3",
"=",
"ctypes",
".",
"c_double",
"(",
"angle3",
")",
"angle2",
"=",
"ctypes",
".",
"c_double",
"(",
"angle2",
")",
"angle1",
"=",
"ctypes",
".",
"c_double",
"(",
"angle1",
")",
"axis3",
"=",
"ctypes",
".",
"c_int",
"(",
"axis3",
")",
"axis2",
"=",
"ctypes",
".",
"c_int",
"(",
"axis2",
")",
"axis1",
"=",
"ctypes",
".",
"c_int",
"(",
"axis1",
")",
"r",
"=",
"stypes",
".",
"emptyDoubleMatrix",
"(",
")",
"libspice",
".",
"eul2m_c",
"(",
"angle3",
",",
"angle2",
",",
"angle1",
",",
"axis3",
",",
"axis2",
",",
"axis1",
",",
"r",
")",
"return",
"stypes",
".",
"cMatrixToNumpy",
"(",
"r",
")"
] |
Construct a rotation matrix from a set of Euler angles.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/eul2m_c.html
:param angle3: Rotation angle about third rotation axis (radians).
:type angle3: float
:param angle2: Rotation angle about second rotation axis (radians).
:type angle2: float
:param angle1: Rotation angle about first rotation axis (radians).
:type angle1: float
:param axis3: Axis number of third rotation axis.
:type axis3: int
:param axis2: Axis number of second rotation axis.
:type axis2: int
:param axis1: Axis number of first rotation axis.
:type axis1: int
:return: Product of the 3 rotations.
:rtype: 3x3-Element Array of floats
|
[
"Construct",
"a",
"rotation",
"matrix",
"from",
"a",
"set",
"of",
"Euler",
"angles",
"."
] |
python
|
train
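A minimal check of eul2m from the record above, assuming spiceypy and numpy are installed. With angle3 = angle2 = 0 the product reduces to a single rotation, and SPICE's [angle]_axis convention is a frame rotation, so the expected matrix is the transpose of the usual vector-rotation matrix.

import math

import numpy as np
import spiceypy

# Only angle1 is non-zero: a pi/2 frame rotation about axis 1 (X).
r = spiceypy.eul2m(0.0, 0.0, math.pi / 2, 3, 2, 1)

expected = np.array([[1.0, 0.0, 0.0],
                     [0.0, 0.0, 1.0],
                     [0.0, -1.0, 0.0]])
assert np.allclose(r, expected)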
|
LPgenerator/django-mmc
|
mmc/mixins.py
|
https://github.com/LPgenerator/django-mmc/blob/3b6d3c9d1721c4428177277c1bb1ca24512127de/mmc/mixins.py#L111-L123
|
def run_at_subprocess(self, use_subprocess, foo, *args, **kwrags):
"""
This method runs some function in a subprocess.
Very useful when you have a problem with memory leaks.
"""
if use_subprocess is False:
return foo(*args, **kwrags)
child_pid = os.fork()
if child_pid == 0:
foo(*args, **kwrags)
sys.exit(0)
return os.waitpid(child_pid, 0)[1] == 0
|
[
"def",
"run_at_subprocess",
"(",
"self",
",",
"use_subprocess",
",",
"foo",
",",
"*",
"args",
",",
"*",
"*",
"kwrags",
")",
":",
"if",
"use_subprocess",
"is",
"False",
":",
"return",
"foo",
"(",
"*",
"args",
",",
"*",
"*",
"kwrags",
")",
"child_pid",
"=",
"os",
".",
"fork",
"(",
")",
"if",
"child_pid",
"==",
"0",
":",
"foo",
"(",
"*",
"args",
",",
"*",
"*",
"kwrags",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"return",
"os",
".",
"waitpid",
"(",
"child_pid",
",",
"0",
")",
"[",
"1",
"]",
"==",
"0"
] |
This method runs some function in a subprocess.
Very useful when you have a problem with memory leaks.
|
[
"This",
"method",
"for",
"run",
"some",
"function",
"at",
"subprocess",
".",
"Very",
"useful",
"when",
"you",
"have",
"a",
"problem",
"with",
"memory",
"leaks",
"."
] |
python
|
train
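The fork/waitpid pattern in the record above works standalone; a minimal sketch (POSIX-only, since os.fork does not exist on Windows) with the same exit-status test:

import os
import sys

def run_isolated(func, *args, **kwargs):
    # Fork a child so any memory func leaks is reclaimed when the child exits.
    pid = os.fork()
    if pid == 0:
        func(*args, **kwargs)
        sys.exit(0)  # never return to the caller from the child
    # waitpid returns (pid, status); a zero status means a clean exit.
    return os.waitpid(pid, 0)[1] == 0

if __name__ == "__main__":
    print("clean exit:", run_isolated(print, "hello from the child"))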
|
Unity-Technologies/ml-agents
|
ml-agents-envs/mlagents/envs/rpc_communicator.py
|
https://github.com/Unity-Technologies/ml-agents/blob/37d139af636e4a2351751fbf0f2fca5a9ed7457f/ml-agents-envs/mlagents/envs/rpc_communicator.py#L65-L75
|
def check_port(self, port):
"""
Attempts to bind to the requested communicator port, checking if it is already in use.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(("localhost", port))
except socket.error:
raise UnityWorkerInUseException(self.worker_id)
finally:
s.close()
|
[
"def",
"check_port",
"(",
"self",
",",
"port",
")",
":",
"s",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_STREAM",
")",
"try",
":",
"s",
".",
"bind",
"(",
"(",
"\"localhost\"",
",",
"port",
")",
")",
"except",
"socket",
".",
"error",
":",
"raise",
"UnityWorkerInUseException",
"(",
"self",
".",
"worker_id",
")",
"finally",
":",
"s",
".",
"close",
"(",
")"
] |
Attempts to bind to the requested communicator port, checking if it is already in use.
|
[
"Attempts",
"to",
"bind",
"to",
"the",
"requested",
"communicator",
"port",
"checking",
"if",
"it",
"is",
"already",
"in",
"use",
"."
] |
python
|
train
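The bind-probe in check_port can be shown without the Unity wrapper; this sketch reports availability as a boolean instead of raising the library's UnityWorkerInUseException:

import socket

def port_is_free(port, host="localhost"):
    # bind() succeeds only when no other socket currently owns the port.
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.bind((host, port))
        return True
    except socket.error:
        return False
    finally:
        s.close()

print(port_is_free(5005))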
|
pycontribs/pyrax
|
pyrax/queueing.py
|
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/queueing.py#L528-L534
|
def _configure_manager(self):
"""
Create the manager to handle queues.
"""
self._manager = QueueManager(self,
resource_class=Queue, response_key="queue",
uri_base="queues")
|
[
"def",
"_configure_manager",
"(",
"self",
")",
":",
"self",
".",
"_manager",
"=",
"QueueManager",
"(",
"self",
",",
"resource_class",
"=",
"Queue",
",",
"response_key",
"=",
"\"queue\"",
",",
"uri_base",
"=",
"\"queues\"",
")"
] |
Create the manager to handle queues.
|
[
"Create",
"the",
"manager",
"to",
"handle",
"queues",
"."
] |
python
|
train
|
go-macaroon-bakery/py-macaroon-bakery
|
macaroonbakery/bakery/_macaroon.py
|
https://github.com/go-macaroon-bakery/py-macaroon-bakery/blob/63ce1ef1dabe816eb8aaec48fbb46761c34ddf77/macaroonbakery/bakery/_macaroon.py#L250-L293
|
def _new_caveat_id(self, base):
'''Return a third party caveat id
This does not duplicate any third party caveat ids already inside
the macaroon. If base is non-empty, it is used as the id prefix.
@param base bytes
@return bytes
'''
id = bytearray()
if len(base) > 0:
id.extend(base)
else:
# Add a version byte to the caveat id. Technically
# this is unnecessary as the caveat-decoding logic
# that looks at versions should never see this id,
# but if the caveat payload isn't provided with the
# payload, having this version gives a strong indication
# that the payload has been omitted so we can produce
# a better error for the user.
id.append(VERSION_3)
# Iterate through integers looking for one that isn't already used,
# starting from n so that if everyone is using this same algorithm,
# we'll only perform one iteration.
i = len(self._caveat_data)
caveats = self._macaroon.caveats
while True:
# We append a varint to the end of the id and assume that
# any client that's created the id that we're using as a base
# is using similar conventions - in the worst case they might
# end up with a duplicate third party caveat id and thus create
# a macaroon that cannot be discharged.
temp = id[:]
encode_uvarint(i, temp)
found = False
for cav in caveats:
if (cav.verification_key_id is not None
and cav.caveat_id == temp):
found = True
break
if not found:
return bytes(temp)
i += 1
|
[
"def",
"_new_caveat_id",
"(",
"self",
",",
"base",
")",
":",
"id",
"=",
"bytearray",
"(",
")",
"if",
"len",
"(",
"base",
")",
">",
"0",
":",
"id",
".",
"extend",
"(",
"base",
")",
"else",
":",
"# Add a version byte to the caveat id. Technically",
"# this is unnecessary as the caveat-decoding logic",
"# that looks at versions should never see this id,",
"# but if the caveat payload isn't provided with the",
"# payload, having this version gives a strong indication",
"# that the payload has been omitted so we can produce",
"# a better error for the user.",
"id",
".",
"append",
"(",
"VERSION_3",
")",
"# Iterate through integers looking for one that isn't already used,",
"# starting from n so that if everyone is using this same algorithm,",
"# we'll only perform one iteration.",
"i",
"=",
"len",
"(",
"self",
".",
"_caveat_data",
")",
"caveats",
"=",
"self",
".",
"_macaroon",
".",
"caveats",
"while",
"True",
":",
"# We append a varint to the end of the id and assume that",
"# any client that's created the id that we're using as a base",
"# is using similar conventions - in the worst case they might",
"# end up with a duplicate third party caveat id and thus create",
"# a macaroon that cannot be discharged.",
"temp",
"=",
"id",
"[",
":",
"]",
"encode_uvarint",
"(",
"i",
",",
"temp",
")",
"found",
"=",
"False",
"for",
"cav",
"in",
"caveats",
":",
"if",
"(",
"cav",
".",
"verification_key_id",
"is",
"not",
"None",
"and",
"cav",
".",
"caveat_id",
"==",
"temp",
")",
":",
"found",
"=",
"True",
"break",
"if",
"not",
"found",
":",
"return",
"bytes",
"(",
"temp",
")",
"i",
"+=",
"1"
] |
Return a third party caveat id
This does not duplicate any third party caveat ids already inside
the macaroon. If base is non-empty, it is used as the id prefix.
@param base bytes
@return bytes
|
[
"Return",
"a",
"third",
"party",
"caveat",
"id"
] |
python
|
train
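encode_uvarint is imported from elsewhere in macaroonbakery and not shown in this record; the sketch below assumes it is the standard base-128 unsigned varint (7 payload bits per byte, continuation bit set on all but the last), which is what lets distinct counters i always produce distinct caveat-id suffixes.

def encode_uvarint_sketch(n, buf):
    # Append n to buf, 7 bits at a time, least-significant group first.
    while True:
        byte = n & 0x7F
        n >>= 7
        if n:
            buf.append(byte | 0x80)  # more bytes follow
        else:
            buf.append(byte)         # final byte: high bit clear
            return

buf = bytearray(b"base-id")
encode_uvarint_sketch(300, buf)
assert bytes(buf) == b"base-id\xac\x02"  # 300 -> 0xAC 0x02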
|
ipfs/py-ipfs-api
|
ipfsapi/client.py
|
https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/client.py#L2282-L2330
|
def pubsub_peers(self, topic=None, **kwargs):
"""List the peers we are pubsubbing with.
Lists the ids of other IPFS users we are
connected to via some topic. Without specifying
a topic, IPFS peers from all subscribed topics
will be returned in the data. If a topic is specified,
only the IPFS ids of the peers from the specified
topic will be returned in the data.
.. code-block:: python
>>> c.pubsub_peers()
{'Strings':
[
'QmPbZ3SDgmTNEB1gNSE9DEf4xT8eag3AFn5uo7X39TbZM8',
'QmQKiXYzoFpiGZ93DaFBFDMDWDJCRjXDARu4wne2PRtSgA',
...
'QmepgFW7BHEtU4pZJdxaNiv75mKLLRQnPi1KaaXmQN4V1a'
]
}
## with a topic
# subscribe to a channel
>>> with c.pubsub_sub('hello') as sub:
... c.pubsub_peers(topic='hello')
{'String':
[
'QmPbZ3SDgmTNEB1gNSE9DEf4xT8eag3AFn5uo7X39TbZM8',
...
# other peers connected to the same channel
]
}
Parameters
----------
topic : str
The topic to list connected peers of
(defaults to None which lists peers for all topics)
Returns
-------
dict : Dictionary with the key "Strings" whose value is a list of ids
of IPFS peers we're pubsubbing with
"""
args = (topic,) if topic is not None else ()
return self._client.request('/pubsub/peers', args,
decoder='json', **kwargs)
|
[
"def",
"pubsub_peers",
"(",
"self",
",",
"topic",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"args",
"=",
"(",
"topic",
",",
")",
"if",
"topic",
"is",
"not",
"None",
"else",
"(",
")",
"return",
"self",
".",
"_client",
".",
"request",
"(",
"'/pubsub/peers'",
",",
"args",
",",
"decoder",
"=",
"'json'",
",",
"*",
"*",
"kwargs",
")"
] |
List the peers we are pubsubbing with.
Lists the ids of other IPFS users we are
connected to via some topic. Without specifying
a topic, IPFS peers from all subscribed topics
will be returned in the data. If a topic is specified,
only the IPFS ids of the peers from the specified
topic will be returned in the data.
.. code-block:: python
>>> c.pubsub_peers()
{'Strings':
[
'QmPbZ3SDgmTNEB1gNSE9DEf4xT8eag3AFn5uo7X39TbZM8',
'QmQKiXYzoFpiGZ93DaFBFDMDWDJCRjXDARu4wne2PRtSgA',
...
'QmepgFW7BHEtU4pZJdxaNiv75mKLLRQnPi1KaaXmQN4V1a'
]
}
## with a topic
# subscribe to a channel
>>> with c.pubsub_sub('hello') as sub:
... c.pubsub_peers(topic='hello')
{'String':
[
'QmPbZ3SDgmTNEB1gNSE9DEf4xT8eag3AFn5uo7X39TbZM8',
...
# other peers connected to the same channel
]
}
Parameters
----------
topic : str
The topic to list connected peers of
(defaults to None which lists peers for all topics)
Returns
-------
dict : Dictionary with the key "Strings" whose value is a list of ids
of IPFS peers we're pubsubbing with
|
[
"List",
"the",
"peers",
"we",
"are",
"pubsubbing",
"with",
"."
] |
python
|
train
|
Diviyan-Kalainathan/CausalDiscoveryToolbox
|
cdt/causality/graph/CGNN.py
|
https://github.com/Diviyan-Kalainathan/CausalDiscoveryToolbox/blob/be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1/cdt/causality/graph/CGNN.py#L315-L344
|
def orient_undirected_graph(self, data, umg, alg='HC'):
"""Orient the undirected graph using GNN and apply CGNN to improve the graph.
Args:
data (pandas.DataFrame): Observational data on which causal
discovery has to be performed.
umg (nx.Graph): Graph that provides the skeleton, on which the GNN
then the CGNN algorithm will be applied.
alg (str): Exploration heuristic to use, among ["HC", "HCr",
"tabu", "EHC"]
Returns:
networkx.DiGraph: Solution given by CGNN.
.. note::
GNN (``cdt.causality.pairwise.GNN``) is first used to orient the
undirected graph and output a DAG before applying CGNN.
"""
warnings.warn("The pairwise GNN model is computed on each edge of the UMG "
"to initialize the model and start CGNN with a DAG")
gnn = GNN(nh=self.nh, lr=self.lr)
og = gnn.orient_graph(data, umg, nb_runs=self.nb_runs, nb_max_runs=self.nb_runs,
nb_jobs=self.nb_jobs, train_epochs=self.train_epochs,
test_epochs=self.test_epochs, verbose=self.verbose, gpu=self.gpu) # Pairwise method
# print(nx.adj_matrix(og).todense().shape)
# print(list(og.edges()))
dag = dagify_min_edge(og)
# print(nx.adj_matrix(dag).todense().shape)
return self.orient_directed_graph(data, dag, alg=alg)
|
[
"def",
"orient_undirected_graph",
"(",
"self",
",",
"data",
",",
"umg",
",",
"alg",
"=",
"'HC'",
")",
":",
"warnings",
".",
"warn",
"(",
"\"The pairwise GNN model is computed on each edge of the UMG \"",
"\"to initialize the model and start CGNN with a DAG\"",
")",
"gnn",
"=",
"GNN",
"(",
"nh",
"=",
"self",
".",
"nh",
",",
"lr",
"=",
"self",
".",
"lr",
")",
"og",
"=",
"gnn",
".",
"orient_graph",
"(",
"data",
",",
"umg",
",",
"nb_runs",
"=",
"self",
".",
"nb_runs",
",",
"nb_max_runs",
"=",
"self",
".",
"nb_runs",
",",
"nb_jobs",
"=",
"self",
".",
"nb_jobs",
",",
"train_epochs",
"=",
"self",
".",
"train_epochs",
",",
"test_epochs",
"=",
"self",
".",
"test_epochs",
",",
"verbose",
"=",
"self",
".",
"verbose",
",",
"gpu",
"=",
"self",
".",
"gpu",
")",
"# Pairwise method",
"# print(nx.adj_matrix(og).todense().shape)",
"# print(list(og.edges()))",
"dag",
"=",
"dagify_min_edge",
"(",
"og",
")",
"# print(nx.adj_matrix(dag).todense().shape)",
"return",
"self",
".",
"orient_directed_graph",
"(",
"data",
",",
"dag",
",",
"alg",
"=",
"alg",
")"
] |
Orient the undirected graph using GNN and apply CGNN to improve the graph.
Args:
data (pandas.DataFrame): Observational data on which causal
discovery has to be performed.
umg (nx.Graph): Graph that provides the skeleton, on which the GNN
then the CGNN algorithm will be applied.
alg (str): Exploration heuristic to use, among ["HC", "HCr",
"tabu", "EHC"]
Returns:
networkx.DiGraph: Solution given by CGNN.
.. note::
GNN (``cdt.causality.pairwise.GNN``) is first used to orient the
undirected graph and output a DAG before applying CGNN.
|
[
"Orient",
"the",
"undirected",
"graph",
"using",
"GNN",
"and",
"apply",
"CGNN",
"to",
"improve",
"the",
"graph",
"."
] |
python
|
valid
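A hedged usage sketch for the record above; the CSV path, column names, and skeleton edges are placeholders, and CGNN's constructor defaults are assumed from the cdt package.

import networkx as nx
import pandas as pd
from cdt.causality.graph import CGNN

# Placeholder observational data whose columns match the skeleton's nodes.
data = pd.read_csv("observational_data.csv")
umg = nx.Graph([("A", "B"), ("B", "C")])

model = CGNN()
dag = model.orient_undirected_graph(data, umg)  # networkx.DiGraph
print(list(dag.edges()))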
|
Vital-Fernandez/dazer
|
bin/lib/Astro_Libraries/f2n.py
|
https://github.com/Vital-Fernandez/dazer/blob/3c9ae8ae6d40ea33f22cc20dc11365d6d6e65244/bin/lib/Astro_Libraries/f2n.py#L954-L972
|
def fromfits(infile, hdu = 0, verbose = True):
"""
Factory function that reads a FITS file and returns a f2nimage object.
Use hdu to specify which HDU you want (primary = 0)
"""
pixelarray, hdr = ft.getdata(infile, hdu, header=True)
pixelarray = np.asarray(pixelarray).transpose()
#print pixelarray
pixelarrayshape = pixelarray.shape
if verbose :
print "Input shape : (%i, %i)" % (pixelarrayshape[0], pixelarrayshape[1])
print "Input file BITPIX : %s" % (hdr["BITPIX"])
pixelarrayshape = np.asarray(pixelarrayshape)
if verbose :
print "Internal array type :", pixelarray.dtype.name
return f2nimage(pixelarray, verbose = verbose)
|
[
"def",
"fromfits",
"(",
"infile",
",",
"hdu",
"=",
"0",
",",
"verbose",
"=",
"True",
")",
":",
"pixelarray",
",",
"hdr",
"=",
"ft",
".",
"getdata",
"(",
"infile",
",",
"hdu",
",",
"header",
"=",
"True",
")",
"pixelarray",
"=",
"np",
".",
"asarray",
"(",
"pixelarray",
")",
".",
"transpose",
"(",
")",
"#print pixelarray",
"pixelarrayshape",
"=",
"pixelarray",
".",
"shape",
"if",
"verbose",
":",
"print",
"\"Input shape : (%i, %i)\"",
"%",
"(",
"pixelarrayshape",
"[",
"0",
"]",
",",
"pixelarrayshape",
"[",
"1",
"]",
")",
"print",
"\"Input file BITPIX : %s\"",
"%",
"(",
"hdr",
"[",
"\"BITPIX\"",
"]",
")",
"pixelarrayshape",
"=",
"np",
".",
"asarray",
"(",
"pixelarrayshape",
")",
"if",
"verbose",
":",
"print",
"\"Internal array type :\"",
",",
"pixelarray",
".",
"dtype",
".",
"name",
"return",
"f2nimage",
"(",
"pixelarray",
",",
"verbose",
"=",
"verbose",
")"
] |
Factory function that reads a FITS file and returns a f2nimage object.
Use hdu to specify which HDU you want (primary = 0)
|
[
"Factory",
"function",
"that",
"reads",
"a",
"FITS",
"file",
"and",
"returns",
"a",
"f2nimage",
"object",
".",
"Use",
"hdu",
"to",
"specify",
"which",
"HDU",
"you",
"want",
"(",
"primary",
"=",
"0",
")"
] |
python
|
train
|
pyqt/python-qt5
|
setup.py
|
https://github.com/pyqt/python-qt5/blob/c9ed180c56f6fd3521ffe5fb70904bc5d3f50e5f/setup.py#L35-L58
|
def get_package_data():
"""Include all files from all sub-directories"""
package_data = dict()
package_data['PyQt5'] = list()
for subdir in ("doc/", "examples/", "include/",
"mkspecs/", "plugins/", "qml/",
"qsci/", "sip/", "translations/", "uic/"):
abspath = os.path.abspath("PyQt5/" + subdir)
for root, dirs, files in os.walk(abspath):
for f in files:
fpath = os.path.join(root, f)
relpath = os.path.relpath(fpath, abspath)
relpath = relpath.replace("\\", "/")
package_data['PyQt5'].append(subdir + relpath)
package_data['PyQt5'].extend(["*.exe",
"*.dll",
"*.pyd",
"*.conf",
"*.api",
"*.qm",
"*.bat"])
return package_data
|
[
"def",
"get_package_data",
"(",
")",
":",
"package_data",
"=",
"dict",
"(",
")",
"package_data",
"[",
"'PyQt5'",
"]",
"=",
"list",
"(",
")",
"for",
"subdir",
"in",
"(",
"\"doc/\"",
",",
"\"examples/\"",
",",
"\"include/\"",
",",
"\"mkspecs/\"",
",",
"\"plugins/\"",
",",
"\"qml/\"",
",",
"\"qsci/\"",
",",
"\"sip/\"",
",",
"\"translations/\"",
",",
"\"uic/\"",
")",
":",
"abspath",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"\"PyQt5/\"",
"+",
"subdir",
")",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"abspath",
")",
":",
"for",
"f",
"in",
"files",
":",
"fpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"f",
")",
"relpath",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"fpath",
",",
"abspath",
")",
"relpath",
"=",
"relpath",
".",
"replace",
"(",
"\"\\\\\"",
",",
"\"/\"",
")",
"package_data",
"[",
"'PyQt5'",
"]",
".",
"append",
"(",
"subdir",
"+",
"relpath",
")",
"package_data",
"[",
"'PyQt5'",
"]",
".",
"extend",
"(",
"[",
"\"*.exe\"",
",",
"\"*.dll\"",
",",
"\"*.pyd\"",
",",
"\"*.conf\"",
",",
"\"*.api\"",
",",
"\"*.qm\"",
",",
"\"*.bat\"",
"]",
")",
"return",
"package_data"
] |
Include all files from all sub-directories
|
[
"Include",
"all",
"files",
"from",
"all",
"sub",
"-",
"directories"
] |
python
|
train
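A sketch of how such a mapping is consumed, assuming the get_package_data above is in scope of the same setup.py; the name and version are illustrative.

from setuptools import setup

setup(
    name="python-qt5",   # illustrative metadata
    version="0.0.0",
    packages=["PyQt5"],
    # package name -> relative data-file paths (and globs) to ship with it
    package_data=get_package_data(),
)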
|
tonioo/sievelib
|
sievelib/managesieve.py
|
https://github.com/tonioo/sievelib/blob/88822d1f1daf30ef3dd9ac74911301b0773ef3c8/sievelib/managesieve.py#L485-L511
|
def connect(
self, login, password, authz_id=b"", starttls=False,
authmech=None):
"""Establish a connection with the server.
This function must be used. It reads the server capabilities
and wraps calls to STARTTLS and AUTHENTICATE commands.
:param login: username
:param password: clear password
:param starttls: use a TLS connection or not
:param authmech: preferred authentication mechanism
:rtype: boolean
"""
try:
self.sock = socket.create_connection((self.srvaddr, self.srvport))
self.sock.settimeout(Client.read_timeout)
except socket.error as msg:
raise Error("Connection to server failed: %s" % str(msg))
if not self.__get_capabilities():
raise Error("Failed to read capabilities from server")
if starttls and not self.__starttls():
return False
if self.__authenticate(login, password, authz_id, authmech):
return True
return False
|
[
"def",
"connect",
"(",
"self",
",",
"login",
",",
"password",
",",
"authz_id",
"=",
"b\"\"",
",",
"starttls",
"=",
"False",
",",
"authmech",
"=",
"None",
")",
":",
"try",
":",
"self",
".",
"sock",
"=",
"socket",
".",
"create_connection",
"(",
"(",
"self",
".",
"srvaddr",
",",
"self",
".",
"srvport",
")",
")",
"self",
".",
"sock",
".",
"settimeout",
"(",
"Client",
".",
"read_timeout",
")",
"except",
"socket",
".",
"error",
"as",
"msg",
":",
"raise",
"Error",
"(",
"\"Connection to server failed: %s\"",
"%",
"str",
"(",
"msg",
")",
")",
"if",
"not",
"self",
".",
"__get_capabilities",
"(",
")",
":",
"raise",
"Error",
"(",
"\"Failed to read capabilities from server\"",
")",
"if",
"starttls",
"and",
"not",
"self",
".",
"__starttls",
"(",
")",
":",
"return",
"False",
"if",
"self",
".",
"__authenticate",
"(",
"login",
",",
"password",
",",
"authz_id",
",",
"authmech",
")",
":",
"return",
"True",
"return",
"False"
] |
Establish a connection with the server.
This function must be used. It reads the server capabilities
and wraps calls to STARTTLS and AUTHENTICATE commands.
:param login: username
:param password: clear password
:param starttls: use a TLS connection or not
:param authmech: preferred authentication mechanism
:rtype: boolean
|
[
"Establish",
"a",
"connection",
"with",
"the",
"server",
"."
] |
python
|
train
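A usage sketch for the record above; the host and credentials are placeholders, and Client is the sievelib.managesieve.Client whose connect method is shown here.

from sievelib.managesieve import Client

c = Client("sieve.example.com")  # placeholder host, default port
if c.connect("user", "secret", starttls=True):
    print("authenticated; capabilities were read during connect()")
else:
    print("STARTTLS or authentication failed")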
|
google/grr
|
grr/server/grr_response_server/databases/mysql_flows.py
|
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mysql_flows.py#L867-L897
|
def _UpdateRequestsAndScheduleFPRs(self, responses, cursor=None):
"""Updates requests and writes FlowProcessingRequests if needed."""
request_keys = set(
(r.client_id, r.flow_id, r.request_id) for r in responses)
flow_keys = set((r.client_id, r.flow_id) for r in responses)
response_counts = self._ReadFlowResponseCounts(request_keys, cursor)
next_requests = self._ReadAndLockNextRequestsToProcess(flow_keys, cursor)
completed_requests = self._ReadLockAndUpdateCompletedRequests(
request_keys, response_counts, cursor)
if not completed_requests:
return completed_requests
fprs_to_write = []
for request_key, r in iteritems(completed_requests):
client_id, flow_id, request_id = request_key
if next_requests[(client_id, flow_id)] == request_id:
fprs_to_write.append(
rdf_flows.FlowProcessingRequest(
client_id=r.client_id,
flow_id=r.flow_id,
delivery_time=r.start_time))
if fprs_to_write:
self._WriteFlowProcessingRequests(fprs_to_write, cursor)
return completed_requests
|
[
"def",
"_UpdateRequestsAndScheduleFPRs",
"(",
"self",
",",
"responses",
",",
"cursor",
"=",
"None",
")",
":",
"request_keys",
"=",
"set",
"(",
"(",
"r",
".",
"client_id",
",",
"r",
".",
"flow_id",
",",
"r",
".",
"request_id",
")",
"for",
"r",
"in",
"responses",
")",
"flow_keys",
"=",
"set",
"(",
"(",
"r",
".",
"client_id",
",",
"r",
".",
"flow_id",
")",
"for",
"r",
"in",
"responses",
")",
"response_counts",
"=",
"self",
".",
"_ReadFlowResponseCounts",
"(",
"request_keys",
",",
"cursor",
")",
"next_requests",
"=",
"self",
".",
"_ReadAndLockNextRequestsToProcess",
"(",
"flow_keys",
",",
"cursor",
")",
"completed_requests",
"=",
"self",
".",
"_ReadLockAndUpdateCompletedRequests",
"(",
"request_keys",
",",
"response_counts",
",",
"cursor",
")",
"if",
"not",
"completed_requests",
":",
"return",
"completed_requests",
"fprs_to_write",
"=",
"[",
"]",
"for",
"request_key",
",",
"r",
"in",
"iteritems",
"(",
"completed_requests",
")",
":",
"client_id",
",",
"flow_id",
",",
"request_id",
"=",
"request_key",
"if",
"next_requests",
"[",
"(",
"client_id",
",",
"flow_id",
")",
"]",
"==",
"request_id",
":",
"fprs_to_write",
".",
"append",
"(",
"rdf_flows",
".",
"FlowProcessingRequest",
"(",
"client_id",
"=",
"r",
".",
"client_id",
",",
"flow_id",
"=",
"r",
".",
"flow_id",
",",
"delivery_time",
"=",
"r",
".",
"start_time",
")",
")",
"if",
"fprs_to_write",
":",
"self",
".",
"_WriteFlowProcessingRequests",
"(",
"fprs_to_write",
",",
"cursor",
")",
"return",
"completed_requests"
] |
Updates requests and writes FlowProcessingRequests if needed.
|
[
"Updates",
"requests",
"and",
"writes",
"FlowProcessingRequests",
"if",
"needed",
"."
] |
python
|
train
|
singnet/snet-cli
|
snet_cli/mpe_service_metadata.py
|
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/mpe_service_metadata.py#L103-L109
|
def is_group_name_exists(self, group_name):
""" check if group with given name is already exists """
groups = self.m["groups"]
for g in groups:
if (g["group_name"] == group_name):
return True
return False
|
[
"def",
"is_group_name_exists",
"(",
"self",
",",
"group_name",
")",
":",
"groups",
"=",
"self",
".",
"m",
"[",
"\"groups\"",
"]",
"for",
"g",
"in",
"groups",
":",
"if",
"(",
"g",
"[",
"\"group_name\"",
"]",
"==",
"group_name",
")",
":",
"return",
"True",
"return",
"False"
] |
check if a group with the given name already exists
|
[
"check",
"if",
"group",
"with",
"given",
"name",
"is",
"already",
"exists"
] |
python
|
train
|
kragniz/python-etcd3
|
etcd3/client.py
|
https://github.com/kragniz/python-etcd3/blob/0adb14840d4a6011a2023a13f07e247e4c336a80/etcd3/client.py#L453-L476
|
def replace(self, key, initial_value, new_value):
"""
Atomically replace the value of a key with a new value.
This compares the current value of a key, then replaces it with a new
value if it is equal to a specified value. This operation takes place
in a transaction.
:param key: key in etcd to replace
:param initial_value: old value to replace
:type initial_value: bytes
:param new_value: new value of the key
:type new_value: bytes
:returns: status of transaction, ``True`` if the replace was
successful, ``False`` otherwise
:rtype: bool
"""
status, _ = self.transaction(
compare=[self.transactions.value(key) == initial_value],
success=[self.transactions.put(key, new_value)],
failure=[],
)
return status
|
[
"def",
"replace",
"(",
"self",
",",
"key",
",",
"initial_value",
",",
"new_value",
")",
":",
"status",
",",
"_",
"=",
"self",
".",
"transaction",
"(",
"compare",
"=",
"[",
"self",
".",
"transactions",
".",
"value",
"(",
"key",
")",
"==",
"initial_value",
"]",
",",
"success",
"=",
"[",
"self",
".",
"transactions",
".",
"put",
"(",
"key",
",",
"new_value",
")",
"]",
",",
"failure",
"=",
"[",
"]",
",",
")",
"return",
"status"
] |
Atomically replace the value of a key with a new value.
This compares the current value of a key, then replaces it with a new
value if it is equal to a specified value. This operation takes place
in a transaction.
:param key: key in etcd to replace
:param initial_value: old value to replace
:type initial_value: bytes
:param new_value: new value of the key
:type new_value: bytes
:returns: status of transaction, ``True`` if the replace was
successful, ``False`` otherwise
:rtype: bool
|
[
"Atomically",
"replace",
"the",
"value",
"of",
"a",
"key",
"with",
"a",
"new",
"value",
"."
] |
python
|
train
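A usage sketch assuming a local etcd reachable with etcd3's default host and port:

import etcd3

etcd = etcd3.client()  # localhost:2379 by default
etcd.put("config/mode", "blue")

# The swap succeeds only while the stored value still equals "blue".
swapped = etcd.replace("config/mode", "blue", "green")
print("replaced:", swapped)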
|
SiLab-Bonn/pyBAR
|
pybar/run_manager.py
|
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/run_manager.py#L268-L278
|
def stop(self, msg=None):
'''Stopping a run. Control for loops. Gentle stop/abort.
This event should provide a more gentle abort. The run should stop ASAP but the run is still considered complete.
'''
if not self.stop_run.is_set():
if msg:
logging.info('%s%s Stopping run...', msg, ('' if msg[-1] in punctuation else '.'))
else:
logging.info('Stopping run...')
self.stop_run.set()
|
[
"def",
"stop",
"(",
"self",
",",
"msg",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"stop_run",
".",
"is_set",
"(",
")",
":",
"if",
"msg",
":",
"logging",
".",
"info",
"(",
"'%s%s Stopping run...'",
",",
"msg",
",",
"(",
"''",
"if",
"msg",
"[",
"-",
"1",
"]",
"in",
"punctuation",
"else",
"'.'",
")",
")",
"else",
":",
"logging",
".",
"info",
"(",
"'Stopping run...'",
")",
"self",
".",
"stop_run",
".",
"set",
"(",
")"
] |
Stopping a run. Control for loops. Gentle stop/abort.
This event should provide a more gentle abort. The run should stop ASAP but the run is still considered complete.
|
[
"Stopping",
"a",
"run",
".",
"Control",
"for",
"loops",
".",
"Gentle",
"stop",
"/",
"abort",
"."
] |
python
|
train
|
blockstack/virtualchain
|
virtualchain/lib/ecdsalib.py
|
https://github.com/blockstack/virtualchain/blob/fcfc970064ca7dfcab26ebd3ab955870a763ea39/virtualchain/lib/ecdsalib.py#L412-L429
|
def sign_digest(hash_hex, privkey_hex, hashfunc=hashlib.sha256):
"""
Given a digest and a private key, sign it.
Return the base64-encoded signature
"""
if not isinstance(hash_hex, (str, unicode)):
raise ValueError("hash hex is not a string")
hash_hex = str(hash_hex)
pk_i = decode_privkey_hex(privkey_hex)
privk = ec.derive_private_key(pk_i, ec.SECP256K1(), default_backend())
sig = privk.sign(hash_hex.decode('hex'), ec.ECDSA(utils.Prehashed(hashes.SHA256())))
sig_r, sig_s = decode_dss_signature(sig)
sigb64 = encode_signature(sig_r, sig_s)
return sigb64
|
[
"def",
"sign_digest",
"(",
"hash_hex",
",",
"privkey_hex",
",",
"hashfunc",
"=",
"hashlib",
".",
"sha256",
")",
":",
"if",
"not",
"isinstance",
"(",
"hash_hex",
",",
"(",
"str",
",",
"unicode",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"hash hex is not a string\"",
")",
"hash_hex",
"=",
"str",
"(",
"hash_hex",
")",
"pk_i",
"=",
"decode_privkey_hex",
"(",
"privkey_hex",
")",
"privk",
"=",
"ec",
".",
"derive_private_key",
"(",
"pk_i",
",",
"ec",
".",
"SECP256K1",
"(",
")",
",",
"default_backend",
"(",
")",
")",
"sig",
"=",
"privk",
".",
"sign",
"(",
"hash_hex",
".",
"decode",
"(",
"'hex'",
")",
",",
"ec",
".",
"ECDSA",
"(",
"utils",
".",
"Prehashed",
"(",
"hashes",
".",
"SHA256",
"(",
")",
")",
")",
")",
"sig_r",
",",
"sig_s",
"=",
"decode_dss_signature",
"(",
"sig",
")",
"sigb64",
"=",
"encode_signature",
"(",
"sig_r",
",",
"sig_s",
")",
"return",
"sigb64"
] |
Given a digest and a private key, sign it.
Return the base64-encoded signature
|
[
"Given",
"a",
"digest",
"and",
"a",
"private",
"key",
"sign",
"it",
".",
"Return",
"the",
"base64",
"-",
"encoded",
"signature"
] |
python
|
train
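The record's code is Python 2 (unicode, str.decode('hex')). A Python 3 sketch of the same prehashed-ECDSA pattern with a recent version of the cryptography package; a freshly generated key stands in for decode_privkey_hex, and the library's verify replaces the custom encode_signature step.

import hashlib

from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec, utils

private_key = ec.generate_private_key(ec.SECP256K1())

# Hash the message ourselves, then sign the raw digest via Prehashed,
# mirroring sign_digest's hex-decoded input.
digest = hashlib.sha256(b"hello world").digest()
signature = private_key.sign(digest, ec.ECDSA(utils.Prehashed(hashes.SHA256())))

# verify raises InvalidSignature on mismatch; silence means success.
private_key.public_key().verify(
    signature, digest, ec.ECDSA(utils.Prehashed(hashes.SHA256())))
print("signature verified")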
|
dnanexus/dx-toolkit
|
src/python/dxpy/utils/resolver.py
|
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/utils/resolver.py#L721-L747
|
def _resolve_folder(project, parent_folder, folder_name):
"""
:param project: The project that the folder belongs to
:type project: string
:param parent_folder: Full path to the parent folder that contains
folder_name
:type parent_folder: string
:param folder_name: Name of the folder
:type folder_name: string
:returns: The path to folder_name, if it exists, in the form of
"<parent_folder>/<folder_name>"
:rtype: string
:raises: ResolutionError if folder_name is not a folder, or if
folder_name points to a folder that does not exist
Attempts to resolve folder_name at location parent_folder in project.
"""
if '/' in folder_name:
# Then there's no way it's supposed to be a folder
raise ResolutionError('Object of name ' + str(folder_name) + ' could not be resolved in folder ' +
str(parent_folder) + ' of project ID ' + str(project))
possible_folder, _skip = clean_folder_path(parent_folder + '/' + folder_name, 'folder')
if not check_folder_exists(project, parent_folder, folder_name):
raise ResolutionError('Unable to resolve "' + folder_name +
'" to a data object or folder name in \'' + parent_folder + "'")
return possible_folder
|
[
"def",
"_resolve_folder",
"(",
"project",
",",
"parent_folder",
",",
"folder_name",
")",
":",
"if",
"'/'",
"in",
"folder_name",
":",
"# Then there's no way it's supposed to be a folder",
"raise",
"ResolutionError",
"(",
"'Object of name '",
"+",
"str",
"(",
"folder_name",
")",
"+",
"' could not be resolved in folder '",
"+",
"str",
"(",
"parent_folder",
")",
"+",
"' of project ID '",
"+",
"str",
"(",
"project",
")",
")",
"possible_folder",
",",
"_skip",
"=",
"clean_folder_path",
"(",
"parent_folder",
"+",
"'/'",
"+",
"folder_name",
",",
"'folder'",
")",
"if",
"not",
"check_folder_exists",
"(",
"project",
",",
"parent_folder",
",",
"folder_name",
")",
":",
"raise",
"ResolutionError",
"(",
"'Unable to resolve \"'",
"+",
"folder_name",
"+",
"'\" to a data object or folder name in \\''",
"+",
"parent_folder",
"+",
"\"'\"",
")",
"return",
"possible_folder"
] |
:param project: The project that the folder belongs to
:type project: string
:param parent_folder: Full path to the parent folder that contains
folder_name
:type parent_folder: string
:param folder_name: Name of the folder
:type folder_name: string
:returns: The path to folder_name, if it exists, in the form of
"<parent_folder>/<folder_name>"
:rtype: string
:raises: ResolutionError if folder_name is not a folder, or if
folder_name points to a folder that does not exist
Attempts to resolve folder_name at location parent_folder in project.
|
[
":",
"param",
"project",
":",
"The",
"project",
"that",
"the",
"folder",
"belongs",
"to",
":",
"type",
"project",
":",
"string",
":",
"param",
"parent_folder",
":",
"Full",
"path",
"to",
"the",
"parent",
"folder",
"that",
"contains",
"folder_name",
":",
"type",
"parent_folder",
":",
"string",
":",
"param",
"folder_name",
":",
"Name",
"of",
"the",
"folder",
":",
"type",
"folder_name",
":",
"string",
":",
"returns",
":",
"The",
"path",
"to",
"folder_name",
"if",
"it",
"exists",
"in",
"the",
"form",
"of",
"<parent_folder",
">",
"/",
"<folder_name",
">",
":",
"rtype",
":",
"string",
":",
"raises",
":",
"ResolutionError",
"if",
"folder_name",
"is",
"not",
"a",
"folder",
"or",
"if",
"folder_name",
"points",
"to",
"a",
"folder",
"that",
"does",
"not",
"exist"
] |
python
|
train
|