Dataset columns:

| column | dtype | notes |
|---|---|---|
| repo | string | lengths 7–55 |
| path | string | lengths 4–223 |
| url | string | lengths 87–315 |
| code | string | lengths 75–104k |
| code_tokens | list | token sequence for `code` |
| docstring | string | lengths 1–46.9k |
| docstring_tokens | list | token sequence for `docstring` |
| language | string | 1 class (value: `python`) |
| partition | string | 3 classes (values include `train`, `valid`) |
| avg_line_len | float64 | 7.91–980 |

---
repo: lowandrew/OLCTools
path: coreGenome/annotate.py
url: https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/coreGenome/annotate.py#L328-L359

```python
def profiler(self):
    """
    Calculates the core profile for each strain
    """
    printtime('Calculating core profiles', self.start)
    # Only create the profile if it doesn't exist already
    # if not os.path.isfile('{}/profile.txt'.format(self.profilelocation)):
    for strain in self.corealleles:
        # Add the gene name and allele number pair for each core gene in each strain
        self.coreset.add(tuple(sorted(self.corealleles[strain].items())))
    # Set the header to be similar to an MLST profile - ST,gene1,gene2,etc
    header = 'ST,{}\n'.format(','.join(sorted(self.geneset)))
    data = ''
    # Enumerate the sorted core set so sequence type numbering is deterministic
    for count, core in enumerate(sorted(self.coreset)):
        # Increment count now to account for 0-based numbering
        count += 1
        # Add the sequence type number to the profile
        data += '{}'.format(count)
        # Store the sequence type for each strain
        for strain in self.corealleles:
            if tuple(sorted(self.corealleles[strain].items())) == core:
                self.profiles[strain] = count
        # Add the allele number for each gene
        for gene in sorted(core):
            data += ',{}'.format(gene[1])
        data += '\n'
    # Write the profile
    with open(os.path.join(self.profilelocation, 'profile.txt'), 'w') as profile:
        profile.write(header)
        profile.write(data)
    # Create a list of which strains correspond to the sequence types
    self.linker()
```

docstring:
Calculates the core profile for each strain
language: python | partition: train | avg_line_len: 48.15625

---
repo: ArduPilot/MAVProxy
path: MAVProxy/modules/mavproxy_misseditor/me_defines.py
url: https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_misseditor/me_defines.py#L59-L68

```python
def get_column_labels(command_name):
    '''return dictionary of column labels if available'''
    cmd = cmd_reverse_lookup(command_name)
    if cmd == 0:
        return {}
    labels = {}
    enum = mavutil.mavlink.enums['MAV_CMD'][cmd]
    for col in enum.param.keys():
        labels[col] = make_column_label(command_name, enum.param[col], "P%u" % col)
    return labels
```

docstring:
return dictionary of column labels if available
language: python | partition: train | avg_line_len: 36.4

---
repo: python-odin/odinweb
path: odinweb/signing.py
url: https://github.com/python-odin/odinweb/blob/198424133584acc18cb41c8d18d91f803abc810f/odinweb/signing.py#L118-L130

```python
def verify_url(url, secret_key, **kwargs):
    """
    Verify a signed URL (excluding the domain and scheme).

    :param url: URL to verify
    :param secret_key: Secret key
    :rtype: bool
    :raises: URLError
    """
    result = urlparse(url)
    query_args = MultiValueDict(parse_qs(result.query))
    return verify_url_path(result.path, query_args, secret_key, **kwargs)
```

docstring:
Verify a signed URL (excluding the domain and scheme).
:param url: URL to verify
:param secret_key: Secret key
:rtype: bool
:raises: URLError
language: python | partition: train | avg_line_len: 28.076923

---
repo: adamrehn/ue4cli
path: ue4cli/UnrealManagerBase.py
url: https://github.com/adamrehn/ue4cli/blob/f1c34502c96059e36757b7433da7e98760a75a6f/ue4cli/UnrealManagerBase.py#L80-L90

```python
def getEngineChangelist(self):
    """
    Returns the compatible Perforce changelist identifier for the latest installed version of UE4
    """
    # Newer versions of the engine use the key "CompatibleChangelist", older ones use "Changelist"
    version = self._getEngineVersionDetails()
    if 'CompatibleChangelist' in version:
        return int(version['CompatibleChangelist'])
    else:
        return int(version['Changelist'])
```

docstring:
Returns the compatible Perforce changelist identifier for the latest installed version of UE4
language: python | partition: train | avg_line_len: 36.727273

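The key-fallback pattern in this record is easy to exercise in isolation. A minimal sketch, using hypothetical version dictionaries in place of whatever `_getEngineVersionDetails()` actually returns:

```python
# Hypothetical version dictionaries; real values come from the engine's version file
new_style = {'CompatibleChangelist': '12345678', 'Changelist': '0'}
old_style = {'Changelist': '3456789'}

def changelist(version):
    # Prefer the newer key, falling back to the legacy one
    if 'CompatibleChangelist' in version:
        return int(version['CompatibleChangelist'])
    return int(version['Changelist'])

print(changelist(new_style))  # 12345678
print(changelist(old_style))  # 3456789
```
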
---
repo: apple/turicreate
path: src/external/xgboost/python-package/xgboost/training.py
url: https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/python-package/xgboost/training.py#L213-L233

```python
def mknfold(dall, nfold, param, seed, evals=(), fpreproc=None):
    """
    Make an n-fold list of CVPack from random indices.
    """
    evals = list(evals)
    np.random.seed(seed)
    randidx = np.random.permutation(dall.num_row())
    # Integer division: slice bounds must be ints (plain / yields a float on Python 3)
    kstep = len(randidx) // nfold
    idset = [randidx[(i * kstep): min(len(randidx), (i + 1) * kstep)] for i in range(nfold)]
    ret = []
    for k in range(nfold):
        dtrain = dall.slice(np.concatenate([idset[i] for i in range(nfold) if k != i]))
        dtest = dall.slice(idset[k])
        # run preprocessing on the data set if needed
        if fpreproc is not None:
            dtrain, dtest, tparam = fpreproc(dtrain, dtest, param.copy())
        else:
            tparam = param
        plst = list(tparam.items()) + [('eval_metric', itm) for itm in evals]
        ret.append(CVPack(dtrain, dtest, plst))
    return ret
```

docstring:
Make an n-fold list of CVPack from random indices.
language: python | partition: train | avg_line_len: 40.428571

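The fold-index construction can be checked on its own; a self-contained sketch of the same split, assuming only numpy:

```python
import numpy as np

np.random.seed(0)
nfold = 3
randidx = np.random.permutation(10)
kstep = len(randidx) // nfold  # integer step per fold, as above
idset = [randidx[i * kstep: min(len(randidx), (i + 1) * kstep)] for i in range(nfold)]
# Note: the len(randidx) % nfold trailing indices are dropped by this scheme
for k in range(nfold):
    train_idx = np.concatenate([idset[i] for i in range(nfold) if i != k])
    test_idx = idset[k]
    print(k, sorted(test_idx.tolist()), sorted(train_idx.tolist()))
```
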
---
repo: openego/eDisGo
path: edisgo/tools/pypsa_io_lopf.py
url: https://github.com/openego/eDisGo/blob/e6245bdaf236f9c49dbda5a18c1c458290f41e2b/edisgo/tools/pypsa_io_lopf.py#L722-L764

```python
def _pypsa_storage_timeseries(network, timesteps, mode=None):
    """
    Timeseries in PyPSA compatible format for storage instances

    Parameters
    ----------
    network : Network
        The eDisGo grid topology model overall container
    timesteps : array_like
        Timesteps is an array-like object with entries of type
        :pandas:`pandas.Timestamp<timestamp>` specifying which time steps
        to export to pypsa representation and use in power flow analysis.
    mode : str, optional
        Specifically retrieve generator time series for MV or LV grid level or
        both. Either choose 'mv' or 'lv'.
        Defaults to None, which returns both timeseries for MV and LV in a
        single DataFrame.

    Returns
    -------
    :pandas:`pandas.DataFrame<dataframe>`
        Time series table in PyPSA format
    """
    mv_storage_timeseries_p_min = []
    mv_storage_timeseries_p_max = []
    # MV storage time series
    if mode == 'mv' or mode is None:
        for storage in network.mv_grid.graph.nodes_by_attribute('storage'):
            mv_storage_timeseries_p_min.append(
                storage.timeseries.p.rename(repr(
                    storage)).to_frame().loc[timesteps])
            mv_storage_timeseries_p_max.append(
                storage.timeseries.p.rename(repr(
                    storage)).to_frame().loc[timesteps])
    storage_df_p_min = pd.concat(
        mv_storage_timeseries_p_min, axis=1)
    storage_df_p_max = pd.concat(
        mv_storage_timeseries_p_max, axis=1)
    return storage_df_p_min, storage_df_p_max
```

docstring:
Timeseries in PyPSA compatible format for storage instances
Parameters
----------
network : Network
The eDisGo grid topology model overall container
timesteps : array_like
Timesteps is an array-like object with entries of type
:pandas:`pandas.Timestamp<timestamp>` specifying which time steps
to export to pypsa representation and use in power flow analysis.
mode : str, optional
Specifically retrieve generator time series for MV or LV grid level or
both. Either choose 'mv' or 'lv'.
Defaults to None, which returns both timeseries for MV and LV in a
single DataFrame.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Time series table in PyPSA format
language: python | partition: train | avg_line_len: 35.744186

---
repo: usc-isi-i2/etk
path: etk/extractors/sentence_extractor.py
url: https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/extractors/sentence_extractor.py#L57-L80

```python
def extract(self, text: str) -> List[Extraction]:
    """
    Splits text by sentences.

    Args:
        text (str): Input text to be extracted.

    Returns:
        List[Extraction]: the list of extraction or the empty list if there are no matches.
    """
    doc = self._parser(text)
    extractions = list()
    for sent in doc.sents:
        # Note: start_char/end_char receive the first and last characters of the
        # sentence text, not character offsets
        this_extraction = Extraction(value=sent.text,
                                     extractor_name=self.name,
                                     start_token=sent[0],
                                     end_token=sent[-1],
                                     start_char=sent.text[0],
                                     end_char=sent.text[-1])
        extractions.append(this_extraction)
    return extractions
```

docstring:
Splits text by sentences.
Args:
text (str): Input text to be extracted.
Returns:
List[Extraction]: the list of extraction or the empty list if there are no matches.
language: python | partition: train | avg_line_len: 33.958333

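The `doc.sents` iteration suggests `self._parser` is a spaCy pipeline, though the record does not confirm it. A hedged, self-contained sketch of the underlying sentence split using spaCy's rule-based sentencizer (no model download required):

```python
import spacy

# Blank English pipeline with the rule-based sentencizer component
nlp = spacy.blank("en")
nlp.add_pipe("sentencizer")

doc = nlp("ETK splits text into sentences. Each sentence becomes one extraction.")
for sent in doc.sents:
    # sent[0]/sent[-1] are the first and last tokens of each sentence
    print(sent.text, "|", sent[0].text, "->", sent[-1].text)
```
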
---
repo: jobovy/galpy
path: galpy/potential/Potential.py
url: https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/Potential.py#L1951-L1995

```python
def evaluateR2derivs(Pot, R, z, phi=None, t=0.):
    """
    NAME:
       evaluateR2derivs
    PURPOSE:
       convenience function to evaluate a possible sum of potentials
    INPUT:
       Pot - a potential or list of potentials (dissipative forces in such a list are ignored)
       R - cylindrical Galactocentric distance (can be Quantity)
       z - distance above the plane (can be Quantity)
       phi - azimuth (optional; can be Quantity)
       t - time (optional; can be Quantity)
    OUTPUT:
       d2Phi/d2R(R,z,phi,t)
    HISTORY:
       2012-07-25 - Written - Bovy (IAS)
    """
    isList = isinstance(Pot, list)
    nonAxi = _isNonAxi(Pot)
    if nonAxi and phi is None:
        raise PotentialError("The (list of) Potential instances is non-axisymmetric, but you did not provide phi")
    if isList:
        sum = 0.
        for pot in Pot:
            if not isinstance(pot, DissipativeForce):
                sum += pot.R2deriv(R, z, phi=phi, t=t, use_physical=False)
        return sum
    elif isinstance(Pot, Potential):
        return Pot.R2deriv(R, z, phi=phi, t=t, use_physical=False)
    else:  # pragma: no cover
        raise PotentialError("Input to 'evaluateR2derivs' is neither a Potential-instance or a list of such instances")
```

docstring:
NAME:
evaluateR2derivs
PURPOSE:
convenience function to evaluate a possible sum of potentials
INPUT:
Pot - a potential or list of potentials (dissipative forces in such a list are ignored)
R - cylindrical Galactocentric distance (can be Quantity)
z - distance above the plane (can be Quantity)
phi - azimuth (optional; can be Quantity)
t - time (optional; can be Quantity)
OUTPUT:
d2Phi/d2R(R,z,phi,t)
HISTORY:
2012-07-25 - Written - Bovy (IAS)
language: python | partition: train | avg_line_len: 26.866667

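A hedged usage sketch: assuming galpy's bundled `MWPotential2014` list of potentials, the convenience function can be called directly on it (the call matches the signature in the record; values are in galpy's internal units):

```python
from galpy.potential import MWPotential2014, evaluateR2derivs

# Second radial derivative of the summed Milky-Way-like potential at R=1., z=0.
print(evaluateR2derivs(MWPotential2014, 1., 0.))
```
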
---
repo: pywbem/pywbem
path: attic/cim_provider.py
url: https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/attic/cim_provider.py#L381-L440

```python
def references(self, env, object_name, model, assoc_class,
               result_class_name, role, result_role, keys_only):
    """Instrument Associations.

    All four association-related operations (Associators, AssociatorNames,
    References, ReferenceNames) are mapped to this method.
    This method is a python generator

    Keyword arguments:
    env -- Provider Environment (pycimmb.ProviderEnvironment)
    object_name -- A pywbem.CIMInstanceName that defines the source
        CIM Object whose associated Objects are to be returned.
    model -- A template pywbem.CIMInstance to serve as a model
        of the objects to be returned.  Only properties present on this
        model need to be set.
    assoc_class -- The pywbem.CIMClass.
    result_class_name -- If not empty, this string acts as a filter on
        the returned set of Instances by mandating that each returned
        Instances MUST represent an association between object_name
        and an Instance of a Class whose name matches this parameter
        or a subclass.
    role -- If not empty, MUST be a valid Property name. It acts as a
        filter on the returned set of Instances by mandating that each
        returned Instance MUST refer to object_name via a Property
        whose name matches the value of this parameter.
    result_role -- If not empty, MUST be a valid Property name. It acts
        as a filter on the returned set of Instances by mandating that
        each returned Instance MUST represent associations of
        object_name to other Instances, where the other Instances play
        the specified result_role in the association (i.e. the
        name of the Property in the Association Class that refers to
        the Object related to object_name MUST match the value of this
        parameter).
    keys_only -- A boolean. True if only the key properties should be
        set on the generated instances.

    The following diagram may be helpful in understanding the role,
    result_role, and result_class_name parameters.

        +------------------------+                   +-------------------+
        | object_name.classname  |                   | result_class_name |
        | ~~~~~~~~~~~~~~~~~~~~~  |                   | ~~~~~~~~~~~~~~~~~ |
        +------------------------+                   +-------------------+
           |                  +-----------------------------------+     |
           |                  | [Association] assoc_class         |     |
           | object_name      | ~~~~~~~~~~~~~~~~~~~~~~~~~         |     |
           +------------------+ object_name.classname REF role    |     |
          (CIMInstanceName)   | result_class_name REF result_role +-----+
                              |                                   |(CIMInstanceName)
                              +-----------------------------------+

    Possible Errors:
    CIM_ERR_ACCESS_DENIED
    CIM_ERR_NOT_SUPPORTED
    CIM_ERR_INVALID_NAMESPACE
    CIM_ERR_INVALID_PARAMETER (including missing, duplicate, unrecognized
        or otherwise incorrect parameters)
    CIM_ERR_FAILED (some other unspecified error occurred)
    """
    pass
```

docstring:
Instrument Associations.
All four association-related operations (Associators, AssociatorNames,
References, ReferenceNames) are mapped to this method.
This method is a python generator
Keyword arguments:
env -- Provider Environment (pycimmb.ProviderEnvironment)
object_name -- A pywbem.CIMInstanceName that defines the source
CIM Object whose associated Objects are to be returned.
model -- A template pywbem.CIMInstance to serve as a model
of the objects to be returned. Only properties present on this
model need to be set.
assoc_class -- The pywbem.CIMClass.
result_class_name -- If not empty, this string acts as a filter on
the returned set of Instances by mandating that each returned
Instances MUST represent an association between object_name
and an Instance of a Class whose name matches this parameter
or a subclass.
role -- If not empty, MUST be a valid Property name. It acts as a
filter on the returned set of Instances by mandating that each
returned Instance MUST refer to object_name via a Property
whose name matches the value of this parameter.
result_role -- If not empty, MUST be a valid Property name. It acts
as a filter on the returned set of Instances by mandating that
each returned Instance MUST represent associations of
object_name to other Instances, where the other Instances play
the specified result_role in the association (i.e. the
name of the Property in the Association Class that refers to
the Object related to object_name MUST match the value of this
parameter).
keys_only -- A boolean. True if only the key properties should be
set on the generated instances.
The following diagram may be helpful in understanding the role,
result_role, and result_class_name parameters.
+------------------------+                   +-------------------+
| object_name.classname  |                   | result_class_name |
| ~~~~~~~~~~~~~~~~~~~~~  |                   | ~~~~~~~~~~~~~~~~~ |
+------------------------+                   +-------------------+
   |                  +-----------------------------------+     |
   |                  | [Association] assoc_class         |     |
   | object_name      | ~~~~~~~~~~~~~~~~~~~~~~~~~         |     |
   +------------------+ object_name.classname REF role    |     |
  (CIMInstanceName)   | result_class_name REF result_role +-----+
                      |                                   |(CIMInstanceName)
                      +-----------------------------------+
Possible Errors:
CIM_ERR_ACCESS_DENIED
CIM_ERR_NOT_SUPPORTED
CIM_ERR_INVALID_NAMESPACE
CIM_ERR_INVALID_PARAMETER (including missing, duplicate, unrecognized
or otherwise incorrect parameters)
CIM_ERR_FAILED (some other unspecified error occurred)
language: python | partition: train | avg_line_len: 54.35

---
repo: BYU-PCCL/holodeck
path: example.py
url: https://github.com/BYU-PCCL/holodeck/blob/01acd4013f5acbd9f61fbc9caaafe19975e8b121/example.py#L162-L183

```python
def editor_multi_agent_example():
    """This editor example shows how to interact with holodeck worlds that have multiple agents.
    This is specifically for when working with UE4 directly and not a prebuilt binary.
    """
    agent_definitions = [
        AgentDefinition("uav0", agents.UavAgent, [Sensors.PIXEL_CAMERA, Sensors.LOCATION_SENSOR]),
        AgentDefinition("uav1", agents.UavAgent, [Sensors.LOCATION_SENSOR, Sensors.VELOCITY_SENSOR])
    ]
    env = HolodeckEnvironment(agent_definitions, start_world=False)
    cmd0 = np.array([0, 0, -2, 10])
    cmd1 = np.array([0, 0, 5, 10])
    for i in range(10):
        env.reset()
        env.act("uav0", cmd0)
        env.act("uav1", cmd1)
        for _ in range(1000):
            states = env.tick()
            uav0_terminal = states["uav0"][Sensors.TERMINAL]
            uav1_reward = states["uav1"][Sensors.REWARD]
```

docstring:
This editor example shows how to interact with holodeck worlds that have multiple agents.
This is specifically for when working with UE4 directly and not a prebuilt binary.
language: python | partition: train | avg_line_len: 39.181818

---
repo: agschwender/pilbox
path: pilbox/signature.py
url: https://github.com/agschwender/pilbox/blob/8b1d154436fd1b9f9740925549793561c58d4400/pilbox/signature.py#L35-L38

```python
def derive_signature(key, qs):
    """Derives the signature from the supplied query string using the key."""
    key, qs = (key or "", qs or "")
    return hmac.new(key.encode(), qs.encode(), hashlib.sha1).hexdigest()
```

docstring:
Derives the signature from the supplied query string using the key.
language: python | partition: train | avg_line_len: 53.5

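A short usage sketch; the function is repeated verbatim so the block is self-contained, and the comparison uses `hmac.compare_digest` to avoid timing leaks (the query string here is made up):

```python
import hashlib
import hmac

def derive_signature(key, qs):
    """Derives the signature from the supplied query string using the key."""
    key, qs = (key or "", qs or "")
    return hmac.new(key.encode(), qs.encode(), hashlib.sha1).hexdigest()

qs = "h=100&op=resize&url=http%3A%2F%2Fexample.com%2Fa.jpg&w=100"
sig = derive_signature("my-secret", qs)
# Verify a client-supplied signature in constant time
print(hmac.compare_digest(sig, derive_signature("my-secret", qs)))  # True
```
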
---
repo: juju-solutions/charms.reactive
path: charms/reactive/endpoints.py
url: https://github.com/juju-solutions/charms.reactive/blob/e37e781432e77c12b63d2c739bd6cd70d3230c3a/charms/reactive/endpoints.py#L285-L320

```python
def all_departed_units(self):
    """
    Collection of all units that were previously part of any relation on
    this endpoint but which have since departed.

    This collection is persistent and mutable.  The departed units will
    be kept until they are explicitly removed, to allow for reasonable
    cleanup of units that have left.

    Example: You need to run a command each time a unit departs the relation.

    .. code-block:: python

        @when('endpoint.{endpoint_name}.departed')
        def handle_departed_unit(self):
            for name, unit in self.all_departed_units.items():
                # run the command to remove `unit` from the cluster
                # ..
            self.all_departed_units.clear()
            clear_flag(self.expand_name('departed'))

    Once a unit is departed, it will no longer show up in
    :attr:`all_joined_units`.  Note that units are considered departed as
    soon as the departed hook is entered, which differs slightly from how
    the Juju primitives behave (departing units are still returned from
    ``related-units`` until after the departed hook is complete).

    This collection is a :class:`KeyList`, so can be used as a mapping to
    look up units by their unit name, or iterated or accessed by index.
    """
    if self._all_departed_units is None:
        self._all_departed_units = CachedKeyList.load(
            'reactive.endpoints.departed.{}'.format(self.endpoint_name),
            RelatedUnit._deserialize,
            'unit_name')
    return self._all_departed_units
```

docstring:
Collection of all units that were previously part of any relation on
this endpoint but which have since departed.
This collection is persistent and mutable. The departed units will
be kept until they are explicitly removed, to allow for reasonable
cleanup of units that have left.
Example: You need to run a command each time a unit departs the relation.
.. code-block:: python
@when('endpoint.{endpoint_name}.departed')
def handle_departed_unit(self):
for name, unit in self.all_departed_units.items():
# run the command to remove `unit` from the cluster
# ..
self.all_departed_units.clear()
clear_flag(self.expand_name('departed'))
Once a unit is departed, it will no longer show up in
:attr:`all_joined_units`. Note that units are considered departed as
soon as the departed hook is entered, which differs slightly from how
the Juju primitives behave (departing units are still returned from
``related-units`` until after the departed hook is complete).
This collection is a :class:`KeyList`, so can be used as a mapping to
look up units by their unit name, or iterated or accessed by index.
|
[
"Collection",
"of",
"all",
"units",
"that",
"were",
"previously",
"part",
"of",
"any",
"relation",
"on",
"this",
"endpoint",
"but",
"which",
"have",
"since",
"departed",
"."
] |
language: python | partition: train | avg_line_len: 45.5

---
repo: closeio/tasktiger
path: tasktiger/redis_scripts.py
url: https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/redis_scripts.py#L318-L337

```python
def zadd(self, key, score, member, mode, client=None):
    """
    Like ZADD, but supports different score update modes, in case the
    member already exists in the ZSET:
    - "nx": Don't update the score
    - "xx": Only update elements that already exist. Never add elements.
    - "min": Use the smaller of the given and existing score
    - "max": Use the larger of the given and existing score
    """
    if mode == 'nx':
        f = self._zadd_noupdate
    elif mode == 'xx':
        f = self._zadd_update_existing
    elif mode == 'min':
        f = self._zadd_update_min
    elif mode == 'max':
        f = self._zadd_update_max
    else:
        raise NotImplementedError('mode "%s" unsupported' % mode)
    return f(keys=[key], args=[score, member], client=client)
```

docstring:
Like ZADD, but supports different score update modes, in case the
member already exists in the ZSET:
- "nx": Don't update the score
- "xx": Only update elements that already exist. Never add elements.
- "min": Use the smaller of the given and existing score
- "max": Use the larger of the given and existing score
language: python | partition: train | avg_line_len: 41.7

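The four modes are easy to pin down with a plain-dict stand-in for the ZSET; a sketch of the semantics only (tasktiger's actual `_zadd_*` helpers are server-side scripts, not shown in the record):

```python
def zadd_mode(zset, member, score, mode):
    # zset is a plain dict mapping member -> score
    existing = zset.get(member)
    if mode == 'nx':          # add only if missing
        if existing is None:
            zset[member] = score
    elif mode == 'xx':        # update only if already present
        if existing is not None:
            zset[member] = score
    elif mode == 'min':       # keep the smaller score
        zset[member] = score if existing is None else min(existing, score)
    elif mode == 'max':       # keep the larger score
        zset[member] = score if existing is None else max(existing, score)
    else:
        raise NotImplementedError('mode "%s" unsupported' % mode)

z = {'job-1': 5.0}
zadd_mode(z, 'job-1', 3.0, 'min'); print(z)  # {'job-1': 3.0}
zadd_mode(z, 'job-1', 9.0, 'nx');  print(z)  # unchanged
zadd_mode(z, 'job-2', 7.0, 'xx');  print(z)  # 'job-2' not added
```
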
---
repo: marshmallow-code/apispec
path: src/apispec/yaml_utils.py
url: https://github.com/marshmallow-code/apispec/blob/e92ceffd12b2e392b8d199ed314bd2a7e6512dff/src/apispec/yaml_utils.py#L53-L62

```python
def load_operations_from_docstring(docstring):
    """Return a dictionary of OpenAPI operations parsed from a docstring."""
    doc_data = load_yaml_from_docstring(docstring)
    return {
        key: val
        for key, val in iteritems(doc_data)
        if key in PATH_KEYS or key.startswith("x-")
    }
```

docstring:
Return a dictionary of OpenAPI operations parsed from a docstring.
language: python | partition: train | avg_line_len: 30.8

---
repo: persephone-tools/persephone
path: persephone/utterance.py
url: https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/utterance.py#L45-L65

```python
def write_transcriptions(utterances: List[Utterance],
                         tgt_dir: Path, ext: str, lazy: bool) -> None:
    """ Write the utterance transcriptions to files in the tgt_dir. Is lazy and
    checks if the file already exists.

    Args:
        utterances: A list of Utterance objects to be written.
        tgt_dir: The directory in which to write the text of the utterances,
            one file per utterance.
        ext: The file extension for the utterances. Typically something like
            "phonemes", or "phonemes_and_tones".
    """
    tgt_dir.mkdir(parents=True, exist_ok=True)
    for utter in utterances:
        out_path = tgt_dir / "{}.{}".format(utter.prefix, ext)
        if lazy and out_path.is_file():
            continue
        with out_path.open("w") as f:
            print(utter.text, file=f)
```

docstring:
Write the utterance transcriptions to files in the tgt_dir. Is lazy and
checks if the file already exists.
Args:
utterances: A list of Utterance objects to be written.
tgt_dir: The directory in which to write the text of the utterances,
one file per utterance.
ext: The file extension for the utterances. Typically something like
"phonemes", or "phonemes_and_tones".
language: python | partition: train | avg_line_len: 39.142857

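A usage sketch, assuming the function above is in scope; the two-field `Utterance` stand-in is hypothetical (the real persephone `Utterance` carries more fields, but only `prefix` and `text` are touched here):

```python
from pathlib import Path
from typing import NamedTuple

class Utterance(NamedTuple):
    # Only the attributes write_transcriptions uses
    prefix: str
    text: str

utts = [Utterance("rec001", "a b c"), Utterance("rec002", "d e f")]
write_transcriptions(utts, Path("transcripts"), ext="phonemes", lazy=True)
# -> transcripts/rec001.phonemes, transcripts/rec002.phonemes
```
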
---
repo: torfsen/service
path: src/service/__init__.py
url: https://github.com/torfsen/service/blob/d0dd824fce9237825c1943b30cd14f7b0f5957a6/src/service/__init__.py#L49-L78

```python
def _detach_process():
    """
    Detach daemon process.

    Forks the current process into a parent and a detached child. The
    child process resides in its own process group, has no controlling
    terminal attached and is cleaned up by the init process.

    Returns ``True`` for the parent and ``False`` for the child.
    """
    # To detach from our process group we need to call ``setsid``. We
    # can only do that if we aren't a process group leader. Therefore
    # we fork once, which makes sure that the new child process is not
    # a process group leader.
    pid = os.fork()
    if pid > 0:
        # Parent process
        # Use waitpid to "collect" the child process and avoid Zombies
        os.waitpid(pid, 0)
        return True
    os.setsid()
    # We now fork a second time and let the second's fork parent exit.
    # This makes the second fork's child process an orphan. Orphans are
    # cleaned up by the init process, so we won't end up with a zombie.
    # In addition, the second fork's child is no longer a session
    # leader and can therefore never acquire a controlling terminal.
    pid = os.fork()
    if pid > 0:
        os._exit(os.EX_OK)
    return False
```

docstring:
Detach daemon process.
Forks the current process into a parent and a detached child. The
child process resides in its own process group, has no controlling
terminal attached and is cleaned up by the init process.
Returns ``True`` for the parent and ``False`` for the child.
language: python | partition: train | avg_line_len: 39.033333

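The typical call pattern for a double-fork helper like this (POSIX only; `_detach_process` is the function above, and the pid-file path is a placeholder):

```python
import os
import sys

if _detach_process():
    sys.exit(0)  # parent: the child is detached, nothing left to do
# The child continues here as the daemon: own session, no controlling terminal
with open('/tmp/daemon.pid', 'w') as f:
    f.write(str(os.getpid()))
```
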
---
repo: DataBiosphere/toil
path: src/toil/lib/ec2.py
url: https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/lib/ec2.py#L102-L179

```python
def wait_spot_requests_active(ec2, requests, timeout=None, tentative=False):
    """
    Wait until no spot request in the given iterator is in the 'open' state or, optionally,
    a timeout occurs. Yield spot requests as soon as they leave the 'open' state.

    :param Iterator[SpotInstanceRequest] requests:
    :param float timeout: Maximum time in seconds to spend waiting or None to wait forever. If a
           timeout occurs, the remaining open requests will be cancelled.
    :param bool tentative: if True, give up on a spot request at the earliest indication of it
           not being fulfilled immediately
    :rtype: Iterator[list[SpotInstanceRequest]]
    """
    if timeout is not None:
        timeout = time.time() + timeout
    active_ids = set()
    other_ids = set()
    open_ids = None

    def cancel():
        log.warn('Cancelling remaining %i spot requests.', len(open_ids))
        ec2.cancel_spot_instance_requests(list(open_ids))

    def spot_request_not_found(e):
        error_code = 'InvalidSpotInstanceRequestID.NotFound'
        return isinstance(e, EC2ResponseError) and e.error_code == error_code

    try:
        while True:
            open_ids, eval_ids, fulfill_ids = set(), set(), set()
            batch = []
            for r in requests:
                if r.state == 'open':
                    open_ids.add(r.id)
                    if r.status.code == 'pending-evaluation':
                        eval_ids.add(r.id)
                    elif r.status.code == 'pending-fulfillment':
                        fulfill_ids.add(r.id)
                    else:
                        log.info(
                            'Request %s entered status %s indicating that it will not be '
                            'fulfilled anytime soon.', r.id, r.status.code)
                elif r.state == 'active':
                    assert r.id not in active_ids
                    active_ids.add(r.id)
                    batch.append(r)
                else:
                    assert r.id not in other_ids
                    other_ids.add(r.id)
                    batch.append(r)
            if batch:
                yield batch
            log.info('%i spot requests(s) are open (%i of which are pending evaluation and %i '
                     'are pending fulfillment), %i are active and %i are in another state.',
                     *map(len, (open_ids, eval_ids, fulfill_ids, active_ids, other_ids)))
            if not open_ids or tentative and not eval_ids and not fulfill_ids:
                break
            sleep_time = 2 * a_short_time
            if timeout is not None and time.time() + sleep_time >= timeout:
                log.warn('Timed out waiting for spot requests.')
                break
            log.info('Sleeping for %is', sleep_time)
            time.sleep(sleep_time)
            for attempt in retry_ec2(retry_while=spot_request_not_found):
                with attempt:
                    requests = ec2.get_all_spot_instance_requests(
                        list(open_ids))
    except BaseException:
        if open_ids:
            with panic(log):
                cancel()
        raise
    else:
        if open_ids:
            cancel()
```

docstring:
Wait until no spot request in the given iterator is in the 'open' state or, optionally,
a timeout occurs. Yield spot requests as soon as they leave the 'open' state.
:param Iterator[SpotInstanceRequest] requests:
:param float timeout: Maximum time in seconds to spend waiting or None to wait forever. If a
timeout occurs, the remaining open requests will be cancelled.
:param bool tentative: if True, give up on a spot request at the earliest indication of it
not being fulfilled immediately
:rtype: Iterator[list[SpotInstanceRequest]]
language: python | partition: train | avg_line_len: 40.179487

---
repo: polyaxon/polyaxon
path: polyaxon/db/models/pipelines.py
url: https://github.com/polyaxon/polyaxon/blob/e1724f0756b1a42f9e7aa08a976584a84ef7f016/polyaxon/db/models/pipelines.py#L258-L273

```python
def get_run_params(self) -> Dict:
    """Return the params to run the celery task."""
    params = {}
    if self.celery_queue:
        params['queue'] = self.celery_queue
    if self.timeout:
        params['soft_time_limit'] = self.timeout
        # We also set a hard time limit that will send signal 9 (SIGKILL).
        # The hard limit should never be hit, as hitting it leaves inconsistent state
        params['time_limit'] = self.timeout + settings.CELERY_HARD_TIME_LIMIT_DELAY
    if self.execute_at:
        params['eta'] = self.execute_at
    return params
```

docstring:
Return the params to run the celery task.
language: python | partition: train | avg_line_len: 36.875

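The returned dict maps directly onto Celery's `apply_async` keyword arguments (`queue`, `soft_time_limit`, `time_limit`, `eta`). A hedged sketch; the app, broker, and task names are placeholders, not polyaxon's:

```python
from celery import Celery

app = Celery('scheduler', broker='memory://')

@app.task
def run_pipeline():
    pass

# Shaped like what get_run_params() returns
params = {'queue': 'pipelines', 'soft_time_limit': 60, 'time_limit': 240}
run_pipeline.apply_async(**params)
```
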
---
repo: unixorn/logrus
path: logrus/time.py
url: https://github.com/unixorn/logrus/blob/d1af28639fd42968acc257476d526d9bbe57719f/logrus/time.py#L23-L29

```python
def humanTime(seconds):
    '''
    Convert seconds to something more human-friendly
    '''
    intervals = ['days', 'hours', 'minutes', 'seconds']
    x = deltaTime(seconds=seconds)
    return ' '.join('{} {}'.format(getattr(x, k), k) for k in intervals if getattr(x, k))
```

docstring:
Convert seconds to something more human-friendly
language: python | partition: train | avg_line_len: 36.428571

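`deltaTime` is not shown in the record; a self-contained variant of the same idea, assuming `dateutil.relativedelta` in its place (its constructor carries excess seconds over into minutes, hours, and days, exposing the same attributes the join reads):

```python
from dateutil.relativedelta import relativedelta

def human_time(seconds):
    # relativedelta normalizes seconds into days/hours/minutes/seconds attributes
    x = relativedelta(seconds=seconds)
    intervals = ['days', 'hours', 'minutes', 'seconds']
    return ' '.join('{} {}'.format(getattr(x, k), k) for k in intervals if getattr(x, k))

print(human_time(93784))  # 1 days 2 hours 3 minutes 4 seconds
```
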
---
repo: dougalsutherland/skl-groups
path: skl_groups/kernels/transform.py
url: https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/kernels/transform.py#L549-L576

```python
def transform(self, X):
    '''
    Transforms X according to the linear transformation corresponding to
    shifting the input eigenvalues to all be at least ``self.min_eig``.

    Parameters
    ----------
    X : array, shape [n_test, n]
        The test similarities to training points.

    Returns
    -------
    Xt : array, shape [n_test, n]
        The transformed test similarities to training points. Only different
        from X if X is the training data.
    '''
    n = self.train_.shape[0]
    if X.ndim != 2 or X.shape[1] != n:
        msg = "X should have {} columns, the number of samples at fit time"
        raise TypeError(msg.format(n))
    if self.copy:
        X = X.copy()
    if self.shift_ != 0 and X is self.train_ or (
            X.shape == self.train_.shape and np.allclose(X, self.train_)):
        # Add the shift along the diagonal of the (square) training kernel
        X[xrange(n), xrange(n)] += self.shift_
    return X
```

docstring:
Transforms X according to the linear transformation corresponding to
shifting the input eigenvalues to all be at least ``self.min_eig``.
Parameters
----------
X : array, shape [n_test, n]
The test similarities to training points.
Returns
-------
Xt : array, shape [n_test, n]
The transformed test similarities to training points. Only different
from X if X is the training data.
language: python | partition: valid | avg_line_len: 33.892857

---
repo: saltstack/salt
path: salt/cloud/clouds/joyent.py
url: https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/joyent.py#L893-L914

```python
def show_key(kwargs=None, call=None):
    '''
    List the keys available
    '''
    if call != 'function':
        log.error(
            'The show_key function must be called with -f or --function.'
        )
        return False
    if not kwargs:
        kwargs = {}
    if 'keyname' not in kwargs:
        log.error('A keyname is required.')
        return False
    rcode, data = query(
        command='my/keys/{0}'.format(kwargs['keyname']),
        method='GET',
    )
    return {'keys': {data['name']: data['key']}}
```

docstring:
List the keys available
language: python | partition: train | avg_line_len: 23.181818

---
repo: CalebBell/thermo
path: thermo/joback.py
url: https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/joback.py#L772-L819

```python
def Cpig_coeffs(counts):
    r'''Computes the ideal-gas polynomial heat capacity coefficients
    of an organic compound using the Joback method as a function of
    chemical structure only.

    .. math::
        C_p^{ig} = \sum_i a_i - 37.93 + \left[ \sum_i b_i + 0.210 \right] T
        + \left[ \sum_i c_i - 3.91 \cdot 10^{-4} \right] T^2
        + \left[\sum_i d_i + 2.06 \cdot 10^{-7}\right] T^3

    288 compounds were used by Joback in this determination. No overall
    error was reported.

    The ideal gas heat capacity values used in developing the heat
    capacity polynomials used 9 data points between 298 K and 1000 K.

    Parameters
    ----------
    counts : dict
        Dictionary of Joback groups present (numerically indexed) and their
        counts, [-]

    Returns
    -------
    coefficients : list[float]
        Coefficients which will result in a calculated heat capacity in
        units of J/mol/K, [-]

    Examples
    --------
    >>> c = Joback.Cpig_coeffs({1: 2, 24: 1})
    >>> c
    [7.520000000000003, 0.26084, -0.0001207, 1.545999999999998e-08]
    >>> Cp = lambda T : c[0] + c[1]*T + c[2]*T**2 + c[3]*T**3
    >>> Cp(300)
    75.32642000000001
    '''
    a, b, c, d = 0.0, 0.0, 0.0, 0.0
    for group, count in counts.items():
        a += joback_groups_id_dict[group].Cpa*count
        b += joback_groups_id_dict[group].Cpb*count
        c += joback_groups_id_dict[group].Cpc*count
        d += joback_groups_id_dict[group].Cpd*count
    a -= 37.93
    b += 0.210
    c -= 3.91E-4
    d += 2.06E-7
    return [a, b, c, d]
```

docstring:
r'''Computes the ideal-gas polynomial heat capacity coefficients
of an organic compound using the Joback method as a function of
chemical structure only.
.. math::
C_p^{ig} = \sum_i a_i - 37.93 + \left[ \sum_i b_i + 0.210 \right] T
+ \left[ \sum_i c_i - 3.91 \cdot 10^{-4} \right] T^2
+ \left[\sum_i d_i + 2.06 \cdot 10^{-7}\right] T^3
288 compounds were used by Joback in this determination. No overall
error was reported.
The ideal gas heat capacity values used in developing the heat
capacity polynomials used 9 data points between 298 K and 1000 K.
Parameters
----------
counts : dict
Dictionary of Joback groups present (numerically indexed) and their
counts, [-]
Returns
-------
coefficients : list[float]
Coefficients which will result in a calculated heat capacity in
units of J/mol/K, [-]
Examples
--------
>>> c = Joback.Cpig_coeffs({1: 2, 24: 1})
>>> c
[7.520000000000003, 0.26084, -0.0001207, 1.545999999999998e-08]
>>> Cp = lambda T : c[0] + c[1]*T + c[2]*T**2 + c[3]*T**3
>>> Cp(300)
75.32642000000001
language: python | partition: valid | avg_line_len: 35.9375

---
repo: ejeschke/ginga
path: ginga/rv/plugins/Command.py
url: https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/rv/plugins/Command.py#L422-L442

```python
def cmd_lsch(self):
    """lsch

    List the channels, showing the current one.
    """
    names = list(self.fv.get_channel_names())
    names.sort()
    if len(names) == 0:
        self.log("No channels")
        return
    res = []
    cur_ch = self.fv.get_channel_info()
    for name in names:
        if (cur_ch is not None) and (cur_ch.name == name):
            res.append("=>%s" % (name))
        else:
            res.append("  %s" % (name))
    self.log("\n".join(res))
```

docstring:
lsch
List the channels, showing the current one.
language: python | partition: train | avg_line_len: 25.095238

---
repo: idlesign/uwsgiconf
path: uwsgiconf/options/workers.py
url: https://github.com/idlesign/uwsgiconf/blob/475407acb44199edbf7e0a66261bfeb51de1afae/uwsgiconf/options/workers.py#L394-L410

```python
def set_zerg_client_params(self, server_sockets, use_fallback_socket=None):
    """Zerg mode. Zergs params.

    :param str|unicode|list[str|unicode] server_sockets: Attaches zerg to a zerg server.

    :param bool use_fallback_socket: Fallback to normal sockets if the zerg server is not available
    """
    self._set('zerg', server_sockets, multi=True)

    if use_fallback_socket is not None:
        self._set('zerg-fallback', use_fallback_socket, cast=bool)

        for socket in listify(server_sockets):
            self._section.networking.register_socket(self._section.networking.sockets.default(socket))

    return self._section
```

docstring:
Zerg mode. Zergs params.
:param str|unicode|list[str|unicode] server_sockets: Attaches zerg to a zerg server.
:param bool use_fallback_socket: Fallback to normal sockets if the zerg server is not available
|
[
"Zerg",
"mode",
".",
"Zergs",
"params",
"."
] |
python
|
train
| 39.176471 |
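A usage sketch for the record above. It assumes the standard `Section` entry point from `uwsgiconf.config` and that this options group is exposed as `section.workers`; both are assumptions inferred from the file path, not shown in the record, and the socket path is hypothetical.

from uwsgiconf.config import Section

section = Section()
# Attach this instance as a zerg to a zerg server socket; fall back to
# normal sockets if the zerg server is unavailable.
section.workers.set_zerg_client_params(
    server_sockets='/var/run/zerg.sock',   # hypothetical socket path
    use_fallback_socket=True,
)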
joelfrederico/SciSalt
|
scisalt/matplotlib/plot_featured.py
|
https://github.com/joelfrederico/SciSalt/blob/7bf57c49c7dde0a8b0aa337fbd2fbd527ce7a67f/scisalt/matplotlib/plot_featured.py#L11-L55
|
def plot_featured(*args, **kwargs):
"""
Wrapper for matplotlib.pyplot.plot() / errorbar().
Takes options:
* 'error': if true, use :func:`matplotlib.pyplot.errorbar` instead of :func:`matplotlib.pyplot.plot`. *\*args* and *\*\*kwargs* passed through here.
* 'fig': figure to use.
* 'figlabel': figure label.
* 'legend': legend location.
* 'toplabel': top label of plot.
* 'xlabel': x-label of plot.
* 'ylabel': y-label of plot.
"""
# Strip off options specific to plot_featured
toplabel = kwargs.pop('toplabel', None)
xlabel = kwargs.pop('xlabel', None)
ylabel = kwargs.pop('ylabel', None)
legend = kwargs.pop('legend', None)
error = kwargs.pop('error', None)
# save = kwargs.pop('save', False)
figlabel = kwargs.pop('figlabel', None)
fig = kwargs.pop('fig', None)
if figlabel is not None:
fig = _figure(figlabel)
elif fig is None:
try:
fig = _plt.gcf()
except:
fig = _plt.figure()
# Pass everything else to plot
if error is None:
_plt.plot(*args, **kwargs)
else:
_plt.errorbar(*args, **kwargs)
# Format plot as desired
_addlabel(toplabel, xlabel, ylabel, fig=fig)
if legend is not None:
_plt.legend(legend)
return fig
|
[
"def",
"plot_featured",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Strip off options specific to plot_featured",
"toplabel",
"=",
"kwargs",
".",
"pop",
"(",
"'toplabel'",
",",
"None",
")",
"xlabel",
"=",
"kwargs",
".",
"pop",
"(",
"'xlabel'",
",",
"None",
")",
"ylabel",
"=",
"kwargs",
".",
"pop",
"(",
"'ylabel'",
",",
"None",
")",
"legend",
"=",
"kwargs",
".",
"pop",
"(",
"'legend'",
",",
"None",
")",
"error",
"=",
"kwargs",
".",
"pop",
"(",
"'error'",
",",
"None",
")",
"# save = kwargs.pop('save', False)",
"figlabel",
"=",
"kwargs",
".",
"pop",
"(",
"'figlabel'",
",",
"None",
")",
"fig",
"=",
"kwargs",
".",
"pop",
"(",
"'fig'",
",",
"None",
")",
"if",
"figlabel",
"is",
"not",
"None",
":",
"fig",
"=",
"_figure",
"(",
"figlabel",
")",
"elif",
"fig",
"is",
"None",
":",
"try",
":",
"fig",
"=",
"_plt",
".",
"gcf",
"(",
")",
"except",
":",
"fig",
"=",
"_plt",
".",
"fig",
"(",
")",
"# Pass everything else to plot",
"if",
"error",
"is",
"None",
":",
"_plt",
".",
"plot",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"_plt",
".",
"errorbar",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"# Format plot as desired",
"_addlabel",
"(",
"toplabel",
",",
"xlabel",
",",
"ylabel",
",",
"fig",
"=",
"fig",
")",
"if",
"legend",
"is",
"not",
"None",
":",
"_plt",
".",
"legend",
"(",
"legend",
")",
"return",
"fig"
] |
Wrapper for matplotlib.pyplot.plot() / errorbar().
Takes options:
* 'error': if true, use :func:`matplotlib.pyplot.errorbar` instead of :func:`matplotlib.pyplot.plot`. *\*args* and *\*\*kwargs* passed through here.
* 'fig': figure to use.
* 'figlabel': figure label.
* 'legend': legend location.
* 'toplabel': top label of plot.
* 'xlabel': x-label of plot.
* 'ylabel': y-label of plot.
|
[
"Wrapper",
"for",
"matplotlib",
".",
"pyplot",
".",
"plot",
"()",
"/",
"errorbar",
"()",
"."
] |
python
|
valid
| 28.511111 |
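A minimal usage sketch for `plot_featured`, assuming the function is importable from `scisalt.matplotlib` (inferred from the file path; the import location is not confirmed by the record):

import numpy as np
from scisalt.matplotlib import plot_featured  # assumed import path

x = np.linspace(0, 2 * np.pi, 100)
# Plain labeled plot; passing error=True would route through
# matplotlib.pyplot.errorbar instead of plot.
fig = plot_featured(x, np.sin(x),
                    toplabel='Demo', xlabel='x', ylabel='sin(x)',
                    legend=['sin'])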
mitsei/dlkit
|
dlkit/json_/osid/metadata.py
|
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/osid/metadata.py#L1858-L1872
|
def supports_version_type(self, version_type):
"""Tests if the given version type is supported.
arg: version_type (osid.type.Type): a version Type
return: (boolean) - ``true`` if the type is supported, ``false``
otherwise
raise: IllegalState - syntax is not a ``VERSION``
raise: NullArgument - ``version_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.Metadata.supports_coordinate_type
if self._kwargs['syntax'] not in ['``VERSION``']:
raise errors.IllegalState()
return version_type in self.get_version_types()
|
[
"def",
"supports_version_type",
"(",
"self",
",",
"version_type",
")",
":",
"# Implemented from template for osid.Metadata.supports_coordinate_type",
"if",
"self",
".",
"_kwargs",
"[",
"'syntax'",
"]",
"not",
"in",
"[",
"'``VERSION``'",
"]",
":",
"raise",
"errors",
".",
"IllegalState",
"(",
")",
"return",
"version_type",
"in",
"self",
".",
"get_version_types"
] |
Tests if the given version type is supported.
arg: version_type (osid.type.Type): a version Type
return: (boolean) - ``true`` if the type is supported, ``false``
otherwise
raise: IllegalState - syntax is not a ``VERSION``
raise: NullArgument - ``version_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
|
[
"Tests",
"if",
"the",
"given",
"version",
"type",
"is",
"supported",
"."
] |
python
|
train
| 45.466667 |
ktbyers/netmiko
|
netmiko/dell/dell_force10_ssh.py
|
https://github.com/ktbyers/netmiko/blob/54e6116c0b4664de2123081937e0a9a27bdfdfea/netmiko/dell/dell_force10_ssh.py#L9-L18
|
def save_config(
self,
cmd="copy running-configuration startup-configuration",
confirm=False,
confirm_response="",
):
"""Saves Config"""
return super(DellForce10SSH, self).save_config(
cmd=cmd, confirm=confirm, confirm_response=confirm_response
)
|
[
"def",
"save_config",
"(",
"self",
",",
"cmd",
"=",
"\"copy running-configuration startup-configuration\"",
",",
"confirm",
"=",
"False",
",",
"confirm_response",
"=",
"\"\"",
",",
")",
":",
"return",
"super",
"(",
"DellForce10SSH",
",",
"self",
")",
".",
"save_config",
"(",
"cmd",
"=",
"cmd",
",",
"confirm",
"=",
"confirm",
",",
"confirm_response",
"=",
"confirm_response",
")"
] |
Saves Config
|
[
"Saves",
"Config"
] |
python
|
train
| 30.9 |
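A hedged usage sketch: `save_config` is normally reached through netmiko's `ConnectHandler` with the `dell_force10` device type; the host and credentials below are placeholders.

from netmiko import ConnectHandler

conn = ConnectHandler(
    device_type='dell_force10',  # maps to DellForce10SSH
    host='192.0.2.1',            # placeholder (TEST-NET) address
    username='admin',
    password='secret',           # placeholder credentials
)
conn.save_config()  # sends 'copy running-configuration startup-configuration'
conn.disconnect()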
mikekatz04/BOWIE
|
snr_calculator_folder/gwsnrcalc/utils/waveforms.py
|
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/snr_calculator_folder/gwsnrcalc/utils/waveforms.py#L126-L173
|
def _broadcast_and_set_attrs(self, local_dict):
"""Cast all inputs to correct dimensions.
This method fixes inputs that have different lengths, namely one input as
an array and the others as scalars or len-1 arrays.
Raises:
ValueError: Multiple arrays of len>1 with differing lengths
"""
del local_dict['self']
self.remove_axis = False
max_length = 0
for key in local_dict:
try:
length = len(local_dict[key])
if length > max_length:
max_length = length
except TypeError:
pass
if max_length == 0:
self.remove_axis = True
for key in local_dict:
setattr(self, key, np.array([local_dict[key]]))
# check for bad length arrays
else:
for key in local_dict:
try:
if len(local_dict[key]) < max_length and len(local_dict[key]) > 1:
raise ValueError("Casting parameters not correct."
+ " Need all at a maximum shape and the rest being"
+ "len-1 arrays or scalars")
except TypeError:
pass
# broadcast arrays
for key in local_dict:
try:
if len(local_dict[key]) == max_length:
setattr(self, key, local_dict[key])
elif len(local_dict[key]) == 1:
setattr(self, key, np.full((max_length,), local_dict[key][0]))
except TypeError:
setattr(self, key, np.full((max_length,), local_dict[key]))
return
|
[
"def",
"_broadcast_and_set_attrs",
"(",
"self",
",",
"local_dict",
")",
":",
"del",
"local_dict",
"[",
"'self'",
"]",
"self",
".",
"remove_axis",
"=",
"False",
"max_length",
"=",
"0",
"for",
"key",
"in",
"local_dict",
":",
"try",
":",
"length",
"=",
"len",
"(",
"local_dict",
"[",
"key",
"]",
")",
"if",
"length",
">",
"max_length",
":",
"max_length",
"=",
"length",
"except",
"TypeError",
":",
"pass",
"if",
"max_length",
"==",
"0",
":",
"self",
".",
"remove_axis",
"=",
"True",
"for",
"key",
"in",
"local_dict",
":",
"setattr",
"(",
"self",
",",
"key",
",",
"np",
".",
"array",
"(",
"[",
"local_dict",
"[",
"key",
"]",
"]",
")",
")",
"# check for bad length arrays",
"else",
":",
"for",
"key",
"in",
"local_dict",
":",
"try",
":",
"if",
"len",
"(",
"local_dict",
"[",
"key",
"]",
")",
"<",
"max_length",
"and",
"len",
"(",
"local_dict",
"[",
"key",
"]",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"Casting parameters not correct.\"",
"+",
"\" Need all at a maximum shape and the rest being\"",
"+",
"\"len-1 arrays or scalars\"",
")",
"except",
"TypeError",
":",
"pass",
"# broadcast arrays",
"for",
"key",
"in",
"local_dict",
":",
"try",
":",
"if",
"len",
"(",
"local_dict",
"[",
"key",
"]",
")",
"==",
"max_length",
":",
"setattr",
"(",
"self",
",",
"key",
",",
"local_dict",
"[",
"key",
"]",
")",
"elif",
"len",
"(",
"local_dict",
"[",
"key",
"]",
")",
"==",
"1",
":",
"setattr",
"(",
"self",
",",
"key",
",",
"np",
".",
"full",
"(",
"(",
"max_length",
",",
")",
",",
"local_dict",
"[",
"key",
"]",
"[",
"0",
"]",
")",
")",
"except",
"TypeError",
":",
"setattr",
"(",
"self",
",",
"key",
",",
"np",
".",
"full",
"(",
"(",
"max_length",
",",
")",
",",
"local_dict",
"[",
"key",
"]",
")",
")",
"return"
] |
Cast all inputs to correct dimensions.
This method fixes inputs that have different lengths, namely one input as
an array and the others as scalars or len-1 arrays.
Raises:
ValueError: Multiple arrays of len>1 with differing lengths
|
[
"Cast",
"all",
"inputs",
"to",
"correct",
"dimensions",
"."
] |
python
|
train
| 35.875 |
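The broadcasting rule described in `_broadcast_and_set_attrs` can be shown standalone. This sketch reimplements just the rule with plain numpy (it is not the class method itself): scalars and len-1 arrays stretch to the longest input; mismatched arrays of len>1 raise.

import numpy as np

def broadcast_inputs(**inputs):
    # Longest array-like input sets the target length (1 if all scalars).
    max_len = max(len(np.atleast_1d(v)) for v in inputs.values())
    out = {}
    for key, val in inputs.items():
        arr = np.atleast_1d(val)
        if len(arr) == 1:
            out[key] = np.full(max_len, arr[0])   # stretch scalar/len-1
        elif len(arr) == max_len:
            out[key] = arr
        else:
            raise ValueError("arrays of len>1 must all match the longest input")
    return out

print(broadcast_inputs(m1=[10, 20, 30], m2=5)['m2'])  # -> [5 5 5]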
SpockBotMC/SpockBot
|
spockbot/plugins/helpers/inventory.py
|
https://github.com/SpockBotMC/SpockBot/blob/f89911551f18357720034fbaa52837a0d09f66ea/spockbot/plugins/helpers/inventory.py#L145-L158
|
def inv_slots_preferred(self):
"""
List of all available inventory slots in the preferred search order.
Does not include the additional slots from the open window.
1. active slot
2. remainder of the hotbar
3. remainder of the persistent inventory
"""
slots = [self.active_slot]
slots.extend(slot for slot in self.window.hotbar_slots
if slot != self.active_slot)
slots.extend(self.window.inventory_slots)
return slots
|
[
"def",
"inv_slots_preferred",
"(",
"self",
")",
":",
"slots",
"=",
"[",
"self",
".",
"active_slot",
"]",
"slots",
".",
"extend",
"(",
"slot",
"for",
"slot",
"in",
"self",
".",
"window",
".",
"hotbar_slots",
"if",
"slot",
"!=",
"self",
".",
"active_slot",
")",
"slots",
".",
"extend",
"(",
"self",
".",
"window",
".",
"inventory_slots",
")",
"return",
"slots"
] |
List of all available inventory slots in the preferred search order.
Does not include the additional slots from the open window.
1. active slot
2. remainder of the hotbar
3. remainder of the persistent inventory
|
[
"List",
"of",
"all",
"available",
"inventory",
"slots",
"in",
"the",
"preferred",
"search",
"order",
".",
"Does",
"not",
"include",
"the",
"additional",
"slots",
"from",
"the",
"open",
"window",
"."
] |
python
|
train
| 36.642857 |
tensorflow/cleverhans
|
cleverhans/attacks/fast_gradient_method.py
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks/fast_gradient_method.py#L40-L61
|
def generate(self, x, **kwargs):
"""
Returns the graph for Fast Gradient Method adversarial examples.
:param x: The model's symbolic inputs.
:param kwargs: See `parse_params`
"""
# Parse and save attack-specific parameters
assert self.parse_params(**kwargs)
labels, _nb_classes = self.get_or_guess_labels(x, kwargs)
return fgm(
x,
self.model.get_logits(x),
y=labels,
eps=self.eps,
ord=self.ord,
clip_min=self.clip_min,
clip_max=self.clip_max,
targeted=(self.y_target is not None),
sanity_checks=self.sanity_checks)
|
[
"def",
"generate",
"(",
"self",
",",
"x",
",",
"*",
"*",
"kwargs",
")",
":",
"# Parse and save attack-specific parameters",
"assert",
"self",
".",
"parse_params",
"(",
"*",
"*",
"kwargs",
")",
"labels",
",",
"_nb_classes",
"=",
"self",
".",
"get_or_guess_labels",
"(",
"x",
",",
"kwargs",
")",
"return",
"fgm",
"(",
"x",
",",
"self",
".",
"model",
".",
"get_logits",
"(",
"x",
")",
",",
"y",
"=",
"labels",
",",
"eps",
"=",
"self",
".",
"eps",
",",
"ord",
"=",
"self",
".",
"ord",
",",
"clip_min",
"=",
"self",
".",
"clip_min",
",",
"clip_max",
"=",
"self",
".",
"clip_max",
",",
"targeted",
"=",
"(",
"self",
".",
"y_target",
"is",
"not",
"None",
")",
",",
"sanity_checks",
"=",
"self",
".",
"sanity_checks",
")"
] |
Returns the graph for Fast Gradient Method adversarial examples.
:param x: The model's symbolic inputs.
:param kwargs: See `parse_params`
|
[
"Returns",
"the",
"graph",
"for",
"Fast",
"Gradient",
"Method",
"adversarial",
"examples",
"."
] |
python
|
train
| 27.454545 |
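Typical call pattern for the record above, as a sketch: it assumes `model` is a `cleverhans.model.Model` wrapper, `sess` a TensorFlow session, and `x` the symbolic input tensor, none of which are defined in the record.

from cleverhans.attacks import FastGradientMethod

fgsm = FastGradientMethod(model, sess=sess)
# generate() validates eps/ord/clip_* via parse_params and returns the
# symbolic adversarial-example tensor built by fgm().
adv_x = fgsm.generate(x, eps=0.3, clip_min=0., clip_max=1.)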
fulfilio/fulfil-python-api
|
fulfil_client/oauth.py
|
https://github.com/fulfilio/fulfil-python-api/blob/180ac969c427b1292439a0371866aa5f169ffa6b/fulfil_client/oauth.py#L18-L22
|
def setup(cls, client_id, client_secret):
"""Configure client in session
"""
cls.client_id = client_id
cls.client_secret = client_secret
|
[
"def",
"setup",
"(",
"cls",
",",
"client_id",
",",
"client_secret",
")",
":",
"cls",
".",
"client_id",
"=",
"client_id",
"cls",
".",
"client_secret",
"=",
"client_secret"
] |
Configure client in session
|
[
"Configure",
"client",
"in",
"session"
] |
python
|
train
| 32.8 |
orb-framework/orb
|
orb/core/database.py
|
https://github.com/orb-framework/orb/blob/575be2689cb269e65a0a2678232ff940acc19e5a/orb/core/database.py#L77-L83
|
def addNamespace(self, namespace, **context):
"""
Creates a new namespace within this database.
:param namespace: <str>
"""
self.connection().addNamespace(namespace, orb.Context(**context))
|
[
"def",
"addNamespace",
"(",
"self",
",",
"namespace",
",",
"*",
"*",
"context",
")",
":",
"self",
".",
"connection",
"(",
")",
".",
"addNamespace",
"(",
"namespace",
",",
"orb",
".",
"Context",
"(",
"*",
"*",
"context",
")",
")"
] |
Creates a new namespace within this database.
:param namespace: <str>
|
[
"Creates",
"a",
"new",
"namespace",
"within",
"this",
"database",
"."
] |
python
|
train
| 32 |
rigetti/pyquil
|
pyquil/gates.py
|
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/gates.py#L510-L520
|
def FALSE(classical_reg):
"""
Produce a FALSE instruction.
:param classical_reg: A classical register to modify.
:return: An instruction object representing the equivalent MOVE.
"""
warn("`FALSE a` has been deprecated. Use `MOVE a 0` instead.")
if isinstance(classical_reg, int):
classical_reg = Addr(classical_reg)
return MOVE(classical_reg, 0)
|
[
"def",
"FALSE",
"(",
"classical_reg",
")",
":",
"warn",
"(",
"\"`FALSE a` has been deprecated. Use `MOVE a 0` instead.\"",
")",
"if",
"isinstance",
"(",
"classical_reg",
",",
"int",
")",
":",
"classical_reg",
"=",
"Addr",
"(",
"classical_reg",
")",
"return",
"MOVE",
"(",
"classical_reg",
",",
"0",
")"
] |
Produce a FALSE instruction.
:param classical_reg: A classical register to modify.
:return: An instruction object representing the equivalent MOVE.
|
[
"Produce",
"a",
"FALSE",
"instruction",
"."
] |
python
|
train
| 34.181818 |
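Since `FALSE` is just a deprecated spelling of `MOVE`, a short sketch of the pre-2.0 pyquil API shown in the record:

from pyquil.quil import Program
from pyquil.gates import FALSE

prog = Program(FALSE(0))  # warns, then emits the equivalent MOVE
# The instruction stored in `prog` is MOVE(Addr(0), 0).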
pricingassistant/mrq
|
mrq/processes.py
|
https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/processes.py#L112-L122
|
def wait(self):
""" Waits for the pool to be fully stopped """
while True:
if not self.greenlet_watch:
break
if self.stopping:
gevent.sleep(0.1)
else:
gevent.sleep(1)
|
[
"def",
"wait",
"(",
"self",
")",
":",
"while",
"True",
":",
"if",
"not",
"self",
".",
"greenlet_watch",
":",
"break",
"if",
"self",
".",
"stopping",
":",
"gevent",
".",
"sleep",
"(",
"0.1",
")",
"else",
":",
"gevent",
".",
"sleep",
"(",
"1",
")"
] |
Waits for the pool to be fully stopped
|
[
"Waits",
"for",
"the",
"pool",
"to",
"be",
"fully",
"stopped"
] |
python
|
train
| 23.454545 |
blockstack/blockstack-core
|
blockstack/lib/operations/announce.py
|
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/operations/announce.py#L136-L154
|
def parse(bin_payload):
"""
Interpret a block's nulldata back into a SHA256. The first three bytes (2 magic + 1 opcode)
will not be present in bin_payload.
"""
message_hash = hexlify(bin_payload)
if not is_hex( message_hash ):
log.warning("Not a message hash")
return None
if len(message_hash) != 40:
log.warning("Not a 160-bit hash")
return None
return {
'opcode': 'ANNOUNCE',
'message_hash': message_hash
}
|
[
"def",
"parse",
"(",
"bin_payload",
")",
":",
"message_hash",
"=",
"hexlify",
"(",
"bin_payload",
")",
"if",
"not",
"is_hex",
"(",
"message_hash",
")",
":",
"log",
".",
"warning",
"(",
"\"Not a message hash\"",
")",
"return",
"None",
"if",
"len",
"(",
"message_hash",
")",
"!=",
"40",
":",
"log",
".",
"warning",
"(",
"\"Not a 160-bit hash\"",
")",
"return",
"None",
"return",
"{",
"'opcode'",
":",
"'ANNOUNCE'",
",",
"'message_hash'",
":",
"message_hash",
"}"
] |
Interpret a block's nulldata back into a SHA256. The first three bytes (2 magic + 1 opcode)
will not be present in bin_payload.
|
[
"Interpret",
"a",
"block",
"s",
"nulldata",
"back",
"into",
"a",
"SHA256",
".",
"The",
"first",
"three",
"bytes",
"(",
"2",
"magic",
"+",
"1",
"opcode",
")",
"will",
"not",
"be",
"present",
"in",
"bin_payload",
"."
] |
python
|
train
| 25.578947 |
Alveo/pyalveo
|
pyalveo/pyalveo.py
|
https://github.com/Alveo/pyalveo/blob/1e9eec22bc031bc9a08066f9966565a546e6242e/pyalveo/pyalveo.py#L1542-L1559
|
def delete_contribution(self, url):
"""Delete the contribution with this identifier
:rtype: bool
:returns: True if the contribution was deleted, False otherwise (e.g. if it didn't exist)
"""
# first validate that this is a real contrib
try:
result = self.api_request(url)
if 'url' in result and 'documents' in result:
self.api_request(result['url'], method='DELETE')
return True
except:
pass
return False
|
[
"def",
"delete_contribution",
"(",
"self",
",",
"url",
")",
":",
"# first validate that this is a real contrib",
"try",
":",
"result",
"=",
"self",
".",
"api_request",
"(",
"url",
")",
"if",
"'url'",
"in",
"result",
"and",
"'documents'",
"in",
"result",
":",
"self",
".",
"api_request",
"(",
"result",
"[",
"'url'",
"]",
",",
"method",
"=",
"'DELETE'",
")",
"return",
"True",
"except",
":",
"pass",
"return",
"False"
] |
Delete the contribution with this identifier
:rtype: bool
:returns: True if the contribution was deleted, False otherwise (e.g. if it didn't exist)
|
[
"Delete",
"the",
"contribution",
"with",
"this",
"identifier"
] |
python
|
train
| 29 |
avirshup/DockerMake
|
dockermake/step.py
|
https://github.com/avirshup/DockerMake/blob/2173199904f086353ef539ea578788b99f6fea0a/dockermake/step.py#L183-L227
|
def _resolve_squash_cache(self, client):
"""
Currently doing a "squash" basically negates the cache for any subsequent layers.
But we can work around this by A) checking if the cache was successful for the _unsquashed_
version of the image, and B) if so, re-using an older squashed version of the image.
Three ways to do this:
1. get the shas of the before/after images from `image.history` comments
OR the output stream (or both). Both are extremely brittle, but also easy to access
2. Build the image without squash first. If the unsquashed image sha matches
a cached one, substitute the unsquashed image for the squashed one.
If no match, re-run the steps with squash=True and store the resulting pair
Less brittle than 1., but harder and defs not elegant
3. Use docker-squash as a dependency - this is by far the most preferable solution,
except that they don't yet support the newest docker sdk version.
Currently option 1 is implemented - we parse the comment string in the image history
to figure out which layers the image was squashed from
"""
from .staging import BUILD_CACHEDIR
history = client.api.history(self.buildname)
comment = history[0].get('Comment', '').split()
if len(comment) != 4 or comment[0] != 'merge' or comment[2] != 'to':
print('WARNING: failed to parse this image\'s pre-squash history. '
'The build will continue, but all subsequent layers will be rebuilt.')
return
squashed_sha = history[0]['Id']
start_squash_sha = comment[1]
end_squash_sha = comment[3]
cprint(' Layers %s to %s were squashed.' % (start_squash_sha, end_squash_sha), 'yellow')
# check cache
squashcache = os.path.join(BUILD_CACHEDIR, 'squashes')
if not os.path.exists(squashcache):
os.makedirs(squashcache)
cachepath = os.path.join(BUILD_CACHEDIR,
'squashes', '%s-%s' % (start_squash_sha, end_squash_sha))
# on hit, tag the squashedsha as the result of this build step
if os.path.exists(cachepath):
self._get_squashed_layer_cache(client, squashed_sha, cachepath)
else:
self._cache_squashed_layer(squashed_sha, cachepath)
|
[
"def",
"_resolve_squash_cache",
"(",
"self",
",",
"client",
")",
":",
"from",
".",
"staging",
"import",
"BUILD_CACHEDIR",
"history",
"=",
"client",
".",
"api",
".",
"history",
"(",
"self",
".",
"buildname",
")",
"comment",
"=",
"history",
"[",
"0",
"]",
".",
"get",
"(",
"'Comment'",
",",
"''",
")",
".",
"split",
"(",
")",
"if",
"len",
"(",
"comment",
")",
"!=",
"4",
"or",
"comment",
"[",
"0",
"]",
"!=",
"'merge'",
"or",
"comment",
"[",
"2",
"]",
"!=",
"'to'",
":",
"print",
"(",
"'WARNING: failed to parse this image\\'s pre-squash history. '",
"'The build will continue, but all subsequent layers will be rebuilt.'",
")",
"return",
"squashed_sha",
"=",
"history",
"[",
"0",
"]",
"[",
"'Id'",
"]",
"start_squash_sha",
"=",
"comment",
"[",
"1",
"]",
"end_squash_sha",
"=",
"comment",
"[",
"3",
"]",
"cprint",
"(",
"' Layers %s to %s were squashed.'",
"%",
"(",
"start_squash_sha",
",",
"end_squash_sha",
")",
",",
"'yellow'",
")",
"# check cache",
"squashcache",
"=",
"os",
".",
"path",
".",
"join",
"(",
"BUILD_CACHEDIR",
",",
"'squashes'",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"squashcache",
")",
":",
"os",
".",
"makedirs",
"(",
"squashcache",
")",
"cachepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"BUILD_CACHEDIR",
",",
"'squashes'",
",",
"'%s-%s'",
"%",
"(",
"start_squash_sha",
",",
"end_squash_sha",
")",
")",
"# on hit, tag the squashedsha as the result of this build step",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"cachepath",
")",
":",
"self",
".",
"_get_squashed_layer_cache",
"(",
"client",
",",
"squashed_sha",
",",
"cachepath",
")",
"else",
":",
"self",
".",
"_cache_squashed_layer",
"(",
"squashed_sha",
",",
"cachepath",
")"
] |
Currently doing a "squash" basically negates the cache for any subsequent layers.
But we can work around this by A) checking if the cache was successful for the _unsquashed_
version of the image, and B) if so, re-using an older squashed version of the image.
Three ways to do this:
1. get the shas of the before/after images from `image.history` comments
OR the output stream (or both). Both are extremely brittle, but also easy to access
2. Build the image without squash first. If the unsquashed image sha matches
a cached one, substitute the unsquashed image for the squashed one.
If no match, re-run the steps with squash=True and store the resulting pair
Less brittle than 1., but harder and defs not elegant
3. Use docker-squash as a dependency - this is by far the most preferable solution,
except that they don't yet support the newest docker sdk version.
Currently option 1 is implemented - we parse the comment string in the image history
to figure out which layers the image was squashed from
|
[
"Currently",
"doing",
"a",
"squash",
"basically",
"negates",
"the",
"cache",
"for",
"any",
"subsequent",
"layers",
".",
"But",
"we",
"can",
"work",
"around",
"this",
"by",
"A",
")",
"checking",
"if",
"the",
"cache",
"was",
"successful",
"for",
"the",
"_unsquashed_",
"version",
"of",
"the",
"image",
"and",
"B",
")",
"if",
"so",
"re",
"-",
"using",
"an",
"older",
"squashed",
"version",
"of",
"the",
"image",
"."
] |
python
|
train
| 53.222222 |
tjguk/networkzero
|
networkzero/discovery.py
|
https://github.com/tjguk/networkzero/blob/0e3e81d2e9200b25a83ac07741612283599486d7/networkzero/discovery.py#L539-L567
|
def discover(name, wait_for_s=60):
"""Discover a service by name
Look for an advert to a named service::
address = nw0.discover("myservice")
:param name: any text
:param wait_for_s: how many seconds to wait before giving up
:returns: the address found or None
"""
_start_beacon()
#
# It's possible to enter a deadlock situation where the first
# process fires off a discovery request and waits for the
# second process to advertise. But the second process has to
# connect to the rpc port of the first process' beacon and
# its advertisement is queued behind the pending discovery.
#
# To give both a chance of succeeding we operate in bursts,
# allowing them to interleave.
#
t0 = time.time()
while True:
discovery = _rpc("discover", name, 0.5)
if discovery:
return discovery
if timed_out(t0, wait_for_s):
return None
|
[
"def",
"discover",
"(",
"name",
",",
"wait_for_s",
"=",
"60",
")",
":",
"_start_beacon",
"(",
")",
"#",
"# It's possible to enter a deadlock situation where the first",
"# process fires off a discovery request and waits for the",
"# second process to advertise. But the second process has to",
"# connect to the rpc port of the first process' beacon and",
"# its advertisement is queued behind the pending discovery.",
"#",
"# To give both a chance of succeeding we operate in bursts,",
"# allowing them to interleave.",
"#",
"t0",
"=",
"time",
".",
"time",
"(",
")",
"while",
"True",
":",
"discovery",
"=",
"_rpc",
"(",
"\"discover\"",
",",
"name",
",",
"0.5",
")",
"if",
"discovery",
":",
"return",
"discovery",
"if",
"timed_out",
"(",
"t0",
",",
"wait_for_s",
")",
":",
"return",
"None"
] |
Discover a service by name
Look for an advert to a named service::
address = nw0.discover("myservice")
:param name: any text
:param wait_for_s: how many seconds to wait before giving up
:returns: the address found or None
|
[
"Discover",
"a",
"service",
"by",
"name",
"Look",
"for",
"an",
"advert",
"to",
"a",
"named",
"service",
"::",
"address",
"=",
"nw0",
".",
"discover",
"(",
"myservice",
")",
":",
"param",
"name",
":",
"any",
"text",
":",
"param",
"wait_for_s",
":",
"how",
"many",
"seconds",
"to",
"wait",
"before",
"giving",
"up",
":",
"returns",
":",
"the",
"address",
"found",
"or",
"None"
] |
python
|
train
| 32.448276 |
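A two-process sketch of the advertise/discover pair. The counterpart `nw0.advertise` belongs to the same networkzero module per its docs; it is not shown in this record.

import networkzero as nw0

# Process A: advertise a named service
address = nw0.advertise("myservice")

# Process B: discover it, waiting up to 10 seconds
address = nw0.discover("myservice", wait_for_s=10)
if address is None:
    print("No advert found for myservice")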
gem/oq-engine
|
openquake/hazardlib/calc/disagg.py
|
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/calc/disagg.py#L72-L89
|
def collect_bin_data(ruptures, sitecol, cmaker, iml4,
truncation_level, n_epsilons, monitor=Monitor()):
"""
:param ruptures: a list of ruptures
:param sitecol: a SiteCollection instance
:param cmaker: a ContextMaker instance
:param iml4: an ArrayWrapper of intensities of shape (N, R, M, P)
:param truncation_level: the truncation level
:param n_epsilons: the number of epsilons
:param monitor: a Monitor instance
:returns: a dictionary (poe, imt, rlzi) -> probabilities of shape (N, E)
"""
# NB: instantiating truncnorm is slow and calls the infamous "doccer"
truncnorm = scipy.stats.truncnorm(-truncation_level, truncation_level)
epsilons = numpy.linspace(truncnorm.a, truncnorm.b, n_epsilons + 1)
acc = cmaker.disaggregate(
sitecol, ruptures, iml4, truncnorm, epsilons, monitor)
return pack(acc, 'mags dists lons lats'.split())
|
[
"def",
"collect_bin_data",
"(",
"ruptures",
",",
"sitecol",
",",
"cmaker",
",",
"iml4",
",",
"truncation_level",
",",
"n_epsilons",
",",
"monitor",
"=",
"Monitor",
"(",
")",
")",
":",
"# NB: instantiating truncnorm is slow and calls the infamous \"doccer\"",
"truncnorm",
"=",
"scipy",
".",
"stats",
".",
"truncnorm",
"(",
"-",
"truncation_level",
",",
"truncation_level",
")",
"epsilons",
"=",
"numpy",
".",
"linspace",
"(",
"truncnorm",
".",
"a",
",",
"truncnorm",
".",
"b",
",",
"n_epsilons",
"+",
"1",
")",
"acc",
"=",
"cmaker",
".",
"disaggregate",
"(",
"sitecol",
",",
"ruptures",
",",
"iml4",
",",
"truncnorm",
",",
"epsilons",
",",
"monitor",
")",
"return",
"pack",
"(",
"acc",
",",
"'mags dists lons lats'",
".",
"split",
"(",
")",
")"
] |
:param ruptures: a list of ruptures
:param sitecol: a SiteCollection instance
:param cmaker: a ContextMaker instance
:param iml4: an ArrayWrapper of intensities of shape (N, R, M, P)
:param truncation_level: the truncation level
:param n_epsilons: the number of epsilons
:param monitor: a Monitor instance
:returns: a dictionary (poe, imt, rlzi) -> probabilities of shape (N, E)
|
[
":",
"param",
"ruptures",
":",
"a",
"list",
"of",
"ruptures",
":",
"param",
"sitecol",
":",
"a",
"SiteCollection",
"instance",
":",
"param",
"cmaker",
":",
"a",
"ContextMaker",
"instance",
":",
"param",
"iml4",
":",
"an",
"ArrayWrapper",
"of",
"intensities",
"of",
"shape",
"(",
"N",
"R",
"M",
"P",
")",
":",
"param",
"truncation_level",
":",
"the",
"truncation",
"level",
":",
"param",
"n_epsilons",
":",
"the",
"number",
"of",
"epsilons",
":",
"param",
"monitor",
":",
"a",
"Monitor",
"instance",
":",
"returns",
":",
"a",
"dictionary",
"(",
"poe",
"imt",
"rlzi",
")",
"-",
">",
"probabilities",
"of",
"shape",
"(",
"N",
"E",
")"
] |
python
|
train
| 50.111111 |
apache/incubator-mxnet
|
tools/diagnose.py
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/tools/diagnose.py#L33-L48
|
def parse_args():
"""Parse arguments."""
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Diagnose script for checking the current system.')
choices = ['python', 'pip', 'mxnet', 'os', 'hardware', 'network']
for choice in choices:
parser.add_argument('--' + choice, default=1, type=int,
help='Diagnose {}.'.format(choice))
parser.add_argument('--region', default='', type=str,
help="Additional sites in which region(s) to test. \
Specify 'cn' for example to test mirror sites in China.")
parser.add_argument('--timeout', default=10, type=int,
help="Connection test timeout threshold, 0 to disable.")
args = parser.parse_args()
return args
|
[
"def",
"parse_args",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"formatter_class",
"=",
"argparse",
".",
"ArgumentDefaultsHelpFormatter",
",",
"description",
"=",
"'Diagnose script for checking the current system.'",
")",
"choices",
"=",
"[",
"'python'",
",",
"'pip'",
",",
"'mxnet'",
",",
"'os'",
",",
"'hardware'",
",",
"'network'",
"]",
"for",
"choice",
"in",
"choices",
":",
"parser",
".",
"add_argument",
"(",
"'--'",
"+",
"choice",
",",
"default",
"=",
"1",
",",
"type",
"=",
"int",
",",
"help",
"=",
"'Diagnose {}.'",
".",
"format",
"(",
"choice",
")",
")",
"parser",
".",
"add_argument",
"(",
"'--region'",
",",
"default",
"=",
"''",
",",
"type",
"=",
"str",
",",
"help",
"=",
"\"Additional sites in which region(s) to test. \\\n Specify 'cn' for example to test mirror sites in China.\"",
")",
"parser",
".",
"add_argument",
"(",
"'--timeout'",
",",
"default",
"=",
"10",
",",
"type",
"=",
"int",
",",
"help",
"=",
"\"Connection test timeout threshold, 0 to disable.\"",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"return",
"args"
] |
Parse arguments.
|
[
"Parse",
"arguments",
"."
] |
python
|
train
| 52 |
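How the flags above behave, as a runnable sketch: it patches `sys.argv` instead of invoking the script from a shell, and assumes the `parse_args` from the record is in scope.

import sys

sys.argv = ['diagnose.py', '--network', '0', '--region', 'cn', '--timeout', '20']
args = parse_args()
print(args.network)  # 0 -> network checks disabled (each --<choice> defaults to 1)
print(args.region)   # 'cn' -> also test mirror sites in China
print(args.timeout)  # 20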
jalanb/pysyte
|
pysyte/text_streams.py
|
https://github.com/jalanb/pysyte/blob/4e278101943d1ceb1a6bcaf6ddc72052ecf13114/pysyte/text_streams.py#L23-L37
|
def args(parsed_args, name=None):
"""Interpret parsed args to streams"""
strings = parsed_args.arg_strings(name)
files = [s for s in strings if os.path.isfile(s)]
if files:
streams = [open(f) for f in files]
else:
streams = []
if getattr(parsed_args, 'paste', not files):
streams.append(clipboard_stream())
if getattr(parsed_args, 'stdin', False):
streams.append(sys.stdin)
elif not streams:
streams = [sys.stdin]
return streams
|
[
"def",
"args",
"(",
"parsed_args",
",",
"name",
"=",
"None",
")",
":",
"strings",
"=",
"parsed_args",
".",
"arg_strings",
"(",
"name",
")",
"files",
"=",
"[",
"s",
"for",
"s",
"in",
"strings",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"s",
")",
"]",
"if",
"files",
":",
"streams",
"=",
"[",
"open",
"(",
"f",
")",
"for",
"f",
"in",
"files",
"]",
"else",
":",
"streams",
"=",
"[",
"]",
"if",
"getattr",
"(",
"parsed_args",
",",
"'paste'",
",",
"not",
"files",
")",
":",
"streams",
".",
"append",
"(",
"clipboard_stream",
"(",
")",
")",
"if",
"getattr",
"(",
"parsed_args",
",",
"'stdin'",
",",
"False",
")",
":",
"streams",
".",
"append",
"(",
"sys",
".",
"stdin",
")",
"elif",
"not",
"streams",
":",
"streams",
"=",
"[",
"sys",
".",
"stdin",
"]",
"return",
"streams"
] |
Interpret parsed args to streams
|
[
"Interpret",
"parsed",
"args",
"to",
"streams"
] |
python
|
train
| 32.666667 |
numenta/htmresearch
|
htmresearch/algorithms/faulty_spatial_pooler.py
|
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/algorithms/faulty_spatial_pooler.py#L56-L75
|
def killCells(self, percent=0.05):
"""
Changes the percentage of cells that are now considered dead. The first
time you call this method a permutation list is set up. Subsequent calls
change the number of cells considered dead.
"""
numColumns = numpy.prod(self.getColumnDimensions())
if self.zombiePermutation is None:
self.zombiePermutation = numpy.random.permutation(numColumns)
self.numDead = int(round(percent * numColumns))
if self.numDead > 0:
self.deadCols = self.zombiePermutation[0:self.numDead]
else:
self.deadCols = numpy.array([])
self.deadColumnInputSpan = self.getConnectedSpan(self.deadCols)
self.removeDeadColumns()
|
[
"def",
"killCells",
"(",
"self",
",",
"percent",
"=",
"0.05",
")",
":",
"numColumns",
"=",
"numpy",
".",
"prod",
"(",
"self",
".",
"getColumnDimensions",
"(",
")",
")",
"if",
"self",
".",
"zombiePermutation",
"is",
"None",
":",
"self",
".",
"zombiePermutation",
"=",
"numpy",
".",
"random",
".",
"permutation",
"(",
"numColumns",
")",
"self",
".",
"numDead",
"=",
"int",
"(",
"round",
"(",
"percent",
"*",
"numColumns",
")",
")",
"if",
"self",
".",
"numDead",
">",
"0",
":",
"self",
".",
"deadCols",
"=",
"self",
".",
"zombiePermutation",
"[",
"0",
":",
"self",
".",
"numDead",
"]",
"else",
":",
"self",
".",
"deadCols",
"=",
"numpy",
".",
"array",
"(",
"[",
"]",
")",
"self",
".",
"deadColumnInputSpan",
"=",
"self",
".",
"getConnectedSpan",
"(",
"self",
".",
"deadCols",
")",
"self",
".",
"removeDeadColumns",
"(",
")"
] |
Changes the percentage of cells that are now considered dead. The first
time you call this method a permutation list is set up. Subsequent calls
change the number of cells considered dead.
|
[
"Changes",
"the",
"percentage",
"of",
"cells",
"that",
"are",
"now",
"considered",
"dead",
".",
"The",
"first",
"time",
"you",
"call",
"this",
"method",
"a",
"permutation",
"list",
"is",
"set",
"up",
".",
"Calls",
"change",
"the",
"number",
"of",
"cells",
"considered",
"dead",
"."
] |
python
|
train
| 33.55 |
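The key property of `killCells` is the fixed permutation: later calls with a larger percentage produce a superset of the earlier dead set. A standalone numpy sketch of just that logic (not the spatial-pooler subclass itself):

import numpy as np

num_columns = 100
permutation = np.random.permutation(num_columns)  # set up once, like zombiePermutation

def dead_columns(percent):
    num_dead = int(round(percent * num_columns))
    return permutation[:num_dead]

five = set(dead_columns(0.05))
ten = set(dead_columns(0.10))
print(five.issubset(ten))  # True: dead sets nest as percent grows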
jlmadurga/permabots
|
permabots/views/hooks/kik_hook.py
|
https://github.com/jlmadurga/permabots/blob/781a91702529a23fe7bc2aa84c5d88e961412466/permabots/views/hooks/kik_hook.py#L58-L105
|
def post(self, request, hook_id):
"""
Process Kik webhook:
1. Get an enabled Kik bot
2. Verify Kik signature
3. Serialize each message
4. For each message create :class:`KikMessage <permabots.models.kik_api.KikMessage>` and :class:`KikUser <permabots.models.kik_api.KikUser>`
5. Delay each message processing to a task
6. Respond to provider
"""
try:
bot = caching.get_or_set(KikBot, hook_id)
except KikBot.DoesNotExist:
logger.warning("Hook id %s not associated to a bot" % hook_id)
return Response(status=status.HTTP_404_NOT_FOUND)
signature = request.META.get('HTTP_X_KIK_SIGNATURE')
if signature:
signature.encode('utf-8')
if not bot._bot.verify_signature(signature, request.stream.body):
logger.debug("Kik Bot data %s not verified %s" % (request.data, signature))
return Response(status=403)
logger.debug("Kik Bot data %s verified" % (request.data))
for kik_message in request.data['messages']:
serializer = KikMessageSerializer(data=kik_message)
logger.debug("Kik message %s serialized" % (kik_message))
if serializer.is_valid():
try:
if not self.accepted_types(serializer):
raise OnlyTextMessages
message = self.create_message(serializer, bot)
if bot.enabled:
logger.debug("Kik Bot %s attending request %s" % (bot, kik_message))
handle_message.delay(message.id, bot.id)
else:
logger.error("Message %s ignored by disabled bot %s" % (message, bot))
except OnlyTextMessages:
logger.warning("Not text message %s for bot %s" % (kik_message, hook_id))
return Response(status=status.HTTP_200_OK)
except:
exc_info = sys.exc_info()
traceback.print_exception(*exc_info)
logger.error("Error processing %s for bot %s" % (kik_message, hook_id))
return Response(serializer.errors, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
else:
logger.error("Validation error: %s from kik message %s" % (serializer.errors, kik_message))
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
return Response(serializer.data, status=status.HTTP_200_OK)
|
[
"def",
"post",
"(",
"self",
",",
"request",
",",
"hook_id",
")",
":",
"try",
":",
"bot",
"=",
"caching",
".",
"get_or_set",
"(",
"KikBot",
",",
"hook_id",
")",
"except",
"KikBot",
".",
"DoesNotExist",
":",
"logger",
".",
"warning",
"(",
"\"Hook id %s not associated to a bot\"",
"%",
"hook_id",
")",
"return",
"Response",
"(",
"status",
"=",
"status",
".",
"HTTP_404_NOT_FOUND",
")",
"signature",
"=",
"request",
".",
"META",
".",
"get",
"(",
"'HTTP_X_KIK_SIGNATURE'",
")",
"if",
"signature",
":",
"signature",
".",
"encode",
"(",
"'utf-8'",
")",
"if",
"not",
"bot",
".",
"_bot",
".",
"verify_signature",
"(",
"signature",
",",
"request",
".",
"stream",
".",
"body",
")",
":",
"logger",
".",
"debug",
"(",
"\"Kik Bot data %s not verified %s\"",
"%",
"(",
"request",
".",
"data",
",",
"signature",
")",
")",
"return",
"Response",
"(",
"status",
"=",
"403",
")",
"logger",
".",
"debug",
"(",
"\"Kik Bot data %s verified\"",
"%",
"(",
"request",
".",
"data",
")",
")",
"for",
"kik_message",
"in",
"request",
".",
"data",
"[",
"'messages'",
"]",
":",
"serializer",
"=",
"KikMessageSerializer",
"(",
"data",
"=",
"kik_message",
")",
"logger",
".",
"debug",
"(",
"\"Kik message %s serialized\"",
"%",
"(",
"kik_message",
")",
")",
"if",
"serializer",
".",
"is_valid",
"(",
")",
":",
"try",
":",
"if",
"not",
"self",
".",
"accepted_types",
"(",
"serializer",
")",
":",
"raise",
"OnlyTextMessages",
"message",
"=",
"self",
".",
"create_message",
"(",
"serializer",
",",
"bot",
")",
"if",
"bot",
".",
"enabled",
":",
"logger",
".",
"debug",
"(",
"\"Kik Bot %s attending request %s\"",
"%",
"(",
"bot",
",",
"kik_message",
")",
")",
"handle_message",
".",
"delay",
"(",
"message",
".",
"id",
",",
"bot",
".",
"id",
")",
"else",
":",
"logger",
".",
"error",
"(",
"\"Message %s ignored by disabled bot %s\"",
"%",
"(",
"message",
",",
"bot",
")",
")",
"except",
"OnlyTextMessages",
":",
"logger",
".",
"warning",
"(",
"\"Not text message %s for bot %s\"",
"%",
"(",
"kik_message",
",",
"hook_id",
")",
")",
"return",
"Response",
"(",
"status",
"=",
"status",
".",
"HTTP_200_OK",
")",
"except",
":",
"exc_info",
"=",
"sys",
".",
"exc_info",
"(",
")",
"traceback",
".",
"print_exception",
"(",
"*",
"exc_info",
")",
"logger",
".",
"error",
"(",
"\"Error processing %s for bot %s\"",
"%",
"(",
"kik_message",
",",
"hook_id",
")",
")",
"return",
"Response",
"(",
"serializer",
".",
"errors",
",",
"status",
"=",
"status",
".",
"HTTP_500_INTERNAL_SERVER_ERROR",
")",
"else",
":",
"logger",
".",
"error",
"(",
"\"Validation error: %s from kik message %s\"",
"%",
"(",
"serializer",
".",
"errors",
",",
"kik_message",
")",
")",
"return",
"Response",
"(",
"serializer",
".",
"errors",
",",
"status",
"=",
"status",
".",
"HTTP_400_BAD_REQUEST",
")",
"return",
"Response",
"(",
"serializer",
".",
"data",
",",
"status",
"=",
"status",
".",
"HTTP_200_OK",
")"
] |
Process Kik webhook:
1. Get an enabled Kik bot
2. Verify Kik signature
3. Serialize each message
4. For each message create :class:`KikMessage <permabots.models.kik_api.KikMessage>` and :class:`KikUser <permabots.models.kik_api.KikUser>`
5. Delay each message processing to a task
6. Respond to provider
|
[
"Process",
"Kik",
"webhook",
":",
"1",
".",
"Get",
"an",
"enabled",
"Kik",
"bot",
"2",
".",
"Verify",
"Kik",
"signature",
"3",
".",
"Serialize",
"each",
"message",
"4",
".",
"For",
"each",
"message",
"create",
":",
"class",
":",
"KikMessage",
"<permabots",
".",
"models",
".",
"kik_api",
".",
"KikMessage",
">",
"and",
":",
"class",
":",
"KikUser",
"<permabots",
".",
"models",
".",
"kik_api",
".",
"KikUser",
">",
"5",
".",
"Delay",
"each",
"message",
"processing",
"to",
"a",
"task",
"6",
".",
"Response",
"provider"
] |
python
|
train
| 54.416667 |
danilobellini/dose
|
dose/_legacy.py
|
https://github.com/danilobellini/dose/blob/141f48322f7812b7d32e3d5f065d4473a11102a4/dose/_legacy.py#L627-L634
|
def on_close(self, evt):
"""
Pop-up menu and wx.EVT_CLOSE closing event
"""
self.stop() # DoseWatcher
if evt.EventObject is not self: # Avoid deadlocks
self.Close() # wx.Frame
evt.Skip()
|
[
"def",
"on_close",
"(",
"self",
",",
"evt",
")",
":",
"self",
".",
"stop",
"(",
")",
"# DoseWatcher",
"if",
"evt",
".",
"EventObject",
"is",
"not",
"self",
":",
"# Avoid deadlocks",
"self",
".",
"Close",
"(",
")",
"# wx.Frame",
"evt",
".",
"Skip",
"(",
")"
] |
Pop-up menu and wx.EVT_CLOSE closing event
|
[
"Pop",
"-",
"up",
"menu",
"and",
"wx",
".",
"EVT_CLOSE",
"closing",
"event"
] |
python
|
train
| 26.125 |
flowersteam/explauto
|
explauto/sensorimotor_model/inverse/cma.py
|
https://github.com/flowersteam/explauto/blob/cf0f81ecb9f6412f7276a95bd27359000e1e26b6/explauto/sensorimotor_model/inverse/cma.py#L4452-L4461
|
def versatile_options():
"""return list of options that can be changed at any time (not
only be initialized), however the list might not be entirely up
to date.
The string ' #v ' in the default value indicates a 'versatile'
option that can be changed any time.
"""
return tuple(sorted(i[0] for i in list(CMAOptions.defaults().items()) if i[1].find(' #v ') > 0))
|
[
"def",
"versatile_options",
"(",
")",
":",
"return",
"tuple",
"(",
"sorted",
"(",
"i",
"[",
"0",
"]",
"for",
"i",
"in",
"list",
"(",
"CMAOptions",
".",
"defaults",
"(",
")",
".",
"items",
"(",
")",
")",
"if",
"i",
"[",
"1",
"]",
".",
"find",
"(",
"' #v '",
")",
">",
"0",
")",
")"
] |
return list of options that can be changed at any time (not
only at initialization); however, the list might not be entirely up
to date.
The string ' #v ' in the default value indicates a 'versatile'
option that can be changed any time.
|
[
"return",
"list",
"of",
"options",
"that",
"can",
"be",
"changed",
"at",
"any",
"time",
"(",
"not",
"only",
"be",
"initialized",
")",
"however",
"the",
"list",
"might",
"not",
"be",
"entirely",
"up",
"to",
"date",
"."
] |
python
|
train
| 41 |
earwig/mwparserfromhell
|
mwparserfromhell/wikicode.py
|
https://github.com/earwig/mwparserfromhell/blob/98dc30902d35c714a70aca8e6616f49d71cb24cc/mwparserfromhell/wikicode.py#L163-L205
|
def _do_weak_search(self, obj, recursive):
"""Search for an element that looks like *obj* within the node list.
This follows the same rules as :meth:`_do_strong_search` with some
differences. *obj* is treated as a string that might represent any
:class:`.Node`, :class:`.Wikicode`, or combination of the two present
in the node list. Thus, matching is weak (using string comparisons)
rather than strong (using ``is``). Because multiple nodes can match
*obj*, the result is a list of tuples instead of just one (however,
:exc:`ValueError` is still raised if nothing is found). Individual
matches will never overlap.
The tuples contain a new first element, *exact*, which is ``True`` if
we were able to match *obj* exactly to one or more adjacent nodes, or
``False`` if we found *obj* inside a node or incompletely spanning
multiple nodes.
"""
obj = parse_anything(obj)
if not obj or obj not in self:
raise ValueError(obj)
results = []
contexts = [self]
while contexts:
context = contexts.pop()
i = len(context.nodes) - 1
while i >= 0:
node = context.get(i)
if obj.get(-1) == node:
for j in range(-len(obj.nodes), -1):
if obj.get(j) != context.get(i + j + 1):
break
else:
i -= len(obj.nodes) - 1
index = slice(i, i + len(obj.nodes))
results.append((True, context, index))
elif recursive and obj in node:
contexts.extend(node.__children__())
i -= 1
if not results:
if not recursive:
raise ValueError(obj)
results.append((False, self, slice(0, len(self.nodes))))
return results
|
[
"def",
"_do_weak_search",
"(",
"self",
",",
"obj",
",",
"recursive",
")",
":",
"obj",
"=",
"parse_anything",
"(",
"obj",
")",
"if",
"not",
"obj",
"or",
"obj",
"not",
"in",
"self",
":",
"raise",
"ValueError",
"(",
"obj",
")",
"results",
"=",
"[",
"]",
"contexts",
"=",
"[",
"self",
"]",
"while",
"contexts",
":",
"context",
"=",
"contexts",
".",
"pop",
"(",
")",
"i",
"=",
"len",
"(",
"context",
".",
"nodes",
")",
"-",
"1",
"while",
"i",
">=",
"0",
":",
"node",
"=",
"context",
".",
"get",
"(",
"i",
")",
"if",
"obj",
".",
"get",
"(",
"-",
"1",
")",
"==",
"node",
":",
"for",
"j",
"in",
"range",
"(",
"-",
"len",
"(",
"obj",
".",
"nodes",
")",
",",
"-",
"1",
")",
":",
"if",
"obj",
".",
"get",
"(",
"j",
")",
"!=",
"context",
".",
"get",
"(",
"i",
"+",
"j",
"+",
"1",
")",
":",
"break",
"else",
":",
"i",
"-=",
"len",
"(",
"obj",
".",
"nodes",
")",
"-",
"1",
"index",
"=",
"slice",
"(",
"i",
",",
"i",
"+",
"len",
"(",
"obj",
".",
"nodes",
")",
")",
"results",
".",
"append",
"(",
"(",
"True",
",",
"context",
",",
"index",
")",
")",
"elif",
"recursive",
"and",
"obj",
"in",
"node",
":",
"contexts",
".",
"extend",
"(",
"node",
".",
"__children__",
"(",
")",
")",
"i",
"-=",
"1",
"if",
"not",
"results",
":",
"if",
"not",
"recursive",
":",
"raise",
"ValueError",
"(",
"obj",
")",
"results",
".",
"append",
"(",
"(",
"False",
",",
"self",
",",
"slice",
"(",
"0",
",",
"len",
"(",
"self",
".",
"nodes",
")",
")",
")",
")",
"return",
"results"
] |
Search for an element that looks like *obj* within the node list.
This follows the same rules as :meth:`_do_strong_search` with some
differences. *obj* is treated as a string that might represent any
:class:`.Node`, :class:`.Wikicode`, or combination of the two present
in the node list. Thus, matching is weak (using string comparisons)
rather than strong (using ``is``). Because multiple nodes can match
*obj*, the result is a list of tuples instead of just one (however,
:exc:`ValueError` is still raised if nothing is found). Individual
matches will never overlap.
The tuples contain a new first element, *exact*, which is ``True`` if
we were able to match *obj* exactly to one or more adjacent nodes, or
``False`` if we found *obj* inside a node or incompletely spanning
multiple nodes.
|
[
"Search",
"for",
"an",
"element",
"that",
"looks",
"like",
"*",
"obj",
"*",
"within",
"the",
"node",
"list",
"."
] |
python
|
train
| 45.046512 |
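The weak search is what lets the public API match by text. A small sketch through `Wikicode.replace`, the public entry point: string arguments are parsed and matched weakly, as the docstring above describes.

import mwparserfromhell

code = mwparserfromhell.parse("foo {{bar}} baz")
code.replace("{{bar}}", "{{qux}}")  # "{{bar}}" is matched by string comparison
print(code)  # foo {{qux}} baz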
google/grr
|
grr/server/grr_response_server/aff4_objects/collects.py
|
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/aff4_objects/collects.py#L23-L58
|
def NewFromContent(cls,
content,
urn,
chunk_size=1024,
token=None,
private_key=None,
public_key=None):
"""Alternate constructor for GRRSignedBlob.
Creates a GRRSignedBlob from a content string by chunking it and signing
each chunk.
Args:
content: The data to be stored in the GRRSignedBlob.
urn: The AFF4 URN to create.
chunk_size: Data will be chunked into this size (each chunk is
individually signed).
token: The ACL Token.
private_key: An rdf_crypto.RSAPrivateKey() instance.
public_key: An rdf_crypto.RSAPublicKey() instance.
Returns:
the URN of the new object written.
"""
aff4.FACTORY.Delete(urn, token=token)
with data_store.DB.GetMutationPool() as pool:
with aff4.FACTORY.Create(
urn, cls, mode="w", mutation_pool=pool, token=token) as fd:
for start_of_chunk in range(0, len(content), chunk_size):
chunk = content[start_of_chunk:start_of_chunk + chunk_size]
blob_rdf = rdf_crypto.SignedBlob()
blob_rdf.Sign(chunk, private_key, public_key)
fd.Add(blob_rdf, mutation_pool=pool)
return urn
|
[
"def",
"NewFromContent",
"(",
"cls",
",",
"content",
",",
"urn",
",",
"chunk_size",
"=",
"1024",
",",
"token",
"=",
"None",
",",
"private_key",
"=",
"None",
",",
"public_key",
"=",
"None",
")",
":",
"aff4",
".",
"FACTORY",
".",
"Delete",
"(",
"urn",
",",
"token",
"=",
"token",
")",
"with",
"data_store",
".",
"DB",
".",
"GetMutationPool",
"(",
")",
"as",
"pool",
":",
"with",
"aff4",
".",
"FACTORY",
".",
"Create",
"(",
"urn",
",",
"cls",
",",
"mode",
"=",
"\"w\"",
",",
"mutation_pool",
"=",
"pool",
",",
"token",
"=",
"token",
")",
"as",
"fd",
":",
"for",
"start_of_chunk",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"content",
")",
",",
"chunk_size",
")",
":",
"chunk",
"=",
"content",
"[",
"start_of_chunk",
":",
"start_of_chunk",
"+",
"chunk_size",
"]",
"blob_rdf",
"=",
"rdf_crypto",
".",
"SignedBlob",
"(",
")",
"blob_rdf",
".",
"Sign",
"(",
"chunk",
",",
"private_key",
",",
"public_key",
")",
"fd",
".",
"Add",
"(",
"blob_rdf",
",",
"mutation_pool",
"=",
"pool",
")",
"return",
"urn"
] |
Alternate constructor for GRRSignedBlob.
Creates a GRRSignedBlob from a content string by chunking it and signing
each chunk.
Args:
content: The data to be stored in the GRRSignedBlob.
urn: The AFF4 URN to create.
chunk_size: Data will be chunked into this size (each chunk is
individually signed).
token: The ACL Token.
private_key: An rdf_crypto.RSAPrivateKey() instance.
public_key: An rdf_crypto.RSAPublicKey() instance.
Returns:
the URN of the new object written.
|
[
"Alternate",
"constructor",
"for",
"GRRSignedBlob",
"."
] |
python
|
train
| 34.361111 |
log2timeline/plaso
|
plaso/parsers/msiecf.py
|
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/msiecf.py#L96-L126
|
def _ParseLeak(
self, parser_mediator, cache_directories, msiecf_item, recovered=False):
"""Extract data from a MSIE Cache Files (MSIECF) leak item.
Every item is stored as an event object, one for each timestamp.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
cache_directories (list[str]): cache directory names.
msiecf_item (pymsiecf.leak): MSIECF leak item.
recovered (Optional[bool]): True if the item was recovered.
"""
# TODO: add support for possible last cache synchronization date and time.
date_time = dfdatetime_semantic_time.SemanticTime('Not set')
event_data = MSIECFLeakEventData()
event_data.cached_filename = msiecf_item.filename
event_data.cached_file_size = msiecf_item.cached_file_size
event_data.cache_directory_index = msiecf_item.cache_directory_index
event_data.offset = msiecf_item.offset
event_data.recovered = recovered
if (event_data.cache_directory_index >= 0 and
event_data.cache_directory_index < len(cache_directories)):
event_data.cache_directory_name = (
cache_directories[event_data.cache_directory_index])
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
[
"def",
"_ParseLeak",
"(",
"self",
",",
"parser_mediator",
",",
"cache_directories",
",",
"msiecf_item",
",",
"recovered",
"=",
"False",
")",
":",
"# TODO: add support for possible last cache synchronization date and time.",
"date_time",
"=",
"dfdatetime_semantic_time",
".",
"SemanticTime",
"(",
"'Not set'",
")",
"event_data",
"=",
"MSIECFLeakEventData",
"(",
")",
"event_data",
".",
"cached_filename",
"=",
"msiecf_item",
".",
"filename",
"event_data",
".",
"cached_file_size",
"=",
"msiecf_item",
".",
"cached_file_size",
"event_data",
".",
"cache_directory_index",
"=",
"msiecf_item",
".",
"cache_directory_index",
"event_data",
".",
"offset",
"=",
"msiecf_item",
".",
"offset",
"event_data",
".",
"recovered",
"=",
"recovered",
"if",
"(",
"event_data",
".",
"cache_directory_index",
">=",
"0",
"and",
"event_data",
".",
"cache_directory_index",
"<",
"len",
"(",
"cache_directories",
")",
")",
":",
"event_data",
".",
"cache_directory_name",
"=",
"(",
"cache_directories",
"[",
"event_data",
".",
"cache_directory_index",
"]",
")",
"event",
"=",
"time_events",
".",
"DateTimeValuesEvent",
"(",
"date_time",
",",
"definitions",
".",
"TIME_DESCRIPTION_NOT_A_TIME",
")",
"parser_mediator",
".",
"ProduceEventWithEventData",
"(",
"event",
",",
"event_data",
")"
] |
Extract data from a MSIE Cache Files (MSIECF) leak item.
Every item is stored as an event object, one for each timestamp.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
cache_directories (list[str]): cache directory names.
msiecf_item (pymsiecf.leak): MSIECF leak item.
recovered (Optional[bool]): True if the item was recovered.
|
[
"Extract",
"data",
"from",
"a",
"MSIE",
"Cache",
"Files",
"(",
"MSIECF",
")",
"leak",
"item",
"."
] |
python
|
train
| 44.580645 |
Kronuz/pyScss
|
scss/extension/compass/sprites.py
|
https://github.com/Kronuz/pyScss/blob/fb32b317f6e2b4b4aad2b86a74844658ac4aa11e/scss/extension/compass/sprites.py#L479-L505
|
def sprite(map, sprite, offset_x=None, offset_y=None, cache_buster=True):
"""
Returns the image and background position for use in a single shorthand
property
"""
map = map.render()
sprite_maps = _get_cache('sprite_maps')
sprite_map = sprite_maps.get(map)
sprite_name = String.unquoted(sprite).value
sprite = sprite_map and sprite_map.get(sprite_name)
if not sprite_map:
log.error("No sprite map found: %s", map, extra={'stack': True})
elif not sprite:
log.error("No sprite found: %s in %s", sprite_name, sprite_map['*n*'], extra={'stack': True})
if sprite:
url = '%s%s' % (config.ASSETS_URL, sprite_map['*f*'])
if cache_buster:
url += '?_=%s' % sprite_map['*t*']
x = Number(offset_x or 0, 'px')
y = Number(offset_y or 0, 'px')
if not x.value or (x.value <= -1 or x.value >= 1) and not x.is_simple_unit('%'):
x -= Number(sprite[2], 'px')
if not y.value or (y.value <= -1 or y.value >= 1) and not y.is_simple_unit('%'):
y -= Number(sprite[3], 'px')
url = "url(%s)" % escape(url)
return List([String.unquoted(url), x, y])
return List([Number(0), Number(0)])
|
[
"def",
"sprite",
"(",
"map",
",",
"sprite",
",",
"offset_x",
"=",
"None",
",",
"offset_y",
"=",
"None",
",",
"cache_buster",
"=",
"True",
")",
":",
"map",
"=",
"map",
".",
"render",
"(",
")",
"sprite_maps",
"=",
"_get_cache",
"(",
"'sprite_maps'",
")",
"sprite_map",
"=",
"sprite_maps",
".",
"get",
"(",
"map",
")",
"sprite_name",
"=",
"String",
".",
"unquoted",
"(",
"sprite",
")",
".",
"value",
"sprite",
"=",
"sprite_map",
"and",
"sprite_map",
".",
"get",
"(",
"sprite_name",
")",
"if",
"not",
"sprite_map",
":",
"log",
".",
"error",
"(",
"\"No sprite map found: %s\"",
",",
"map",
",",
"extra",
"=",
"{",
"'stack'",
":",
"True",
"}",
")",
"elif",
"not",
"sprite",
":",
"log",
".",
"error",
"(",
"\"No sprite found: %s in %s\"",
",",
"sprite_name",
",",
"sprite_map",
"[",
"'*n*'",
"]",
",",
"extra",
"=",
"{",
"'stack'",
":",
"True",
"}",
")",
"if",
"sprite",
":",
"url",
"=",
"'%s%s'",
"%",
"(",
"config",
".",
"ASSETS_URL",
",",
"sprite_map",
"[",
"'*f*'",
"]",
")",
"if",
"cache_buster",
":",
"url",
"+=",
"'?_=%s'",
"%",
"sprite_map",
"[",
"'*t*'",
"]",
"x",
"=",
"Number",
"(",
"offset_x",
"or",
"0",
",",
"'px'",
")",
"y",
"=",
"Number",
"(",
"offset_y",
"or",
"0",
",",
"'px'",
")",
"if",
"not",
"x",
".",
"value",
"or",
"(",
"x",
".",
"value",
"<=",
"-",
"1",
"or",
"x",
".",
"value",
">=",
"1",
")",
"and",
"not",
"x",
".",
"is_simple_unit",
"(",
"'%'",
")",
":",
"x",
"-=",
"Number",
"(",
"sprite",
"[",
"2",
"]",
",",
"'px'",
")",
"if",
"not",
"y",
".",
"value",
"or",
"(",
"y",
".",
"value",
"<=",
"-",
"1",
"or",
"y",
".",
"value",
">=",
"1",
")",
"and",
"not",
"y",
".",
"is_simple_unit",
"(",
"'%'",
")",
":",
"y",
"-=",
"Number",
"(",
"sprite",
"[",
"3",
"]",
",",
"'px'",
")",
"url",
"=",
"\"url(%s)\"",
"%",
"escape",
"(",
"url",
")",
"return",
"List",
"(",
"[",
"String",
".",
"unquoted",
"(",
"url",
")",
",",
"x",
",",
"y",
"]",
")",
"return",
"List",
"(",
"[",
"Number",
"(",
"0",
")",
",",
"Number",
"(",
"0",
")",
"]",
")"
] |
Returns the image and background position for use in a single shorthand
property
|
[
"Returns",
"the",
"image",
"and",
"background",
"position",
"for",
"use",
"in",
"a",
"single",
"shorthand",
"property"
] |
python
|
train
| 44.333333 |
Azure/azure-sdk-for-python
|
azure-servicemanagement-legacy/azure/servicemanagement/_serialization.py
|
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-servicemanagement-legacy/azure/servicemanagement/_serialization.py#L1574-L1642
|
def xml_to_metrics(xmlstr, object_type):
'''Converts xml response to service bus metrics objects
The xml format for MetricProperties
<entry>
<id>https://sbgm.windows.net/Metrics(\'listeners.active\')</id>
<title/>
<updated>2014-10-09T11:56:50Z</updated>
<author>
<name/>
</author>
<content type="application/xml">
<m:properties>
<d:Name>listeners.active</d:Name>
<d:PrimaryAggregation>Average</d:PrimaryAggregation>
<d:Unit>Count</d:Unit>
<d:DisplayName>Active listeners</d:DisplayName>
</m:properties>
</content>
</entry>
The xml format for MetricValues
<entry>
<id>https://sbgm.windows.net/MetricValues(datetime\'2014-10-02T00:00:00Z\')</id>
<title/>
<updated>2014-10-09T18:38:28Z</updated>
<author>
<name/>
</author>
<content type="application/xml">
<m:properties>
<d:Timestamp m:type="Edm.DateTime">2014-10-02T00:00:00Z</d:Timestamp>
<d:Min m:type="Edm.Int64">-118</d:Min>
<d:Max m:type="Edm.Int64">15</d:Max>
<d:Average m:type="Edm.Single">-78.44444</d:Average>
<d:Total m:type="Edm.Int64">0</d:Total>
</m:properties>
</content>
</entry>
'''
xmldoc = minidom.parseString(xmlstr)
return_obj = object_type()
members = dict(vars(return_obj))
# Only one entry here
for xml_entry in _MinidomXmlToObject.get_children_from_path(xmldoc,
'entry'):
for node in _MinidomXmlToObject.get_children_from_path(xml_entry,
'content',
'properties'):
for name in members:
xml_name = _get_serialization_name(name)
children = _MinidomXmlToObject.get_child_nodes(node, xml_name)
if not children:
continue
child = children[0]
node_type = child.getAttributeNS("http://schemas.microsoft.com/ado/2007/08/dataservices/metadata", 'type')
node_value = _ServiceBusManagementXmlSerializer.odata_converter(child.firstChild.nodeValue, node_type)
setattr(return_obj, name, node_value)
for name, value in _MinidomXmlToObject.get_entry_properties_from_node(
xml_entry,
include_id=True,
use_title_as_id=False).items():
if name in members:
continue # Do not override if already members
setattr(return_obj, name, value)
return return_obj
|
[
"def",
"xml_to_metrics",
"(",
"xmlstr",
",",
"object_type",
")",
":",
"xmldoc",
"=",
"minidom",
".",
"parseString",
"(",
"xmlstr",
")",
"return_obj",
"=",
"object_type",
"(",
")",
"members",
"=",
"dict",
"(",
"vars",
"(",
"return_obj",
")",
")",
"# Only one entry here",
"for",
"xml_entry",
"in",
"_MinidomXmlToObject",
".",
"get_children_from_path",
"(",
"xmldoc",
",",
"'entry'",
")",
":",
"for",
"node",
"in",
"_MinidomXmlToObject",
".",
"get_children_from_path",
"(",
"xml_entry",
",",
"'content'",
",",
"'properties'",
")",
":",
"for",
"name",
"in",
"members",
":",
"xml_name",
"=",
"_get_serialization_name",
"(",
"name",
")",
"children",
"=",
"_MinidomXmlToObject",
".",
"get_child_nodes",
"(",
"node",
",",
"xml_name",
")",
"if",
"not",
"children",
":",
"continue",
"child",
"=",
"children",
"[",
"0",
"]",
"node_type",
"=",
"child",
".",
"getAttributeNS",
"(",
"\"http://schemas.microsoft.com/ado/2007/08/dataservices/metadata\"",
",",
"'type'",
")",
"node_value",
"=",
"_ServiceBusManagementXmlSerializer",
".",
"odata_converter",
"(",
"child",
".",
"firstChild",
".",
"nodeValue",
",",
"node_type",
")",
"setattr",
"(",
"return_obj",
",",
"name",
",",
"node_value",
")",
"for",
"name",
",",
"value",
"in",
"_MinidomXmlToObject",
".",
"get_entry_properties_from_node",
"(",
"xml_entry",
",",
"include_id",
"=",
"True",
",",
"use_title_as_id",
"=",
"False",
")",
".",
"items",
"(",
")",
":",
"if",
"name",
"in",
"members",
":",
"continue",
"# Do not override if already members",
"setattr",
"(",
"return_obj",
",",
"name",
",",
"value",
")",
"return",
"return_obj"
] |
Converts xml response to service bus metrics objects
The xml format for MetricProperties
<entry>
<id>https://sbgm.windows.net/Metrics(\'listeners.active\')</id>
<title/>
<updated>2014-10-09T11:56:50Z</updated>
<author>
<name/>
</author>
<content type="application/xml">
<m:properties>
<d:Name>listeners.active</d:Name>
<d:PrimaryAggregation>Average</d:PrimaryAggregation>
<d:Unit>Count</d:Unit>
<d:DisplayName>Active listeners</d:DisplayName>
</m:properties>
</content>
</entry>
The xml format for MetricValues
<entry>
<id>https://sbgm.windows.net/MetricValues(datetime\'2014-10-02T00:00:00Z\')</id>
<title/>
<updated>2014-10-09T18:38:28Z</updated>
<author>
<name/>
</author>
<content type="application/xml">
<m:properties>
<d:Timestamp m:type="Edm.DateTime">2014-10-02T00:00:00Z</d:Timestamp>
<d:Min m:type="Edm.Int64">-118</d:Min>
<d:Max m:type="Edm.Int64">15</d:Max>
<d:Average m:type="Edm.Single">-78.44444</d:Average>
<d:Total m:type="Edm.Int64">0</d:Total>
</m:properties>
</content>
</entry>
|
[
"Converts",
"xml",
"response",
"to",
"service",
"bus",
"metrics",
"objects"
] |
python
|
test
| 39.84058 |
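
The only non-obvious move in xml_to_metrics is reading the m:type attribute, which lives in the ADO metadata namespace and therefore needs getAttributeNS. A self-contained sketch against a fragment of the sample XML from the docstring (property names hard-coded for brevity):

from xml.dom import minidom

METADATA_NS = 'http://schemas.microsoft.com/ado/2007/08/dataservices/metadata'
xmlstr = (
    '<m:properties xmlns:m="%s" '
    'xmlns:d="http://schemas.microsoft.com/ado/2007/08/dataservices">'
    '<d:Min m:type="Edm.Int64">-118</d:Min>'
    '<d:Max m:type="Edm.Int64">15</d:Max>'
    '</m:properties>' % METADATA_NS)

doc = minidom.parseString(xmlstr)
for tag in ('d:Min', 'd:Max'):
    node = doc.getElementsByTagName(tag)[0]
    edm_type = node.getAttributeNS(METADATA_NS, 'type')  # e.g. 'Edm.Int64'
    print(node.localName, edm_type, node.firstChild.nodeValue)
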
amigocloud/python-amigocloud
|
amigocloud/amigocloud.py
|
https://github.com/amigocloud/python-amigocloud/blob/d31403e7299cc46e3a3e1392090ee033f3a02b6d/amigocloud/amigocloud.py#L342-L356
|
def listen_dataset_events(self, owner_id, project_id, dataset_id):
"""
Authenticate to start using dataset events.
"""
if not self._user_id:
raise AmigoCloudError(self.error_msg['logged_in_websockets'])
url = '/users/%s/projects/%s/datasets/%s/start_websocket_session'
response = self.get(url % (owner_id, project_id, dataset_id))
websocket_session = response['websocket_session']
auth_data = {'userid': self._user_id,
'datasetid': dataset_id,
'websocket_session': websocket_session}
self.amigosocket.emit('authenticate', auth_data)
|
[
"def",
"listen_dataset_events",
"(",
"self",
",",
"owner_id",
",",
"project_id",
",",
"dataset_id",
")",
":",
"if",
"not",
"self",
".",
"_user_id",
":",
"raise",
"AmigoCloudError",
"(",
"self",
".",
"error_msg",
"[",
"'logged_in_websockets'",
"]",
")",
"url",
"=",
"'/users/%s/projects/%s/datasets/%s/start_websocket_session'",
"response",
"=",
"self",
".",
"get",
"(",
"url",
"%",
"(",
"owner_id",
",",
"project_id",
",",
"dataset_id",
")",
")",
"websocket_session",
"=",
"response",
"[",
"'websocket_session'",
"]",
"auth_data",
"=",
"{",
"'userid'",
":",
"self",
".",
"_user_id",
",",
"'datasetid'",
":",
"dataset_id",
",",
"'websocket_session'",
":",
"websocket_session",
"}",
"self",
".",
"amigosocket",
".",
"emit",
"(",
"'authenticate'",
",",
"auth_data",
")"
] |
Authenticate to start using dataset events.
|
[
"Authenticate",
"to",
"start",
"using",
"dataset",
"events",
"."
] |
python
|
train
| 43.066667 |
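
A typical call sequence for the method above, sketched from the library's public surface (the listener helpers and constructor arguments are assumptions about the same package, and the IDs are placeholders):

from amigocloud import AmigoCloud  # assumed client class exported by this package

ac = AmigoCloud(token='<api token>')  # logging in is what sets _user_id
ac.listen_dataset_events(owner_id=1, project_id=2, dataset_id=3)
# hypothetical listener hookup; exact helper names may differ by version
ac.add_dataset_event_listener(3, 'realtime', print)
ac.start_listening()
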
spulec/moto
|
moto/core/responses.py
|
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/core/responses.py#L224-L252
|
def _get_action_from_method_and_request_uri(self, method, request_uri):
"""basically used for `rest-json` APIs
You can refer to example from link below
https://github.com/boto/botocore/blob/develop/botocore/data/iot/2015-05-28/service-2.json
"""
# service response class should have 'SERVICE_NAME' class member,
# if you want to get action from method and url
if not hasattr(self, 'SERVICE_NAME'):
return None
service = self.SERVICE_NAME
conn = boto3.client(service, region_name=self.region)
# make cache if it does not exist yet
if not hasattr(self, 'method_urls'):
self.method_urls = defaultdict(lambda: defaultdict(str))
op_names = conn._service_model.operation_names
for op_name in op_names:
op_model = conn._service_model.operation_model(op_name)
_method = op_model.http['method']
uri_regexp = self.uri_to_regexp(op_model.http['requestUri'])
self.method_urls[_method][uri_regexp] = op_model.name
regexp_and_names = self.method_urls[method]
for regexp, name in regexp_and_names.items():
match = re.match(regexp, request_uri)
self.uri_match = match
if match:
return name
return None
|
[
"def",
"_get_action_from_method_and_request_uri",
"(",
"self",
",",
"method",
",",
"request_uri",
")",
":",
"# service response class should have 'SERVICE_NAME' class member,",
"# if you want to get action from method and url",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'SERVICE_NAME'",
")",
":",
"return",
"None",
"service",
"=",
"self",
".",
"SERVICE_NAME",
"conn",
"=",
"boto3",
".",
"client",
"(",
"service",
",",
"region_name",
"=",
"self",
".",
"region",
")",
"# make cache if it does not exist yet",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'method_urls'",
")",
":",
"self",
".",
"method_urls",
"=",
"defaultdict",
"(",
"lambda",
":",
"defaultdict",
"(",
"str",
")",
")",
"op_names",
"=",
"conn",
".",
"_service_model",
".",
"operation_names",
"for",
"op_name",
"in",
"op_names",
":",
"op_model",
"=",
"conn",
".",
"_service_model",
".",
"operation_model",
"(",
"op_name",
")",
"_method",
"=",
"op_model",
".",
"http",
"[",
"'method'",
"]",
"uri_regexp",
"=",
"self",
".",
"uri_to_regexp",
"(",
"op_model",
".",
"http",
"[",
"'requestUri'",
"]",
")",
"self",
".",
"method_urls",
"[",
"_method",
"]",
"[",
"uri_regexp",
"]",
"=",
"op_model",
".",
"name",
"regexp_and_names",
"=",
"self",
".",
"method_urls",
"[",
"method",
"]",
"for",
"regexp",
",",
"name",
"in",
"regexp_and_names",
".",
"items",
"(",
")",
":",
"match",
"=",
"re",
".",
"match",
"(",
"regexp",
",",
"request_uri",
")",
"self",
".",
"uri_match",
"=",
"match",
"if",
"match",
":",
"return",
"name",
"return",
"None"
] |
Basically used for `rest-json` APIs.
You can refer to the example at the link below
https://github.com/boto/botocore/blob/develop/botocore/data/iot/2015-05-28/service-2.json
|
[
"basically",
"used",
"for",
"rest",
"-",
"json",
"APIs",
"You",
"can",
"refer",
"to",
"example",
"from",
"link",
"below",
"https",
":",
"//",
"github",
".",
"com",
"/",
"boto",
"/",
"botocore",
"/",
"blob",
"/",
"develop",
"/",
"botocore",
"/",
"data",
"/",
"iot",
"/",
"2015",
"-",
"05",
"-",
"28",
"/",
"service",
"-",
"2",
".",
"json"
] |
python
|
train
| 46.034483 |
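
At its core the lookup above is a two-level dict from HTTP method to {uri-regexp: action name}, scanned linearly with re.match. A dependency-free sketch of that routing (the operations table here is invented):

import re
from collections import defaultdict

method_urls = defaultdict(dict)
method_urls['GET'][r'^/things/[^/]+$'] = 'DescribeThing'  # made-up operations
method_urls['POST'][r'^/things$'] = 'CreateThing'

def action_for(method, request_uri):
    for regexp, name in method_urls[method].items():
        if re.match(regexp, request_uri):
            return name
    return None

print(action_for('GET', '/things/abc'))     # DescribeThing
print(action_for('DELETE', '/things/abc'))  # None
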
wglass/lighthouse
|
lighthouse/checks/http.py
|
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/checks/http.py#L52-L73
|
def perform(self):
"""
Performs a simple HTTP request against the configured url and returns
true if the response has a 2xx code.
The url can be configured to use https via the "https" boolean flag
in the config, as well as a custom HTTP method via the "method" key.
The default is to not use https and the GET method.
"""
if self.use_https:
conn = client.HTTPSConnection(self.host, self.port)
else:
conn = client.HTTPConnection(self.host, self.port)
conn.request(self.method, self.uri)
response = conn.getresponse()
conn.close()
return bool(response.status >= 200 and response.status < 300)
|
[
"def",
"perform",
"(",
"self",
")",
":",
"if",
"self",
".",
"use_https",
":",
"conn",
"=",
"client",
".",
"HTTPSConnection",
"(",
"self",
".",
"host",
",",
"self",
".",
"port",
")",
"else",
":",
"conn",
"=",
"client",
".",
"HTTPConnection",
"(",
"self",
".",
"host",
",",
"self",
".",
"port",
")",
"conn",
".",
"request",
"(",
"self",
".",
"method",
",",
"self",
".",
"uri",
")",
"response",
"=",
"conn",
".",
"getresponse",
"(",
")",
"conn",
".",
"close",
"(",
")",
"return",
"bool",
"(",
"response",
".",
"status",
">=",
"200",
"and",
"response",
".",
"status",
"<",
"300",
")"
] |
Performs a simple HTTP request against the configured url and returns
true if the response has a 2xx code.
The url can be configured to use https via the "https" boolean flag
in the config, as well as a custom HTTP method via the "method" key.
The default is to not use https and the GET method.
|
[
"Performs",
"a",
"simple",
"HTTP",
"request",
"against",
"the",
"configured",
"url",
"and",
"returns",
"true",
"if",
"the",
"response",
"has",
"a",
"2xx",
"code",
"."
] |
python
|
train
| 32 |
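
The same 2xx check written directly against the stdlib; the `client` name in the original presumably resolves to http.client (or its six.moves alias). Host and port below are placeholders, and the call performs a real request:

import http.client

def check(host, port, method='GET', uri='/'):
    conn = http.client.HTTPConnection(host, port, timeout=5)
    try:
        conn.request(method, uri)
        return 200 <= conn.getresponse().status < 300
    finally:
        conn.close()

# check('localhost', 8000)  -> True whenever the service answers with a 2xx
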
redhat-cip/dci-control-server
|
dci/api/v1/components.py
|
https://github.com/redhat-cip/dci-control-server/blob/b416cf935ec93e4fdd5741f61a21cabecf8454d2/dci/api/v1/components.py#L495-L507
|
def delete_tag_for_component(user, c_id, tag_id):
"""Delete a tag on a specific component."""
# Todo : check c_id and tag_id exist in db
query = _TABLE_TAGS.delete().where((_TABLE_TAGS.c.tag_id == tag_id) &
                                   (_TABLE_TAGS.c.component_id == c_id))
try:
flask.g.db_conn.execute(query)
except sa_exc.IntegrityError:
raise dci_exc.DCICreationConflict(_TABLE_TAGS.c.tag_id, 'tag_id')
return flask.Response(None, 204, content_type='application/json')
|
[
"def",
"delete_tag_for_component",
"(",
"user",
",",
"c_id",
",",
"tag_id",
")",
":",
"# Todo : check c_id and tag_id exist in db",
"query",
"=",
"_TABLE_TAGS",
".",
"delete",
"(",
")",
".",
"where",
"(",
"_TABLE_TAGS",
".",
"c",
".",
"tag_id",
"==",
"tag_id",
"and",
"_TABLE_TAGS",
".",
"c",
".",
"component_id",
"==",
"c_id",
")",
"try",
":",
"flask",
".",
"g",
".",
"db_conn",
".",
"execute",
"(",
"query",
")",
"except",
"sa_exc",
".",
"IntegrityError",
":",
"raise",
"dci_exc",
".",
"DCICreationConflict",
"(",
"_TABLE_TAGS",
".",
"c",
".",
"tag_id",
",",
"'tag_id'",
")",
"return",
"flask",
".",
"Response",
"(",
"None",
",",
"204",
",",
"content_type",
"=",
"'application/json'",
")"
] |
Delete a tag on a specific component.
|
[
"Delete",
"a",
"tag",
"on",
"a",
"specific",
"component",
"."
] |
python
|
train
| 39.230769 |
frictionlessdata/datapackage-pipelines
|
datapackage_pipelines/web/server.py
|
https://github.com/frictionlessdata/datapackage-pipelines/blob/3a34bbdf042d13c3bec5eef46ff360ee41403874/datapackage_pipelines/web/server.py#L87-L102
|
def basic_auth_required(view_func):
"""
A decorator that can be used to protect specific views with HTTP basic
access authentication. Conditional on having BASIC_AUTH_USERNAME and
BASIC_AUTH_PASSWORD set as env vars.
"""
@wraps(view_func)
def wrapper(*args, **kwargs):
if app.config.get('BASIC_AUTH_ACTIVE', False):
if basic_auth.authenticate():
return view_func(*args, **kwargs)
else:
return basic_auth.challenge()
else:
return view_func(*args, **kwargs)
return wrapper
|
[
"def",
"basic_auth_required",
"(",
"view_func",
")",
":",
"@",
"wraps",
"(",
"view_func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"app",
".",
"config",
".",
"get",
"(",
"'BASIC_AUTH_ACTIVE'",
",",
"False",
")",
":",
"if",
"basic_auth",
".",
"authenticate",
"(",
")",
":",
"return",
"view_func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"return",
"basic_auth",
".",
"challenge",
"(",
")",
"else",
":",
"return",
"view_func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapper"
] |
A decorator that can be used to protect specific views with HTTP basic
access authentication. Conditional on having BASIC_AUTH_USERNAME and
BASIC_AUTH_PASSWORD set as env vars.
|
[
"A",
"decorator",
"that",
"can",
"be",
"used",
"to",
"protect",
"specific",
"views",
"with",
"HTTP",
"basic",
"access",
"authentication",
".",
"Conditional",
"on",
"having",
"BASIC_AUTH_USERNAME",
"and",
"BASIC_AUTH_PASSWORD",
"set",
"as",
"env",
"vars",
"."
] |
python
|
train
| 35.6875 |
tensorflow/tensor2tensor
|
tensor2tensor/models/video/epva.py
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/epva.py#L127-L182
|
def van_enc_2d(x, first_depth, reuse=False):
"""The higher level structure encoder for the VAN.
The high level structure is a vector instead of an image.
Args:
x: The higher level structure to encode.
first_depth: The depth of the first layer. Depth is increased in subsequent
layers.
reuse: To reuse in variable scope or not.
Returns:
The encoded image and the concatenated higher-level structure features.
"""
with tf.variable_scope('van_enc', reuse=reuse):
a = 4 # depends on the inputs size
b = 4
# a, b = 4,4
enc = tf.nn.relu(x)
enc = tf.layers.dense(enc, first_depth * a * b, tf.nn.relu)
enc = tf.contrib.layers.layer_norm(enc)
enc = tf.reshape(enc, [-1, a, b, first_depth])
enc = tf.layers.conv2d_transpose(
enc, first_depth, 3, padding='same', activation=tf.nn.relu, strides=1)
enc = tf.contrib.layers.layer_norm(enc)
enc = tf.layers.conv2d_transpose(
enc,
first_depth * 2,
3,
padding='same',
activation=tf.nn.relu,
strides=2)
van_higher_level_2 = tf.reshape(enc, [-1, a * 2 * b * 2 * first_depth * 2])
enc = tf.layers.conv2d_transpose(
enc,
first_depth * 2,
3,
padding='same',
activation=tf.nn.relu,
strides=1)
enc = tf.contrib.layers.layer_norm(enc)
enc = tf.layers.conv2d_transpose(
enc,
first_depth * 4,
3,
padding='same',
activation=tf.nn.relu,
strides=1)
van_higher_level_4 = tf.reshape(enc, [-1, a * 2 * b * 2 * first_depth * 4])
van_higher_level = tf.concat([x, van_higher_level_2, van_higher_level_4], 1)
return enc, van_higher_level
|
[
"def",
"van_enc_2d",
"(",
"x",
",",
"first_depth",
",",
"reuse",
"=",
"False",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"'van_enc'",
",",
"reuse",
"=",
"reuse",
")",
":",
"a",
"=",
"4",
"# depends on the inputs size",
"b",
"=",
"4",
"# a, b = 4,4",
"enc",
"=",
"tf",
".",
"nn",
".",
"relu",
"(",
"x",
")",
"enc",
"=",
"tf",
".",
"layers",
".",
"dense",
"(",
"enc",
",",
"first_depth",
"*",
"a",
"*",
"b",
",",
"tf",
".",
"nn",
".",
"relu",
")",
"enc",
"=",
"tf",
".",
"contrib",
".",
"layers",
".",
"layer_norm",
"(",
"enc",
")",
"enc",
"=",
"tf",
".",
"reshape",
"(",
"enc",
",",
"[",
"-",
"1",
",",
"a",
",",
"b",
",",
"first_depth",
"]",
")",
"enc",
"=",
"tf",
".",
"layers",
".",
"conv2d_transpose",
"(",
"enc",
",",
"first_depth",
",",
"3",
",",
"padding",
"=",
"'same'",
",",
"activation",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"strides",
"=",
"1",
")",
"enc",
"=",
"tf",
".",
"contrib",
".",
"layers",
".",
"layer_norm",
"(",
"enc",
")",
"enc",
"=",
"tf",
".",
"layers",
".",
"conv2d_transpose",
"(",
"enc",
",",
"first_depth",
"*",
"2",
",",
"3",
",",
"padding",
"=",
"'same'",
",",
"activation",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"strides",
"=",
"2",
")",
"van_higher_level_2",
"=",
"tf",
".",
"reshape",
"(",
"enc",
",",
"[",
"-",
"1",
",",
"a",
"*",
"2",
"*",
"b",
"*",
"2",
"*",
"first_depth",
"*",
"2",
"]",
")",
"enc",
"=",
"tf",
".",
"layers",
".",
"conv2d_transpose",
"(",
"enc",
",",
"first_depth",
"*",
"2",
",",
"3",
",",
"padding",
"=",
"'same'",
",",
"activation",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"strides",
"=",
"1",
")",
"enc",
"=",
"tf",
".",
"contrib",
".",
"layers",
".",
"layer_norm",
"(",
"enc",
")",
"enc",
"=",
"tf",
".",
"layers",
".",
"conv2d_transpose",
"(",
"enc",
",",
"first_depth",
"*",
"4",
",",
"3",
",",
"padding",
"=",
"'same'",
",",
"activation",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"strides",
"=",
"1",
")",
"van_higher_level_4",
"=",
"tf",
".",
"reshape",
"(",
"enc",
",",
"[",
"-",
"1",
",",
"a",
"*",
"2",
"*",
"b",
"*",
"2",
"*",
"first_depth",
"*",
"4",
"]",
")",
"van_higher_level",
"=",
"tf",
".",
"concat",
"(",
"[",
"x",
",",
"van_higher_level_2",
",",
"van_higher_level_4",
"]",
",",
"1",
")",
"return",
"enc",
",",
"van_higher_level"
] |
The higher level structure encoder for the VAN.
The high level structure is a vector instead of an image.
Args:
x: The higher level structure to encode.
first_depth: The depth of the first layer. Depth is increased in subsequent
layers.
reuse: To reuse in variable scope or not.
Returns:
The encoded image and the concatenated higher-level structure features.
|
[
"The",
"higher",
"level",
"structure",
"encoder",
"for",
"the",
"VAN",
"."
] |
python
|
train
| 28.660714 |
angr/angr
|
angr/engines/successors.py
|
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/engines/successors.py#L228-L340
|
def _categorize_successor(self, state):
"""
Append state into successor lists.
:param state: a SimState instance
:param target: The target (of the jump/call/ret)
:return: The state
"""
self.all_successors.append(state)
target = state.scratch.target
# categorize the state
if o.APPROXIMATE_GUARDS in state.options and state.solver.is_false(state.scratch.guard, exact=False):
if o.VALIDATE_APPROXIMATIONS in state.options:
if state.satisfiable():
raise Exception('WTF')
self.unsat_successors.append(state)
elif o.APPROXIMATE_SATISFIABILITY in state.options and not state.solver.satisfiable(exact=False):
if o.VALIDATE_APPROXIMATIONS in state.options:
if state.solver.satisfiable():
raise Exception('WTF')
self.unsat_successors.append(state)
elif not state.scratch.guard.symbolic and state.solver.is_false(state.scratch.guard):
self.unsat_successors.append(state)
elif o.LAZY_SOLVES not in state.options and not state.satisfiable():
self.unsat_successors.append(state)
elif o.NO_SYMBOLIC_JUMP_RESOLUTION in state.options and state.solver.symbolic(target):
self.unconstrained_successors.append(state)
elif not state.solver.symbolic(target) and not state.history.jumpkind.startswith("Ijk_Sys"):
# a successor with a concrete IP, and it's not a syscall
self.successors.append(state)
self.flat_successors.append(state)
elif state.history.jumpkind.startswith("Ijk_Sys"):
# syscall
self.successors.append(state)
# Misuse the ip_at_syscall register to save the return address for this syscall
# state.ip *might be* changed to be the real address of syscall SimProcedures by syscall handling code in
# angr
state.regs.ip_at_syscall = state.ip
try:
symbolic_syscall_num, concrete_syscall_nums = self._resolve_syscall(state)
if concrete_syscall_nums is not None:
for i, n in enumerate(concrete_syscall_nums):
split_state = state if i == len(concrete_syscall_nums) - 1 else state.copy()
split_state.add_constraints(symbolic_syscall_num == n)
if split_state.supports_inspect:
split_state.inspect.downsize()
self._fix_syscall_ip(split_state)
self.flat_successors.append(split_state)
else:
# We cannot resolve the syscall number
# However, we still put it to the flat_successors list, and angr.SimOS.handle_syscall will pick it
# up, and create a "unknown syscall" stub for it.
self._fix_syscall_ip(state)
self.flat_successors.append(state)
except AngrUnsupportedSyscallError:
self.unsat_successors.append(state)
else:
# a successor with a symbolic IP
_max_targets = state.options.symbolic_ip_max_targets
_max_jumptable_targets = state.options.jumptable_symbolic_ip_max_targets
try:
if o.NO_IP_CONCRETIZATION in state.options:
# Don't try to concretize the IP
cond_and_targets = [ (claripy.true, target) ]
max_targets = 0
elif o.KEEP_IP_SYMBOLIC in state.options:
s = claripy.Solver()
addrs = s.eval(target, _max_targets + 1, extra_constraints=tuple(state.ip_constraints))
if len(addrs) > _max_targets:
# It is not a library
l.debug("It is not a Library")
addrs = state.solver.eval_upto(target, _max_targets + 1)
l.debug("addrs :%s", addrs)
cond_and_targets = [ (target == addr, addr) for addr in addrs ]
max_targets = _max_targets
else:
cond_and_targets = self._eval_target_jumptable(state, target, _max_jumptable_targets + 1)
if cond_and_targets is None:
# Fallback to the traditional and slow method
cond_and_targets = self._eval_target_brutal(state, target, _max_targets + 1)
max_targets = _max_targets
else:
max_targets = _max_jumptable_targets
if len(cond_and_targets) > max_targets:
l.warning(
"Exit state has over %d possible solutions. Likely unconstrained; skipping. %s",
max_targets,
target.shallow_repr()
)
self.unconstrained_successors.append(state)
else:
for cond, a in cond_and_targets:
split_state = state.copy()
if o.KEEP_IP_SYMBOLIC in split_state.options:
split_state.regs.ip = target
else:
split_state.add_constraints(cond, action=True)
split_state.regs.ip = a
if split_state.supports_inspect:
split_state.inspect.downsize()
self.flat_successors.append(split_state)
self.successors.append(state)
except SimSolverModeError:
self.unsat_successors.append(state)
return state
|
[
"def",
"_categorize_successor",
"(",
"self",
",",
"state",
")",
":",
"self",
".",
"all_successors",
".",
"append",
"(",
"state",
")",
"target",
"=",
"state",
".",
"scratch",
".",
"target",
"# categorize the state",
"if",
"o",
".",
"APPROXIMATE_GUARDS",
"in",
"state",
".",
"options",
"and",
"state",
".",
"solver",
".",
"is_false",
"(",
"state",
".",
"scratch",
".",
"guard",
",",
"exact",
"=",
"False",
")",
":",
"if",
"o",
".",
"VALIDATE_APPROXIMATIONS",
"in",
"state",
".",
"options",
":",
"if",
"state",
".",
"satisfiable",
"(",
")",
":",
"raise",
"Exception",
"(",
"'WTF'",
")",
"self",
".",
"unsat_successors",
".",
"append",
"(",
"state",
")",
"elif",
"o",
".",
"APPROXIMATE_SATISFIABILITY",
"in",
"state",
".",
"options",
"and",
"not",
"state",
".",
"solver",
".",
"satisfiable",
"(",
"exact",
"=",
"False",
")",
":",
"if",
"o",
".",
"VALIDATE_APPROXIMATIONS",
"in",
"state",
".",
"options",
":",
"if",
"state",
".",
"solver",
".",
"satisfiable",
"(",
")",
":",
"raise",
"Exception",
"(",
"'WTF'",
")",
"self",
".",
"unsat_successors",
".",
"append",
"(",
"state",
")",
"elif",
"not",
"state",
".",
"scratch",
".",
"guard",
".",
"symbolic",
"and",
"state",
".",
"solver",
".",
"is_false",
"(",
"state",
".",
"scratch",
".",
"guard",
")",
":",
"self",
".",
"unsat_successors",
".",
"append",
"(",
"state",
")",
"elif",
"o",
".",
"LAZY_SOLVES",
"not",
"in",
"state",
".",
"options",
"and",
"not",
"state",
".",
"satisfiable",
"(",
")",
":",
"self",
".",
"unsat_successors",
".",
"append",
"(",
"state",
")",
"elif",
"o",
".",
"NO_SYMBOLIC_JUMP_RESOLUTION",
"in",
"state",
".",
"options",
"and",
"state",
".",
"solver",
".",
"symbolic",
"(",
"target",
")",
":",
"self",
".",
"unconstrained_successors",
".",
"append",
"(",
"state",
")",
"elif",
"not",
"state",
".",
"solver",
".",
"symbolic",
"(",
"target",
")",
"and",
"not",
"state",
".",
"history",
".",
"jumpkind",
".",
"startswith",
"(",
"\"Ijk_Sys\"",
")",
":",
"# a successor with a concrete IP, and it's not a syscall",
"self",
".",
"successors",
".",
"append",
"(",
"state",
")",
"self",
".",
"flat_successors",
".",
"append",
"(",
"state",
")",
"elif",
"state",
".",
"history",
".",
"jumpkind",
".",
"startswith",
"(",
"\"Ijk_Sys\"",
")",
":",
"# syscall",
"self",
".",
"successors",
".",
"append",
"(",
"state",
")",
"# Misuse the ip_at_syscall register to save the return address for this syscall",
"# state.ip *might be* changed to be the real address of syscall SimProcedures by syscall handling code in",
"# angr",
"state",
".",
"regs",
".",
"ip_at_syscall",
"=",
"state",
".",
"ip",
"try",
":",
"symbolic_syscall_num",
",",
"concrete_syscall_nums",
"=",
"self",
".",
"_resolve_syscall",
"(",
"state",
")",
"if",
"concrete_syscall_nums",
"is",
"not",
"None",
":",
"for",
"i",
",",
"n",
"in",
"enumerate",
"(",
"concrete_syscall_nums",
")",
":",
"split_state",
"=",
"state",
"if",
"i",
"==",
"len",
"(",
"concrete_syscall_nums",
")",
"-",
"1",
"else",
"state",
".",
"copy",
"(",
")",
"split_state",
".",
"add_constraints",
"(",
"symbolic_syscall_num",
"==",
"n",
")",
"if",
"split_state",
".",
"supports_inspect",
":",
"split_state",
".",
"inspect",
".",
"downsize",
"(",
")",
"self",
".",
"_fix_syscall_ip",
"(",
"split_state",
")",
"self",
".",
"flat_successors",
".",
"append",
"(",
"split_state",
")",
"else",
":",
"# We cannot resolve the syscall number",
"# However, we still put it to the flat_successors list, and angr.SimOS.handle_syscall will pick it",
"# up, and create a \"unknown syscall\" stub for it.",
"self",
".",
"_fix_syscall_ip",
"(",
"state",
")",
"self",
".",
"flat_successors",
".",
"append",
"(",
"state",
")",
"except",
"AngrUnsupportedSyscallError",
":",
"self",
".",
"unsat_successors",
".",
"append",
"(",
"state",
")",
"else",
":",
"# a successor with a symbolic IP",
"_max_targets",
"=",
"state",
".",
"options",
".",
"symbolic_ip_max_targets",
"_max_jumptable_targets",
"=",
"state",
".",
"options",
".",
"jumptable_symbolic_ip_max_targets",
"try",
":",
"if",
"o",
".",
"NO_IP_CONCRETIZATION",
"in",
"state",
".",
"options",
":",
"# Don't try to concretize the IP",
"cond_and_targets",
"=",
"[",
"(",
"claripy",
".",
"true",
",",
"target",
")",
"]",
"max_targets",
"=",
"0",
"elif",
"o",
".",
"KEEP_IP_SYMBOLIC",
"in",
"state",
".",
"options",
":",
"s",
"=",
"claripy",
".",
"Solver",
"(",
")",
"addrs",
"=",
"s",
".",
"eval",
"(",
"target",
",",
"_max_targets",
"+",
"1",
",",
"extra_constraints",
"=",
"tuple",
"(",
"state",
".",
"ip_constraints",
")",
")",
"if",
"len",
"(",
"addrs",
")",
">",
"_max_targets",
":",
"# It is not a library",
"l",
".",
"debug",
"(",
"\"It is not a Library\"",
")",
"addrs",
"=",
"state",
".",
"solver",
".",
"eval_upto",
"(",
"target",
",",
"_max_targets",
"+",
"1",
")",
"l",
".",
"debug",
"(",
"\"addrs :%s\"",
",",
"addrs",
")",
"cond_and_targets",
"=",
"[",
"(",
"target",
"==",
"addr",
",",
"addr",
")",
"for",
"addr",
"in",
"addrs",
"]",
"max_targets",
"=",
"_max_targets",
"else",
":",
"cond_and_targets",
"=",
"self",
".",
"_eval_target_jumptable",
"(",
"state",
",",
"target",
",",
"_max_jumptable_targets",
"+",
"1",
")",
"if",
"cond_and_targets",
"is",
"None",
":",
"# Fallback to the traditional and slow method",
"cond_and_targets",
"=",
"self",
".",
"_eval_target_brutal",
"(",
"state",
",",
"target",
",",
"_max_targets",
"+",
"1",
")",
"max_targets",
"=",
"_max_targets",
"else",
":",
"max_targets",
"=",
"_max_jumptable_targets",
"if",
"len",
"(",
"cond_and_targets",
")",
">",
"max_targets",
":",
"l",
".",
"warning",
"(",
"\"Exit state has over %d possible solutions. Likely unconstrained; skipping. %s\"",
",",
"max_targets",
",",
"target",
".",
"shallow_repr",
"(",
")",
")",
"self",
".",
"unconstrained_successors",
".",
"append",
"(",
"state",
")",
"else",
":",
"for",
"cond",
",",
"a",
"in",
"cond_and_targets",
":",
"split_state",
"=",
"state",
".",
"copy",
"(",
")",
"if",
"o",
".",
"KEEP_IP_SYMBOLIC",
"in",
"split_state",
".",
"options",
":",
"split_state",
".",
"regs",
".",
"ip",
"=",
"target",
"else",
":",
"split_state",
".",
"add_constraints",
"(",
"cond",
",",
"action",
"=",
"True",
")",
"split_state",
".",
"regs",
".",
"ip",
"=",
"a",
"if",
"split_state",
".",
"supports_inspect",
":",
"split_state",
".",
"inspect",
".",
"downsize",
"(",
")",
"self",
".",
"flat_successors",
".",
"append",
"(",
"split_state",
")",
"self",
".",
"successors",
".",
"append",
"(",
"state",
")",
"except",
"SimSolverModeError",
":",
"self",
".",
"unsat_successors",
".",
"append",
"(",
"state",
")",
"return",
"state"
] |
Append state into successor lists.
:param state: a SimState instance
:param target: The target (of the jump/call/ret)
:return: The state
|
[
"Append",
"state",
"into",
"successor",
"lists",
"."
] |
python
|
train
| 50.504425 |
neurosynth/neurosynth
|
neurosynth/base/dataset.py
|
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/base/dataset.py#L767-L771
|
def _csr_to_sdf(self):
""" Inverse of _sdf_to_csr(). """
self.data = pd.DataFrame(self.data['values'].todense(),
index=self.data['index'],
columns=self.data['columns']).to_sparse()
|
[
"def",
"_csr_to_sdf",
"(",
"self",
")",
":",
"self",
".",
"data",
"=",
"pd",
".",
"DataFrame",
"(",
"self",
".",
"data",
"[",
"'values'",
"]",
".",
"todense",
"(",
")",
",",
"index",
"=",
"self",
".",
"data",
"[",
"'index'",
"]",
",",
"columns",
"=",
"self",
".",
"data",
"[",
"'columns'",
"]",
")",
".",
"to_sparse",
"(",
")"
] |
Inverse of _sdf_to_csr().
|
[
"Inverse",
"of",
"_sdf_to_csr",
"()",
"."
] |
python
|
test
| 51.6 |
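
.to_sparse() was removed in pandas 1.0; the modern equivalent of the dense/sparse round-trip above goes through the Sparse accessor. A sketch assuming pandas >= 1.0 and scipy:

import numpy as np
import pandas as pd
from scipy import sparse

csr = sparse.csr_matrix(np.eye(3))  # stands in for self.data['values']
df = pd.DataFrame.sparse.from_spmatrix(
    csr, index=['a', 'b', 'c'], columns=['x', 'y', 'z'])
print(df.sparse.density)  # ~0.333: one non-zero cell per row
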
i3visio/osrframework
|
osrframework/domainfy.py
|
https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/domainfy.py#L63-L70
|
def getNumberTLD():
"""
Counting the total number of TLD being processed.
"""
total = 0
for typeTld in TLD.keys():
total += len(TLD[typeTld])
return total
|
[
"def",
"getNumberTLD",
"(",
")",
":",
"total",
"=",
"0",
"for",
"typeTld",
"in",
"TLD",
".",
"keys",
"(",
")",
":",
"total",
"+=",
"len",
"(",
"TLD",
"[",
"typeTld",
"]",
")",
"return",
"total"
] |
Counting the total number of TLD being processed.
|
[
"Counting",
"the",
"total",
"number",
"of",
"TLD",
"being",
"processed",
"."
] |
python
|
train
| 22.25 |
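
The loop is equivalent to a one-line sum over the dict's values; with a toy TLD table standing in for the module-level constant:

TLD = {'generic': ['.com', '.org'], 'country': ['.io', '.es', '.fr']}
print(sum(len(tlds) for tlds in TLD.values()))  # 5
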
marrow/cinje
|
cinje/block/generic.py
|
https://github.com/marrow/cinje/blob/413bdac7242020ce8379d272720c649a9196daa2/cinje/block/generic.py#L56-L59
|
def match(self, context, line):
"""Match code lines prefixed with a variety of keywords."""
return line.kind == 'code' and line.partitioned[0] in self._both
|
[
"def",
"match",
"(",
"self",
",",
"context",
",",
"line",
")",
":",
"return",
"line",
".",
"kind",
"==",
"'code'",
"and",
"line",
".",
"partitioned",
"[",
"0",
"]",
"in",
"self",
".",
"_both"
] |
Match code lines prefixed with a variety of keywords.
|
[
"Match",
"code",
"lines",
"prefixed",
"with",
"a",
"variety",
"of",
"keywords",
"."
] |
python
|
train
| 40 |
ProjetPP/PPP-Core
|
ppp_core/config.py
|
https://github.com/ProjetPP/PPP-Core/blob/49ee5b16325aa7134e2e423cf75e7b2609df96a0/ppp_core/config.py#L28-L36
|
def should_send(self, request):
"""Returns whether or not the request should be sent to the
modules, based on the filters."""
if self.filters.get('whitelist', None):
return request.tree.type in self.filters['whitelist']
elif self.filters.get('blacklist', None):
return request.tree.type not in self.filters['blacklist']
else:
return True
|
[
"def",
"should_send",
"(",
"self",
",",
"request",
")",
":",
"if",
"self",
".",
"filters",
".",
"get",
"(",
"'whitelist'",
",",
"None",
")",
":",
"return",
"request",
".",
"tree",
".",
"type",
"in",
"self",
".",
"filters",
"[",
"'whitelist'",
"]",
"elif",
"self",
".",
"filters",
".",
"get",
"(",
"'blacklist'",
",",
"None",
")",
":",
"return",
"request",
".",
"tree",
".",
"type",
"not",
"in",
"self",
".",
"filters",
"[",
"'blacklist'",
"]",
"else",
":",
"return",
"True"
] |
Returns whether or not the request should be sent to the
modules, based on the filters.
|
[
"Returns",
"whether",
"or",
"not",
"the",
"request",
"should",
"be",
"sent",
"to",
"the",
"modules",
"based",
"on",
"the",
"filters",
"."
] |
python
|
train
| 45 |
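
The precedence (whitelist wins, then blacklist, then allow-all) is easy to exercise with a small stub in place of the real request object:

class Core:
    def __init__(self, filters):
        self.filters = filters
    def should_send(self, tree_type):  # stands in for request.tree.type
        if self.filters.get('whitelist', None):
            return tree_type in self.filters['whitelist']
        elif self.filters.get('blacklist', None):
            return tree_type not in self.filters['blacklist']
        else:
            return True

print(Core({'whitelist': ['triple']}).should_send('triple'))      # True
print(Core({'blacklist': ['sentence']}).should_send('sentence'))  # False
print(Core({}).should_send('anything'))                           # True
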
IntegralDefense/critsapi
|
critsapi/critsdbapi.py
|
https://github.com/IntegralDefense/critsapi/blob/e770bd81e124eaaeb5f1134ba95f4a35ff345c5a/critsapi/critsdbapi.py#L104-L117
|
def find_one(self, collection, query):
"""
Search a collection for the query provided and return one result. Just
a raw interface to mongo to do any query you want.
Args:
collection: The db collection. See main class documentation.
query: A mongo find query.
Returns:
The matching document (a dict), or None if nothing matches.
"""
obj = getattr(self.db, collection)
result = obj.find_one(query)
return result
|
[
"def",
"find_one",
"(",
"self",
",",
"collection",
",",
"query",
")",
":",
"obj",
"=",
"getattr",
"(",
"self",
".",
"db",
",",
"collection",
")",
"result",
"=",
"obj",
".",
"find_one",
"(",
"query",
")",
"return",
"result"
] |
Search a collection for the query provided and return one result. Just
a raw interface to mongo to do any query you want.
Args:
collection: The db collection. See main class documentation.
query: A mongo find query.
Returns:
The matching document (a dict), or None if nothing matches.
|
[
"Search",
"a",
"collection",
"for",
"the",
"query",
"provided",
"and",
"return",
"one",
"result",
".",
"Just",
"a",
"raw",
"interface",
"to",
"mongo",
"to",
"do",
"any",
"query",
"you",
"want",
"."
] |
python
|
train
| 34.642857 |
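
Usage sketch; the constructor arguments, collection, and query below are hypothetical, and note that find_one hands back a single document (or None), not a cursor:

from critsapi.critsdbapi import CRITsDBAPI  # assumed import path for this class

db = CRITsDBAPI(mongo_uri='mongodb://localhost', db_name='crits')  # hypothetical args
db.connect()  # assumed connection helper on the same class
doc = db.find_one('indicators', {'value': 'example.com'})
print(doc['_id'] if doc is not None else 'no match')
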
apache/incubator-superset
|
superset/views/base.py
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/base.py#L111-L134
|
def handle_api_exception(f):
"""
A decorator to catch superset exceptions. Use it after the @api decorator above
so the superset exception handler is triggered before the handler for generic exceptions.
"""
def wraps(self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
except SupersetSecurityException as e:
logging.exception(e)
return json_error_response(utils.error_msg_from_exception(e),
status=e.status,
stacktrace=traceback.format_exc(),
link=e.link)
except SupersetException as e:
logging.exception(e)
return json_error_response(utils.error_msg_from_exception(e),
stacktrace=traceback.format_exc(),
status=e.status)
except Exception as e:
logging.exception(e)
return json_error_response(utils.error_msg_from_exception(e),
stacktrace=traceback.format_exc())
return functools.update_wrapper(wraps, f)
|
[
"def",
"handle_api_exception",
"(",
"f",
")",
":",
"def",
"wraps",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"return",
"f",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"SupersetSecurityException",
"as",
"e",
":",
"logging",
".",
"exception",
"(",
"e",
")",
"return",
"json_error_response",
"(",
"utils",
".",
"error_msg_from_exception",
"(",
"e",
")",
",",
"status",
"=",
"e",
".",
"status",
",",
"stacktrace",
"=",
"traceback",
".",
"format_exc",
"(",
")",
",",
"link",
"=",
"e",
".",
"link",
")",
"except",
"SupersetException",
"as",
"e",
":",
"logging",
".",
"exception",
"(",
"e",
")",
"return",
"json_error_response",
"(",
"utils",
".",
"error_msg_from_exception",
"(",
"e",
")",
",",
"stacktrace",
"=",
"traceback",
".",
"format_exc",
"(",
")",
",",
"status",
"=",
"e",
".",
"status",
")",
"except",
"Exception",
"as",
"e",
":",
"logging",
".",
"exception",
"(",
"e",
")",
"return",
"json_error_response",
"(",
"utils",
".",
"error_msg_from_exception",
"(",
"e",
")",
",",
"stacktrace",
"=",
"traceback",
".",
"format_exc",
"(",
")",
")",
"return",
"functools",
".",
"update_wrapper",
"(",
"wraps",
",",
"f",
")"
] |
A decorator to catch superset exceptions. Use it after the @api decorator above
so the superset exception handler is triggered before the handler for generic exceptions.
|
[
"A",
"decorator",
"to",
"catch",
"superset",
"exceptions",
".",
"Use",
"it",
"after",
"the"
] |
python
|
train
| 48.333333 |
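
Per the docstring, @api goes on top so its generic handler wraps the superset-specific one. A sketch of the intended stacking (the view class and SupersetException import path are assumptions; this is not runnable without a Superset install):

from superset.views.base import api, handle_api_exception  # same module as above
from superset.exceptions import SupersetException  # assumed import path

class MyView:  # placeholder view class
    @api                    # generic exception handling, outermost
    @handle_api_exception   # superset exceptions caught first
    def my_endpoint(self):
        raise SupersetException('boom')  # -> JSON error response, not a bare 500
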
cortical-io/retina-sdk.py
|
retinasdk/full_client.py
|
https://github.com/cortical-io/retina-sdk.py/blob/474c13ad399fe1e974d2650335537608f4456b07/retinasdk/full_client.py#L183-L196
|
def getContextsForExpression(self, body, getFingerprint=None, startIndex=0, maxResults=5, sparsity=1.0):
"""Get semantic contexts for the input expression
Args:
body, ExpressionOperation: The JSON encoded expression to be evaluated (required)
getFingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional)
startIndex, int: The start-index for pagination (optional)
maxResults, int: Max results per page (optional)
sparsity, float: Sparsify the resulting expression to this percentage (optional)
Returns:
list of Context
Raises:
CorticalioException: if the request was not successful
"""
return self._expressions.getContextsForExpression(self._retina, body, getFingerprint, startIndex, maxResults, sparsity)
|
[
"def",
"getContextsForExpression",
"(",
"self",
",",
"body",
",",
"getFingerprint",
"=",
"None",
",",
"startIndex",
"=",
"0",
",",
"maxResults",
"=",
"5",
",",
"sparsity",
"=",
"1.0",
")",
":",
"return",
"self",
".",
"_expressions",
".",
"getContextsForExpression",
"(",
"self",
".",
"_retina",
",",
"body",
",",
"getFingerprint",
",",
"startIndex",
",",
"maxResults",
",",
"sparsity",
")"
] |
Get semantic contexts for the input expression
Args:
body, ExpressionOperation: The JSON encoded expression to be evaluated (required)
getFingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional)
startIndex, int: The start-index for pagination (optional)
maxResults, int: Max results per page (optional)
sparsity, float: Sparsify the resulting expression to this percentage (optional)
Returns:
list of Context
Raises:
CorticalioException: if the request was not successful
|
[
"Get",
"semantic",
"contexts",
"for",
"the",
"input",
"expression",
"Args",
":",
"body",
"ExpressionOperation",
":",
"The",
"JSON",
"encoded",
"expression",
"to",
"be",
"evaluated",
"(",
"required",
")",
"getFingerprint",
"bool",
":",
"Configure",
"if",
"the",
"fingerprint",
"should",
"be",
"returned",
"as",
"part",
"of",
"the",
"results",
"(",
"optional",
")",
"startIndex",
"int",
":",
"The",
"start",
"-",
"index",
"for",
"pagination",
"(",
"optional",
")",
"maxResults",
"int",
":",
"Max",
"results",
"per",
"page",
"(",
"optional",
")",
"sparsity",
"float",
":",
"Sparsify",
"the",
"resulting",
"expression",
"to",
"this",
"percentage",
"(",
"optional",
")",
"Returns",
":",
"list",
"of",
"Context",
"Raises",
":",
"CorticalioException",
":",
"if",
"the",
"request",
"was",
"not",
"successful"
] |
python
|
train
| 61.857143 |
watson-developer-cloud/python-sdk
|
ibm_watson/discovery_v1.py
|
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/discovery_v1.py#L10160-L10167
|
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'document_retrieval_strategy'
) and self.document_retrieval_strategy is not None:
_dict[
'document_retrieval_strategy'] = self.document_retrieval_strategy
return _dict
|
[
"def",
"_to_dict",
"(",
"self",
")",
":",
"_dict",
"=",
"{",
"}",
"if",
"hasattr",
"(",
"self",
",",
"'document_retrieval_strategy'",
")",
"and",
"self",
".",
"document_retrieval_strategy",
"is",
"not",
"None",
":",
"_dict",
"[",
"'document_retrieval_strategy'",
"]",
"=",
"self",
".",
"document_retrieval_strategy",
"return",
"_dict"
] |
Return a json dictionary representing this model.
|
[
"Return",
"a",
"json",
"dictionary",
"representing",
"this",
"model",
"."
] |
python
|
train
| 42.75 |
saltstack/salt
|
salt/states/disk.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/disk.py#L57-L75
|
def _validate_int(name, value, limits=(), strip='%'):
'''
Validate the named integer within the supplied limits inclusive and
strip supplied unit characters
'''
comment = ''
# Must be integral
try:
if isinstance(value, string_types):
value = value.strip(' ' + strip)
value = int(value)
except (TypeError, ValueError):
comment += '{0} must be an integer '.format(name)
# Must be in range
else:
if len(limits) == 2:
if value < limits[0] or value > limits[1]:
comment += '{0} must be in the range [{1[0]}, {1[1]}] '.format(name, limits)
return value, comment
|
[
"def",
"_validate_int",
"(",
"name",
",",
"value",
",",
"limits",
"=",
"(",
")",
",",
"strip",
"=",
"'%'",
")",
":",
"comment",
"=",
"''",
"# Must be integral",
"try",
":",
"if",
"isinstance",
"(",
"value",
",",
"string_types",
")",
":",
"value",
"=",
"value",
".",
"strip",
"(",
"' '",
"+",
"strip",
")",
"value",
"=",
"int",
"(",
"value",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"comment",
"+=",
"'{0} must be an integer '",
".",
"format",
"(",
"name",
")",
"# Must be in range",
"else",
":",
"if",
"len",
"(",
"limits",
")",
"==",
"2",
":",
"if",
"value",
"<",
"limits",
"[",
"0",
"]",
"or",
"value",
">",
"limits",
"[",
"1",
"]",
":",
"comment",
"+=",
"'{0} must be in the range [{1[0]}, {1[1]}] '",
".",
"format",
"(",
"name",
",",
"limits",
")",
"return",
"value",
",",
"comment"
] |
Validate the named integer within the supplied limits inclusive and
strip supplied unit characters
|
[
"Validate",
"the",
"named",
"integer",
"within",
"the",
"supplied",
"limits",
"inclusive",
"and",
"strip",
"supplied",
"unit",
"characters"
] |
python
|
train
| 34.368421 |
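
The contract is: always return a value, and accumulate complaints in the comment string. Assuming the helper is importable (it is module-private in salt.states.disk), the three branches look like:

print(_validate_int('minimum', '25 %', limits=(0, 100)))
# (25, '')
print(_validate_int('minimum', 'lots', limits=(0, 100)))
# ('lots', 'minimum must be an integer ')
print(_validate_int('minimum', 250, limits=(0, 100)))
# (250, 'minimum must be in the range [0, 100] ')
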
SoCo/SoCo
|
soco/services.py
|
https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/services.py#L373-L424
|
def build_command(self, action, args=None):
"""Build a SOAP request.
Args:
action (str): the name of an action (a string as specified in the
service description XML file) to be sent.
args (list, optional): Relevant arguments as a list of (name,
value) tuples.
Returns:
tuple: a tuple containing the POST headers (as a dict) and a
string containing the relevant SOAP body. Does not set
content-length, or host headers, which are completed upon
sending.
"""
# A complete request should look something like this:
# POST path of control URL HTTP/1.1
# HOST: host of control URL:port of control URL
# CONTENT-LENGTH: bytes in body
# CONTENT-TYPE: text/xml; charset="utf-8"
# SOAPACTION: "urn:schemas-upnp-org:service:serviceType:v#actionName"
#
# <?xml version="1.0"?>
# <s:Envelope
# xmlns:s="http://schemas.xmlsoap.org/soap/envelope/"
# s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
# <s:Body>
# <u:actionName
# xmlns:u="urn:schemas-upnp-org:service:serviceType:v">
# <argumentName>in arg value</argumentName>
# ... other in args and their values go here, if any
# </u:actionName>
# </s:Body>
# </s:Envelope>
arguments = self.wrap_arguments(args)
body = self.soap_body_template.format(
arguments=arguments, action=action, service_type=self.service_type,
version=self.version)
soap_action_template = \
"urn:schemas-upnp-org:service:{service_type}:{version}#{action}"
soap_action = soap_action_template.format(
service_type=self.service_type, version=self.version,
action=action)
headers = {'Content-Type': 'text/xml; charset="utf-8"',
'SOAPACTION': soap_action}
# Note that although we set the charset to utf-8 here, in fact the
# body is still unicode. It will only be converted to bytes when it
# is set over the network
return (headers, body)
|
[
"def",
"build_command",
"(",
"self",
",",
"action",
",",
"args",
"=",
"None",
")",
":",
"# A complete request should look something like this:",
"# POST path of control URL HTTP/1.1",
"# HOST: host of control URL:port of control URL",
"# CONTENT-LENGTH: bytes in body",
"# CONTENT-TYPE: text/xml; charset=\"utf-8\"",
"# SOAPACTION: \"urn:schemas-upnp-org:service:serviceType:v#actionName\"",
"#",
"# <?xml version=\"1.0\"?>",
"# <s:Envelope",
"# xmlns:s=\"http://schemas.xmlsoap.org/soap/envelope/\"",
"# s:encodingStyle=\"http://schemas.xmlsoap.org/soap/encoding/\">",
"# <s:Body>",
"# <u:actionName",
"# xmlns:u=\"urn:schemas-upnp-org:service:serviceType:v\">",
"# <argumentName>in arg value</argumentName>",
"# ... other in args and their values go here, if any",
"# </u:actionName>",
"# </s:Body>",
"# </s:Envelope>",
"arguments",
"=",
"self",
".",
"wrap_arguments",
"(",
"args",
")",
"body",
"=",
"self",
".",
"soap_body_template",
".",
"format",
"(",
"arguments",
"=",
"arguments",
",",
"action",
"=",
"action",
",",
"service_type",
"=",
"self",
".",
"service_type",
",",
"version",
"=",
"self",
".",
"version",
")",
"soap_action_template",
"=",
"\"urn:schemas-upnp-org:service:{service_type}:{version}#{action}\"",
"soap_action",
"=",
"soap_action_template",
".",
"format",
"(",
"service_type",
"=",
"self",
".",
"service_type",
",",
"version",
"=",
"self",
".",
"version",
",",
"action",
"=",
"action",
")",
"headers",
"=",
"{",
"'Content-Type'",
":",
"'text/xml; charset=\"utf-8\"'",
",",
"'SOAPACTION'",
":",
"soap_action",
"}",
"# Note that although we set the charset to utf-8 here, in fact the",
"# body is still unicode. It will only be converted to bytes when it",
"# is set over the network",
"return",
"(",
"headers",
",",
"body",
")"
] |
Build a SOAP request.
Args:
action (str): the name of an action (a string as specified in the
service description XML file) to be sent.
args (list, optional): Relevant arguments as a list of (name,
value) tuples.
Returns:
tuple: a tuple containing the POST headers (as a dict) and a
string containing the relevant SOAP body. Does not set
content-length, or host headers, which are completed upon
sending.
|
[
"Build",
"a",
"SOAP",
"request",
"."
] |
python
|
train
| 42.519231 |
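
The SOAPACTION header is just the templated URN; with an illustrative service type and action, the pieces assemble as:

soap_action = (
    'urn:schemas-upnp-org:service:{service_type}:{version}#{action}'
    .format(service_type='AVTransport', version=1, action='Play'))
print(soap_action)  # urn:schemas-upnp-org:service:AVTransport:1#Play
headers = {'Content-Type': 'text/xml; charset="utf-8"',
           'SOAPACTION': soap_action}
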
MostAwesomeDude/gentleman
|
gentleman/async.py
|
https://github.com/MostAwesomeDude/gentleman/blob/17fb8ffb922aa4af9d8bcab85e452c9311d41805/gentleman/async.py#L125-L183
|
def request(self, method, path, query=None, content=None):
"""
Sends an HTTP request.
This constructs a full URL, encodes and decodes HTTP bodies, and
handles invalid responses in a pythonic way.
@type method: string
@param method: HTTP method to use
@type path: string
@param path: HTTP URL path
@type query: list of two-tuples
@param query: query arguments to pass to urllib.urlencode
@type content: str or None
@param content: HTTP body content
@rtype: object
@return: JSON-Decoded response
@raises GanetiApiError: If an invalid response is returned
"""
if not path.startswith("/"):
raise ClientError("Implementation error: Called with bad path %s"
% path)
body = None
if content is not None:
data = self._json_encoder.encode(content)
body = StringProducer(data)
url = self._base_url + path
if query:
prepare_query(query)
params = urlencode(query, doseq=True)
url += "?%s" % params
log.msg("Sending request to %s %s %s" % (url, self.headers, body),
system="Gentleman")
d = self._agent.request(method, url, headers=self.headers,
bodyProducer=body)
protocol = JsonResponseProtocol(d)
@d.addErrback
def connectionFailed(failure):
failure.trap(ConnectionRefusedError)
raise GanetiApiError("Connection refused!")
@d.addCallback
def cb(response):
if response.code != 200:
raise NotOkayError(code=response.code)
response.deliverBody(protocol)
return protocol.getData()
|
[
"def",
"request",
"(",
"self",
",",
"method",
",",
"path",
",",
"query",
"=",
"None",
",",
"content",
"=",
"None",
")",
":",
"if",
"not",
"path",
".",
"startswith",
"(",
"\"/\"",
")",
":",
"raise",
"ClientError",
"(",
"\"Implementation error: Called with bad path %s\"",
"%",
"path",
")",
"body",
"=",
"None",
"if",
"content",
"is",
"not",
"None",
":",
"data",
"=",
"self",
".",
"_json_encoder",
".",
"encode",
"(",
"content",
")",
"body",
"=",
"StringProducer",
"(",
"data",
")",
"url",
"=",
"self",
".",
"_base_url",
"+",
"path",
"if",
"query",
":",
"prepare_query",
"(",
"query",
")",
"params",
"=",
"urlencode",
"(",
"query",
",",
"doseq",
"=",
"True",
")",
"url",
"+=",
"\"?%s\"",
"%",
"params",
"log",
".",
"msg",
"(",
"\"Sending request to %s %s %s\"",
"%",
"(",
"url",
",",
"self",
".",
"headers",
",",
"body",
")",
",",
"system",
"=",
"\"Gentleman\"",
")",
"d",
"=",
"self",
".",
"_agent",
".",
"request",
"(",
"method",
",",
"url",
",",
"headers",
"=",
"self",
".",
"headers",
",",
"bodyProducer",
"=",
"body",
")",
"protocol",
"=",
"JsonResponseProtocol",
"(",
"d",
")",
"@",
"d",
".",
"addErrback",
"def",
"connectionFailed",
"(",
"failure",
")",
":",
"failure",
".",
"trap",
"(",
"ConnectionRefusedError",
")",
"raise",
"GanetiApiError",
"(",
"\"Connection refused!\"",
")",
"@",
"d",
".",
"addCallback",
"def",
"cb",
"(",
"response",
")",
":",
"if",
"response",
".",
"code",
"!=",
"200",
":",
"raise",
"NotOkayError",
"(",
"code",
"=",
"response",
".",
"code",
")",
"response",
".",
"deliverBody",
"(",
"protocol",
")",
"return",
"protocol",
".",
"getData",
"(",
")"
] |
Sends an HTTP request.
This constructs a full URL, encodes and decodes HTTP bodies, and
handles invalid responses in a pythonic way.
@type method: string
@param method: HTTP method to use
@type path: string
@param path: HTTP URL path
@type query: list of two-tuples
@param query: query arguments to pass to urllib.urlencode
@type content: str or None
@param content: HTTP body content
@rtype: object
@return: JSON-Decoded response
@raises GanetiApiError: If an invalid response is returned
|
[
"Sends",
"an",
"HTTP",
"request",
"."
] |
python
|
train
| 29.949153 |
inveniosoftware/invenio-admin
|
invenio_admin/ext.py
|
https://github.com/inveniosoftware/invenio-admin/blob/b5ff8f7de66d1d6b67efc9f81ff094eb2428f969/invenio_admin/ext.py#L41-L51
|
def register_view(self, view_class, *args, **kwargs):
"""Register an admin view on this admin instance.
:param view_class: The view class name passed to the view factory.
:param args: Positional arguments for view class.
:param kwargs: Keyword arguments to view class.
"""
protected_view_class = self.view_class_factory(view_class)
if 'endpoint' not in kwargs:
kwargs['endpoint'] = view_class(*args, **kwargs).endpoint
self.admin.add_view(protected_view_class(*args, **kwargs))
|
[
"def",
"register_view",
"(",
"self",
",",
"view_class",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"protected_view_class",
"=",
"self",
".",
"view_class_factory",
"(",
"view_class",
")",
"if",
"'endpoint'",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"'endpoint'",
"]",
"=",
"view_class",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
".",
"endpoint",
"self",
".",
"admin",
".",
"add_view",
"(",
"protected_view_class",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")"
] |
Register an admin view on this admin instance.
:param view_class: The view class name passed to the view factory.
:param args: Positional arguments for view class.
:param kwargs: Keyword arguments to view class.
|
[
"Register",
"an",
"admin",
"view",
"on",
"this",
"admin",
"instance",
"."
] |
python
|
train
| 49.454545 |
LettError/MutatorMath
|
Lib/mutatorMath/ufo/document.py
|
https://github.com/LettError/MutatorMath/blob/10318fc4e7c9cee9df6130826829baea3054a42b/Lib/mutatorMath/ufo/document.py#L291-L301
|
def writeInfo(self, location=None, masters=None):
""" Write font into the current instance.
Note: the masters attribute is ignored at the moment.
"""
if self.currentInstance is None:
return
infoElement = ET.Element("info")
if location is not None:
locationElement = self._makeLocationElement(location)
infoElement.append(locationElement)
self.currentInstance.append(infoElement)
|
[
"def",
"writeInfo",
"(",
"self",
",",
"location",
"=",
"None",
",",
"masters",
"=",
"None",
")",
":",
"if",
"self",
".",
"currentInstance",
"is",
"None",
":",
"return",
"infoElement",
"=",
"ET",
".",
"Element",
"(",
"\"info\"",
")",
"if",
"location",
"is",
"not",
"None",
":",
"locationElement",
"=",
"self",
".",
"_makeLocationElement",
"(",
"location",
")",
"infoElement",
".",
"append",
"(",
"locationElement",
")",
"self",
".",
"currentInstance",
".",
"append",
"(",
"infoElement",
")"
] |
Write font info into the current instance.
Note: the masters attribute is ignored at the moment.
|
[
"Write",
"font",
"into",
"the",
"current",
"instance",
".",
"Note",
":",
"the",
"masters",
"attribute",
"is",
"ignored",
"at",
"the",
"moment",
"."
] |
python
|
train
| 42.181818 |
orbingol/NURBS-Python
|
geomdl/voxelize.py
|
https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/voxelize.py#L89-L107
|
def save_voxel_grid(voxel_grid, file_name):
""" Saves binary voxel grid as a binary file.
The binary file is structured in little-endian unsigned int format.
:param voxel_grid: binary voxel grid
:type voxel_grid: list, tuple
:param file_name: file name to save
:type file_name: str
"""
try:
with open(file_name, 'wb') as fp:
for voxel in voxel_grid:
fp.write(struct.pack("<I", voxel))
except IOError as e:
print("An error occurred: {}".format(e.args[-1]))
raise e
except Exception:
raise
|
[
"def",
"save_voxel_grid",
"(",
"voxel_grid",
",",
"file_name",
")",
":",
"try",
":",
"with",
"open",
"(",
"file_name",
",",
"'wb'",
")",
"as",
"fp",
":",
"for",
"voxel",
"in",
"voxel_grid",
":",
"fp",
".",
"write",
"(",
"struct",
".",
"pack",
"(",
"\"<I\"",
",",
"voxel",
")",
")",
"except",
"IOError",
"as",
"e",
":",
"print",
"(",
"\"An error occurred: {}\"",
".",
"format",
"(",
"e",
".",
"args",
"[",
"-",
"1",
"]",
")",
")",
"raise",
"e",
"except",
"Exception",
":",
"raise"
] |
Saves binary voxel grid as a binary file.
The binary file is structured in little-endian unsigned int format.
:param voxel_grid: binary voxel grid
:type voxel_grid: list, tuple
:param file_name: file name to save
:type file_name: str
|
[
"Saves",
"binary",
"voxel",
"grid",
"as",
"a",
"binary",
"file",
"."
] |
python
|
train
| 30.052632 |
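
Reading the file back mirrors the '<I' little-endian unsigned-int layout; a round-trip check with a throwaway grid:

import struct

grid = [0, 1, 1, 0]
with open('grid.bin', 'wb') as fp:
    for voxel in grid:
        fp.write(struct.pack('<I', voxel))

with open('grid.bin', 'rb') as fp:
    restored = [v for (v,) in struct.iter_unpack('<I', fp.read())]
print(restored == grid)  # True
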
wdecoster/nanoget
|
nanoget/extraction_functions.py
|
https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/extraction_functions.py#L120-L139
|
def process_ubam(bam, **kwargs):
"""Extracting metrics from unaligned bam format
Extracting lengths
"""
logging.info("Nanoget: Starting to collect statistics from ubam file {}.".format(bam))
samfile = pysam.AlignmentFile(bam, "rb", check_sq=False)
if not samfile.has_index():
pysam.index(bam)
# Need to reload the samfile after creating index
samfile = pysam.AlignmentFile(bam, "rb")
logging.info("Nanoget: No index for bam file could be found, created index.")
datadf = pd.DataFrame(
data=[(read.query_name, nanomath.ave_qual(read.query_qualities), read.query_length)
for read in samfile.fetch(until_eof=True)],
columns=["readIDs", "quals", "lengths"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: ubam {} contains {} reads.".format(
bam, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf)
|
[
"def",
"process_ubam",
"(",
"bam",
",",
"*",
"*",
"kwargs",
")",
":",
"logging",
".",
"info",
"(",
"\"Nanoget: Starting to collect statistics from ubam file {}.\"",
".",
"format",
"(",
"bam",
")",
")",
"samfile",
"=",
"pysam",
".",
"AlignmentFile",
"(",
"bam",
",",
"\"rb\"",
",",
"check_sq",
"=",
"False",
")",
"if",
"not",
"samfile",
".",
"has_index",
"(",
")",
":",
"pysam",
".",
"index",
"(",
"bam",
")",
"# Need to reload the samfile after creating index",
"samfile",
"=",
"pysam",
".",
"AlignmentFile",
"(",
"bam",
",",
"\"rb\"",
")",
"logging",
".",
"info",
"(",
"\"Nanoget: No index for bam file could be found, created index.\"",
")",
"datadf",
"=",
"pd",
".",
"DataFrame",
"(",
"data",
"=",
"[",
"(",
"read",
".",
"query_name",
",",
"nanomath",
".",
"ave_qual",
"(",
"read",
".",
"query_qualities",
")",
",",
"read",
".",
"query_length",
")",
"for",
"read",
"in",
"samfile",
".",
"fetch",
"(",
"until_eof",
"=",
"True",
")",
"]",
",",
"columns",
"=",
"[",
"\"readIDs\"",
",",
"\"quals\"",
",",
"\"lengths\"",
"]",
")",
".",
"dropna",
"(",
"axis",
"=",
"'columns'",
",",
"how",
"=",
"'all'",
")",
".",
"dropna",
"(",
"axis",
"=",
"'index'",
",",
"how",
"=",
"'any'",
")",
"logging",
".",
"info",
"(",
"\"Nanoget: ubam {} contains {} reads.\"",
".",
"format",
"(",
"bam",
",",
"datadf",
"[",
"\"lengths\"",
"]",
".",
"size",
")",
")",
"return",
"ut",
".",
"reduce_memory_usage",
"(",
"datadf",
")"
] |
Extracting metrics from unaligned bam format
Extracting lengths
|
[
"Extracting",
"metrics",
"from",
"unaligned",
"bam",
"format",
"Extracting",
"lengths"
] |
python
|
train
| 47.75 |
kwikteam/phy
|
phy/plot/utils.py
|
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/plot/utils.py#L33-L47
|
def _binary_search(f, xmin, xmax, eps=1e-9):
"""Return the largest x such f(x) is True."""
middle = (xmax + xmin) / 2.
while xmax - xmin > eps:
assert xmin < xmax
middle = (xmax + xmin) / 2.
if f(xmax):
return xmax
if not f(xmin):
return xmin
if f(middle):
xmin = middle
else:
xmax = middle
return middle
|
[
"def",
"_binary_search",
"(",
"f",
",",
"xmin",
",",
"xmax",
",",
"eps",
"=",
"1e-9",
")",
":",
"middle",
"=",
"(",
"xmax",
"+",
"xmin",
")",
"/",
"2.",
"while",
"xmax",
"-",
"xmin",
">",
"eps",
":",
"assert",
"xmin",
"<",
"xmax",
"middle",
"=",
"(",
"xmax",
"+",
"xmin",
")",
"/",
"2.",
"if",
"f",
"(",
"xmax",
")",
":",
"return",
"xmax",
"if",
"not",
"f",
"(",
"xmin",
")",
":",
"return",
"xmin",
"if",
"f",
"(",
"middle",
")",
":",
"xmin",
"=",
"middle",
"else",
":",
"xmax",
"=",
"middle",
"return",
"middle"
] |
Return the largest x such that f(x) is True.
|
[
"Return",
"the",
"largest",
"x",
"such",
"f",
"(",
"x",
")",
"is",
"True",
"."
] |
python
|
train
| 26.8 |
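A minimal usage sketch for _binary_search (assumes f is True on [xmin, c] and False above c): finding the largest x with x*x <= 2 on [0, 2] converges to sqrt(2) within the default eps.
f = lambda x: x * x <= 2.0
root = _binary_search(f, 0.0, 2.0)   # ~1.41421356, i.e. sqrt(2)
assert abs(root - 2 ** 0.5) < 1e-6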
Nixiware/viper
|
nx/viper/module.py
|
https://github.com/Nixiware/viper/blob/fbe6057facd8d46103e9955880dfd99e63b7acb3/nx/viper/module.py#L108-L134
|
def _loadService(self, servicePath):
"""
Check if an application service can be found at the specified path.
If found, instantiate it and add it to the application service pool.
:param: <str> service file path
:return: <void>
"""
serviceName = ntpath.basename(servicePath).replace(".py", "")
# importing service
serviceSpec = importlib.util.spec_from_file_location(
serviceName,
servicePath
)
service = importlib.util.module_from_spec(serviceSpec)
serviceSpec.loader.exec_module(service)
# checking if there is a service in the file
if hasattr(service, "Service"):
# instantiate the service
serviceInstance = service.Service(self.application)
self.application.addService(
self.name,
serviceName,
serviceInstance
)
|
[
"def",
"_loadService",
"(",
"self",
",",
"servicePath",
")",
":",
"serviceName",
"=",
"ntpath",
".",
"basename",
"(",
"servicePath",
")",
".",
"replace",
"(",
"\".py\"",
",",
"\"\"",
")",
"# importing service",
"serviceSpec",
"=",
"importlib",
".",
"util",
".",
"spec_from_file_location",
"(",
"serviceName",
",",
"servicePath",
")",
"service",
"=",
"importlib",
".",
"util",
".",
"module_from_spec",
"(",
"serviceSpec",
")",
"serviceSpec",
".",
"loader",
".",
"exec_module",
"(",
"service",
")",
"# checking if there is a service in the file",
"if",
"hasattr",
"(",
"service",
",",
"\"Service\"",
")",
":",
"# instantiate the service",
"serviceInstance",
"=",
"service",
".",
"Service",
"(",
"self",
".",
"application",
")",
"self",
".",
"application",
".",
"addService",
"(",
"self",
".",
"name",
",",
"serviceName",
",",
"serviceInstance",
")"
] |
Check if an application service can be found at the specified path.
If found, instantiate it and add it to the application service pool.
:param: <str> service file path
:return: <void>
|
[
"Check",
"if",
"an",
"application",
"service",
"can",
"be",
"found",
"at",
"the",
"specified",
"path",
".",
"If",
"found",
"instantiate",
"it",
"and",
"add",
"it",
"to",
"the",
"application",
"service",
"pool",
"."
] |
python
|
train
| 34.148148 |
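The importlib calls in _loadService follow the standard "import a module from an arbitrary file path" recipe; a standalone sketch (the path, module name and attribute are hypothetical):
import importlib.util
spec = importlib.util.spec_from_file_location("my_service", "/tmp/my_service.py")
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)        # runs the file in the new module's namespace
if hasattr(module, "Service"):         # only pick up a class if the file defines one
    instance = module.Service()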
wesyoung/pyzyre
|
czmq/_czmq_ctypes.py
|
https://github.com/wesyoung/pyzyre/blob/22d4c757acefcfdb700d3802adaf30b402bb9eea/czmq/_czmq_ctypes.py#L4539-L4544
|
def pushmem(self, data, size):
"""
Push block of memory to front of message, as a new frame.
Returns 0 on success, -1 on error.
"""
return lib.zmsg_pushmem(self._as_parameter_, data, size)
|
[
"def",
"pushmem",
"(",
"self",
",",
"data",
",",
"size",
")",
":",
"return",
"lib",
".",
"zmsg_pushmem",
"(",
"self",
".",
"_as_parameter_",
",",
"data",
",",
"size",
")"
] |
Push block of memory to front of message, as a new frame.
Returns 0 on success, -1 on error.
|
[
"Push",
"block",
"of",
"memory",
"to",
"front",
"of",
"message",
"as",
"a",
"new",
"frame",
".",
"Returns",
"0",
"on",
"success",
"-",
"1",
"on",
"error",
"."
] |
python
|
train
| 35.833333 |
SBRG/ssbio
|
ssbio/databases/uniprot.py
|
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/uniprot.py#L238-L276
|
def parse_uniprot_xml_metadata(sr):
"""Load relevant attributes and dbxrefs from a parsed UniProt XML file in a SeqRecord.
Returns:
dict: All parsed information
"""
# TODO: What about "reviewed" status? and EC number
xref_dbs_to_keep = ['GO', 'KEGG', 'PDB', 'PROSITE', 'Pfam', 'RefSeq']
infodict = {}
infodict['alt_uniprots'] = list(set(sr.annotations['accessions']).difference([sr.id]))
infodict['gene_name'] = None
if 'gene_name_primary' in sr.annotations:
infodict['gene_name'] = sr.annotations['gene_name_primary']
infodict['description'] = sr.description
infodict['taxonomy'] = None
if 'organism' in sr.annotations:
infodict['taxonomy'] = sr.annotations['organism']
infodict['seq_version'] = sr.annotations['sequence_version']
infodict['seq_date'] = sr.annotations['sequence_modified']
infodict['entry_version'] = sr.annotations['version']
infodict['entry_date'] = sr.annotations['modified']
tmp = defaultdict(list)
for xref in sr.dbxrefs:
database = xref.split(':', 1)[0]
xrefs = xref.split(':', 1)[-1]
if database in xref_dbs_to_keep:
if database == 'PDB':
tmp['pdbs'].append(xrefs)
else:
tmp[database.lower()].append(xrefs)
infodict.update(tmp)
return infodict
|
[
"def",
"parse_uniprot_xml_metadata",
"(",
"sr",
")",
":",
"# TODO: What about \"reviewed\" status? and EC number",
"xref_dbs_to_keep",
"=",
"[",
"'GO'",
",",
"'KEGG'",
",",
"'PDB'",
",",
"'PROSITE'",
",",
"'Pfam'",
",",
"'RefSeq'",
"]",
"infodict",
"=",
"{",
"}",
"infodict",
"[",
"'alt_uniprots'",
"]",
"=",
"list",
"(",
"set",
"(",
"sr",
".",
"annotations",
"[",
"'accessions'",
"]",
")",
".",
"difference",
"(",
"[",
"sr",
".",
"id",
"]",
")",
")",
"infodict",
"[",
"'gene_name'",
"]",
"=",
"None",
"if",
"'gene_name_primary'",
"in",
"sr",
".",
"annotations",
":",
"infodict",
"[",
"'gene_name'",
"]",
"=",
"sr",
".",
"annotations",
"[",
"'gene_name_primary'",
"]",
"infodict",
"[",
"'description'",
"]",
"=",
"sr",
".",
"description",
"infodict",
"[",
"'taxonomy'",
"]",
"=",
"None",
"if",
"'organism'",
"in",
"sr",
".",
"annotations",
":",
"infodict",
"[",
"'taxonomy'",
"]",
"=",
"sr",
".",
"annotations",
"[",
"'organism'",
"]",
"infodict",
"[",
"'seq_version'",
"]",
"=",
"sr",
".",
"annotations",
"[",
"'sequence_version'",
"]",
"infodict",
"[",
"'seq_date'",
"]",
"=",
"sr",
".",
"annotations",
"[",
"'sequence_modified'",
"]",
"infodict",
"[",
"'entry_version'",
"]",
"=",
"sr",
".",
"annotations",
"[",
"'version'",
"]",
"infodict",
"[",
"'entry_date'",
"]",
"=",
"sr",
".",
"annotations",
"[",
"'modified'",
"]",
"tmp",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"xref",
"in",
"sr",
".",
"dbxrefs",
":",
"database",
"=",
"xref",
".",
"split",
"(",
"':'",
",",
"1",
")",
"[",
"0",
"]",
"xrefs",
"=",
"xref",
".",
"split",
"(",
"':'",
",",
"1",
")",
"[",
"-",
"1",
"]",
"if",
"database",
"in",
"xref_dbs_to_keep",
":",
"if",
"database",
"==",
"'PDB'",
":",
"tmp",
"[",
"'pdbs'",
"]",
".",
"append",
"(",
"xrefs",
")",
"else",
":",
"tmp",
"[",
"database",
".",
"lower",
"(",
")",
"]",
".",
"append",
"(",
"xrefs",
")",
"infodict",
".",
"update",
"(",
"tmp",
")",
"return",
"infodict"
] |
Load relevant attributes and dbxrefs from a parsed UniProt XML file in a SeqRecord.
Returns:
dict: All parsed information
|
[
"Load",
"relevant",
"attributes",
"and",
"dbxrefs",
"from",
"a",
"parsed",
"UniProt",
"XML",
"file",
"in",
"a",
"SeqRecord",
"."
] |
python
|
train
| 33.923077 |
django-danceschool/django-danceschool
|
danceschool/core/models.py
|
https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/core/models.py#L1191-L1211
|
def allDayForDate(self,this_date,timeZone=None):
'''
This method determines whether the occurrence lasts the entirety of
a specified day in the specified time zone. If no time zone is specified,
then it uses the default time zone. Also, give a grace period of a few
minutes to account for issues with the way events are sometimes entered.
'''
if isinstance(this_date,datetime):
d = this_date.date()
else:
d = this_date
date_start = datetime(d.year,d.month,d.day)
naive_start = self.startTime if timezone.is_naive(self.startTime) else timezone.make_naive(self.startTime, timezone=timeZone)
naive_end = self.endTime if timezone.is_naive(self.endTime) else timezone.make_naive(self.endTime, timezone=timeZone)
return (
# Ensure that all comparisons are done in local time
naive_start <= date_start and
naive_end >= date_start + timedelta(days=1,minutes=-30)
)
|
[
"def",
"allDayForDate",
"(",
"self",
",",
"this_date",
",",
"timeZone",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"this_date",
",",
"datetime",
")",
":",
"d",
"=",
"this_date",
".",
"date",
"(",
")",
"else",
":",
"d",
"=",
"this_date",
"date_start",
"=",
"datetime",
"(",
"d",
".",
"year",
",",
"d",
".",
"month",
",",
"d",
".",
"day",
")",
"naive_start",
"=",
"self",
".",
"startTime",
"if",
"timezone",
".",
"is_naive",
"(",
"self",
".",
"startTime",
")",
"else",
"timezone",
".",
"make_naive",
"(",
"self",
".",
"startTime",
",",
"timezone",
"=",
"timeZone",
")",
"naive_end",
"=",
"self",
".",
"endTime",
"if",
"timezone",
".",
"is_naive",
"(",
"self",
".",
"endTime",
")",
"else",
"timezone",
".",
"make_naive",
"(",
"self",
".",
"endTime",
",",
"timezone",
"=",
"timeZone",
")",
"return",
"(",
"# Ensure that all comparisons are done in local time",
"naive_start",
"<=",
"date_start",
"and",
"naive_end",
">=",
"date_start",
"+",
"timedelta",
"(",
"days",
"=",
"1",
",",
"minutes",
"=",
"-",
"30",
")",
")"
] |
This method determines whether the occurrence lasts the entirety of
a specified day in the specified time zone. If no time zone is specified,
then it uses the default time zone. Also, give a grace period of a few
minutes to account for issues with the way events are sometimes entered.
|
[
"This",
"method",
"determines",
"whether",
"the",
"occurrence",
"lasts",
"the",
"entirety",
"of",
"a",
"specified",
"day",
"in",
"the",
"specified",
"time",
"zone",
".",
"If",
"no",
"time",
"zone",
"is",
"specified",
"then",
"it",
"uses",
"the",
"default",
"time",
"zone",
")",
".",
"Also",
"give",
"a",
"grace",
"period",
"of",
"a",
"few",
"minutes",
"to",
"account",
"for",
"issues",
"with",
"the",
"way",
"events",
"are",
"sometimes",
"entered",
"."
] |
python
|
train
| 47.857143 |
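Note on the grace period in allDayForDate: timedelta(days=1, minutes=-30) is 23 h 30 min, so an occurrence only needs to run until 23:30 local time to count as lasting the whole day.
from datetime import datetime, timedelta
d = datetime(2024, 1, 1)
print(d + timedelta(days=1, minutes=-30))   # 2024-01-01 23:30:00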
apple/turicreate
|
deps/src/boost_1_68_0/tools/build/src/build/virtual_target.py
|
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/virtual_target.py#L107-L150
|
def register (self, target):
""" Registers a new virtual target. Checks if there's already registered target, with the same
name, type, project and subvariant properties, and also with the same sources
and equal action. If such a target is found it is returned and 'target' is not registered.
Otherwise, 'target' is registered and returned.
"""
assert isinstance(target, VirtualTarget)
if target.path():
signature = target.path() + "-" + target.name()
else:
signature = "-" + target.name()
result = None
if signature not in self.cache_:
self.cache_ [signature] = []
for t in self.cache_ [signature]:
a1 = t.action ()
a2 = target.action ()
# TODO: why are we checking for not result?
if not result:
if not a1 and not a2:
result = t
else:
if a1 and a2 and a1.action_name () == a2.action_name () and a1.sources () == a2.sources ():
ps1 = a1.properties ()
ps2 = a2.properties ()
p1 = ps1.base () + ps1.free () +\
b2.util.set.difference(ps1.dependency(), ps1.incidental())
p2 = ps2.base () + ps2.free () +\
b2.util.set.difference(ps2.dependency(), ps2.incidental())
if p1 == p2:
result = t
if not result:
self.cache_ [signature].append (target)
result = target
# TODO: Don't append if we found pre-existing target?
self.recent_targets_.append(result)
self.all_targets_.append(result)
return result
|
[
"def",
"register",
"(",
"self",
",",
"target",
")",
":",
"assert",
"isinstance",
"(",
"target",
",",
"VirtualTarget",
")",
"if",
"target",
".",
"path",
"(",
")",
":",
"signature",
"=",
"target",
".",
"path",
"(",
")",
"+",
"\"-\"",
"+",
"target",
".",
"name",
"(",
")",
"else",
":",
"signature",
"=",
"\"-\"",
"+",
"target",
".",
"name",
"(",
")",
"result",
"=",
"None",
"if",
"signature",
"not",
"in",
"self",
".",
"cache_",
":",
"self",
".",
"cache_",
"[",
"signature",
"]",
"=",
"[",
"]",
"for",
"t",
"in",
"self",
".",
"cache_",
"[",
"signature",
"]",
":",
"a1",
"=",
"t",
".",
"action",
"(",
")",
"a2",
"=",
"target",
".",
"action",
"(",
")",
"# TODO: why are we checking for not result?",
"if",
"not",
"result",
":",
"if",
"not",
"a1",
"and",
"not",
"a2",
":",
"result",
"=",
"t",
"else",
":",
"if",
"a1",
"and",
"a2",
"and",
"a1",
".",
"action_name",
"(",
")",
"==",
"a2",
".",
"action_name",
"(",
")",
"and",
"a1",
".",
"sources",
"(",
")",
"==",
"a2",
".",
"sources",
"(",
")",
":",
"ps1",
"=",
"a1",
".",
"properties",
"(",
")",
"ps2",
"=",
"a2",
".",
"properties",
"(",
")",
"p1",
"=",
"ps1",
".",
"base",
"(",
")",
"+",
"ps1",
".",
"free",
"(",
")",
"+",
"b2",
".",
"util",
".",
"set",
".",
"difference",
"(",
"ps1",
".",
"dependency",
"(",
")",
",",
"ps1",
".",
"incidental",
"(",
")",
")",
"p2",
"=",
"ps2",
".",
"base",
"(",
")",
"+",
"ps2",
".",
"free",
"(",
")",
"+",
"b2",
".",
"util",
".",
"set",
".",
"difference",
"(",
"ps2",
".",
"dependency",
"(",
")",
",",
"ps2",
".",
"incidental",
"(",
")",
")",
"if",
"p1",
"==",
"p2",
":",
"result",
"=",
"t",
"if",
"not",
"result",
":",
"self",
".",
"cache_",
"[",
"signature",
"]",
".",
"append",
"(",
"target",
")",
"result",
"=",
"target",
"# TODO: Don't append if we found pre-existing target?",
"self",
".",
"recent_targets_",
".",
"append",
"(",
"result",
")",
"self",
".",
"all_targets_",
".",
"append",
"(",
"result",
")",
"return",
"result"
] |
Registers a new virtual target. Checks if there's already a registered target, with the same
name, type, project and subvariant properties, and also with the same sources
and equal action. If such a target is found it is returned and 'target' is not registered.
Otherwise, 'target' is registered and returned.
|
[
"Registers",
"a",
"new",
"virtual",
"target",
".",
"Checks",
"if",
"there",
"s",
"already",
"registered",
"target",
"with",
"the",
"same",
"name",
"type",
"project",
"and",
"subvariant",
"properties",
"and",
"also",
"with",
"the",
"same",
"sources",
"and",
"equal",
"action",
".",
"If",
"such",
"target",
"is",
"found",
"it",
"is",
"retured",
"and",
"target",
"is",
"not",
"registered",
".",
"Otherwise",
"target",
"is",
"registered",
"and",
"returned",
"."
] |
python
|
train
| 40.363636 |
eddiejessup/spatious
|
spatious/distance.py
|
https://github.com/eddiejessup/spatious/blob/b7ae91bec029e85a45a7f303ee184076433723cd/spatious/distance.py#L56-L77
|
def csep_periodic(ra, rb, L):
"""Return separation vectors between each pair of the two sets of points.
Parameters
----------
ra, rb: float array-like, shape (n, d) and (m, d) in d dimensions.
Two sets of points.
L: float array, shape (d,)
System lengths.
Returns
-------
csep: float array-like, shape (n, m, d)
csep[i, j] is the separation vector from point j to point i.
Note the un-intuitive vector direction.
"""
seps = ra[:, np.newaxis, :] - rb[np.newaxis, :, :]
for i_dim in range(ra.shape[1]):
seps_dim = seps[:, :, i_dim]
seps_dim[seps_dim > L[i_dim] / 2.0] -= L[i_dim]
seps_dim[seps_dim < -L[i_dim] / 2.0] += L[i_dim]
return seps
|
[
"def",
"csep_periodic",
"(",
"ra",
",",
"rb",
",",
"L",
")",
":",
"seps",
"=",
"ra",
"[",
":",
",",
"np",
".",
"newaxis",
",",
":",
"]",
"-",
"rb",
"[",
"np",
".",
"newaxis",
",",
":",
",",
":",
"]",
"for",
"i_dim",
"in",
"range",
"(",
"ra",
".",
"shape",
"[",
"1",
"]",
")",
":",
"seps_dim",
"=",
"seps",
"[",
":",
",",
":",
",",
"i_dim",
"]",
"seps_dim",
"[",
"seps_dim",
">",
"L",
"[",
"i_dim",
"]",
"/",
"2.0",
"]",
"-=",
"L",
"[",
"i_dim",
"]",
"seps_dim",
"[",
"seps_dim",
"<",
"-",
"L",
"[",
"i_dim",
"]",
"/",
"2.0",
"]",
"+=",
"L",
"[",
"i_dim",
"]",
"return",
"seps"
] |
Return separation vectors between each pair of the two sets of points.
Parameters
----------
ra, rb: float array-like, shape (n, d) and (m, d) in d dimensions.
Two sets of points.
L: float array, shape (d,)
System lengths.
Returns
-------
csep: float array-like, shape (n, m, d)
csep[i, j] is the separation vector from point j to point i.
Note the un-intuitive vector direction.
|
[
"Return",
"separation",
"vectors",
"between",
"each",
"pair",
"of",
"the",
"two",
"sets",
"of",
"points",
"."
] |
python
|
train
| 32.863636 |
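Worked example for csep_periodic (assuming the function and numpy are importable): in a 1-D periodic box of length 10, points at 1.0 and 9.0 are separated by 2.0 through the boundary, not -8.0.
import numpy as np
ra = np.array([[1.0]])
rb = np.array([[9.0]])
L = np.array([10.0])
print(csep_periodic(ra, rb, L))   # [[[2.]]] -- the raw -8.0 is wrapped because -8.0 < -L/2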
ojarva/python-sshpubkeys
|
sshpubkeys/keys.py
|
https://github.com/ojarva/python-sshpubkeys/blob/86dc1ab27ce82dcc091ce127416cc3ee219e9bec/sshpubkeys/keys.py#L239-L245
|
def decode_key(cls, pubkey_content):
"""Decode base64 coded part of the key."""
try:
decoded_key = base64.b64decode(pubkey_content.encode("ascii"))
except (TypeError, binascii.Error):
raise MalformedDataError("Unable to decode the key")
return decoded_key
|
[
"def",
"decode_key",
"(",
"cls",
",",
"pubkey_content",
")",
":",
"try",
":",
"decoded_key",
"=",
"base64",
".",
"b64decode",
"(",
"pubkey_content",
".",
"encode",
"(",
"\"ascii\"",
")",
")",
"except",
"(",
"TypeError",
",",
"binascii",
".",
"Error",
")",
":",
"raise",
"MalformedDataError",
"(",
"\"Unable to decode the key\"",
")",
"return",
"decoded_key"
] |
Decode base64 coded part of the key.
|
[
"Decode",
"base64",
"coded",
"part",
"of",
"the",
"key",
"."
] |
python
|
test
| 43.571429 |
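A minimal sketch for decode_key (the SSHKey class name and key bytes are assumptions for illustration): well-formed base64 decodes to bytes, while malformed input raises MalformedDataError.
import base64
payload = base64.b64encode(b"ssh-rsa test blob").decode("ascii")
decoded = SSHKey.decode_key(payload)   # decode_key is shown as a classmethod above
assert decoded == b"ssh-rsa test blob"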
pyvisa/pyvisa
|
pyvisa/ctwrapper/functions.py
|
https://github.com/pyvisa/pyvisa/blob/b8b2d4371e1f00782856aa9176ff1ced6bcb3798/pyvisa/ctwrapper/functions.py#L971-L991
|
def move_in_8(library, session, space, offset, length, extended=False):
"""Moves an 8-bit block of data from the specified address space and offset to local memory.
Corresponds to viMoveIn8* functions of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session.
:param space: Specifies the address space. (Constants.*SPACE*)
:param offset: Offset (in bytes) of the address or register from which to read.
:param length: Number of elements to transfer, where the data width of the elements to transfer
is identical to the source data width.
:param extended: Use 64 bits offset independent of the platform.
:return: Data read from the bus, return value of the library call.
:rtype: list, :class:`pyvisa.constants.StatusCode`
"""
buffer_8 = (ViUInt8 * length)()
if extended:
ret = library.viMoveIn8Ex(session, space, offset, length, buffer_8)
else:
ret = library.viMoveIn8(session, space, offset, length, buffer_8)
return list(buffer_8), ret
|
[
"def",
"move_in_8",
"(",
"library",
",",
"session",
",",
"space",
",",
"offset",
",",
"length",
",",
"extended",
"=",
"False",
")",
":",
"buffer_8",
"=",
"(",
"ViUInt8",
"*",
"length",
")",
"(",
")",
"if",
"extended",
":",
"ret",
"=",
"library",
".",
"viMoveIn8Ex",
"(",
"session",
",",
"space",
",",
"offset",
",",
"length",
",",
"buffer_8",
")",
"else",
":",
"ret",
"=",
"library",
".",
"viMoveIn8",
"(",
"session",
",",
"space",
",",
"offset",
",",
"length",
",",
"buffer_8",
")",
"return",
"list",
"(",
"buffer_8",
")",
",",
"ret"
] |
Moves an 8-bit block of data from the specified address space and offset to local memory.
Corresponds to viMoveIn8* functions of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session.
:param space: Specifies the address space. (Constants.*SPACE*)
:param offset: Offset (in bytes) of the address or register from which to read.
:param length: Number of elements to transfer, where the data width of the elements to transfer
is identical to the source data width.
:param extended: Use 64 bits offset independent of the platform.
:return: Data read from the bus, return value of the library call.
:rtype: list, :class:`pyvisa.constants.StatusCode`
|
[
"Moves",
"an",
"8",
"-",
"bit",
"block",
"of",
"data",
"from",
"the",
"specified",
"address",
"space",
"and",
"offset",
"to",
"local",
"memory",
"."
] |
python
|
train
| 51.571429 |
devassistant/devassistant
|
devassistant/gui/path_window.py
|
https://github.com/devassistant/devassistant/blob/2dbfeaa666a64127263664d18969c55d19ecc83e/devassistant/gui/path_window.py#L65-L69
|
def get_full_dir_name(self):
"""
Function returns a full dir name
"""
return os.path.join(self.dir_name.get_text(), self.entry_project_name.get_text())
|
[
"def",
"get_full_dir_name",
"(",
"self",
")",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"dir_name",
".",
"get_text",
"(",
")",
",",
"self",
".",
"entry_project_name",
".",
"get_text",
"(",
")",
")"
] |
Function returns a full dir name
|
[
"Function",
"returns",
"a",
"full",
"dir",
"name"
] |
python
|
train
| 35.8 |
Knio/pynmea2
|
pynmea2/nmea_utils.py
|
https://github.com/Knio/pynmea2/blob/c4fc66c6a13dd85ad862b15c516245af6e571456/pynmea2/nmea_utils.py#L29-L39
|
def dm_to_sd(dm):
'''
Converts a geographic co-ordinate given in "degrees/minutes" dddmm.mmmm
format (eg, "12319.943281" = 123 degrees, 19.943281 minutes) to a signed
decimal (python float) format
'''
# '12319.943281'
if not dm or dm == '0':
return 0.
d, m = re.match(r'^(\d+)(\d\d\.\d+)$', dm).groups()
return float(d) + float(m) / 60
|
[
"def",
"dm_to_sd",
"(",
"dm",
")",
":",
"# '12319.943281'\r",
"if",
"not",
"dm",
"or",
"dm",
"==",
"'0'",
":",
"return",
"0.",
"d",
",",
"m",
"=",
"re",
".",
"match",
"(",
"r'^(\\d+)(\\d\\d\\.\\d+)$'",
",",
"dm",
")",
".",
"groups",
"(",
")",
"return",
"float",
"(",
"d",
")",
"+",
"float",
"(",
"m",
")",
"/",
"60"
] |
Converts a geographic co-ordinate given in "degrees/minutes" dddmm.mmmm
format (eg, "12319.943281" = 123 degrees, 19.943281 minutes) to a signed
decimal (python float) format
|
[
"Converts",
"a",
"geographic",
"co",
"-",
"ordinate",
"given",
"in",
"degrees",
"/",
"minutes",
"dddmm",
".",
"mmmm",
"format",
"(",
"eg",
"12319",
".",
"943281",
"=",
"123",
"degrees",
"19",
".",
"943281",
"minutes",
")",
"to",
"a",
"signed",
"decimal",
"(",
"python",
"float",
")",
"format"
] |
python
|
train
| 34.454545 |
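Worked example for dm_to_sd: the regex forces exactly two digits before the decimal point into the minutes part, so "12319.943281" splits into d = "123" and m = "19.943281", giving 123 + 19.943281/60.
print(dm_to_sd('12319.943281'))   # 123.33238801666667
print(dm_to_sd('0'))              # 0.0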
bcbio/bcbio-nextgen
|
bcbio/cwl/workflow.py
|
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/workflow.py#L280-L290
|
def _get_variable(vid, variables):
"""Retrieve an input variable from our existing pool of options.
"""
if isinstance(vid, six.string_types):
vid = get_base_id(vid)
else:
vid = _get_string_vid(vid)
for v in variables:
if vid == get_base_id(v["id"]):
return copy.deepcopy(v)
raise ValueError("Did not find variable %s in \n%s" % (vid, pprint.pformat(variables)))
|
[
"def",
"_get_variable",
"(",
"vid",
",",
"variables",
")",
":",
"if",
"isinstance",
"(",
"vid",
",",
"six",
".",
"string_types",
")",
":",
"vid",
"=",
"get_base_id",
"(",
"vid",
")",
"else",
":",
"vid",
"=",
"_get_string_vid",
"(",
"vid",
")",
"for",
"v",
"in",
"variables",
":",
"if",
"vid",
"==",
"get_base_id",
"(",
"v",
"[",
"\"id\"",
"]",
")",
":",
"return",
"copy",
".",
"deepcopy",
"(",
"v",
")",
"raise",
"ValueError",
"(",
"\"Did not find variable %s in \\n%s\"",
"%",
"(",
"vid",
",",
"pprint",
".",
"pformat",
"(",
"variables",
")",
")",
")"
] |
Retrieve an input variable from our existing pool of options.
|
[
"Retrieve",
"an",
"input",
"variable",
"from",
"our",
"existing",
"pool",
"of",
"options",
"."
] |
python
|
train
| 37.363636 |
mbj4668/pyang
|
pyang/translators/schemanode.py
|
https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/schemanode.py#L191-L195
|
def _default_format(self, occur):
"""Return the default serialization format."""
if self.text or self.children:
return self.start_tag() + "%s" + self.end_tag()
return self.start_tag(empty=True)
|
[
"def",
"_default_format",
"(",
"self",
",",
"occur",
")",
":",
"if",
"self",
".",
"text",
"or",
"self",
".",
"children",
":",
"return",
"self",
".",
"start_tag",
"(",
")",
"+",
"\"%s\"",
"+",
"self",
".",
"end_tag",
"(",
")",
"return",
"self",
".",
"start_tag",
"(",
"empty",
"=",
"True",
")"
] |
Return the default serialization format.
|
[
"Return",
"the",
"default",
"serialization",
"format",
"."
] |
python
|
train
| 45.2 |
vijayvarma392/surfinBH
|
surfinBH/_fit_evaluators/fit_7dq2.py
|
https://github.com/vijayvarma392/surfinBH/blob/9f2d25d00f894ee2ce9ffbb02f4e4a41fa7989eb/surfinBH/_fit_evaluators/fit_7dq2.py#L202-L223
|
def _get_fit_params(self, x, fit_key):
""" Transforms the input parameter to fit parameters for the 7dq2 model.
That is, maps from
x = [q, chiAx, chiAy, chiAz, chiBx, chiBy, chiBz]
fit_params = [np.log(q), chiAx, chiAy, chiHat, chiBx, chiBy, chi_a]
chiHat is defined in Eq.(3) of 1508.07253, but with chiAz and chiBz instead
of chiA and chiB.
chi_a = (chiAz - chiBz)/2.
"""
q, chiAz, chiBz = x[0], x[3], x[6]
eta = q/(1.+q)**2
chi_wtAvg = (q*chiAz+chiBz)/(1.+q)
chiHat = (chi_wtAvg - 38.*eta/113.*(chiAz + chiBz))/(1. - 76.*eta/113.)
chi_a = (chiAz - chiBz)/2.
fit_params = x
fit_params[0] = np.log(q)
fit_params[3] = chiHat
fit_params[6] = chi_a
return fit_params
|
[
"def",
"_get_fit_params",
"(",
"self",
",",
"x",
",",
"fit_key",
")",
":",
"q",
",",
"chiAz",
",",
"chiBz",
"=",
"x",
"[",
"0",
"]",
",",
"x",
"[",
"3",
"]",
",",
"x",
"[",
"6",
"]",
"eta",
"=",
"q",
"/",
"(",
"1.",
"+",
"q",
")",
"**",
"2",
"chi_wtAvg",
"=",
"(",
"q",
"*",
"chiAz",
"+",
"chiBz",
")",
"/",
"(",
"1.",
"+",
"q",
")",
"chiHat",
"=",
"(",
"chi_wtAvg",
"-",
"38.",
"*",
"eta",
"/",
"113.",
"*",
"(",
"chiAz",
"+",
"chiBz",
")",
")",
"/",
"(",
"1.",
"-",
"76.",
"*",
"eta",
"/",
"113.",
")",
"chi_a",
"=",
"(",
"chiAz",
"-",
"chiBz",
")",
"/",
"2.",
"fit_params",
"=",
"x",
"fit_params",
"[",
"0",
"]",
"=",
"np",
".",
"log",
"(",
"q",
")",
"fit_params",
"[",
"3",
"]",
"=",
"chiHat",
"fit_params",
"[",
"6",
"]",
"=",
"chi_a",
"return",
"fit_params"
] |
Transforms the input parameter to fit parameters for the 7dq2 model.
That is, maps from
x = [q, chiAx, chiAy, chiAz, chiBx, chiBy, chiBz]
fit_params = [np.log(q), chiAx, chiAy, chiHat, chiBx, chiBy, chi_a]
chiHat is defined in Eq.(3) of 1508.07253, but with chiAz and chiBz instead
of chiA and chiB.
chi_a = (chiAz - chiBz)/2.
|
[
"Transforms",
"the",
"input",
"parameter",
"to",
"fit",
"parameters",
"for",
"the",
"7dq2",
"model",
".",
"That",
"is",
"maps",
"from",
"x",
"=",
"[",
"q",
"chiAx",
"chiAy",
"chiAz",
"chiBx",
"chiBy",
"chiBz",
"]",
"fit_params",
"=",
"[",
"np",
".",
"log",
"(",
"q",
")",
"chiAx",
"chiAy",
"chiHat",
"chiBx",
"chiBy",
"chi_a",
"]"
] |
python
|
train
| 34.818182 |
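Worked numbers for _get_fit_params (spin values chosen arbitrarily): with q = 2, chiAz = 0.5, chiBz = -0.3, we get eta = 2/9, chi_wtAvg = 0.7/3 ≈ 0.2333, chiHat ≈ 0.2568 and chi_a = 0.4.
import numpy as np
q, chiAz, chiBz = 2.0, 0.5, -0.3
eta = q / (1. + q) ** 2                         # 0.2222...
chi_wtAvg = (q * chiAz + chiBz) / (1. + q)      # 0.2333...
chiHat = (chi_wtAvg - 38. * eta / 113. * (chiAz + chiBz)) / (1. - 76. * eta / 113.)
chi_a = (chiAz - chiBz) / 2.
print(np.log(q), chiHat, chi_a)                 # 0.6931..., 0.2568..., 0.4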
twisted/mantissa
|
xmantissa/web.py
|
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/web.py#L314-L324
|
def renderHTTP(self, context):
"""
Render the wrapped resource if HTTPS is already being used, otherwise
invoke a helper which may generate a redirect.
"""
request = IRequest(context)
if request.isSecure():
renderer = self.wrappedResource
else:
renderer = _SecureWrapper(self.urlGenerator, self.wrappedResource)
return renderer.renderHTTP(context)
|
[
"def",
"renderHTTP",
"(",
"self",
",",
"context",
")",
":",
"request",
"=",
"IRequest",
"(",
"context",
")",
"if",
"request",
".",
"isSecure",
"(",
")",
":",
"renderer",
"=",
"self",
".",
"wrappedResource",
"else",
":",
"renderer",
"=",
"_SecureWrapper",
"(",
"self",
".",
"urlGenerator",
",",
"self",
".",
"wrappedResource",
")",
"return",
"renderer",
".",
"renderHTTP",
"(",
"context",
")"
] |
Render the wrapped resource if HTTPS is already being used, otherwise
invoke a helper which may generate a redirect.
|
[
"Render",
"the",
"wrapped",
"resource",
"if",
"HTTPS",
"is",
"already",
"being",
"used",
"otherwise",
"invoke",
"a",
"helper",
"which",
"may",
"generate",
"a",
"redirect",
"."
] |
python
|
train
| 38.636364 |
fuzeman/PyUPnP
|
pyupnp/lict.py
|
https://github.com/fuzeman/PyUPnP/blob/6dea64be299952346a14300ab6cc7dac42736433/pyupnp/lict.py#L343-L355
|
def update(self, E=None, **F):
"""
D.update([E, ]**F) -> None. Update D from dict/iterable E and F.
If E present and has a .keys() method, does: for k in E: D[k] = E[k]
If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v
In either case, this is followed by: for k in F: D[k] = F[k]
"""
if hasattr(E, 'keys'):
self.extend(E)
else:
for key, value in E:
self._set_key(key, value)
self.extend(F)
|
[
"def",
"update",
"(",
"self",
",",
"E",
"=",
"None",
",",
"*",
"*",
"F",
")",
":",
"if",
"hasattr",
"(",
"E",
",",
"'keys'",
")",
":",
"self",
".",
"extend",
"(",
"E",
")",
"else",
":",
"for",
"key",
",",
"value",
"in",
"E",
":",
"self",
".",
"_set_key",
"(",
"key",
",",
"value",
")",
"self",
".",
"extend",
"(",
"F",
")"
] |
D.update([E, ]**F) -> None. Update D from dict/iterable E and F.
If E present and has a .keys() method, does: for k in E: D[k] = E[k]
If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v
In either case, this is followed by: for k in F: D[k] = F[k]
|
[
"D",
".",
"update",
"(",
"[",
"E",
"]",
"**",
"F",
")",
"-",
">",
"None",
".",
"Update",
"D",
"from",
"dict",
"/",
"iterable",
"E",
"and",
"F",
".",
"If",
"E",
"present",
"and",
"has",
"a",
".",
"keys",
"()",
"method",
"does",
":",
"for",
"k",
"in",
"E",
":",
"D",
"[",
"k",
"]",
"=",
"E",
"[",
"k",
"]",
"If",
"E",
"present",
"and",
"lacks",
".",
"keys",
"()",
"method",
"does",
":",
"for",
"(",
"k",
"v",
")",
"in",
"E",
":",
"D",
"[",
"k",
"]",
"=",
"v",
"In",
"either",
"case",
"this",
"is",
"followed",
"by",
":",
"for",
"k",
"in",
"F",
":",
"D",
"[",
"k",
"]",
"=",
"F",
"[",
"k",
"]"
] |
python
|
train
| 39.923077 |
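The update above mirrors dict.update semantics; the three call shapes, shown on a plain dict for clarity:
d = {}
d.update({'a': 1})       # E with a .keys() method
d.update([('b', 2)])     # E as an iterable of (k, v) pairs
d.update(c=3)            # keyword arguments F
print(d)                 # {'a': 1, 'b': 2, 'c': 3}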
gwastro/pycbc-glue
|
pycbc_glue/ligolw/utils/search_summary.py
|
https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/ligolw/utils/search_summary.py#L95-L109
|
def segmentlistdict_fromsearchsummary_out(xmldoc, program = None):
"""
Convenience wrapper for a common case usage of the segmentlistdict
class: searches the process table in xmldoc for occurrences of a
program named program, then scans the search summary table for
matching process IDs and constructs a segmentlistdict object from
the out segments in those rows.
Note: the segmentlists in the segmentlistdict are not necessarily
coalesced, they contain the segments as they appear in the
search_summary table.
"""
stbl = lsctables.SearchSummaryTable.get_table(xmldoc)
ptbl = lsctables.ProcessTable.get_table(xmldoc)
return stbl.get_out_segmentlistdict(program and ptbl.get_ids_by_program(program))
|
[
"def",
"segmentlistdict_fromsearchsummary_out",
"(",
"xmldoc",
",",
"program",
"=",
"None",
")",
":",
"stbl",
"=",
"lsctables",
".",
"SearchSummaryTable",
".",
"get_table",
"(",
"xmldoc",
")",
"ptbl",
"=",
"lsctables",
".",
"ProcessTable",
".",
"get_table",
"(",
"xmldoc",
")",
"return",
"stbl",
".",
"get_out_segmentlistdict",
"(",
"program",
"and",
"ptbl",
".",
"get_ids_by_program",
"(",
"program",
")",
")"
] |
Convenience wrapper for a common case usage of the segmentlistdict
class: searches the process table in xmldoc for occurrences of a
program named program, then scans the search summary table for
matching process IDs and constructs a segmentlistdict object from
the out segments in those rows.
Note: the segmentlists in the segmentlistdict are not necessarily
coalesced, they contain the segments as they appear in the
search_summary table.
|
[
"Convenience",
"wrapper",
"for",
"a",
"common",
"case",
"usage",
"of",
"the",
"segmentlistdict",
"class",
":",
"searches",
"the",
"process",
"table",
"in",
"xmldoc",
"for",
"occurances",
"of",
"a",
"program",
"named",
"program",
"then",
"scans",
"the",
"search",
"summary",
"table",
"for",
"matching",
"process",
"IDs",
"and",
"constructs",
"a",
"segmentlistdict",
"object",
"from",
"the",
"out",
"segments",
"in",
"those",
"rows",
"."
] |
python
|
train
| 46.6 |
lucapinello/Haystack
|
haystack/external.py
|
https://github.com/lucapinello/Haystack/blob/cc080d741f36cd77b07c0b59d08ea6a4cf0ef2f7/haystack/external.py#L286-L314
|
def _compute_ll(self):
"""
m._compute_ll() -- [utility] Compute the log-likelihood matrix from the count matrix
"""
self.fracs = []
self.logP = []
self.ll = []
for i in range(self.width):
Dll = {'A': 0, 'C': 0, 'T': 0, 'G': 0}
Df = {'A': 0, 'C': 0, 'T': 0, 'G': 0}
DlogP= {'A': 0, 'C': 0, 'T': 0, 'G': 0}
for key in self.counts[i].keys():
#print i,key,self.counts[i][key],self.nseqs
Pij = self.counts[i][key] / float(self.nseqs)
Df [key] = Pij
Dll[key] = (math.log( (self.counts[i][key] + self.bgscale*self.background[key] ) /
((self.nseqs + self.bgscale) * self.background[key]) ) /
math.log(2))
if Pij > 0:
DlogP[key] = math.log(Pij)/math.log(2)
else:
DlogP[key] = -100 #Near zero
self.fracs.append(Df)
self.logP.append (DlogP)
self.ll.append (Dll)
self.P = self.fracs
self._compute_bits()
self._compute_ambig_ll()
self._maxscore()
|
[
"def",
"_compute_ll",
"(",
"self",
")",
":",
"self",
".",
"fracs",
"=",
"[",
"]",
"self",
".",
"logP",
"=",
"[",
"]",
"self",
".",
"ll",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"width",
")",
":",
"Dll",
"=",
"{",
"'A'",
":",
"0",
",",
"'C'",
":",
"0",
",",
"'T'",
":",
"0",
",",
"'G'",
":",
"0",
"}",
"Df",
"=",
"{",
"'A'",
":",
"0",
",",
"'C'",
":",
"0",
",",
"'T'",
":",
"0",
",",
"'G'",
":",
"0",
"}",
"DlogP",
"=",
"{",
"'A'",
":",
"0",
",",
"'C'",
":",
"0",
",",
"'T'",
":",
"0",
",",
"'G'",
":",
"0",
"}",
"for",
"key",
"in",
"self",
".",
"counts",
"[",
"i",
"]",
".",
"keys",
"(",
")",
":",
"#print i,key,self.counts[i][key],self.nseqs",
"Pij",
"=",
"self",
".",
"counts",
"[",
"i",
"]",
"[",
"key",
"]",
"/",
"float",
"(",
"self",
".",
"nseqs",
")",
"Df",
"[",
"key",
"]",
"=",
"Pij",
"Dll",
"[",
"key",
"]",
"=",
"(",
"math",
".",
"log",
"(",
"(",
"self",
".",
"counts",
"[",
"i",
"]",
"[",
"key",
"]",
"+",
"self",
".",
"bgscale",
"*",
"self",
".",
"background",
"[",
"key",
"]",
")",
"/",
"(",
"(",
"self",
".",
"nseqs",
"+",
"self",
".",
"bgscale",
")",
"*",
"self",
".",
"background",
"[",
"key",
"]",
")",
")",
"/",
"math",
".",
"log",
"(",
"2",
")",
")",
"if",
"Pij",
">",
"0",
":",
"DlogP",
"[",
"key",
"]",
"=",
"math",
".",
"log",
"(",
"Pij",
")",
"/",
"math",
".",
"log",
"(",
"2",
")",
"else",
":",
"DlogP",
"[",
"key",
"]",
"=",
"-",
"100",
"#Near zero",
"self",
".",
"fracs",
".",
"append",
"(",
"Df",
")",
"self",
".",
"logP",
".",
"append",
"(",
"DlogP",
")",
"self",
".",
"ll",
".",
"append",
"(",
"Dll",
")",
"self",
".",
"P",
"=",
"self",
".",
"fracs",
"self",
".",
"_compute_bits",
"(",
")",
"self",
".",
"_compute_ambig_ll",
"(",
")",
"self",
".",
"_maxscore",
"(",
")"
] |
m._compute_ll() -- [utility] Compute the log-likelihood matrix from the count matrix
|
[
"m",
".",
"_compute_ll",
"()",
"--",
"[",
"utility",
"]",
"Compute",
"the",
"log",
"-",
"likelihood",
"matrix",
"from",
"the",
"count",
"matrix"
] |
python
|
train
| 41.241379 |
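One cell of the log-likelihood matrix in _compute_ll, worked by hand (counts, background and bgscale are made up): with count = 6 of nseqs = 10, background = 0.25 and bgscale = 2, ll = log2((6 + 2*0.25) / ((10 + 2) * 0.25)) = log2(6.5 / 3.0) ≈ 1.115.
import math
count, nseqs, bg, bgscale = 6, 10, 0.25, 2.0
ll = math.log((count + bgscale * bg) / ((nseqs + bgscale) * bg)) / math.log(2)
print(round(ll, 3))   # 1.115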
renweizhukov/pytwis
|
pytwis/pytwis.py
|
https://github.com/renweizhukov/pytwis/blob/1bc45b038d7e5343824c520f89f644bbd6faab0a/pytwis/pytwis.py#L695-L745
|
def get_followers(self, auth_secret):
"""Get the follower list of a logged-in user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the follower list is successfully obtained, False otherwise.
result
A dict containing the follower list with the key FOLLOWER_LIST_KEY
if the follower list is successfully obtained, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the list of followers' userids.
follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(userid)
follower_userids = self._rc.zrange(follower_zset_key, 0, -1)
if follower_userids is None or not follower_userids:
result[pytwis_constants.FOLLOWER_LIST_KEY] = []
return (True, result)
# Get the list of followers' usernames from their userids.
with self._rc.pipeline() as pipe:
pipe.multi()
for follower_userid in follower_userids:
follower_userid_profile_key = \
pytwis_constants.USER_PROFILE_KEY_FORMAT.format(follower_userid)
pipe.hget(follower_userid_profile_key, pytwis_constants.USERNAME_KEY)
result[pytwis_constants.FOLLOWER_LIST_KEY] = pipe.execute()
return (True, result)
|
[
"def",
"get_followers",
"(",
"self",
",",
"auth_secret",
")",
":",
"result",
"=",
"{",
"pytwis_constants",
".",
"ERROR_KEY",
":",
"None",
"}",
"# Check if the user is logged in.",
"loggedin",
",",
"userid",
"=",
"self",
".",
"_is_loggedin",
"(",
"auth_secret",
")",
"if",
"not",
"loggedin",
":",
"result",
"[",
"pytwis_constants",
".",
"ERROR_KEY",
"]",
"=",
"pytwis_constants",
".",
"ERROR_NOT_LOGGED_IN",
"return",
"(",
"False",
",",
"result",
")",
"# Get the list of followers' userids.",
"follower_zset_key",
"=",
"pytwis_constants",
".",
"FOLLOWER_KEY_FORMAT",
".",
"format",
"(",
"userid",
")",
"follower_userids",
"=",
"self",
".",
"_rc",
".",
"zrange",
"(",
"follower_zset_key",
",",
"0",
",",
"-",
"1",
")",
"if",
"follower_userids",
"is",
"None",
"or",
"not",
"follower_userids",
":",
"result",
"[",
"pytwis_constants",
".",
"FOLLOWER_LIST_KEY",
"]",
"=",
"[",
"]",
"return",
"(",
"True",
",",
"result",
")",
"# Get the list of followers' usernames from their userids.",
"with",
"self",
".",
"_rc",
".",
"pipeline",
"(",
")",
"as",
"pipe",
":",
"pipe",
".",
"multi",
"(",
")",
"for",
"follower_userid",
"in",
"follower_userids",
":",
"follower_userid_profile_key",
"=",
"pytwis_constants",
".",
"USER_PROFILE_KEY_FORMAT",
".",
"format",
"(",
"follower_userid",
")",
"pipe",
".",
"hget",
"(",
"follower_userid_profile_key",
",",
"pytwis_constants",
".",
"USERNAME_KEY",
")",
"result",
"[",
"pytwis_constants",
".",
"FOLLOWER_LIST_KEY",
"]",
"=",
"pipe",
".",
"execute",
"(",
")",
"return",
"(",
"True",
",",
"result",
")"
] |
Get the follower list of a logged-in user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the follower list is successfully obtained, False otherwise.
result
A dict containing the follower list with the key FOLLOWER_LIST_KEY
if the follower list is successfully obtained, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed below:
- ERROR_NOT_LOGGED_IN
|
[
"Get",
"the",
"follower",
"list",
"of",
"a",
"logged",
"-",
"in",
"user",
"."
] |
python
|
train
| 35.54902 |
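The pipeline block in get_followers batches one HGET per follower into a single round trip; the generic redis-py pattern looks like this (connection details and key names are hypothetical):
import redis
rc = redis.Redis()
with rc.pipeline() as pipe:
    pipe.multi()
    for uid in ("1", "2"):
        pipe.hget("user:{}".format(uid), "username")
    usernames = pipe.execute()   # one result per queued hget, in order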
spyder-ide/spyder
|
spyder/preferences/shortcuts.py
|
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/preferences/shortcuts.py#L729-L735
|
def adjust_cells(self):
"""Adjust column size based on contents."""
self.resizeColumnsToContents()
fm = self.horizontalHeader().fontMetrics()
names = [fm.width(s.name + ' '*9) for s in self.source_model.shortcuts]
self.setColumnWidth(NAME, max(names))
self.horizontalHeader().setStretchLastSection(True)
|
[
"def",
"adjust_cells",
"(",
"self",
")",
":",
"self",
".",
"resizeColumnsToContents",
"(",
")",
"fm",
"=",
"self",
".",
"horizontalHeader",
"(",
")",
".",
"fontMetrics",
"(",
")",
"names",
"=",
"[",
"fm",
".",
"width",
"(",
"s",
".",
"name",
"+",
"' '",
"*",
"9",
")",
"for",
"s",
"in",
"self",
".",
"source_model",
".",
"shortcuts",
"]",
"self",
".",
"setColumnWidth",
"(",
"NAME",
",",
"max",
"(",
"names",
")",
")",
"self",
".",
"horizontalHeader",
"(",
")",
".",
"setStretchLastSection",
"(",
"True",
")"
] |
Adjust column size based on contents.
|
[
"Adjust",
"column",
"size",
"based",
"on",
"contents",
"."
] |
python
|
train
| 50.142857 |
DataDog/integrations-core
|
sqlserver/datadog_checks/sqlserver/sqlserver.py
|
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/sqlserver/datadog_checks/sqlserver/sqlserver.py#L178-L202
|
def _check_db_exists(self, instance):
"""
Check if the database we're targeting actually exists
If not then we won't do any checks
This allows the same config to be installed on many servers but fail gracefully
"""
dsn, host, username, password, database, driver = self._get_access_info(instance, self.DEFAULT_DB_KEY)
context = "{} - {}".format(host, database)
if self.existing_databases is None:
cursor = self.get_cursor(instance, None, self.DEFAULT_DATABASE)
try:
self.existing_databases = {}
cursor.execute(DATABASE_EXISTS_QUERY)
for row in cursor:
self.existing_databases[row.name] = True
except Exception as e:
self.log.error("Failed to check if database {} exists: {}".format(database, e))
return False, context
finally:
self.close_cursor(cursor)
return database in self.existing_databases, context
|
[
"def",
"_check_db_exists",
"(",
"self",
",",
"instance",
")",
":",
"dsn",
",",
"host",
",",
"username",
",",
"password",
",",
"database",
",",
"driver",
"=",
"self",
".",
"_get_access_info",
"(",
"instance",
",",
"self",
".",
"DEFAULT_DB_KEY",
")",
"context",
"=",
"\"{} - {}\"",
".",
"format",
"(",
"host",
",",
"database",
")",
"if",
"self",
".",
"existing_databases",
"is",
"None",
":",
"cursor",
"=",
"self",
".",
"get_cursor",
"(",
"instance",
",",
"None",
",",
"self",
".",
"DEFAULT_DATABASE",
")",
"try",
":",
"self",
".",
"existing_databases",
"=",
"{",
"}",
"cursor",
".",
"execute",
"(",
"DATABASE_EXISTS_QUERY",
")",
"for",
"row",
"in",
"cursor",
":",
"self",
".",
"existing_databases",
"[",
"row",
".",
"name",
"]",
"=",
"True",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"log",
".",
"error",
"(",
"\"Failed to check if database {} exists: {}\"",
".",
"format",
"(",
"database",
",",
"e",
")",
")",
"return",
"False",
",",
"context",
"finally",
":",
"self",
".",
"close_cursor",
"(",
"cursor",
")",
"return",
"database",
"in",
"self",
".",
"existing_databases",
",",
"context"
] |
Check if the database we're targeting actually exists
If not then we won't do any checks
This allows the same config to be installed on many servers but fail gracefully
|
[
"Check",
"if",
"the",
"database",
"we",
"re",
"targeting",
"actually",
"exists",
"If",
"not",
"then",
"we",
"won",
"t",
"do",
"any",
"checks",
"This",
"allows",
"the",
"same",
"config",
"to",
"be",
"installed",
"on",
"many",
"servers",
"but",
"fail",
"gracefully"
] |
python
|
train
| 40.8 |
googlefonts/fontbakery
|
Lib/fontbakery/profiles/hhea.py
|
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/hhea.py#L27-L43
|
def com_google_fonts_check_maxadvancewidth(ttFont):
"""MaxAdvanceWidth is consistent with values in the Hmtx and Hhea tables?"""
hhea_advance_width_max = ttFont['hhea'].advanceWidthMax
hmtx_advance_width_max = None
for g in ttFont['hmtx'].metrics.values():
if hmtx_advance_width_max is None:
hmtx_advance_width_max = max(0, g[0])
else:
hmtx_advance_width_max = max(g[0], hmtx_advance_width_max)
if hmtx_advance_width_max != hhea_advance_width_max:
yield FAIL, ("AdvanceWidthMax mismatch: expected {} (from hmtx);"
" got {} (from hhea)").format(hmtx_advance_width_max,
hhea_advance_width_max)
else:
yield PASS, ("MaxAdvanceWidth is consistent"
" with values in the Hmtx and Hhea tables.")
|
[
"def",
"com_google_fonts_check_maxadvancewidth",
"(",
"ttFont",
")",
":",
"hhea_advance_width_max",
"=",
"ttFont",
"[",
"'hhea'",
"]",
".",
"advanceWidthMax",
"hmtx_advance_width_max",
"=",
"None",
"for",
"g",
"in",
"ttFont",
"[",
"'hmtx'",
"]",
".",
"metrics",
".",
"values",
"(",
")",
":",
"if",
"hmtx_advance_width_max",
"is",
"None",
":",
"hmtx_advance_width_max",
"=",
"max",
"(",
"0",
",",
"g",
"[",
"0",
"]",
")",
"else",
":",
"hmtx_advance_width_max",
"=",
"max",
"(",
"g",
"[",
"0",
"]",
",",
"hmtx_advance_width_max",
")",
"if",
"hmtx_advance_width_max",
"!=",
"hhea_advance_width_max",
":",
"yield",
"FAIL",
",",
"(",
"\"AdvanceWidthMax mismatch: expected {} (from hmtx);\"",
"\" got {} (from hhea)\"",
")",
".",
"format",
"(",
"hmtx_advance_width_max",
",",
"hhea_advance_width_max",
")",
"else",
":",
"yield",
"PASS",
",",
"(",
"\"MaxAdvanceWidth is consistent\"",
"\" with values in the Hmtx and Hhea tables.\"",
")"
] |
MaxAdvanceWidth is consistent with values in the Hmtx and Hhea tables?
|
[
"MaxAdvanceWidth",
"is",
"consistent",
"with",
"values",
"in",
"the",
"Hmtx",
"and",
"Hhea",
"tables?"
] |
python
|
train
| 46.647059 |
vertexproject/synapse
|
synapse/cortex.py
|
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/cortex.py#L1045-L1058
|
async def setViewLayers(self, layers, iden=None):
'''
Args:
layers ([str]): A top-down list of layer guids
iden (str): The view iden ( defaults to default view ).
'''
if iden is None:
iden = self.iden
view = self.views.get(iden)
if view is None:
raise s_exc.NoSuchView(iden=iden)
view.setLayers(layers)
|
[
"async",
"def",
"setViewLayers",
"(",
"self",
",",
"layers",
",",
"iden",
"=",
"None",
")",
":",
"if",
"iden",
"is",
"None",
":",
"iden",
"=",
"self",
".",
"iden",
"view",
"=",
"self",
".",
"views",
".",
"get",
"(",
"iden",
")",
"if",
"view",
"is",
"None",
":",
"raise",
"s_exc",
".",
"NoSuchView",
"(",
"iden",
"=",
"iden",
")",
"view",
".",
"setLayers",
"(",
"layers",
")"
] |
Args:
layers ([str]): A top-down list of layer guids
iden (str): The view iden ( defaults to default view ).
|
[
"Args",
":",
"layers",
"(",
"[",
"str",
"]",
")",
":",
"A",
"top",
"-",
"down",
"list",
"of",
"of",
"layer",
"guids",
"iden",
"(",
"str",
")",
":",
"The",
"view",
"iden",
"(",
"defaults",
"to",
"default",
"view",
")",
"."
] |
python
|
train
| 28.428571 |
contentful/contentful-management.py
|
contentful_management/ui_extension.py
|
https://github.com/contentful/contentful-management.py/blob/707dd30883b98a10c7ff0f7f5bdb8edbdc1d8df0/contentful_management/ui_extension.py#L78-L88
|
def to_json(self):
"""
Returns the JSON Representation of the UI extension.
"""
result = super(UIExtension, self).to_json()
result.update({
'extension': self.extension
})
return result
|
[
"def",
"to_json",
"(",
"self",
")",
":",
"result",
"=",
"super",
"(",
"UIExtension",
",",
"self",
")",
".",
"to_json",
"(",
")",
"result",
".",
"update",
"(",
"{",
"'extension'",
":",
"self",
".",
"extension",
"}",
")",
"return",
"result"
] |
Returns the JSON Representation of the UI extension.
|
[
"Returns",
"the",
"JSON",
"Representation",
"of",
"the",
"UI",
"extension",
"."
] |
python
|
train
| 22.181818 |
raiden-network/raiden
|
raiden/network/proxies/payment_channel.py
|
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/network/proxies/payment_channel.py#L231-L252
|
def settle(
self,
transferred_amount: TokenAmount,
locked_amount: TokenAmount,
locksroot: Locksroot,
partner_transferred_amount: TokenAmount,
partner_locked_amount: TokenAmount,
partner_locksroot: Locksroot,
block_identifier: BlockSpecification,
):
""" Settles the channel. """
self.token_network.settle(
channel_identifier=self.channel_identifier,
transferred_amount=transferred_amount,
locked_amount=locked_amount,
locksroot=locksroot,
partner=self.participant2,
partner_transferred_amount=partner_transferred_amount,
partner_locked_amount=partner_locked_amount,
partner_locksroot=partner_locksroot,
given_block_identifier=block_identifier,
)
|
[
"def",
"settle",
"(",
"self",
",",
"transferred_amount",
":",
"TokenAmount",
",",
"locked_amount",
":",
"TokenAmount",
",",
"locksroot",
":",
"Locksroot",
",",
"partner_transferred_amount",
":",
"TokenAmount",
",",
"partner_locked_amount",
":",
"TokenAmount",
",",
"partner_locksroot",
":",
"Locksroot",
",",
"block_identifier",
":",
"BlockSpecification",
",",
")",
":",
"self",
".",
"token_network",
".",
"settle",
"(",
"channel_identifier",
"=",
"self",
".",
"channel_identifier",
",",
"transferred_amount",
"=",
"transferred_amount",
",",
"locked_amount",
"=",
"locked_amount",
",",
"locksroot",
"=",
"locksroot",
",",
"partner",
"=",
"self",
".",
"participant2",
",",
"partner_transferred_amount",
"=",
"partner_transferred_amount",
",",
"partner_locked_amount",
"=",
"partner_locked_amount",
",",
"partner_locksroot",
"=",
"partner_locksroot",
",",
"given_block_identifier",
"=",
"block_identifier",
",",
")"
] |
Settles the channel.
|
[
"Settles",
"the",
"channel",
"."
] |
python
|
train
| 38.863636 |
david-caro/python-autosemver
|
autosemver/packaging.py
|
https://github.com/david-caro/python-autosemver/blob/3bc0adb70c33e4bd3623ae4c1944d5ee37f4303d/autosemver/packaging.py#L118-L146
|
def get_changelog(project_dir=os.curdir, bugtracker_url='', rpm_format=False):
"""
Retrieves the changelog, from the CHANGELOG file (if in a package) or
generates it from the git history. Optionally in rpm-compatible format.
:param project_dir: Path to the git repo of the project.
:type project_dir: str
:param bugtracker_url: Url to the bug tracker for the issues.
:type bugtracker_url: str
:param rpm_format: if set to True, will make the changelog rpm-compatible
:returns: changelog
:rtype: str
:raises RuntimeError: If the changelog could not be retrieved
"""
changelog = ''
pkg_info_file = os.path.join(project_dir, 'PKG-INFO')
changelog_file = os.path.join(project_dir, 'CHANGELOG')
if os.path.exists(pkg_info_file) and os.path.exists(changelog_file):
with open(changelog_file) as changelog_fd:
changelog = changelog_fd.read()
else:
changelog = api.get_changelog(
repo_path=project_dir,
bugtracker_url=bugtracker_url,
rpm_format=rpm_format,
)
return changelog
|
[
"def",
"get_changelog",
"(",
"project_dir",
"=",
"os",
".",
"curdir",
",",
"bugtracker_url",
"=",
"''",
",",
"rpm_format",
"=",
"False",
")",
":",
"changelog",
"=",
"''",
"pkg_info_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"project_dir",
",",
"'PKG-INFO'",
")",
"changelog_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"project_dir",
",",
"'CHANGELOG'",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"pkg_info_file",
")",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"changelog_file",
")",
":",
"with",
"open",
"(",
"changelog_file",
")",
"as",
"changelog_fd",
":",
"changelog",
"=",
"changelog_fd",
".",
"read",
"(",
")",
"else",
":",
"changelog",
"=",
"api",
".",
"get_changelog",
"(",
"repo_path",
"=",
"project_dir",
",",
"bugtracker_url",
"=",
"bugtracker_url",
",",
"rpm_format",
"=",
"rpm_format",
",",
")",
"return",
"changelog"
] |
Retrieves the changelog, from the CHANGELOG file (if in a package) or
generates it from the git history. Optionally in rpm-compatible format.
:param project_dir: Path to the git repo of the project.
:type project_dir: str
:param bugtracker_url: Url to the bug tracker for the issues.
:type bugtracker_url: str
:param rpm_format: if set to True, will make the changelog rpm-compatible
:returns: changelog
:rtype: str
:raises RuntimeError: If the changelog could not be retrieved
|
[
"Retrieves",
"the",
"changelog",
"from",
"the",
"CHANGELOG",
"file",
"(",
"if",
"in",
"a",
"package",
")",
"or",
"generates",
"it",
"from",
"the",
"git",
"history",
".",
"Optionally",
"in",
"rpm",
"-",
"compatible",
"format",
"."
] |
python
|
train
| 37.37931 |
atlassian-api/atlassian-python-api
|
atlassian/service_desk.py
|
https://github.com/atlassian-api/atlassian-python-api/blob/540d269905c3e7547b666fe30c647b2d512cf358/atlassian/service_desk.py#L494-L503
|
def get_queue_settings(self, project_key):
"""
Get queue settings on project
:param project_key: str
:return:
"""
url = 'rest/servicedeskapi/queues/{}'.format(project_key)
return self.get(url, headers=self.experimental_headers)
|
[
"def",
"get_queue_settings",
"(",
"self",
",",
"project_key",
")",
":",
"url",
"=",
"'rest/servicedeskapi/queues/{}'",
".",
"format",
"(",
"project_key",
")",
"return",
"self",
".",
"get",
"(",
"url",
",",
"headers",
"=",
"self",
".",
"experimental_headers",
")"
] |
Get queue settings on project
:param project_key: str
:return:
|
[
"Get",
"queue",
"settings",
"on",
"project"
] |
python
|
train
| 27.6 |
softlayer/softlayer-python
|
SoftLayer/managers/storage_utils.py
|
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/storage_utils.py#L789-L906
|
def prepare_duplicate_order_object(manager, origin_volume, iops, tier,
duplicate_size, duplicate_snapshot_size,
volume_type, hourly_billing_flag=False):
"""Prepare the duplicate order to submit to SoftLayer_Product::placeOrder()
:param manager: The File or Block manager calling this function
:param origin_volume: The origin volume which is being duplicated
:param iops: The IOPS for the duplicate volume (performance)
:param tier: The tier level for the duplicate volume (endurance)
:param duplicate_size: The requested size for the duplicate volume
:param duplicate_snapshot_size: The size for the duplicate snapshot space
:param volume_type: The type of the origin volume ('file' or 'block')
:param hourly_billing_flag: Billing type, monthly (False) or hourly (True)
:return: Returns the order object to be passed to the
placeOrder() method of the Product_Order service
"""
# Verify that the origin volume has not been cancelled
if 'billingItem' not in origin_volume:
raise exceptions.SoftLayerError(
"The origin volume has been cancelled; "
"unable to order duplicate volume")
# Verify that the origin volume has snapshot space (needed for duplication)
if isinstance(utils.lookup(origin_volume, 'snapshotCapacityGb'), str):
origin_snapshot_size = int(origin_volume['snapshotCapacityGb'])
else:
raise exceptions.SoftLayerError(
"Snapshot space not found for the origin volume. "
"Origin snapshot space is needed for duplication.")
# Obtain the datacenter location ID for the duplicate
if isinstance(utils.lookup(origin_volume, 'billingItem',
'location', 'id'), int):
location_id = origin_volume['billingItem']['location']['id']
else:
raise exceptions.SoftLayerError(
"Cannot find origin volume's location")
# Ensure the origin volume is STaaS v2 or higher
# and supports Encryption at Rest
if not _staas_version_is_v2_or_above(origin_volume):
raise exceptions.SoftLayerError(
"This volume cannot be duplicated since it "
"does not support Encryption at Rest.")
# If no specific snapshot space was requested for the duplicate,
# use the origin snapshot space size
if duplicate_snapshot_size is None:
duplicate_snapshot_size = origin_snapshot_size
# Use the origin volume size if no size was specified for the duplicate
if duplicate_size is None:
duplicate_size = origin_volume['capacityGb']
# Get the appropriate package for the order
# ('storage_as_a_service' is currently used for duplicate volumes)
package = get_package(manager, 'storage_as_a_service')
# Determine the IOPS or tier level for the duplicate volume, along with
# the type and prices for the order
origin_storage_type = origin_volume['storageType']['keyName']
if 'PERFORMANCE' in origin_storage_type:
volume_is_performance = True
if iops is None:
iops = int(origin_volume.get('provisionedIops', 0))
if iops <= 0:
raise exceptions.SoftLayerError("Cannot find origin volume's provisioned IOPS")
# Set up the price array for the order
prices = [
find_price_by_category(package, 'storage_as_a_service'),
find_price_by_category(package, 'storage_' + volume_type),
find_saas_perform_space_price(package, duplicate_size),
find_saas_perform_iops_price(package, duplicate_size, iops),
]
# Add the price code for snapshot space as well, unless 0 GB was given
if duplicate_snapshot_size > 0:
prices.append(find_saas_snapshot_space_price(
package, duplicate_snapshot_size, iops=iops))
elif 'ENDURANCE' in origin_storage_type:
volume_is_performance = False
if tier is None:
tier = find_endurance_tier_iops_per_gb(origin_volume)
# Set up the price array for the order
prices = [
find_price_by_category(package, 'storage_as_a_service'),
find_price_by_category(package, 'storage_' + volume_type),
find_saas_endurance_space_price(package, duplicate_size, tier),
find_saas_endurance_tier_price(package, tier),
]
# Add the price code for snapshot space as well, unless 0 GB was given
if duplicate_snapshot_size > 0:
prices.append(find_saas_snapshot_space_price(
package, duplicate_snapshot_size, tier=tier))
else:
raise exceptions.SoftLayerError(
"Origin volume does not have a valid storage type "
"(with an appropriate keyName to indicate the "
"volume is a PERFORMANCE or an ENDURANCE volume)")
duplicate_order = {
'complexType': 'SoftLayer_Container_Product_Order_'
'Network_Storage_AsAService',
'packageId': package['id'],
'prices': prices,
'volumeSize': duplicate_size,
'quantity': 1,
'location': location_id,
'duplicateOriginVolumeId': origin_volume['id'],
'useHourlyPricing': hourly_billing_flag
}
if volume_is_performance:
duplicate_order['iops'] = iops
return duplicate_order
|
[
"def",
"prepare_duplicate_order_object",
"(",
"manager",
",",
"origin_volume",
",",
"iops",
",",
"tier",
",",
"duplicate_size",
",",
"duplicate_snapshot_size",
",",
"volume_type",
",",
"hourly_billing_flag",
"=",
"False",
")",
":",
"# Verify that the origin volume has not been cancelled",
"if",
"'billingItem'",
"not",
"in",
"origin_volume",
":",
"raise",
"exceptions",
".",
"SoftLayerError",
"(",
"\"The origin volume has been cancelled; \"",
"\"unable to order duplicate volume\"",
")",
"# Verify that the origin volume has snapshot space (needed for duplication)",
"if",
"isinstance",
"(",
"utils",
".",
"lookup",
"(",
"origin_volume",
",",
"'snapshotCapacityGb'",
")",
",",
"str",
")",
":",
"origin_snapshot_size",
"=",
"int",
"(",
"origin_volume",
"[",
"'snapshotCapacityGb'",
"]",
")",
"else",
":",
"raise",
"exceptions",
".",
"SoftLayerError",
"(",
"\"Snapshot space not found for the origin volume. \"",
"\"Origin snapshot space is needed for duplication.\"",
")",
"# Obtain the datacenter location ID for the duplicate",
"if",
"isinstance",
"(",
"utils",
".",
"lookup",
"(",
"origin_volume",
",",
"'billingItem'",
",",
"'location'",
",",
"'id'",
")",
",",
"int",
")",
":",
"location_id",
"=",
"origin_volume",
"[",
"'billingItem'",
"]",
"[",
"'location'",
"]",
"[",
"'id'",
"]",
"else",
":",
"raise",
"exceptions",
".",
"SoftLayerError",
"(",
"\"Cannot find origin volume's location\"",
")",
"# Ensure the origin volume is STaaS v2 or higher",
"# and supports Encryption at Rest",
"if",
"not",
"_staas_version_is_v2_or_above",
"(",
"origin_volume",
")",
":",
"raise",
"exceptions",
".",
"SoftLayerError",
"(",
"\"This volume cannot be duplicated since it \"",
"\"does not support Encryption at Rest.\"",
")",
"# If no specific snapshot space was requested for the duplicate,",
"# use the origin snapshot space size",
"if",
"duplicate_snapshot_size",
"is",
"None",
":",
"duplicate_snapshot_size",
"=",
"origin_snapshot_size",
"# Use the origin volume size if no size was specified for the duplicate",
"if",
"duplicate_size",
"is",
"None",
":",
"duplicate_size",
"=",
"origin_volume",
"[",
"'capacityGb'",
"]",
"# Get the appropriate package for the order",
"# ('storage_as_a_service' is currently used for duplicate volumes)",
"package",
"=",
"get_package",
"(",
"manager",
",",
"'storage_as_a_service'",
")",
"# Determine the IOPS or tier level for the duplicate volume, along with",
"# the type and prices for the order",
"origin_storage_type",
"=",
"origin_volume",
"[",
"'storageType'",
"]",
"[",
"'keyName'",
"]",
"if",
"'PERFORMANCE'",
"in",
"origin_storage_type",
":",
"volume_is_performance",
"=",
"True",
"if",
"iops",
"is",
"None",
":",
"iops",
"=",
"int",
"(",
"origin_volume",
".",
"get",
"(",
"'provisionedIops'",
",",
"0",
")",
")",
"if",
"iops",
"<=",
"0",
":",
"raise",
"exceptions",
".",
"SoftLayerError",
"(",
"\"Cannot find origin volume's provisioned IOPS\"",
")",
"# Set up the price array for the order",
"prices",
"=",
"[",
"find_price_by_category",
"(",
"package",
",",
"'storage_as_a_service'",
")",
",",
"find_price_by_category",
"(",
"package",
",",
"'storage_'",
"+",
"volume_type",
")",
",",
"find_saas_perform_space_price",
"(",
"package",
",",
"duplicate_size",
")",
",",
"find_saas_perform_iops_price",
"(",
"package",
",",
"duplicate_size",
",",
"iops",
")",
",",
"]",
"# Add the price code for snapshot space as well, unless 0 GB was given",
"if",
"duplicate_snapshot_size",
">",
"0",
":",
"prices",
".",
"append",
"(",
"find_saas_snapshot_space_price",
"(",
"package",
",",
"duplicate_snapshot_size",
",",
"iops",
"=",
"iops",
")",
")",
"elif",
"'ENDURANCE'",
"in",
"origin_storage_type",
":",
"volume_is_performance",
"=",
"False",
"if",
"tier",
"is",
"None",
":",
"tier",
"=",
"find_endurance_tier_iops_per_gb",
"(",
"origin_volume",
")",
"# Set up the price array for the order",
"prices",
"=",
"[",
"find_price_by_category",
"(",
"package",
",",
"'storage_as_a_service'",
")",
",",
"find_price_by_category",
"(",
"package",
",",
"'storage_'",
"+",
"volume_type",
")",
",",
"find_saas_endurance_space_price",
"(",
"package",
",",
"duplicate_size",
",",
"tier",
")",
",",
"find_saas_endurance_tier_price",
"(",
"package",
",",
"tier",
")",
",",
"]",
"# Add the price code for snapshot space as well, unless 0 GB was given",
"if",
"duplicate_snapshot_size",
">",
"0",
":",
"prices",
".",
"append",
"(",
"find_saas_snapshot_space_price",
"(",
"package",
",",
"duplicate_snapshot_size",
",",
"tier",
"=",
"tier",
")",
")",
"else",
":",
"raise",
"exceptions",
".",
"SoftLayerError",
"(",
"\"Origin volume does not have a valid storage type \"",
"\"(with an appropriate keyName to indicate the \"",
"\"volume is a PERFORMANCE or an ENDURANCE volume)\"",
")",
"duplicate_order",
"=",
"{",
"'complexType'",
":",
"'SoftLayer_Container_Product_Order_'",
"'Network_Storage_AsAService'",
",",
"'packageId'",
":",
"package",
"[",
"'id'",
"]",
",",
"'prices'",
":",
"prices",
",",
"'volumeSize'",
":",
"duplicate_size",
",",
"'quantity'",
":",
"1",
",",
"'location'",
":",
"location_id",
",",
"'duplicateOriginVolumeId'",
":",
"origin_volume",
"[",
"'id'",
"]",
",",
"'useHourlyPricing'",
":",
"hourly_billing_flag",
"}",
"if",
"volume_is_performance",
":",
"duplicate_order",
"[",
"'iops'",
"]",
"=",
"iops",
"return",
"duplicate_order"
] |
Prepare the duplicate order to submit to SoftLayer_Product::placeOrder()
:param manager: The File or Block manager calling this function
:param origin_volume: The origin volume which is being duplicated
:param iops: The IOPS for the duplicate volume (performance)
:param tier: The tier level for the duplicate volume (endurance)
:param duplicate_size: The requested size for the duplicate volume
:param duplicate_snapshot_size: The size for the duplicate snapshot space
:param volume_type: The type of the origin volume ('file' or 'block')
:param hourly_billing_flag: Billing type, monthly (False) or hourly (True)
:return: Returns the order object to be passed to the
placeOrder() method of the Product_Order service
|
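As the docstring notes, the returned container is meant for Product_Order::placeOrder(); a cautious caller can dry-run it first with verifyOrder(), which prices the same container without purchasing. A brief sketch reusing client and order from the example above; the recurringFee field on each verified price is assumed from the SoftLayer_Product_Item_Price data type.

# Hedged sketch: price-check the duplicate order before committing.
verified = client['Product_Order'].verifyOrder(order)

# Sum the monthly recurring fees of the verified prices (field name assumed).
total_monthly = sum(
    float(price.get('recurringFee', 0) or 0)
    for price in verified['prices']
)
print('Estimated monthly cost for the duplicate: $%.2f' % total_monthly)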
[
"Prepare",
"the",
"duplicate",
"order",
"to",
"submit",
"to",
"SoftLayer_Product",
"::",
"placeOrder",
"()"
] |
python
|
train
| 45 |