repo (stringlengths 7-55) | path (stringlengths 4-223) | url (stringlengths 87-315) | code (stringlengths 75-104k) | code_tokens (list) | docstring (stringlengths 1-46.9k) | docstring_tokens (list) | language (stringclasses 1 value) | partition (stringclasses 3 values) | avg_line_len (float64 7.91-980)
---|---|---|---|---|---|---|---|---|---|
elastic/elasticsearch-dsl-py | elasticsearch_dsl/index.py | https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/elasticsearch_dsl/index.py#L373-L380 |
def exists(self, using=None, **kwargs):
"""
Returns ``True`` if the index already exists in elasticsearch.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.exists`` unchanged.
"""
return self._get_connection(using).indices.exists(index=self._name, **kwargs)
|
[
"def",
"exists",
"(",
"self",
",",
"using",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_get_connection",
"(",
"using",
")",
".",
"indices",
".",
"exists",
"(",
"index",
"=",
"self",
".",
"_name",
",",
"*",
"*",
"kwargs",
")"
] |
Returns ``True`` if the index already exists in elasticsearch.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.exists`` unchanged.
|
[
"Returns",
"True",
"if",
"the",
"index",
"already",
"exists",
"in",
"elasticsearch",
"."
] |
python | train | 40.625 |
apple/turicreate | src/unity/python/turicreate/data_structures/sframe.py | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/sframe.py#L3062-L3137 |
def select_columns(self, column_names):
"""
Selects all columns where the name of the column or the type of column
is included in the column_names. An exception is raised if duplicate columns
are selected i.e. sf.select_columns(['a','a']), or non-existent columns
are selected.
Throws an exception for all other input types.
Parameters
----------
column_names: list[str or type]
The list of column names or a list of types.
Returns
-------
out : SFrame
A new SFrame that is made up of the columns referred to in
``column_names`` from the current SFrame.
See Also
--------
select_column
Examples
--------
>>> sf = turicreate.SFrame({'user_id': [1,2,3],
... 'user_name': ['alice', 'bob', 'charlie'],
... 'zipcode': [98101, 98102, 98103]
... })
>>> # This line is equivalent to `sf2 = sf[['user_id', 'zipcode']]`
>>> sf2 = sf.select_columns(['user_id', 'zipcode'])
>>> sf2
+---------+---------+
| user_id | zipcode |
+---------+---------+
| 1 | 98101 |
| 2 | 98102 |
| 3 | 98103 |
+---------+---------+
[3 rows x 2 columns]
"""
if not _is_non_string_iterable(column_names):
raise TypeError("column_names must be an iterable")
if not (all([isinstance(x, six.string_types) or isinstance(x, type) or isinstance(x, bytes)
for x in column_names])):
raise TypeError("Invalid key type: must be str, unicode, bytes or type")
requested_str_columns = [s for s in column_names if isinstance(s, six.string_types)]
# Make sure there are no duplicates keys
from collections import Counter
column_names_counter = Counter(column_names)
if (len(column_names)) != len(column_names_counter):
for key in column_names_counter:
if column_names_counter[key] > 1:
raise ValueError("There are duplicate keys in key list: '" + key + "'")
colnames_and_types = list(zip(self.column_names(), self.column_types()))
# Ok. we want the string columns to be in the ordering defined by the
# argument. And then all the type selection columns.
selected_columns = requested_str_columns
typelist = [s for s in column_names if isinstance(s, type)]
# next the type selection columns
# loop through all the columns, adding all columns with types in
# typelist. But don't add a column if it has already been added.
for i in colnames_and_types:
if i[1] in typelist and i[0] not in selected_columns:
selected_columns += [i[0]]
selected_columns = selected_columns
with cython_context():
return SFrame(data=[], _proxy=self.__proxy__.select_columns(selected_columns))
|
[
"def",
"select_columns",
"(",
"self",
",",
"column_names",
")",
":",
"if",
"not",
"_is_non_string_iterable",
"(",
"column_names",
")",
":",
"raise",
"TypeError",
"(",
"\"column_names must be an iterable\"",
")",
"if",
"not",
"(",
"all",
"(",
"[",
"isinstance",
"(",
"x",
",",
"six",
".",
"string_types",
")",
"or",
"isinstance",
"(",
"x",
",",
"type",
")",
"or",
"isinstance",
"(",
"x",
",",
"bytes",
")",
"for",
"x",
"in",
"column_names",
"]",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"Invalid key type: must be str, unicode, bytes or type\"",
")",
"requested_str_columns",
"=",
"[",
"s",
"for",
"s",
"in",
"column_names",
"if",
"isinstance",
"(",
"s",
",",
"six",
".",
"string_types",
")",
"]",
"# Make sure there are no duplicates keys",
"from",
"collections",
"import",
"Counter",
"column_names_counter",
"=",
"Counter",
"(",
"column_names",
")",
"if",
"(",
"len",
"(",
"column_names",
")",
")",
"!=",
"len",
"(",
"column_names_counter",
")",
":",
"for",
"key",
"in",
"column_names_counter",
":",
"if",
"column_names_counter",
"[",
"key",
"]",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"There are duplicate keys in key list: '\"",
"+",
"key",
"+",
"\"'\"",
")",
"colnames_and_types",
"=",
"list",
"(",
"zip",
"(",
"self",
".",
"column_names",
"(",
")",
",",
"self",
".",
"column_types",
"(",
")",
")",
")",
"# Ok. we want the string columns to be in the ordering defined by the",
"# argument. And then all the type selection columns.",
"selected_columns",
"=",
"requested_str_columns",
"typelist",
"=",
"[",
"s",
"for",
"s",
"in",
"column_names",
"if",
"isinstance",
"(",
"s",
",",
"type",
")",
"]",
"# next the type selection columns",
"# loop through all the columns, adding all columns with types in",
"# typelist. But don't add a column if it has already been added.",
"for",
"i",
"in",
"colnames_and_types",
":",
"if",
"i",
"[",
"1",
"]",
"in",
"typelist",
"and",
"i",
"[",
"0",
"]",
"not",
"in",
"selected_columns",
":",
"selected_columns",
"+=",
"[",
"i",
"[",
"0",
"]",
"]",
"selected_columns",
"=",
"selected_columns",
"with",
"cython_context",
"(",
")",
":",
"return",
"SFrame",
"(",
"data",
"=",
"[",
"]",
",",
"_proxy",
"=",
"self",
".",
"__proxy__",
".",
"select_columns",
"(",
"selected_columns",
")",
")"
] |
Selects all columns where the name of the column or the type of column
is included in the column_names. An exception is raised if duplicate columns
are selected i.e. sf.select_columns(['a','a']), or non-existent columns
are selected.
Throws an exception for all other input types.
Parameters
----------
column_names: list[str or type]
The list of column names or a list of types.
Returns
-------
out : SFrame
A new SFrame that is made up of the columns referred to in
``column_names`` from the current SFrame.
See Also
--------
select_column
Examples
--------
>>> sf = turicreate.SFrame({'user_id': [1,2,3],
... 'user_name': ['alice', 'bob', 'charlie'],
... 'zipcode': [98101, 98102, 98103]
... })
>>> # This line is equivalent to `sf2 = sf[['user_id', 'zipcode']]`
>>> sf2 = sf.select_columns(['user_id', 'zipcode'])
>>> sf2
+---------+---------+
| user_id | zipcode |
+---------+---------+
| 1 | 98101 |
| 2 | 98102 |
| 3 | 98103 |
+---------+---------+
[3 rows x 2 columns]
|
[
"Selects",
"all",
"columns",
"where",
"the",
"name",
"of",
"the",
"column",
"or",
"the",
"type",
"of",
"column",
"is",
"included",
"in",
"the",
"column_names",
".",
"An",
"exception",
"is",
"raised",
"if",
"duplicate",
"columns",
"are",
"selected",
"i",
".",
"e",
".",
"sf",
".",
"select_columns",
"(",
"[",
"a",
"a",
"]",
")",
"or",
"non",
"-",
"existent",
"columns",
"are",
"selected",
"."
] |
python | train | 39.75 |
lsst-sqre/sqre-codekit | codekit/progressbar.py | https://github.com/lsst-sqre/sqre-codekit/blob/98122404cd9065d4d1d570867fe518042669126c/codekit/progressbar.py#L73-L90 |
def eta_bar(msg, max_value):
"""Display an adaptive ETA / countdown bar with a message.
Parameters
----------
msg: str
Message to prefix countdown bar line with
max_value: max_value
The max number of progress bar steps/updates
"""
widgets = [
"{msg}:".format(msg=msg),
progressbar.Bar(), ' ', progressbar.AdaptiveETA(),
]
return progressbar.ProgressBar(widgets=widgets, max_value=max_value)
|
[
"def",
"eta_bar",
"(",
"msg",
",",
"max_value",
")",
":",
"widgets",
"=",
"[",
"\"{msg}:\"",
".",
"format",
"(",
"msg",
"=",
"msg",
")",
",",
"progressbar",
".",
"Bar",
"(",
")",
",",
"' '",
",",
"progressbar",
".",
"AdaptiveETA",
"(",
")",
",",
"]",
"return",
"progressbar",
".",
"ProgressBar",
"(",
"widgets",
"=",
"widgets",
",",
"max_value",
"=",
"max_value",
")"
] |
Display an adaptive ETA / countdown bar with a message.
Parameters
----------
msg: str
Message to prefix countdown bar line with
max_value: max_value
The max number of progress bar steps/updates
|
[
"Display",
"an",
"adaptive",
"ETA",
"/",
"countdown",
"bar",
"with",
"a",
"message",
"."
] |
python | train | 24.722222 |
google/grr | grr/client/grr_response_client/client_utils.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client/grr_response_client/client_utils.py#L61-L96 |
def StatEntryFromStat(stat,
pathspec,
ext_attrs = True):
"""Build a stat entry object from a given stat object.
Args:
stat: A `Stat` object.
pathspec: A `PathSpec` from which `stat` was obtained.
ext_attrs: Whether to include extended file attributes in the result.
Returns:
`StatEntry` object.
"""
result = rdf_client_fs.StatEntry(pathspec=pathspec)
for attr in _STAT_ATTRS:
value = getattr(stat.GetRaw(), attr, None)
if value is None:
continue
# TODO(hanuszczak): Why are we doing this?
value = int(value)
if value < 0:
value &= 0xFFFFFFFF
setattr(result, attr, value)
result.st_flags_linux = stat.GetLinuxFlags()
result.st_flags_osx = stat.GetOsxFlags()
if ext_attrs:
# TODO(hanuszczak): Can we somehow incorporate extended attribute getter to
# the `Stat` class? That would make the code a lot prettier but would force
# `utils` to depend on `xattrs`.
result.ext_attrs = list(GetExtAttrs(stat.GetPath()))
return result
|
[
"def",
"StatEntryFromStat",
"(",
"stat",
",",
"pathspec",
",",
"ext_attrs",
"=",
"True",
")",
":",
"result",
"=",
"rdf_client_fs",
".",
"StatEntry",
"(",
"pathspec",
"=",
"pathspec",
")",
"for",
"attr",
"in",
"_STAT_ATTRS",
":",
"value",
"=",
"getattr",
"(",
"stat",
".",
"GetRaw",
"(",
")",
",",
"attr",
",",
"None",
")",
"if",
"value",
"is",
"None",
":",
"continue",
"# TODO(hanuszczak): Why are we doing this?",
"value",
"=",
"int",
"(",
"value",
")",
"if",
"value",
"<",
"0",
":",
"value",
"&=",
"0xFFFFFFFF",
"setattr",
"(",
"result",
",",
"attr",
",",
"value",
")",
"result",
".",
"st_flags_linux",
"=",
"stat",
".",
"GetLinuxFlags",
"(",
")",
"result",
".",
"st_flags_osx",
"=",
"stat",
".",
"GetOsxFlags",
"(",
")",
"if",
"ext_attrs",
":",
"# TODO(hanuszczak): Can we somehow incorporate extended attribute getter to",
"# the `Stat` class? That would make the code a lot prettier but would force",
"# `utils` to depend on `xattrs`.",
"result",
".",
"ext_attrs",
"=",
"list",
"(",
"GetExtAttrs",
"(",
"stat",
".",
"GetPath",
"(",
")",
")",
")",
"return",
"result"
] |
Build a stat entry object from a given stat object.
Args:
stat: A `Stat` object.
pathspec: A `PathSpec` from which `stat` was obtained.
ext_attrs: Whether to include extended file attributes in the result.
Returns:
`StatEntry` object.
|
[
"Build",
"a",
"stat",
"entry",
"object",
"from",
"a",
"given",
"stat",
"object",
"."
] |
python | train | 28.5 |
Nic30/hwtGraph | hwtGraph/elk/fromHwt/mergeSplitsOnInterfaces.py | https://github.com/Nic30/hwtGraph/blob/6b7d4fdd759f263a0fdd2736f02f123e44e4354f/hwtGraph/elk/fromHwt/mergeSplitsOnInterfaces.py#L32-L40 |
def getRootIntfPort(port: LPort):
"""
:return: most top port which contains this port
"""
while True:
if isinstance(port.parent, LNode):
return port
else:
port = port.parent
|
[
"def",
"getRootIntfPort",
"(",
"port",
":",
"LPort",
")",
":",
"while",
"True",
":",
"if",
"isinstance",
"(",
"port",
".",
"parent",
",",
"LNode",
")",
":",
"return",
"port",
"else",
":",
"port",
"=",
"port",
".",
"parent"
] |
:return: most top port which contains this port
|
[
":",
"return",
":",
"most",
"top",
"port",
"which",
"contains",
"this",
"port"
] |
python | train | 24.555556 |
Kortemme-Lab/klab | klab/bio/pymolmod/colors.py | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/pymolmod/colors.py#L385-L397 |
def update(self, path, node):
'''Update the dict with a new color using a 'path' through the dict. You can either pass an existing path e.g.
'Scaffold.mutations' to override a color or part of the hierarchy or you can add a new leaf node or dict.'''
assert(type(path) == type(self.name))
assert(type(node) == type(self.name) or type(node) == type(predefined))
d = self.color_scheme
tokens = path.split('.')
for t in tokens[:-1]:
d = d.get(t)
if d == None:
raise Exception("Path '%s' not found.")
d[tokens[-1]] = node
|
[
"def",
"update",
"(",
"self",
",",
"path",
",",
"node",
")",
":",
"assert",
"(",
"type",
"(",
"path",
")",
"==",
"type",
"(",
"self",
".",
"name",
")",
")",
"assert",
"(",
"type",
"(",
"node",
")",
"==",
"type",
"(",
"self",
".",
"name",
")",
"or",
"type",
"(",
"node",
")",
"==",
"type",
"(",
"predefined",
")",
")",
"d",
"=",
"self",
".",
"color_scheme",
"tokens",
"=",
"path",
".",
"split",
"(",
"'.'",
")",
"for",
"t",
"in",
"tokens",
"[",
":",
"-",
"1",
"]",
":",
"d",
"=",
"d",
".",
"get",
"(",
"t",
")",
"if",
"d",
"==",
"None",
":",
"raise",
"Exception",
"(",
"\"Path '%s' not found.\"",
")",
"d",
"[",
"tokens",
"[",
"-",
"1",
"]",
"]",
"=",
"node"
] |
Update the dict with a new color using a 'path' through the dict. You can either pass an existing path e.g.
'Scaffold.mutations' to override a color or part of the hierarchy or you can add a new leaf node or dict.
|
[
"Update",
"the",
"dict",
"with",
"a",
"new",
"color",
"using",
"a",
"path",
"through",
"the",
"dict",
".",
"You",
"can",
"either",
"pass",
"an",
"existing",
"path",
"e",
".",
"g",
".",
"Scaffold",
".",
"mutations",
"to",
"override",
"a",
"color",
"or",
"part",
"of",
"the",
"hierarchy",
"or",
"you",
"can",
"add",
"a",
"new",
"leaf",
"node",
"or",
"dict",
"."
] |
python | train | 47.076923 |
saltstack/salt | salt/modules/vault.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/vault.py#L175-L197 |
def write_secret(path, **kwargs):
'''
Set secret at the path in vault. The vault policy used must allow this.
CLI Example:
.. code-block:: bash
salt '*' vault.write_secret "secret/my/secret" user="foo" password="bar"
'''
log.debug('Writing vault secrets for %s at %s', __grains__['id'], path)
data = dict([(x, y) for x, y in kwargs.items() if not x.startswith('__')])
try:
url = 'v1/{0}'.format(path)
response = __utils__['vault.make_request']('POST', url, json=data)
if response.status_code == 200:
return response.json()['data']
elif response.status_code != 204:
response.raise_for_status()
return True
except Exception as err:
log.error('Failed to write secret! %s: %s', type(err).__name__, err)
return False
|
[
"def",
"write_secret",
"(",
"path",
",",
"*",
"*",
"kwargs",
")",
":",
"log",
".",
"debug",
"(",
"'Writing vault secrets for %s at %s'",
",",
"__grains__",
"[",
"'id'",
"]",
",",
"path",
")",
"data",
"=",
"dict",
"(",
"[",
"(",
"x",
",",
"y",
")",
"for",
"x",
",",
"y",
"in",
"kwargs",
".",
"items",
"(",
")",
"if",
"not",
"x",
".",
"startswith",
"(",
"'__'",
")",
"]",
")",
"try",
":",
"url",
"=",
"'v1/{0}'",
".",
"format",
"(",
"path",
")",
"response",
"=",
"__utils__",
"[",
"'vault.make_request'",
"]",
"(",
"'POST'",
",",
"url",
",",
"json",
"=",
"data",
")",
"if",
"response",
".",
"status_code",
"==",
"200",
":",
"return",
"response",
".",
"json",
"(",
")",
"[",
"'data'",
"]",
"elif",
"response",
".",
"status_code",
"!=",
"204",
":",
"response",
".",
"raise_for_status",
"(",
")",
"return",
"True",
"except",
"Exception",
"as",
"err",
":",
"log",
".",
"error",
"(",
"'Failed to write secret! %s: %s'",
",",
"type",
"(",
"err",
")",
".",
"__name__",
",",
"err",
")",
"return",
"False"
] |
Set secret at the path in vault. The vault policy used must allow this.
CLI Example:
.. code-block:: bash
salt '*' vault.write_secret "secret/my/secret" user="foo" password="bar"
|
[
"Set",
"secret",
"at",
"the",
"path",
"in",
"vault",
".",
"The",
"vault",
"policy",
"used",
"must",
"allow",
"this",
"."
] |
python | train | 35.652174 |
pandas-dev/pandas | pandas/core/internals/managers.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/managers.py#L1325-L1346 |
def take(self, indexer, axis=1, verify=True, convert=True):
"""
Take items along any axis.
"""
self._consolidate_inplace()
indexer = (np.arange(indexer.start, indexer.stop, indexer.step,
dtype='int64')
if isinstance(indexer, slice)
else np.asanyarray(indexer, dtype='int64'))
n = self.shape[axis]
if convert:
indexer = maybe_convert_indices(indexer, n)
if verify:
if ((indexer == -1) | (indexer >= n)).any():
raise Exception('Indices must be nonzero and less than '
'the axis length')
new_labels = self.axes[axis].take(indexer)
return self.reindex_indexer(new_axis=new_labels, indexer=indexer,
axis=axis, allow_dups=True)
|
[
"def",
"take",
"(",
"self",
",",
"indexer",
",",
"axis",
"=",
"1",
",",
"verify",
"=",
"True",
",",
"convert",
"=",
"True",
")",
":",
"self",
".",
"_consolidate_inplace",
"(",
")",
"indexer",
"=",
"(",
"np",
".",
"arange",
"(",
"indexer",
".",
"start",
",",
"indexer",
".",
"stop",
",",
"indexer",
".",
"step",
",",
"dtype",
"=",
"'int64'",
")",
"if",
"isinstance",
"(",
"indexer",
",",
"slice",
")",
"else",
"np",
".",
"asanyarray",
"(",
"indexer",
",",
"dtype",
"=",
"'int64'",
")",
")",
"n",
"=",
"self",
".",
"shape",
"[",
"axis",
"]",
"if",
"convert",
":",
"indexer",
"=",
"maybe_convert_indices",
"(",
"indexer",
",",
"n",
")",
"if",
"verify",
":",
"if",
"(",
"(",
"indexer",
"==",
"-",
"1",
")",
"|",
"(",
"indexer",
">=",
"n",
")",
")",
".",
"any",
"(",
")",
":",
"raise",
"Exception",
"(",
"'Indices must be nonzero and less than '",
"'the axis length'",
")",
"new_labels",
"=",
"self",
".",
"axes",
"[",
"axis",
"]",
".",
"take",
"(",
"indexer",
")",
"return",
"self",
".",
"reindex_indexer",
"(",
"new_axis",
"=",
"new_labels",
",",
"indexer",
"=",
"indexer",
",",
"axis",
"=",
"axis",
",",
"allow_dups",
"=",
"True",
")"
] |
Take items along any axis.
|
[
"Take",
"items",
"along",
"any",
"axis",
"."
] |
python | train | 39 |
Hackerfleet/hfos | hfos/tool/installer.py | https://github.com/Hackerfleet/hfos/blob/b6df14eacaffb6be5c844108873ff8763ec7f0c9/hfos/tool/installer.py#L186-L189 |
def provisions(ctx, provision, clear_existing, overwrite, list_provisions):
"""Install default provisioning data"""
install_provisions(ctx, provision, clear_existing, overwrite, list_provisions)
|
[
"def",
"provisions",
"(",
"ctx",
",",
"provision",
",",
"clear_existing",
",",
"overwrite",
",",
"list_provisions",
")",
":",
"install_provisions",
"(",
"ctx",
",",
"provision",
",",
"clear_existing",
",",
"overwrite",
",",
"list_provisions",
")"
] |
Install default provisioning data
|
[
"Install",
"default",
"provisioning",
"data"
] |
python | train | 50 |
timothydmorton/VESPA | vespa/kepler.py | https://github.com/timothydmorton/VESPA/blob/0446b54d48009f3655cfd1a3957ceea21d3adcaa/vespa/kepler.py#L114-L146 |
def modelshift_weaksec(koi):
"""
Max secondary depth based on model-shift secondary test from Jeff Coughlin
secondary metric: mod_depth_sec_dv * (1 + 3*mod_fred_dv / mod_sig_sec_dv)
"""
num = KOIDATA.ix[ku.koiname(koi), 'koi_tce_plnt_num']
if np.isnan(num):
num = 1
kid = KOIDATA.ix[ku.koiname(koi), 'kepid']
tce = '{:09.0f}-{:02.0f}'.format(kid,num)
#return largest depth between DV detrending and alternate detrending
try:
r = ROBOVETDATA.ix[tce]
except KeyError:
raise NoWeakSecondaryError(koi)
depth_dv = r['mod_depth_sec_dv'] * (1 + 3*r['mod_fred_dv'] / r['mod_sig_sec_dv'])
depth_alt = r['mod_depth_sec_alt'] * (1 + 3*r['mod_fred_alt'] / r['mod_sig_sec_alt'])
logging.debug(r[['mod_depth_sec_dv','mod_fred_dv','mod_sig_sec_dv']])
logging.debug(r[['mod_depth_sec_alt','mod_fred_alt','mod_sig_sec_alt']])
if np.isnan(depth_dv) and np.isnan(depth_alt):
#return weaksec_vv2(koi)
raise NoWeakSecondaryError(koi)
elif np.isnan(depth_dv):
return depth_alt
elif np.isnan(depth_alt):
return depth_dv
else:
return max(depth_dv, depth_alt)
|
[
"def",
"modelshift_weaksec",
"(",
"koi",
")",
":",
"num",
"=",
"KOIDATA",
".",
"ix",
"[",
"ku",
".",
"koiname",
"(",
"koi",
")",
",",
"'koi_tce_plnt_num'",
"]",
"if",
"np",
".",
"isnan",
"(",
"num",
")",
":",
"num",
"=",
"1",
"kid",
"=",
"KOIDATA",
".",
"ix",
"[",
"ku",
".",
"koiname",
"(",
"koi",
")",
",",
"'kepid'",
"]",
"tce",
"=",
"'{:09.0f}-{:02.0f}'",
".",
"format",
"(",
"kid",
",",
"num",
")",
"#return largest depth between DV detrending and alternate detrending",
"try",
":",
"r",
"=",
"ROBOVETDATA",
".",
"ix",
"[",
"tce",
"]",
"except",
"KeyError",
":",
"raise",
"NoWeakSecondaryError",
"(",
"koi",
")",
"depth_dv",
"=",
"r",
"[",
"'mod_depth_sec_dv'",
"]",
"*",
"(",
"1",
"+",
"3",
"*",
"r",
"[",
"'mod_fred_dv'",
"]",
"/",
"r",
"[",
"'mod_sig_sec_dv'",
"]",
")",
"depth_alt",
"=",
"r",
"[",
"'mod_depth_sec_alt'",
"]",
"*",
"(",
"1",
"+",
"3",
"*",
"r",
"[",
"'mod_fred_alt'",
"]",
"/",
"r",
"[",
"'mod_sig_sec_alt'",
"]",
")",
"logging",
".",
"debug",
"(",
"r",
"[",
"[",
"'mod_depth_sec_dv'",
",",
"'mod_fred_dv'",
",",
"'mod_sig_sec_dv'",
"]",
"]",
")",
"logging",
".",
"debug",
"(",
"r",
"[",
"[",
"'mod_depth_sec_alt'",
",",
"'mod_fred_alt'",
",",
"'mod_sig_sec_alt'",
"]",
"]",
")",
"if",
"np",
".",
"isnan",
"(",
"depth_dv",
")",
"and",
"np",
".",
"isnan",
"(",
"depth_alt",
")",
":",
"#return weaksec_vv2(koi)",
"raise",
"NoWeakSecondaryError",
"(",
"koi",
")",
"elif",
"np",
".",
"isnan",
"(",
"depth_dv",
")",
":",
"return",
"depth_alt",
"elif",
"np",
".",
"isnan",
"(",
"depth_alt",
")",
":",
"return",
"depth_dv",
"else",
":",
"return",
"max",
"(",
"depth_dv",
",",
"depth_alt",
")"
] |
Max secondary depth based on model-shift secondary test from Jeff Coughlin
secondary metric: mod_depth_sec_dv * (1 + 3*mod_fred_dv / mod_sig_sec_dv)
|
[
"Max",
"secondary",
"depth",
"based",
"on",
"model",
"-",
"shift",
"secondary",
"test",
"from",
"Jeff",
"Coughlin"
] |
python | train | 34.757576 |
bcb/jsonrpcclient | jsonrpcclient/client.py | https://github.com/bcb/jsonrpcclient/blob/5b5abc28d1466d694c80b80c427a5dcb275382bb/jsonrpcclient/client.py#L206-L233 |
def request(
self,
method_name: str,
*args: Any,
trim_log_values: bool = False,
validate_against_schema: bool = True,
id_generator: Optional[Iterator] = None,
**kwargs: Any
) -> Response:
"""
Send a request by passing the method and arguments.
>>> client.request("cat", name="Yoko")
<Response[1]
Args:
method_name: The remote procedure's method name.
args: Positional arguments passed to the remote procedure.
kwargs: Keyword arguments passed to the remote procedure.
trim_log_values: Abbreviate the log entries of requests and responses.
validate_against_schema: Validate response against the JSON-RPC schema.
id_generator: Iterable of values to use as the "id" part of the request.
"""
return self.send(
Request(method_name, id_generator=id_generator, *args, **kwargs),
trim_log_values=trim_log_values,
validate_against_schema=validate_against_schema,
)
|
[
"def",
"request",
"(",
"self",
",",
"method_name",
":",
"str",
",",
"*",
"args",
":",
"Any",
",",
"trim_log_values",
":",
"bool",
"=",
"False",
",",
"validate_against_schema",
":",
"bool",
"=",
"True",
",",
"id_generator",
":",
"Optional",
"[",
"Iterator",
"]",
"=",
"None",
",",
"*",
"*",
"kwargs",
":",
"Any",
")",
"->",
"Response",
":",
"return",
"self",
".",
"send",
"(",
"Request",
"(",
"method_name",
",",
"id_generator",
"=",
"id_generator",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
",",
"trim_log_values",
"=",
"trim_log_values",
",",
"validate_against_schema",
"=",
"validate_against_schema",
",",
")"
] |
Send a request by passing the method and arguments.
>>> client.request("cat", name="Yoko")
<Response[1]
Args:
method_name: The remote procedure's method name.
args: Positional arguments passed to the remote procedure.
kwargs: Keyword arguments passed to the remote procedure.
trim_log_values: Abbreviate the log entries of requests and responses.
validate_against_schema: Validate response against the JSON-RPC schema.
id_generator: Iterable of values to use as the "id" part of the request.
|
[
"Send",
"a",
"request",
"by",
"passing",
"the",
"method",
"and",
"arguments",
"."
] |
python | train | 37.928571 |
openvax/mhcflurry | mhcflurry/class1_affinity_predictor.py | https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/class1_affinity_predictor.py#L722-L766 |
def percentile_ranks(self, affinities, allele=None, alleles=None, throw=True):
"""
Return percentile ranks for the given ic50 affinities and alleles.
The 'allele' and 'alleles' argument are as in the `predict` method.
Specify one of these.
Parameters
----------
affinities : sequence of float
nM affinities
allele : string
alleles : sequence of string
throw : boolean
If True, a ValueError will be raised in the case of unsupported
alleles. If False, a warning will be logged and NaN will be returned
for those percentile ranks.
Returns
-------
numpy.array of float
"""
if allele is not None:
try:
transform = self.allele_to_percent_rank_transform[allele]
return transform.transform(affinities)
except KeyError:
msg = "Allele %s has no percentile rank information" % allele
if throw:
raise ValueError(msg)
else:
warnings.warn(msg)
# Return NaNs
return numpy.ones(len(affinities)) * numpy.nan
if alleles is None:
raise ValueError("Specify allele or alleles")
df = pandas.DataFrame({"affinity": affinities})
df["allele"] = alleles
df["result"] = numpy.nan
for (allele, sub_df) in df.groupby("allele"):
df.loc[sub_df.index, "result"] = self.percentile_ranks(
sub_df.affinity, allele=allele, throw=throw)
return df.result.values
|
[
"def",
"percentile_ranks",
"(",
"self",
",",
"affinities",
",",
"allele",
"=",
"None",
",",
"alleles",
"=",
"None",
",",
"throw",
"=",
"True",
")",
":",
"if",
"allele",
"is",
"not",
"None",
":",
"try",
":",
"transform",
"=",
"self",
".",
"allele_to_percent_rank_transform",
"[",
"allele",
"]",
"return",
"transform",
".",
"transform",
"(",
"affinities",
")",
"except",
"KeyError",
":",
"msg",
"=",
"\"Allele %s has no percentile rank information\"",
"%",
"allele",
"if",
"throw",
":",
"raise",
"ValueError",
"(",
"msg",
")",
"else",
":",
"warnings",
".",
"warn",
"(",
"msg",
")",
"# Return NaNs",
"return",
"numpy",
".",
"ones",
"(",
"len",
"(",
"affinities",
")",
")",
"*",
"numpy",
".",
"nan",
"if",
"alleles",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Specify allele or alleles\"",
")",
"df",
"=",
"pandas",
".",
"DataFrame",
"(",
"{",
"\"affinity\"",
":",
"affinities",
"}",
")",
"df",
"[",
"\"allele\"",
"]",
"=",
"alleles",
"df",
"[",
"\"result\"",
"]",
"=",
"numpy",
".",
"nan",
"for",
"(",
"allele",
",",
"sub_df",
")",
"in",
"df",
".",
"groupby",
"(",
"\"allele\"",
")",
":",
"df",
".",
"loc",
"[",
"sub_df",
".",
"index",
",",
"\"result\"",
"]",
"=",
"self",
".",
"percentile_ranks",
"(",
"sub_df",
".",
"affinity",
",",
"allele",
"=",
"allele",
",",
"throw",
"=",
"throw",
")",
"return",
"df",
".",
"result",
".",
"values"
] |
Return percentile ranks for the given ic50 affinities and alleles.
The 'allele' and 'alleles' argument are as in the `predict` method.
Specify one of these.
Parameters
----------
affinities : sequence of float
nM affinities
allele : string
alleles : sequence of string
throw : boolean
If True, a ValueError will be raised in the case of unsupported
alleles. If False, a warning will be logged and NaN will be returned
for those percentile ranks.
Returns
-------
numpy.array of float
|
[
"Return",
"percentile",
"ranks",
"for",
"the",
"given",
"ic50",
"affinities",
"and",
"alleles",
"."
] |
python | train | 36.111111 |
tensorflow/tensorboard | tensorboard/plugins/debugger/interactive_debugger_server_lib.py | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/debugger/interactive_debugger_server_lib.py#L286-L311 |
def on_core_metadata_event(self, event):
"""Implementation of the core metadata-carrying Event proto callback.
Args:
event: An Event proto that contains core metadata about the debugged
Session::Run() in its log_message.message field, as a JSON string.
See the doc string of debug_data.DebugDumpDir.core_metadata for details.
"""
core_metadata = json.loads(event.log_message.message)
input_names = ','.join(core_metadata['input_names'])
output_names = ','.join(core_metadata['output_names'])
target_nodes = ','.join(core_metadata['target_nodes'])
self._run_key = RunKey(input_names, output_names, target_nodes)
if not self._graph_defs:
self._graph_defs_arrive_first = False
else:
for device_name in self._graph_defs:
self._add_graph_def(device_name, self._graph_defs[device_name])
self._outgoing_channel.put(_comm_metadata(self._run_key, event.wall_time))
# Wait for acknowledgement from client. Blocks until an item is got.
logger.info('on_core_metadata_event() waiting for client ack (meta)...')
self._incoming_channel.get()
logger.info('on_core_metadata_event() client ack received (meta).')
|
[
"def",
"on_core_metadata_event",
"(",
"self",
",",
"event",
")",
":",
"core_metadata",
"=",
"json",
".",
"loads",
"(",
"event",
".",
"log_message",
".",
"message",
")",
"input_names",
"=",
"','",
".",
"join",
"(",
"core_metadata",
"[",
"'input_names'",
"]",
")",
"output_names",
"=",
"','",
".",
"join",
"(",
"core_metadata",
"[",
"'output_names'",
"]",
")",
"target_nodes",
"=",
"','",
".",
"join",
"(",
"core_metadata",
"[",
"'target_nodes'",
"]",
")",
"self",
".",
"_run_key",
"=",
"RunKey",
"(",
"input_names",
",",
"output_names",
",",
"target_nodes",
")",
"if",
"not",
"self",
".",
"_graph_defs",
":",
"self",
".",
"_graph_defs_arrive_first",
"=",
"False",
"else",
":",
"for",
"device_name",
"in",
"self",
".",
"_graph_defs",
":",
"self",
".",
"_add_graph_def",
"(",
"device_name",
",",
"self",
".",
"_graph_defs",
"[",
"device_name",
"]",
")",
"self",
".",
"_outgoing_channel",
".",
"put",
"(",
"_comm_metadata",
"(",
"self",
".",
"_run_key",
",",
"event",
".",
"wall_time",
")",
")",
"# Wait for acknowledgement from client. Blocks until an item is got.",
"logger",
".",
"info",
"(",
"'on_core_metadata_event() waiting for client ack (meta)...'",
")",
"self",
".",
"_incoming_channel",
".",
"get",
"(",
")",
"logger",
".",
"info",
"(",
"'on_core_metadata_event() client ack received (meta).'",
")"
] |
Implementation of the core metadata-carrying Event proto callback.
Args:
event: An Event proto that contains core metadata about the debugged
Session::Run() in its log_message.message field, as a JSON string.
See the doc string of debug_data.DebugDumpDir.core_metadata for details.
|
[
"Implementation",
"of",
"the",
"core",
"metadata",
"-",
"carrying",
"Event",
"proto",
"callback",
"."
] |
python | train | 45.192308 |
xapple/fasta | fasta/__init__.py | https://github.com/xapple/fasta/blob/a827c3138812d555203be45187ffae1277dd0d76/fasta/__init__.py#L356-L364 |
def graphs(self):
"""Sorry for the black magic. The result is an object whose attributes
are all the graphs found in graphs.py initialized with this instance as
only argument."""
result = Dummy()
for graph in graphs.__all__:
cls = getattr(graphs, graph)
setattr(result, cls.short_name, cls(self))
return result
|
[
"def",
"graphs",
"(",
"self",
")",
":",
"result",
"=",
"Dummy",
"(",
")",
"for",
"graph",
"in",
"graphs",
".",
"__all__",
":",
"cls",
"=",
"getattr",
"(",
"graphs",
",",
"graph",
")",
"setattr",
"(",
"result",
",",
"cls",
".",
"short_name",
",",
"cls",
"(",
"self",
")",
")",
"return",
"result"
] |
Sorry for the black magic. The result is an object whose attributes
are all the graphs found in graphs.py initialized with this instance as
only argument.
|
[
"Sorry",
"for",
"the",
"black",
"magic",
".",
"The",
"result",
"is",
"an",
"object",
"whose",
"attributes",
"are",
"all",
"the",
"graphs",
"found",
"in",
"graphs",
".",
"py",
"initialized",
"with",
"this",
"instance",
"as",
"only",
"argument",
"."
] |
python | train | 41.555556 |
SoCo/SoCo | soco/music_services/music_service.py | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/music_services/music_service.py#L728-L744 |
def get_media_metadata(self, item_id):
"""Get metadata for a media item.
Args:
item_id (str): The item for which metadata is required.
Returns:
~collections.OrderedDict: The item's metadata, or `None`
See also:
The Sonos `getMediaMetadata API
<http://musicpartners.sonos.com/node/83>`_
"""
response = self.soap_client.call(
'getMediaMetadata',
[('id', item_id)])
return response.get('getMediaMetadataResult', None)
|
[
"def",
"get_media_metadata",
"(",
"self",
",",
"item_id",
")",
":",
"response",
"=",
"self",
".",
"soap_client",
".",
"call",
"(",
"'getMediaMetadata'",
",",
"[",
"(",
"'id'",
",",
"item_id",
")",
"]",
")",
"return",
"response",
".",
"get",
"(",
"'getMediaMetadataResult'",
",",
"None",
")"
] |
Get metadata for a media item.
Args:
item_id (str): The item for which metadata is required.
Returns:
~collections.OrderedDict: The item's metadata, or `None`
See also:
The Sonos `getMediaMetadata API
<http://musicpartners.sonos.com/node/83>`_
|
[
"Get",
"metadata",
"for",
"a",
"media",
"item",
"."
] |
python | train | 31.117647 |
aroberge/experimental | experimental/core/import_hook.py | https://github.com/aroberge/experimental/blob/031a9be10698b429998436da748b8fdb86f18b47/experimental/core/import_hook.py#L47-L70 |
def find_spec(self, fullname, path, target=None):
'''finds the appropriate properties (spec) of a module, and sets
its loader.'''
if not path:
path = [os.getcwd()]
if "." in fullname:
name = fullname.split(".")[-1]
else:
name = fullname
for entry in path:
if os.path.isdir(os.path.join(entry, name)):
# this module has child modules
filename = os.path.join(entry, name, "__init__.py")
submodule_locations = [os.path.join(entry, name)]
else:
filename = os.path.join(entry, name + ".py")
submodule_locations = None
if not os.path.exists(filename):
continue
return spec_from_file_location(fullname, filename,
loader=MyLoader(filename),
submodule_search_locations=submodule_locations)
return None
|
[
"def",
"find_spec",
"(",
"self",
",",
"fullname",
",",
"path",
",",
"target",
"=",
"None",
")",
":",
"if",
"not",
"path",
":",
"path",
"=",
"[",
"os",
".",
"getcwd",
"(",
")",
"]",
"if",
"\".\"",
"in",
"fullname",
":",
"name",
"=",
"fullname",
".",
"split",
"(",
"\".\"",
")",
"[",
"-",
"1",
"]",
"else",
":",
"name",
"=",
"fullname",
"for",
"entry",
"in",
"path",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"entry",
",",
"name",
")",
")",
":",
"# this module has child modules",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"entry",
",",
"name",
",",
"\"__init__.py\"",
")",
"submodule_locations",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"entry",
",",
"name",
")",
"]",
"else",
":",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"entry",
",",
"name",
"+",
"\".py\"",
")",
"submodule_locations",
"=",
"None",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"continue",
"return",
"spec_from_file_location",
"(",
"fullname",
",",
"filename",
",",
"loader",
"=",
"MyLoader",
"(",
"filename",
")",
",",
"submodule_search_locations",
"=",
"submodule_locations",
")",
"return",
"None"
] |
finds the appropriate properties (spec) of a module, and sets
its loader.
|
[
"finds",
"the",
"appropriate",
"properties",
"(",
"spec",
")",
"of",
"a",
"module",
"and",
"sets",
"its",
"loader",
"."
] |
python | train | 41.458333 |
BerkeleyAutomation/autolab_core | autolab_core/json_serialization.py | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/json_serialization.py#L75-L82 |
def load(*args, **kwargs):
"""Load an numpy.ndarray from a file stream.
This works exactly like the usual `json.load()` function,
but it uses our custom deserializer.
"""
kwargs.update(dict(object_hook=json_numpy_obj_hook))
return _json.load(*args, **kwargs)
|
[
"def",
"load",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
".",
"update",
"(",
"dict",
"(",
"object_hook",
"=",
"json_numpy_obj_hook",
")",
")",
"return",
"_json",
".",
"load",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Load an numpy.ndarray from a file stream.
This works exactly like the usual `json.load()` function,
but it uses our custom deserializer.
|
[
"Load",
"an",
"numpy",
".",
"ndarray",
"from",
"a",
"file",
"stream",
"."
] |
python | train | 34.5 |
Alignak-monitoring/alignak | alignak/util.py | https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/util.py#L230-L248 |
def format_t_into_dhms_format(timestamp):
""" Convert an amount of second into day, hour, min and sec
:param timestamp: seconds
:type timestamp: int
:return: 'Ad Bh Cm Ds'
:rtype: str
>>> format_t_into_dhms_format(456189)
'5d 6h 43m 9s'
>>> format_t_into_dhms_format(3600)
'0d 1h 0m 0s'
"""
mins, timestamp = divmod(timestamp, 60)
hour, mins = divmod(mins, 60)
day, hour = divmod(hour, 24)
return '%sd %sh %sm %ss' % (day, hour, mins, timestamp)
|
[
"def",
"format_t_into_dhms_format",
"(",
"timestamp",
")",
":",
"mins",
",",
"timestamp",
"=",
"divmod",
"(",
"timestamp",
",",
"60",
")",
"hour",
",",
"mins",
"=",
"divmod",
"(",
"mins",
",",
"60",
")",
"day",
",",
"hour",
"=",
"divmod",
"(",
"hour",
",",
"24",
")",
"return",
"'%sd %sh %sm %ss'",
"%",
"(",
"day",
",",
"hour",
",",
"mins",
",",
"timestamp",
")"
] |
Convert an amount of second into day, hour, min and sec
:param timestamp: seconds
:type timestamp: int
:return: 'Ad Bh Cm Ds'
:rtype: str
>>> format_t_into_dhms_format(456189)
'5d 6h 43m 9s'
>>> format_t_into_dhms_format(3600)
'0d 1h 0m 0s'
|
[
"Convert",
"an",
"amount",
"of",
"second",
"into",
"day",
"hour",
"min",
"and",
"sec"
] |
python | train | 25.631579 |
selectel/pyte | pyte/screens.py | https://github.com/selectel/pyte/blob/8adad489f86da1788a7995720c344a2fa44f244e/pyte/screens.py#L568-L580 |
def reverse_index(self):
"""Move the cursor up one line in the same column. If the cursor
is at the first line, create a new line at the top.
"""
top, bottom = self.margins or Margins(0, self.lines - 1)
if self.cursor.y == top:
# TODO: mark only the lines within margins?
self.dirty.update(range(self.lines))
for y in range(bottom, top, -1):
self.buffer[y] = self.buffer[y - 1]
self.buffer.pop(top, None)
else:
self.cursor_up()
|
[
"def",
"reverse_index",
"(",
"self",
")",
":",
"top",
",",
"bottom",
"=",
"self",
".",
"margins",
"or",
"Margins",
"(",
"0",
",",
"self",
".",
"lines",
"-",
"1",
")",
"if",
"self",
".",
"cursor",
".",
"y",
"==",
"top",
":",
"# TODO: mark only the lines within margins?",
"self",
".",
"dirty",
".",
"update",
"(",
"range",
"(",
"self",
".",
"lines",
")",
")",
"for",
"y",
"in",
"range",
"(",
"bottom",
",",
"top",
",",
"-",
"1",
")",
":",
"self",
".",
"buffer",
"[",
"y",
"]",
"=",
"self",
".",
"buffer",
"[",
"y",
"-",
"1",
"]",
"self",
".",
"buffer",
".",
"pop",
"(",
"top",
",",
"None",
")",
"else",
":",
"self",
".",
"cursor_up",
"(",
")"
] |
Move the cursor up one line in the same column. If the cursor
is at the first line, create a new line at the top.
|
[
"Move",
"the",
"cursor",
"up",
"one",
"line",
"in",
"the",
"same",
"column",
".",
"If",
"the",
"cursor",
"is",
"at",
"the",
"first",
"line",
"create",
"a",
"new",
"line",
"at",
"the",
"top",
"."
] |
python | train | 41.461538 |
clalancette/pycdlib | pycdlib/dr.py | https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/dr.py#L311-L400 |
def _rr_new(self, rr_version, rr_name, rr_symlink_target, rr_relocated_child,
rr_relocated, rr_relocated_parent, file_mode):
# type: (str, bytes, bytes, bool, bool, bool, int) -> None
'''
Internal method to add Rock Ridge to a Directory Record.
Parameters:
rr_version - A string containing the version of Rock Ridge to use for
this record.
rr_name - The Rock Ridge name to associate with this directory record.
rr_symlink_target - The target for the symlink, if this is a symlink
record (otherwise, None).
rr_relocated_child - True if this is a directory record for a rock
ridge relocated child.
rr_relocated - True if this is a directory record for a relocated
entry.
rr_relocated_parent - True if this is a directory record for a rock
ridge relocated parent.
file_mode - The Unix file mode for this Rock Ridge entry.
Returns:
Nothing.
'''
if self.parent is None:
raise pycdlibexception.PyCdlibInternalError('Invalid call to create new Rock Ridge on root directory')
self.rock_ridge = rockridge.RockRidge()
is_first_dir_record_of_root = self.file_ident == b'\x00' and self.parent.is_root
bytes_to_skip = 0
if self.xa_record is not None:
bytes_to_skip = XARecord.length()
self.dr_len = self.rock_ridge.new(is_first_dir_record_of_root, rr_name,
file_mode, rr_symlink_target,
rr_version, rr_relocated_child,
rr_relocated, rr_relocated_parent,
bytes_to_skip, self.dr_len)
# For files, we are done
if not self.isdir:
return
# If this is a directory, we have to manipulate the file links
# appropriately.
if self.parent.is_root:
if self.file_ident == b'\x00' or self.file_ident == b'\x01':
# For the dot and dotdot children of the root, add one
# directly to their Rock Ridge links.
self.rock_ridge.add_to_file_links()
else:
# For all other children of the root, make sure to add one
# to each of the dot and dotdot entries.
if len(self.parent.children) < 2:
raise pycdlibexception.PyCdlibInvalidISO('Expected at least 2 children of the root directory record, saw %d' % (len(self.parent.children)))
if self.parent.children[0].rock_ridge is None:
raise pycdlibexception.PyCdlibInvalidISO('Dot child of directory has no Rock Ridge; ISO is corrupt')
self.parent.children[0].rock_ridge.add_to_file_links()
if self.parent.children[1].rock_ridge is None:
raise pycdlibexception.PyCdlibInvalidISO('Dot-dot child of directory has no Rock Ridge; ISO is corrupt')
self.parent.children[1].rock_ridge.add_to_file_links()
else:
if self.parent.rock_ridge is None:
raise pycdlibexception.PyCdlibInvalidISO('Parent of the entry did not have Rock Ridge, ISO is corrupt')
if self.file_ident == b'\x00':
# If we are adding the dot directory, increment the parent
# file links and our file links.
self.parent.rock_ridge.add_to_file_links()
self.rock_ridge.add_to_file_links()
elif self.file_ident == b'\x01':
# If we are adding the dotdot directory, copy the file links
# from the dot directory of the grandparent.
if self.parent.parent is None:
raise pycdlibexception.PyCdlibInternalError('Grandparent of the entry did not exist; this cannot be')
if not self.parent.children:
raise pycdlibexception.PyCdlibInvalidISO('Grandparent of the entry did not have a dot entry; ISO is corrupt')
if self.parent.parent.children[0].rock_ridge is None:
raise pycdlibexception.PyCdlibInvalidISO('Grandparent dotdot entry did not have Rock Ridge; ISO is corrupt')
self.rock_ridge.copy_file_links(self.parent.parent.children[0].rock_ridge)
else:
# For all other entries, increment the parents file links
# and the parents dot file links.
self.parent.rock_ridge.add_to_file_links()
if not self.parent.children:
raise pycdlibexception.PyCdlibInvalidISO('Parent of the entry did not have a dot entry; ISO is corrupt')
if self.parent.children[0].rock_ridge is None:
raise pycdlibexception.PyCdlibInvalidISO('Dot child of the parent did not have a dot entry; ISO is corrupt')
self.parent.children[0].rock_ridge.add_to_file_links()
|
[
"def",
"_rr_new",
"(",
"self",
",",
"rr_version",
",",
"rr_name",
",",
"rr_symlink_target",
",",
"rr_relocated_child",
",",
"rr_relocated",
",",
"rr_relocated_parent",
",",
"file_mode",
")",
":",
"# type: (str, bytes, bytes, bool, bool, bool, int) -> None",
"if",
"self",
".",
"parent",
"is",
"None",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInternalError",
"(",
"'Invalid call to create new Rock Ridge on root directory'",
")",
"self",
".",
"rock_ridge",
"=",
"rockridge",
".",
"RockRidge",
"(",
")",
"is_first_dir_record_of_root",
"=",
"self",
".",
"file_ident",
"==",
"b'\\x00'",
"and",
"self",
".",
"parent",
".",
"is_root",
"bytes_to_skip",
"=",
"0",
"if",
"self",
".",
"xa_record",
"is",
"not",
"None",
":",
"bytes_to_skip",
"=",
"XARecord",
".",
"length",
"(",
")",
"self",
".",
"dr_len",
"=",
"self",
".",
"rock_ridge",
".",
"new",
"(",
"is_first_dir_record_of_root",
",",
"rr_name",
",",
"file_mode",
",",
"rr_symlink_target",
",",
"rr_version",
",",
"rr_relocated_child",
",",
"rr_relocated",
",",
"rr_relocated_parent",
",",
"bytes_to_skip",
",",
"self",
".",
"dr_len",
")",
"# For files, we are done",
"if",
"not",
"self",
".",
"isdir",
":",
"return",
"# If this is a directory, we have to manipulate the file links",
"# appropriately.",
"if",
"self",
".",
"parent",
".",
"is_root",
":",
"if",
"self",
".",
"file_ident",
"==",
"b'\\x00'",
"or",
"self",
".",
"file_ident",
"==",
"b'\\x01'",
":",
"# For the dot and dotdot children of the root, add one",
"# directly to their Rock Ridge links.",
"self",
".",
"rock_ridge",
".",
"add_to_file_links",
"(",
")",
"else",
":",
"# For all other children of the root, make sure to add one",
"# to each of the dot and dotdot entries.",
"if",
"len",
"(",
"self",
".",
"parent",
".",
"children",
")",
"<",
"2",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInvalidISO",
"(",
"'Expected at least 2 children of the root directory record, saw %d'",
"%",
"(",
"len",
"(",
"self",
".",
"parent",
".",
"children",
")",
")",
")",
"if",
"self",
".",
"parent",
".",
"children",
"[",
"0",
"]",
".",
"rock_ridge",
"is",
"None",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInvalidISO",
"(",
"'Dot child of directory has no Rock Ridge; ISO is corrupt'",
")",
"self",
".",
"parent",
".",
"children",
"[",
"0",
"]",
".",
"rock_ridge",
".",
"add_to_file_links",
"(",
")",
"if",
"self",
".",
"parent",
".",
"children",
"[",
"1",
"]",
".",
"rock_ridge",
"is",
"None",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInvalidISO",
"(",
"'Dot-dot child of directory has no Rock Ridge; ISO is corrupt'",
")",
"self",
".",
"parent",
".",
"children",
"[",
"1",
"]",
".",
"rock_ridge",
".",
"add_to_file_links",
"(",
")",
"else",
":",
"if",
"self",
".",
"parent",
".",
"rock_ridge",
"is",
"None",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInvalidISO",
"(",
"'Parent of the entry did not have Rock Ridge, ISO is corrupt'",
")",
"if",
"self",
".",
"file_ident",
"==",
"b'\\x00'",
":",
"# If we are adding the dot directory, increment the parent",
"# file links and our file links.",
"self",
".",
"parent",
".",
"rock_ridge",
".",
"add_to_file_links",
"(",
")",
"self",
".",
"rock_ridge",
".",
"add_to_file_links",
"(",
")",
"elif",
"self",
".",
"file_ident",
"==",
"b'\\x01'",
":",
"# If we are adding the dotdot directory, copy the file links",
"# from the dot directory of the grandparent.",
"if",
"self",
".",
"parent",
".",
"parent",
"is",
"None",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInternalError",
"(",
"'Grandparent of the entry did not exist; this cannot be'",
")",
"if",
"not",
"self",
".",
"parent",
".",
"children",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInvalidISO",
"(",
"'Grandparent of the entry did not have a dot entry; ISO is corrupt'",
")",
"if",
"self",
".",
"parent",
".",
"parent",
".",
"children",
"[",
"0",
"]",
".",
"rock_ridge",
"is",
"None",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInvalidISO",
"(",
"'Grandparent dotdot entry did not have Rock Ridge; ISO is corrupt'",
")",
"self",
".",
"rock_ridge",
".",
"copy_file_links",
"(",
"self",
".",
"parent",
".",
"parent",
".",
"children",
"[",
"0",
"]",
".",
"rock_ridge",
")",
"else",
":",
"# For all other entries, increment the parents file links",
"# and the parents dot file links.",
"self",
".",
"parent",
".",
"rock_ridge",
".",
"add_to_file_links",
"(",
")",
"if",
"not",
"self",
".",
"parent",
".",
"children",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInvalidISO",
"(",
"'Parent of the entry did not have a dot entry; ISO is corrupt'",
")",
"if",
"self",
".",
"parent",
".",
"children",
"[",
"0",
"]",
".",
"rock_ridge",
"is",
"None",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInvalidISO",
"(",
"'Dot child of the parent did not have a dot entry; ISO is corrupt'",
")",
"self",
".",
"parent",
".",
"children",
"[",
"0",
"]",
".",
"rock_ridge",
".",
"add_to_file_links",
"(",
")"
] |
Internal method to add Rock Ridge to a Directory Record.
Parameters:
rr_version - A string containing the version of Rock Ridge to use for
this record.
rr_name - The Rock Ridge name to associate with this directory record.
rr_symlink_target - The target for the symlink, if this is a symlink
record (otherwise, None).
rr_relocated_child - True if this is a directory record for a rock
ridge relocated child.
rr_relocated - True if this is a directory record for a relocated
entry.
rr_relocated_parent - True if this is a directory record for a rock
ridge relocated parent.
file_mode - The Unix file mode for this Rock Ridge entry.
Returns:
Nothing.
|
[
"Internal",
"method",
"to",
"add",
"Rock",
"Ridge",
"to",
"a",
"Directory",
"Record",
"."
] |
python | train | 56.011111 |
brocade/pynos | pynos/versions/ver_7/ver_7_1_0/yang/brocade_lldp_ext.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_lldp_ext.py#L320-L336 |
def get_lldp_neighbor_detail_output_lldp_neighbor_detail_lldp_pdu_transmitted(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_lldp_neighbor_detail = ET.Element("get_lldp_neighbor_detail")
config = get_lldp_neighbor_detail
output = ET.SubElement(get_lldp_neighbor_detail, "output")
lldp_neighbor_detail = ET.SubElement(output, "lldp-neighbor-detail")
local_interface_name_key = ET.SubElement(lldp_neighbor_detail, "local-interface-name")
local_interface_name_key.text = kwargs.pop('local_interface_name')
remote_interface_name_key = ET.SubElement(lldp_neighbor_detail, "remote-interface-name")
remote_interface_name_key.text = kwargs.pop('remote_interface_name')
lldp_pdu_transmitted = ET.SubElement(lldp_neighbor_detail, "lldp-pdu-transmitted")
lldp_pdu_transmitted.text = kwargs.pop('lldp_pdu_transmitted')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
[
"def",
"get_lldp_neighbor_detail_output_lldp_neighbor_detail_lldp_pdu_transmitted",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"get_lldp_neighbor_detail",
"=",
"ET",
".",
"Element",
"(",
"\"get_lldp_neighbor_detail\"",
")",
"config",
"=",
"get_lldp_neighbor_detail",
"output",
"=",
"ET",
".",
"SubElement",
"(",
"get_lldp_neighbor_detail",
",",
"\"output\"",
")",
"lldp_neighbor_detail",
"=",
"ET",
".",
"SubElement",
"(",
"output",
",",
"\"lldp-neighbor-detail\"",
")",
"local_interface_name_key",
"=",
"ET",
".",
"SubElement",
"(",
"lldp_neighbor_detail",
",",
"\"local-interface-name\"",
")",
"local_interface_name_key",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'local_interface_name'",
")",
"remote_interface_name_key",
"=",
"ET",
".",
"SubElement",
"(",
"lldp_neighbor_detail",
",",
"\"remote-interface-name\"",
")",
"remote_interface_name_key",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'remote_interface_name'",
")",
"lldp_pdu_transmitted",
"=",
"ET",
".",
"SubElement",
"(",
"lldp_neighbor_detail",
",",
"\"lldp-pdu-transmitted\"",
")",
"lldp_pdu_transmitted",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'lldp_pdu_transmitted'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] |
Auto Generated Code
|
[
"Auto",
"Generated",
"Code"
] |
python | train | 59.764706 |
SINGROUP/SOAPLite | soaplite/core.py | https://github.com/SINGROUP/SOAPLite/blob/80e27cc8d5b4c887011542c5a799583bfc6ff643/soaplite/core.py#L80-L169 |
def get_soap_locals(obj, Hpos, alp, bet, rCut=5.0, nMax=5, Lmax=5, crossOver=True, all_atomtypes=None, eta=1.0):
"""Get the RBF basis SOAP output for the given positions in a finite system.
Args:
obj(ase.Atoms): Atomic structure for which the SOAP output is
calculated.
Hpos: Positions at which to calculate SOAP
alp: Alphas
bet: Betas
rCut: Radial cutoff.
nMax: Maximum number of radial basis functions
Lmax: Maximum spherical harmonics degree
crossOver:
all_atomtypes: Can be used to specify the atomic elements for which to
calculate the output. If given the output is calculated only for the
given species and is ordered by atomic number.
eta: The gaussian smearing width.
Returns:
np.ndarray: SOAP output for the given positions.
"""
rCutHard = rCut + 5
assert Lmax <= 9, "l cannot exceed 9. Lmax={}".format(Lmax)
assert Lmax >= 0, "l cannot be negative.Lmax={}".format(Lmax)
assert rCutHard < 17.0001, "hard radius cuttof cannot be larger than 17 Angs. rCut={}".format(rCutHard)
assert rCutHard > 1.999, "hard redius cuttof cannot be lower than 1 Ang. rCut={}".format(rCutHard)
assert nMax >= 2, "number of basis functions cannot be lower than 2. nMax={}".format(nMax)
assert nMax <= 13, "number of basis functions cannot exceed 12. nMax={}".format(nMax)
assert eta >= 0.0001, "Eta cannot be zero or negative. nMax={}".format(eta)
# get clusgeo internal format for c-code
Apos, typeNs, py_Ntypes, atomtype_lst, totalAN = _format_ase2clusgeo(obj, all_atomtypes)
Hpos = np.array(Hpos)
py_Hsize = Hpos.shape[0]
# flatten arrays
Hpos = Hpos.flatten()
alp = alp.flatten()
bet = bet.flatten()
# convert int to c_int
lMax = c_int(Lmax)
Hsize = c_int(py_Hsize)
Ntypes = c_int(py_Ntypes)
totalAN = c_int(totalAN)
rCutHard = c_double(rCutHard)
Nsize = c_int(nMax)
c_eta = c_double(eta)
#convert int array to c_int array
typeNs = (c_int * len(typeNs))(*typeNs)
# convert to c_double arrays
# alphas
alphas = (c_double * len(alp))(*alp.tolist())
# betas
betas = (c_double * len(bet))(*bet.tolist())
#Apos
axyz = (c_double * len(Apos))(*Apos.tolist())
#Hpos
hxyz = (c_double * len(Hpos))(*Hpos.tolist())
### START SOAP###
#path_to_so = os.path.dirname(os.path.abspath(__file__))
_PATH_TO_SOAPLITE_SO = os.path.dirname(os.path.abspath(__file__))
_SOAPLITE_SOFILES = glob.glob( "".join([ _PATH_TO_SOAPLITE_SO, "/../lib/libsoap*.*so"]) ) ## NOT SURE ABOUT THIS
if py_Ntypes == 1 or (not crossOver):
substring = "lib/libsoapPySig."
libsoap = CDLL(next((s for s in _SOAPLITE_SOFILES if substring in s), None))
libsoap.soap.argtypes = [POINTER (c_double),POINTER (c_double), POINTER (c_double),POINTER (c_double), POINTER (c_double), POINTER (c_int),c_double,c_int,c_int,c_int,c_int,c_int,c_double]
libsoap.soap.restype = POINTER (c_double)
c = (c_double*(int((nMax*(nMax+1))/2)*(Lmax+1)*py_Ntypes*py_Hsize))()
libsoap.soap( c, axyz, hxyz, alphas, betas, typeNs, rCutHard, totalAN, Ntypes, Nsize, lMax, Hsize,c_eta)
else:
substring = "lib/libsoapGTO."
libsoapGTO = CDLL(next((s for s in _SOAPLITE_SOFILES if substring in s), None))
libsoapGTO.soap.argtypes = [POINTER (c_double),POINTER (c_double), POINTER (c_double),POINTER (c_double), POINTER (c_double), POINTER (c_int),c_double,c_int,c_int,c_int,c_int,c_int,c_double]
libsoapGTO.soap.restype = POINTER (c_double)
c = (c_double*(int((nMax*(nMax+1))/2)*(Lmax+1)*int((py_Ntypes*(py_Ntypes +1))/2)*py_Hsize))()
libsoapGTO.soap( c, axyz, hxyz, alphas, betas, typeNs, rCutHard, totalAN, Ntypes, Nsize, lMax, Hsize,c_eta)
# return c;
if crossOver:
crosTypes = int((py_Ntypes*(py_Ntypes+1))/2)
shape = (py_Hsize, int((nMax*(nMax+1))/2)*(Lmax+1)*crosTypes)
else:
shape = (py_Hsize, int((nMax*(nMax+1))/2)*(Lmax+1)*py_Ntypes)
a = np.ctypeslib.as_array(c)
a = a.reshape(shape)
return a
|
[
"def",
"get_soap_locals",
"(",
"obj",
",",
"Hpos",
",",
"alp",
",",
"bet",
",",
"rCut",
"=",
"5.0",
",",
"nMax",
"=",
"5",
",",
"Lmax",
"=",
"5",
",",
"crossOver",
"=",
"True",
",",
"all_atomtypes",
"=",
"None",
",",
"eta",
"=",
"1.0",
")",
":",
"rCutHard",
"=",
"rCut",
"+",
"5",
"assert",
"Lmax",
"<=",
"9",
",",
"\"l cannot exceed 9. Lmax={}\"",
".",
"format",
"(",
"Lmax",
")",
"assert",
"Lmax",
">=",
"0",
",",
"\"l cannot be negative.Lmax={}\"",
".",
"format",
"(",
"Lmax",
")",
"assert",
"rCutHard",
"<",
"17.0001",
",",
"\"hard radius cuttof cannot be larger than 17 Angs. rCut={}\"",
".",
"format",
"(",
"rCutHard",
")",
"assert",
"rCutHard",
">",
"1.999",
",",
"\"hard redius cuttof cannot be lower than 1 Ang. rCut={}\"",
".",
"format",
"(",
"rCutHard",
")",
"assert",
"nMax",
">=",
"2",
",",
"\"number of basis functions cannot be lower than 2. nMax={}\"",
".",
"format",
"(",
"nMax",
")",
"assert",
"nMax",
"<=",
"13",
",",
"\"number of basis functions cannot exceed 12. nMax={}\"",
".",
"format",
"(",
"nMax",
")",
"assert",
"eta",
">=",
"0.0001",
",",
"\"Eta cannot be zero or negative. nMax={}\"",
".",
"format",
"(",
"eta",
")",
"# get clusgeo internal format for c-code",
"Apos",
",",
"typeNs",
",",
"py_Ntypes",
",",
"atomtype_lst",
",",
"totalAN",
"=",
"_format_ase2clusgeo",
"(",
"obj",
",",
"all_atomtypes",
")",
"Hpos",
"=",
"np",
".",
"array",
"(",
"Hpos",
")",
"py_Hsize",
"=",
"Hpos",
".",
"shape",
"[",
"0",
"]",
"# flatten arrays",
"Hpos",
"=",
"Hpos",
".",
"flatten",
"(",
")",
"alp",
"=",
"alp",
".",
"flatten",
"(",
")",
"bet",
"=",
"bet",
".",
"flatten",
"(",
")",
"# convert int to c_int",
"lMax",
"=",
"c_int",
"(",
"Lmax",
")",
"Hsize",
"=",
"c_int",
"(",
"py_Hsize",
")",
"Ntypes",
"=",
"c_int",
"(",
"py_Ntypes",
")",
"totalAN",
"=",
"c_int",
"(",
"totalAN",
")",
"rCutHard",
"=",
"c_double",
"(",
"rCutHard",
")",
"Nsize",
"=",
"c_int",
"(",
"nMax",
")",
"c_eta",
"=",
"c_double",
"(",
"eta",
")",
"#convert int array to c_int array",
"typeNs",
"=",
"(",
"c_int",
"*",
"len",
"(",
"typeNs",
")",
")",
"(",
"*",
"typeNs",
")",
"# convert to c_double arrays",
"# alphas",
"alphas",
"=",
"(",
"c_double",
"*",
"len",
"(",
"alp",
")",
")",
"(",
"*",
"alp",
".",
"tolist",
"(",
")",
")",
"# betas",
"betas",
"=",
"(",
"c_double",
"*",
"len",
"(",
"bet",
")",
")",
"(",
"*",
"bet",
".",
"tolist",
"(",
")",
")",
"#Apos",
"axyz",
"=",
"(",
"c_double",
"*",
"len",
"(",
"Apos",
")",
")",
"(",
"*",
"Apos",
".",
"tolist",
"(",
")",
")",
"#Hpos",
"hxyz",
"=",
"(",
"c_double",
"*",
"len",
"(",
"Hpos",
")",
")",
"(",
"*",
"Hpos",
".",
"tolist",
"(",
")",
")",
"### START SOAP###",
"#path_to_so = os.path.dirname(os.path.abspath(__file__))",
"_PATH_TO_SOAPLITE_SO",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"__file__",
")",
")",
"_SOAPLITE_SOFILES",
"=",
"glob",
".",
"glob",
"(",
"\"\"",
".",
"join",
"(",
"[",
"_PATH_TO_SOAPLITE_SO",
",",
"\"/../lib/libsoap*.*so\"",
"]",
")",
")",
"## NOT SURE ABOUT THIS",
"if",
"py_Ntypes",
"==",
"1",
"or",
"(",
"not",
"crossOver",
")",
":",
"substring",
"=",
"\"lib/libsoapPySig.\"",
"libsoap",
"=",
"CDLL",
"(",
"next",
"(",
"(",
"s",
"for",
"s",
"in",
"_SOAPLITE_SOFILES",
"if",
"substring",
"in",
"s",
")",
",",
"None",
")",
")",
"libsoap",
".",
"soap",
".",
"argtypes",
"=",
"[",
"POINTER",
"(",
"c_double",
")",
",",
"POINTER",
"(",
"c_double",
")",
",",
"POINTER",
"(",
"c_double",
")",
",",
"POINTER",
"(",
"c_double",
")",
",",
"POINTER",
"(",
"c_double",
")",
",",
"POINTER",
"(",
"c_int",
")",
",",
"c_double",
",",
"c_int",
",",
"c_int",
",",
"c_int",
",",
"c_int",
",",
"c_int",
",",
"c_double",
"]",
"libsoap",
".",
"soap",
".",
"restype",
"=",
"POINTER",
"(",
"c_double",
")",
"c",
"=",
"(",
"c_double",
"*",
"(",
"int",
"(",
"(",
"nMax",
"*",
"(",
"nMax",
"+",
"1",
")",
")",
"/",
"2",
")",
"*",
"(",
"Lmax",
"+",
"1",
")",
"*",
"py_Ntypes",
"*",
"py_Hsize",
")",
")",
"(",
")",
"libsoap",
".",
"soap",
"(",
"c",
",",
"axyz",
",",
"hxyz",
",",
"alphas",
",",
"betas",
",",
"typeNs",
",",
"rCutHard",
",",
"totalAN",
",",
"Ntypes",
",",
"Nsize",
",",
"lMax",
",",
"Hsize",
",",
"c_eta",
")",
"else",
":",
"substring",
"=",
"\"lib/libsoapGTO.\"",
"libsoapGTO",
"=",
"CDLL",
"(",
"next",
"(",
"(",
"s",
"for",
"s",
"in",
"_SOAPLITE_SOFILES",
"if",
"substring",
"in",
"s",
")",
",",
"None",
")",
")",
"libsoapGTO",
".",
"soap",
".",
"argtypes",
"=",
"[",
"POINTER",
"(",
"c_double",
")",
",",
"POINTER",
"(",
"c_double",
")",
",",
"POINTER",
"(",
"c_double",
")",
",",
"POINTER",
"(",
"c_double",
")",
",",
"POINTER",
"(",
"c_double",
")",
",",
"POINTER",
"(",
"c_int",
")",
",",
"c_double",
",",
"c_int",
",",
"c_int",
",",
"c_int",
",",
"c_int",
",",
"c_int",
",",
"c_double",
"]",
"libsoapGTO",
".",
"soap",
".",
"restype",
"=",
"POINTER",
"(",
"c_double",
")",
"c",
"=",
"(",
"c_double",
"*",
"(",
"int",
"(",
"(",
"nMax",
"*",
"(",
"nMax",
"+",
"1",
")",
")",
"/",
"2",
")",
"*",
"(",
"Lmax",
"+",
"1",
")",
"*",
"int",
"(",
"(",
"py_Ntypes",
"*",
"(",
"py_Ntypes",
"+",
"1",
")",
")",
"/",
"2",
")",
"*",
"py_Hsize",
")",
")",
"(",
")",
"libsoapGTO",
".",
"soap",
"(",
"c",
",",
"axyz",
",",
"hxyz",
",",
"alphas",
",",
"betas",
",",
"typeNs",
",",
"rCutHard",
",",
"totalAN",
",",
"Ntypes",
",",
"Nsize",
",",
"lMax",
",",
"Hsize",
",",
"c_eta",
")",
"# return c;",
"if",
"crossOver",
":",
"crosTypes",
"=",
"int",
"(",
"(",
"py_Ntypes",
"*",
"(",
"py_Ntypes",
"+",
"1",
")",
")",
"/",
"2",
")",
"shape",
"=",
"(",
"py_Hsize",
",",
"int",
"(",
"(",
"nMax",
"*",
"(",
"nMax",
"+",
"1",
")",
")",
"/",
"2",
")",
"*",
"(",
"Lmax",
"+",
"1",
")",
"*",
"crosTypes",
")",
"else",
":",
"shape",
"=",
"(",
"py_Hsize",
",",
"int",
"(",
"(",
"nMax",
"*",
"(",
"nMax",
"+",
"1",
")",
")",
"/",
"2",
")",
"*",
"(",
"Lmax",
"+",
"1",
")",
"*",
"py_Ntypes",
")",
"a",
"=",
"np",
".",
"ctypeslib",
".",
"as_array",
"(",
"c",
")",
"a",
"=",
"a",
".",
"reshape",
"(",
"shape",
")",
"return",
"a"
] |
Get the RBF basis SOAP output for the given positions in a finite system.
Args:
obj(ase.Atoms): Atomic structure for which the SOAP output is
calculated.
Hpos: Positions at which to calculate SOAP
alp: Alphas
bet: Betas
rCut: Radial cutoff.
nMax: Maximum number of radial basis functions
Lmax: Maximum spherical harmonics degree
crossOver:
    all_atomtypes: Can be used to specify the atomic elements for which to
        calculate the output. If given, the output is calculated only for the
        given species and is ordered by atomic number.
    eta: The Gaussian smearing width.
Returns:
np.ndarray: SOAP output for the given positions.
|
[
"Get",
"the",
"RBF",
"basis",
"SOAP",
"output",
"for",
"the",
"given",
"positions",
"in",
"a",
"finite",
"system",
"."
] |
python
|
train
| 45.4 |
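A minimal usage sketch for the get_soap_locals record above. Only the function signature comes from the record; the top-level soaplite import, the genBasis helper used to build alp/bet, and the ASE test molecule are assumptions.

import soaplite
from soaplite import genBasis          # assumed helper module for fitting the radial basis
from ase.build import molecule

atoms = molecule("H2O")                                # small finite system
Hpos = [[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]]              # points where the SOAP output is evaluated
alp, bet = genBasis.getBasisFunc(5.0, 5)               # assumed: alphas/betas for rCut=5.0, nMax=5
desc = soaplite.get_soap_locals(atoms, Hpos, alp, bet, rCut=5.0, nMax=5, Lmax=5)
print(desc.shape)   # (len(Hpos), nMax*(nMax+1)/2 * (Lmax+1) * n_species_pairs)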
codelv/enaml-native
|
src/enamlnative/android/android_fragment.py
|
https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/android/android_fragment.py#L117-L131
|
def on_create_view(self):
""" Trigger the click
"""
d = self.declaration
changed = not d.condition
if changed:
d.condition = True
view = self.get_view()
if changed:
self.ready.set_result(True)
return view
|
[
"def",
"on_create_view",
"(",
"self",
")",
":",
"d",
"=",
"self",
".",
"declaration",
"changed",
"=",
"not",
"d",
".",
"condition",
"if",
"changed",
":",
"d",
".",
"condition",
"=",
"True",
"view",
"=",
"self",
".",
"get_view",
"(",
")",
"if",
"changed",
":",
"self",
".",
"ready",
".",
"set_result",
"(",
"True",
")",
"return",
"view"
] |
Trigger the click
|
[
"Trigger",
"the",
"click"
] |
python
|
train
| 18.8 |
square/pylink
|
setup.py
|
https://github.com/square/pylink/blob/81dda0a191d923a8b2627c52cb778aba24d279d7/setup.py#L111-L121
|
def finalize_options(self):
"""Finalizes the command's options.
Args:
self (CoverageCommand): the ``CoverageCommand`` instance
Returns:
``None``
"""
self.cwd = os.path.abspath(os.path.dirname(__file__))
self.test_dir = os.path.join(self.cwd, 'tests')
|
[
"def",
"finalize_options",
"(",
"self",
")",
":",
"self",
".",
"cwd",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
")",
"self",
".",
"test_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"cwd",
",",
"'tests'",
")"
] |
Finalizes the command's options.
Args:
self (CoverageCommand): the ``CoverageCommand`` instance
Returns:
``None``
|
[
"Finalizes",
"the",
"command",
"s",
"options",
"."
] |
python
|
train
| 28.181818 |
noxdafox/clipspy
|
clips/classes.py
|
https://github.com/noxdafox/clipspy/blob/b22d71a6da821c1715d8fa00d7d75cabc09ed364/clips/classes.py#L451-L453
|
def writable(self):
"""True if the Slot is writable."""
return bool(lib.EnvSlotWritableP(self._env, self._cls, self._name))
|
[
"def",
"writable",
"(",
"self",
")",
":",
"return",
"bool",
"(",
"lib",
".",
"EnvSlotWritableP",
"(",
"self",
".",
"_env",
",",
"self",
".",
"_cls",
",",
"self",
".",
"_name",
")",
")"
] |
True if the Slot is writable.
|
[
"True",
"if",
"the",
"Slot",
"is",
"writable",
"."
] |
python
|
train
| 45.666667 |
CellProfiler/centrosome
|
centrosome/threshold.py
|
https://github.com/CellProfiler/centrosome/blob/7bd9350a2d4ae1b215b81eabcecfe560bbb1f32a/centrosome/threshold.py#L180-L247
|
def get_adaptive_threshold(threshold_method, image, threshold,
mask = None,
adaptive_window_size = 10,
**kwargs):
"""Given a global threshold, compute a threshold per pixel
Break the image into blocks, computing the threshold per block.
Afterwards, constrain the block threshold to .7 T < t < 1.5 T.
Block sizes must be at least 50x50. Images > 500 x 500 get 10x10
blocks.
"""
# for the X and Y direction, find the # of blocks, given the
# size constraints
image_size = np.array(image.shape[:2],dtype=int)
nblocks = image_size // adaptive_window_size
#
# Use a floating point block size to apportion the roundoff
# roughly equally to each block
#
increment = ( np.array(image_size,dtype=float) /
np.array(nblocks,dtype=float))
#
# Put the answer here
#
thresh_out = np.zeros(image_size, image.dtype)
#
# Loop once per block, computing the "global" threshold within the
# block.
#
block_threshold = np.zeros([nblocks[0],nblocks[1]])
for i in range(nblocks[0]):
i0 = int(i*increment[0])
i1 = int((i+1)*increment[0])
for j in range(nblocks[1]):
j0 = int(j*increment[1])
j1 = int((j+1)*increment[1])
block = image[i0:i1,j0:j1]
block_mask = None if mask is None else mask[i0:i1,j0:j1]
block_threshold[i,j] = get_global_threshold(
threshold_method,
block, mask = block_mask,
**kwargs)
#
# Use a cubic spline to blend the thresholds across the image to avoid image artifacts
#
spline_order = min(3, np.min(nblocks) - 1)
xStart = int(increment[0] / 2)
xEnd = int((nblocks[0] - 0.5) * increment[0])
yStart = int(increment[1] / 2)
yEnd = int((nblocks[1] - 0.5) * increment[1])
xtStart = .5
xtEnd = image.shape[0] - .5
ytStart = .5
ytEnd = image.shape[1] - .5
block_x_coords = np.linspace(xStart,xEnd, nblocks[0])
block_y_coords = np.linspace(yStart,yEnd, nblocks[1])
adaptive_interpolation = scipy.interpolate.RectBivariateSpline(
block_x_coords, block_y_coords, block_threshold,
bbox = (xtStart, xtEnd, ytStart, ytEnd),
kx = spline_order, ky = spline_order)
thresh_out_x_coords = np.linspace(.5, int(nblocks[0] * increment[0]) - .5, thresh_out.shape[0])
thresh_out_y_coords = np.linspace(.5, int(nblocks[1] * increment[1]) - .5 , thresh_out.shape[1])
thresh_out = adaptive_interpolation(thresh_out_x_coords, thresh_out_y_coords)
return thresh_out
|
[
"def",
"get_adaptive_threshold",
"(",
"threshold_method",
",",
"image",
",",
"threshold",
",",
"mask",
"=",
"None",
",",
"adaptive_window_size",
"=",
"10",
",",
"*",
"*",
"kwargs",
")",
":",
"# for the X and Y direction, find the # of blocks, given the",
"# size constraints",
"image_size",
"=",
"np",
".",
"array",
"(",
"image",
".",
"shape",
"[",
":",
"2",
"]",
",",
"dtype",
"=",
"int",
")",
"nblocks",
"=",
"image_size",
"//",
"adaptive_window_size",
"#",
"# Use a floating point block size to apportion the roundoff",
"# roughly equally to each block",
"#",
"increment",
"=",
"(",
"np",
".",
"array",
"(",
"image_size",
",",
"dtype",
"=",
"float",
")",
"/",
"np",
".",
"array",
"(",
"nblocks",
",",
"dtype",
"=",
"float",
")",
")",
"#",
"# Put the answer here",
"#",
"thresh_out",
"=",
"np",
".",
"zeros",
"(",
"image_size",
",",
"image",
".",
"dtype",
")",
"#",
"# Loop once per block, computing the \"global\" threshold within the",
"# block.",
"#",
"block_threshold",
"=",
"np",
".",
"zeros",
"(",
"[",
"nblocks",
"[",
"0",
"]",
",",
"nblocks",
"[",
"1",
"]",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"nblocks",
"[",
"0",
"]",
")",
":",
"i0",
"=",
"int",
"(",
"i",
"*",
"increment",
"[",
"0",
"]",
")",
"i1",
"=",
"int",
"(",
"(",
"i",
"+",
"1",
")",
"*",
"increment",
"[",
"0",
"]",
")",
"for",
"j",
"in",
"range",
"(",
"nblocks",
"[",
"1",
"]",
")",
":",
"j0",
"=",
"int",
"(",
"j",
"*",
"increment",
"[",
"1",
"]",
")",
"j1",
"=",
"int",
"(",
"(",
"j",
"+",
"1",
")",
"*",
"increment",
"[",
"1",
"]",
")",
"block",
"=",
"image",
"[",
"i0",
":",
"i1",
",",
"j0",
":",
"j1",
"]",
"block_mask",
"=",
"None",
"if",
"mask",
"is",
"None",
"else",
"mask",
"[",
"i0",
":",
"i1",
",",
"j0",
":",
"j1",
"]",
"block_threshold",
"[",
"i",
",",
"j",
"]",
"=",
"get_global_threshold",
"(",
"threshold_method",
",",
"block",
",",
"mask",
"=",
"block_mask",
",",
"*",
"*",
"kwargs",
")",
"#",
"# Use a cubic spline to blend the thresholds across the image to avoid image artifacts",
"#",
"spline_order",
"=",
"min",
"(",
"3",
",",
"np",
".",
"min",
"(",
"nblocks",
")",
"-",
"1",
")",
"xStart",
"=",
"int",
"(",
"increment",
"[",
"0",
"]",
"/",
"2",
")",
"xEnd",
"=",
"int",
"(",
"(",
"nblocks",
"[",
"0",
"]",
"-",
"0.5",
")",
"*",
"increment",
"[",
"0",
"]",
")",
"yStart",
"=",
"int",
"(",
"increment",
"[",
"1",
"]",
"/",
"2",
")",
"yEnd",
"=",
"int",
"(",
"(",
"nblocks",
"[",
"1",
"]",
"-",
"0.5",
")",
"*",
"increment",
"[",
"1",
"]",
")",
"xtStart",
"=",
".5",
"xtEnd",
"=",
"image",
".",
"shape",
"[",
"0",
"]",
"-",
".5",
"ytStart",
"=",
".5",
"ytEnd",
"=",
"image",
".",
"shape",
"[",
"1",
"]",
"-",
".5",
"block_x_coords",
"=",
"np",
".",
"linspace",
"(",
"xStart",
",",
"xEnd",
",",
"nblocks",
"[",
"0",
"]",
")",
"block_y_coords",
"=",
"np",
".",
"linspace",
"(",
"yStart",
",",
"yEnd",
",",
"nblocks",
"[",
"1",
"]",
")",
"adaptive_interpolation",
"=",
"scipy",
".",
"interpolate",
".",
"RectBivariateSpline",
"(",
"block_x_coords",
",",
"block_y_coords",
",",
"block_threshold",
",",
"bbox",
"=",
"(",
"xtStart",
",",
"xtEnd",
",",
"ytStart",
",",
"ytEnd",
")",
",",
"kx",
"=",
"spline_order",
",",
"ky",
"=",
"spline_order",
")",
"thresh_out_x_coords",
"=",
"np",
".",
"linspace",
"(",
".5",
",",
"int",
"(",
"nblocks",
"[",
"0",
"]",
"*",
"increment",
"[",
"0",
"]",
")",
"-",
".5",
",",
"thresh_out",
".",
"shape",
"[",
"0",
"]",
")",
"thresh_out_y_coords",
"=",
"np",
".",
"linspace",
"(",
".5",
",",
"int",
"(",
"nblocks",
"[",
"1",
"]",
"*",
"increment",
"[",
"1",
"]",
")",
"-",
".5",
",",
"thresh_out",
".",
"shape",
"[",
"1",
"]",
")",
"thresh_out",
"=",
"adaptive_interpolation",
"(",
"thresh_out_x_coords",
",",
"thresh_out_y_coords",
")",
"return",
"thresh_out"
] |
Given a global threshold, compute a threshold per pixel
Break the image into blocks, computing the threshold per block.
Afterwards, constrain the block threshold to .7 T < t < 1.5 T.
Block sizes must be at least 50x50. Images > 500 x 500 get 10x10
blocks.
|
[
"Given",
"a",
"global",
"threshold",
"compute",
"a",
"threshold",
"per",
"pixel",
"Break",
"the",
"image",
"into",
"blocks",
"computing",
"the",
"threshold",
"per",
"block",
".",
"Afterwards",
"constrain",
"the",
"block",
"threshold",
"to",
".",
"7",
"T",
"<",
"t",
"<",
"1",
".",
"5",
"T",
".",
"Block",
"sizes",
"must",
"be",
"at",
"least",
"50x50",
".",
"Images",
">",
"500",
"x",
"500",
"get",
"10x10",
"blocks",
"."
] |
python
|
train
| 38.588235 |
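A hedged sketch of applying get_adaptive_threshold from the record above to a synthetic image. The signature is taken from the record; the TM_OTSU constant name is an assumption about the module's threshold-method identifiers.

import numpy as np
from centrosome.threshold import get_global_threshold, get_adaptive_threshold, TM_OTSU  # TM_OTSU assumed

rng = np.random.RandomState(0)
image = rng.uniform(size=(200, 200))                      # synthetic grayscale image in [0, 1]
t_global = get_global_threshold(TM_OTSU, image)           # one threshold for the whole image
t_local = get_adaptive_threshold(TM_OTSU, image, t_global, adaptive_window_size=10)
binary = image > t_local                                  # per-pixel thresholding with the blended map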
riga/law
|
law/cli/config.py
|
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/cli/config.py#L55-L71
|
def get_config(name, expand=False):
"""
Returns the config value that corresponds to *name*, which must have the format
``<section>[.<option>]``. When an option is given and *expand* is *True*, variables are expanded
in the returned value.
"""
cfg = Config.instance()
only_section = "." not in name
# when only the section is given, print all keys
if only_section:
return "\n".join(cfg.keys(name))
else:
section, option = name.split(".", 1)
func = cfg.get_expanded if expand else cfg.get
return func(section, option)
|
[
"def",
"get_config",
"(",
"name",
",",
"expand",
"=",
"False",
")",
":",
"cfg",
"=",
"Config",
".",
"instance",
"(",
")",
"only_section",
"=",
"\".\"",
"not",
"in",
"name",
"# when only the section is given, print all keys",
"if",
"only_section",
":",
"return",
"\"\\n\"",
".",
"join",
"(",
"cfg",
".",
"keys",
"(",
"name",
")",
")",
"else",
":",
"section",
",",
"option",
"=",
"name",
".",
"split",
"(",
"\".\"",
",",
"1",
")",
"func",
"=",
"cfg",
".",
"get_expanded",
"if",
"expand",
"else",
"cfg",
".",
"get",
"return",
"func",
"(",
"section",
",",
"option",
")"
] |
Returns the config value that corresponds to *name*, which must have the format
``<section>[.<option>]``. When an option is given and *expand* is *True*, variables are expanded
in the returned value.
|
[
"Returns",
"the",
"config",
"value",
"that",
"corresponds",
"to",
"*",
"name",
"*",
"which",
"must",
"have",
"the",
"format",
"<section",
">",
"[",
".",
"<option",
">",
"]",
".",
"When",
"an",
"option",
"is",
"given",
"and",
"*",
"expand",
"*",
"is",
"*",
"True",
"*",
"variables",
"are",
"expanded",
"in",
"the",
"returned",
"value",
"."
] |
python
|
train
| 33.764706 |
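A short sketch of calling get_config from the record above outside the CLI. The section and option names are hypothetical and depend on the law config file actually present.

from law.cli.config import get_config

print(get_config("target"))                              # hypothetical section: prints all of its keys
print(get_config("target.default_store", expand=True))   # hypothetical option: value with variables expanded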
mkoura/dump2polarion
|
dump2polarion/properties.py
|
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/properties.py#L181-L190
|
def set_dry_run(xml_root, value=True):
"""Sets dry-run so records are not updated, only log file is produced."""
value_str = str(value).lower()
assert value_str in ("true", "false")
if xml_root.tag == "testsuites":
_set_property(xml_root, "polarion-dry-run", value_str)
elif xml_root.tag in ("testcases", "requirements"):
_set_property(xml_root, "dry-run", value_str)
else:
raise Dump2PolarionException(_NOT_EXPECTED_FORMAT_MSG)
|
[
"def",
"set_dry_run",
"(",
"xml_root",
",",
"value",
"=",
"True",
")",
":",
"value_str",
"=",
"str",
"(",
"value",
")",
".",
"lower",
"(",
")",
"assert",
"value_str",
"in",
"(",
"\"true\"",
",",
"\"false\"",
")",
"if",
"xml_root",
".",
"tag",
"==",
"\"testsuites\"",
":",
"_set_property",
"(",
"xml_root",
",",
"\"polarion-dry-run\"",
",",
"value_str",
")",
"elif",
"xml_root",
".",
"tag",
"in",
"(",
"\"testcases\"",
",",
"\"requirements\"",
")",
":",
"_set_property",
"(",
"xml_root",
",",
"\"dry-run\"",
",",
"value_str",
")",
"else",
":",
"raise",
"Dump2PolarionException",
"(",
"_NOT_EXPECTED_FORMAT_MSG",
")"
] |
Sets dry-run so records are not updated, only log file is produced.
|
[
"Sets",
"dry",
"-",
"run",
"so",
"records",
"are",
"not",
"updated",
"only",
"log",
"file",
"is",
"produced",
"."
] |
python
|
train
| 46.7 |
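A sketch of toggling dry-run on an XML import file with set_dry_run from the record above. The minimal testsuites document is an assumption about what the helper expects; real input would come from a generated xunit/testcase file.

from lxml import etree
from dump2polarion.properties import set_dry_run

xml_root = etree.fromstring(b"<testsuites><properties/></testsuites>")  # assumed minimal input
set_dry_run(xml_root, value=True)        # records "polarion-dry-run" = "true" for a testsuites root
print(etree.tostring(xml_root, pretty_print=True).decode())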
moralrecordings/mrcrowbar
|
mrcrowbar/utils.py
|
https://github.com/moralrecordings/mrcrowbar/blob/b1ed882c4555552e7656b2d84aca543184577fa3/mrcrowbar/utils.py#L376-L382
|
def unpack_bits( byte ):
"""Expand a bitfield into a 64-bit int (8 bool bytes)."""
longbits = byte & (0x00000000000000ff)
longbits = (longbits | (longbits<<28)) & (0x0000000f0000000f)
longbits = (longbits | (longbits<<14)) & (0x0003000300030003)
longbits = (longbits | (longbits<<7)) & (0x0101010101010101)
return longbits
|
[
"def",
"unpack_bits",
"(",
"byte",
")",
":",
"longbits",
"=",
"byte",
"&",
"(",
"0x00000000000000ff",
")",
"longbits",
"=",
"(",
"longbits",
"|",
"(",
"longbits",
"<<",
"28",
")",
")",
"&",
"(",
"0x0000000f0000000f",
")",
"longbits",
"=",
"(",
"longbits",
"|",
"(",
"longbits",
"<<",
"14",
")",
")",
"&",
"(",
"0x0003000300030003",
")",
"longbits",
"=",
"(",
"longbits",
"|",
"(",
"longbits",
"<<",
"7",
")",
")",
"&",
"(",
"0x0101010101010101",
")",
"return",
"longbits"
] |
Expand a bitfield into a 64-bit int (8 bool bytes).
|
[
"Expand",
"a",
"bitfield",
"into",
"a",
"64",
"-",
"bit",
"int",
"(",
"8",
"bool",
"bytes",
")",
"."
] |
python
|
train
| 48.571429 |
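A worked example of the bit-spreading in unpack_bits above: after the three mask-and-shift steps, bit i of the input byte ends up as byte i of the 64-bit result (each output byte is 0x00 or 0x01).

from mrcrowbar.utils import unpack_bits

result = unpack_bits(0b10110001)
assert result == 0x0100010100000001   # bytes 7..0 mirror bits 7..0 of the input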
SUSE-Enceladus/ipa
|
ipa/ipa_azure.py
|
https://github.com/SUSE-Enceladus/ipa/blob/0845eed0ea25a27dbb059ad1016105fa60002228/ipa/ipa_azure.py#L200-L213
|
def _create_resource_group(self, region, resource_group_name):
"""
Create resource group if it does not exist.
"""
resource_group_config = {'location': region}
try:
self.resource.resource_groups.create_or_update(
resource_group_name, resource_group_config
)
except Exception as error:
raise AzureCloudException(
'Unable to create resource group: {0}.'.format(error)
)
|
[
"def",
"_create_resource_group",
"(",
"self",
",",
"region",
",",
"resource_group_name",
")",
":",
"resource_group_config",
"=",
"{",
"'location'",
":",
"region",
"}",
"try",
":",
"self",
".",
"resource",
".",
"resource_groups",
".",
"create_or_update",
"(",
"resource_group_name",
",",
"resource_group_config",
")",
"except",
"Exception",
"as",
"error",
":",
"raise",
"AzureCloudException",
"(",
"'Unable to create resource group: {0}.'",
".",
"format",
"(",
"error",
")",
")"
] |
Create resource group if it does not exist.
|
[
"Create",
"resource",
"group",
"if",
"it",
"does",
"not",
"exist",
"."
] |
python
|
train
| 34.5 |
QualiSystems/vCenterShell
|
package/cloudshell/cp/vcenter/network/vnic/vnic_service.py
|
https://github.com/QualiSystems/vCenterShell/blob/e2e24cd938a92a68f4a8e6a860810d3ef72aae6d/package/cloudshell/cp/vcenter/network/vnic/vnic_service.py#L200-L215
|
def vnic_attached_to_network(nicspec, network, logger):
"""
Attach vNIC to Network.
:param nicspec: <vim.vm.device.VirtualDeviceSpec>
:param network: <vim network obj>
:return: updated 'nicspec'
"""
if nicspec:
if network_is_portgroup(network):
return VNicService.vnic_attach_to_network_distributed(nicspec, network,
logger=logger)
elif network_is_standard(network):
return VNicService.vnic_attach_to_network_standard(nicspec, network,
logger=logger)
return None
|
[
"def",
"vnic_attached_to_network",
"(",
"nicspec",
",",
"network",
",",
"logger",
")",
":",
"if",
"nicspec",
":",
"if",
"network_is_portgroup",
"(",
"network",
")",
":",
"return",
"VNicService",
".",
"vnic_attach_to_network_distributed",
"(",
"nicspec",
",",
"network",
",",
"logger",
"=",
"logger",
")",
"elif",
"network_is_standard",
"(",
"network",
")",
":",
"return",
"VNicService",
".",
"vnic_attach_to_network_standard",
"(",
"nicspec",
",",
"network",
",",
"logger",
"=",
"logger",
")",
"return",
"None"
] |
Attach vNIC to Network.
:param nicspec: <vim.vm.device.VirtualDeviceSpec>
:param network: <vim network obj>
:return: updated 'nicspec'
|
[
"Attach",
"vNIC",
"to",
"Network",
".",
":",
"param",
"nicspec",
":",
"<vim",
".",
"vm",
".",
"device",
".",
"VirtualDeviceSpec",
">",
":",
"param",
"network",
":",
"<vim",
"network",
"obj",
">",
":",
"return",
":",
"updated",
"nicspec"
] |
python
|
train
| 44.0625 |
pymoca/pymoca
|
src/pymoca/backends/xml/generator.py
|
https://github.com/pymoca/pymoca/blob/14b5eb7425e96689de6cc5c10f400895d586a978/src/pymoca/backends/xml/generator.py#L144-L156
|
def generate(ast_tree: ast.Tree, model_name: str):
"""
:param ast_tree: AST to generate from
:param model_name: class to generate
:return: sympy source code for model
"""
component_ref = ast.ComponentRef.from_string(model_name)
ast_tree_new = copy.deepcopy(ast_tree)
ast_walker = TreeWalker()
flat_tree = flatten(ast_tree_new, component_ref)
gen = XmlGenerator()
ast_walker.walk(gen, flat_tree)
return etree.tostring(gen.xml[flat_tree], pretty_print=True).decode('utf-8')
|
[
"def",
"generate",
"(",
"ast_tree",
":",
"ast",
".",
"Tree",
",",
"model_name",
":",
"str",
")",
":",
"component_ref",
"=",
"ast",
".",
"ComponentRef",
".",
"from_string",
"(",
"model_name",
")",
"ast_tree_new",
"=",
"copy",
".",
"deepcopy",
"(",
"ast_tree",
")",
"ast_walker",
"=",
"TreeWalker",
"(",
")",
"flat_tree",
"=",
"flatten",
"(",
"ast_tree_new",
",",
"component_ref",
")",
"gen",
"=",
"XmlGenerator",
"(",
")",
"ast_walker",
".",
"walk",
"(",
"gen",
",",
"flat_tree",
")",
"return",
"etree",
".",
"tostring",
"(",
"gen",
".",
"xml",
"[",
"flat_tree",
"]",
",",
"pretty_print",
"=",
"True",
")",
".",
"decode",
"(",
"'utf-8'",
")"
] |
:param ast_tree: AST to generate from
:param model_name: class to generate
:return: sympy source code for model
|
[
":",
"param",
"ast_tree",
":",
"AST",
"to",
"generate",
"from",
":",
"param",
"model_name",
":",
"class",
"to",
"generate",
":",
"return",
":",
"sympy",
"source",
"code",
"for",
"model"
] |
python
|
train
| 39 |
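A usage sketch for generate from the record above. The Modelica file name and model name are illustrative, and pymoca.parser.parse is assumed to be the entry point that produces the ast.Tree argument.

from pymoca import parser                     # assumed parse entry point
from pymoca.backends.xml import generator

with open("Spring.mo") as f:                  # hypothetical Modelica source file
    ast_tree = parser.parse(f.read())
xml_source = generator.generate(ast_tree, "Spring")
print(xml_source)                             # flattened model serialized as XML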
shaded-enmity/docker-hica
|
injectors/introspect_runtime.py
|
https://github.com/shaded-enmity/docker-hica/blob/bc425586297e1eb228b70ee6fca8c499849ec87d/injectors/introspect_runtime.py#L81-L105
|
def inject_config(self, config, from_args):
"""
:param config:
:type config: list
:param from_args:
:type from_args: dict
"""
# First get required values from labelStore
runtime = self._get_runtime()
whitelist = self._get_whitelist()
#Run introspection on the libraries to retrieve list of libraries to link
found_libraries = self._run_introspection(runtime, whitelist, verbose=True)
container_path_set=set()
for library in found_libraries:
#disallow duplicate library targets
cpath = self.__get_container_path(library)
if cpath in container_path_set:
continue
container_path_set.add(cpath)
config.append('--volume={0}:{1}'.format(library, cpath))
config.extend(['-e', 'LD_LIBRARY_PATH={0}'.format(_container_lib_location)])
config.extend(['-e', 'LIBGL_DRIVERS_PATH={0}'.format(_container_lib_location)])
|
[
"def",
"inject_config",
"(",
"self",
",",
"config",
",",
"from_args",
")",
":",
"# First get required values from labelStore",
"runtime",
"=",
"self",
".",
"_get_runtime",
"(",
")",
"whitelist",
"=",
"self",
".",
"_get_whitelist",
"(",
")",
"#Run introspection on the libraries to retrieve list of libraries to link",
"found_libraries",
"=",
"self",
".",
"_run_introspection",
"(",
"runtime",
",",
"whitelist",
",",
"verbose",
"=",
"True",
")",
"container_path_set",
"=",
"set",
"(",
")",
"for",
"library",
"in",
"found_libraries",
":",
"#disallow duplicate library targets",
"cpath",
"=",
"self",
".",
"__get_container_path",
"(",
"library",
")",
"if",
"cpath",
"in",
"container_path_set",
":",
"continue",
"container_path_set",
".",
"add",
"(",
"cpath",
")",
"config",
".",
"append",
"(",
"'--volume={0}:{1}'",
".",
"format",
"(",
"library",
",",
"cpath",
")",
")",
"config",
".",
"extend",
"(",
"[",
"'-e'",
",",
"'LD_LIBRARY_PATH={0}'",
".",
"format",
"(",
"_container_lib_location",
")",
"]",
")",
"config",
".",
"extend",
"(",
"[",
"'-e'",
",",
"'LIBGL_DRIVERS_PATH={0}'",
".",
"format",
"(",
"_container_lib_location",
")",
"]",
")"
] |
:param config:
:type config: list
:param from_args:
:type from_args: dict
|
[
":",
"param",
"config",
":",
":",
"type",
"config",
":",
"list",
":",
"param",
"from_args",
":",
":",
"type",
"from_args",
":",
"dict"
] |
python
|
train
| 35.4 |
ray-project/ray
|
python/ray/tune/automlboard/backend/collector.py
|
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/automlboard/backend/collector.py#L62-L72
|
def init_logger(cls, log_level):
"""Initialize logger settings."""
logger = logging.getLogger("AutoMLBoard")
handler = logging.StreamHandler()
formatter = logging.Formatter("[%(levelname)s %(asctime)s] "
"%(filename)s: %(lineno)d "
"%(message)s")
handler.setFormatter(formatter)
logger.setLevel(log_level)
logger.addHandler(handler)
return logger
|
[
"def",
"init_logger",
"(",
"cls",
",",
"log_level",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"\"AutoMLBoard\"",
")",
"handler",
"=",
"logging",
".",
"StreamHandler",
"(",
")",
"formatter",
"=",
"logging",
".",
"Formatter",
"(",
"\"[%(levelname)s %(asctime)s] \"",
"\"%(filename)s: %(lineno)d \"",
"\"%(message)s\"",
")",
"handler",
".",
"setFormatter",
"(",
"formatter",
")",
"logger",
".",
"setLevel",
"(",
"log_level",
")",
"logger",
".",
"addHandler",
"(",
"handler",
")",
"return",
"logger"
] |
Initialize logger settings.
|
[
"Initialize",
"logger",
"settings",
"."
] |
python
|
train
| 43.363636 |
apache/incubator-mxnet
|
python/mxnet/gluon/model_zoo/vision/squeezenet.py
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/model_zoo/vision/squeezenet.py#L113-L137
|
def get_squeezenet(version, pretrained=False, ctx=cpu(),
root=os.path.join(base.data_dir(), 'models'), **kwargs):
r"""SqueezeNet model from the `"SqueezeNet: AlexNet-level accuracy with 50x fewer parameters
and <0.5MB model size" <https://arxiv.org/abs/1602.07360>`_ paper.
SqueezeNet 1.1 model from the `official SqueezeNet repo
<https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_.
SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
than SqueezeNet 1.0, without sacrificing accuracy.
Parameters
----------
version : str
Version of squeezenet. Options are '1.0', '1.1'.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default $MXNET_HOME/models
Location for keeping the model parameters.
"""
net = SqueezeNet(version, **kwargs)
if pretrained:
from ..model_store import get_model_file
net.load_parameters(get_model_file('squeezenet%s'%version, root=root), ctx=ctx)
return net
|
[
"def",
"get_squeezenet",
"(",
"version",
",",
"pretrained",
"=",
"False",
",",
"ctx",
"=",
"cpu",
"(",
")",
",",
"root",
"=",
"os",
".",
"path",
".",
"join",
"(",
"base",
".",
"data_dir",
"(",
")",
",",
"'models'",
")",
",",
"*",
"*",
"kwargs",
")",
":",
"net",
"=",
"SqueezeNet",
"(",
"version",
",",
"*",
"*",
"kwargs",
")",
"if",
"pretrained",
":",
"from",
".",
".",
"model_store",
"import",
"get_model_file",
"net",
".",
"load_parameters",
"(",
"get_model_file",
"(",
"'squeezenet%s'",
"%",
"version",
",",
"root",
"=",
"root",
")",
",",
"ctx",
"=",
"ctx",
")",
"return",
"net"
] |
r"""SqueezeNet model from the `"SqueezeNet: AlexNet-level accuracy with 50x fewer parameters
and <0.5MB model size" <https://arxiv.org/abs/1602.07360>`_ paper.
SqueezeNet 1.1 model from the `official SqueezeNet repo
<https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_.
SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
than SqueezeNet 1.0, without sacrificing accuracy.
Parameters
----------
version : str
Version of squeezenet. Options are '1.0', '1.1'.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default $MXNET_HOME/models
Location for keeping the model parameters.
|
[
"r",
"SqueezeNet",
"model",
"from",
"the",
"SqueezeNet",
":",
"AlexNet",
"-",
"level",
"accuracy",
"with",
"50x",
"fewer",
"parameters",
"and",
"<0",
".",
"5MB",
"model",
"size",
"<https",
":",
"//",
"arxiv",
".",
"org",
"/",
"abs",
"/",
"1602",
".",
"07360",
">",
"_",
"paper",
".",
"SqueezeNet",
"1",
".",
"1",
"model",
"from",
"the",
"official",
"SqueezeNet",
"repo",
"<https",
":",
"//",
"github",
".",
"com",
"/",
"DeepScale",
"/",
"SqueezeNet",
"/",
"tree",
"/",
"master",
"/",
"SqueezeNet_v1",
".",
"1",
">",
"_",
".",
"SqueezeNet",
"1",
".",
"1",
"has",
"2",
".",
"4x",
"less",
"computation",
"and",
"slightly",
"fewer",
"parameters",
"than",
"SqueezeNet",
"1",
".",
"0",
"without",
"sacrificing",
"accuracy",
"."
] |
python
|
train
| 45.96 |
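A quick sketch of using get_squeezenet from the record above; downloading the pretrained weights needs network access, and the input shape follows the usual ImageNet convention.

import mxnet as mx
from mxnet.gluon.model_zoo.vision.squeezenet import get_squeezenet

net = get_squeezenet('1.1', pretrained=True, ctx=mx.cpu())
x = mx.nd.random.uniform(shape=(1, 3, 224, 224))   # one ImageNet-sized RGB image
print(net(x).shape)                                # (1, 1000) class scores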
msiemens/tinydb
|
tinydb/queries.py
|
https://github.com/msiemens/tinydb/blob/10052cb1ae6a3682d26eb4272c44e3b020aa5877/tinydb/queries.py#L277-L289
|
def search(self, regex, flags=0):
"""
Run a regex test against a dict value (only substring string has to
match).
>>> Query().f1.search(r'^\w+$')
:param regex: The regular expression to use for matching
"""
return self._generate_test(
lambda value: re.search(regex, value, flags),
('search', self._path, regex)
)
|
[
"def",
"search",
"(",
"self",
",",
"regex",
",",
"flags",
"=",
"0",
")",
":",
"return",
"self",
".",
"_generate_test",
"(",
"lambda",
"value",
":",
"re",
".",
"search",
"(",
"regex",
",",
"value",
",",
"flags",
")",
",",
"(",
"'search'",
",",
"self",
".",
"_path",
",",
"regex",
")",
")"
] |
Run a regex test against a dict value (only substring string has to
match).
>>> Query().f1.search(r'^\w+$')
:param regex: The regular expression to use for matching
|
[
"Run",
"a",
"regex",
"test",
"against",
"a",
"dict",
"value",
"(",
"only",
"substring",
"string",
"has",
"to",
"match",
")",
"."
] |
python
|
train
| 30 |
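A self-contained sketch of the Query.search regex test from the record above, using in-memory storage so no file is touched.

from tinydb import TinyDB, Query
from tinydb.storages import MemoryStorage

db = TinyDB(storage=MemoryStorage)
db.insert({'f1': 'hello'})
db.insert({'f1': 'hello world'})
User = Query()
print(db.search(User.f1.search(r'^\w+$')))   # only {'f1': 'hello'} matches the single-word pattern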
linkhub-sdk/popbill.py
|
popbill/messageService.py
|
https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/messageService.py#L524-L540
|
def getStates(self, Corpnum, reciptNumList, UserID=None):
""" 전송내역 요약정보 확인
args
CorpNum : 팝빌회원 사업자번호
reciptNumList : 문자전송 접수번호 배열
UserID : 팝빌회원 아이디
return
전송정보 as list
raise
PopbillException
"""
if reciptNumList == None or len(reciptNumList) < 1:
raise PopbillException(-99999999, "접수번호가 입력되지 않았습니다.")
postData = self._stringtify(reciptNumList)
return self._httppost('/Message/States', postData, Corpnum, UserID)
|
[
"def",
"getStates",
"(",
"self",
",",
"Corpnum",
",",
"reciptNumList",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"reciptNumList",
"==",
"None",
"or",
"len",
"(",
"reciptNumList",
")",
"<",
"1",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"접수번호가 입력되지 않았습니다.\")\r",
"",
"postData",
"=",
"self",
".",
"_stringtify",
"(",
"reciptNumList",
")",
"return",
"self",
".",
"_httppost",
"(",
"'/Message/States'",
",",
"postData",
",",
"Corpnum",
",",
"UserID",
")"
] |
전송내역 요약정보 확인
args
CorpNum : 팝빌회원 사업자번호
reciptNumList : 문자전송 접수번호 배열
UserID : 팝빌회원 아이디
return
전송정보 as list
raise
PopbillException
|
[
"전송내역",
"요약정보",
"확인",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"reciptNumList",
":",
"문자전송",
"접수번호",
"배열",
"UserID",
":",
"팝빌회원",
"아이디",
"return",
"전송정보",
"as",
"list",
"raise",
"PopbillException"
] |
python
|
train
| 34.235294 |
PagerDuty/pagerduty-api-python-client
|
pypd/models/integration.py
|
https://github.com/PagerDuty/pagerduty-api-python-client/blob/f420b34ca9b29689cc2ecc9adca6dc5d56ae7161/pypd/models/integration.py#L63-L80
|
def create(cls, service=None, endpoint=None, data=None, *args, **kwargs):
"""
Create an integration within the scope of an service.
Make sure that they should reasonably be able to query with an
service or endpoint that knows about an service.
"""
cls.validate(data)
if service is None and endpoint is None:
raise InvalidArguments(service, endpoint)
if endpoint is None:
sid = service['id'] if isinstance(service, Entity) else service
endpoint = 'services/{0}/integrations'.format(sid)
# otherwise endpoint should contain the service path too
return getattr(Entity, 'create').__func__(cls, endpoint=endpoint,
data=data, *args, **kwargs)
|
[
"def",
"create",
"(",
"cls",
",",
"service",
"=",
"None",
",",
"endpoint",
"=",
"None",
",",
"data",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"cls",
".",
"validate",
"(",
"data",
")",
"if",
"service",
"is",
"None",
"and",
"endpoint",
"is",
"None",
":",
"raise",
"InvalidArguments",
"(",
"service",
",",
"endpoint",
")",
"if",
"endpoint",
"is",
"None",
":",
"sid",
"=",
"service",
"[",
"'id'",
"]",
"if",
"isinstance",
"(",
"service",
",",
"Entity",
")",
"else",
"service",
"endpoint",
"=",
"'services/{0}/integrations'",
".",
"format",
"(",
"sid",
")",
"# otherwise endpoint should contain the service path too",
"return",
"getattr",
"(",
"Entity",
",",
"'create'",
")",
".",
"__func__",
"(",
"cls",
",",
"endpoint",
"=",
"endpoint",
",",
"data",
"=",
"data",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Create an integration within the scope of an service.
Make sure that they should reasonably be able to query with an
service or endpoint that knows about an service.
|
[
"Create",
"an",
"integration",
"within",
"the",
"scope",
"of",
"an",
"service",
"."
] |
python
|
train
| 43.777778 |
chrisspen/burlap
|
burlap/db.py
|
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/db.py#L321-L382
|
def dump(self, dest_dir=None, to_local=1, from_local=0, archive=0, dump_fn=None, name=None, site=None, use_sudo=0, cleanup=1):
"""
Exports the target database to a single transportable file on the localhost,
appropriate for loading using load().
"""
r = self.local_renderer
site = site or self.genv.SITE
r = self.database_renderer(name=name, site=site)
# Load optional site-specific command, if given.
try:
r.env.dump_command = self.genv.sites[site]['postgresql_dump_command']
except KeyError:
pass
use_sudo = int(use_sudo)
from_local = int(from_local)
to_local = int(to_local)
dump_fn = dump_fn or r.env.dump_fn_template
# Render the snapshot filename.
r.env.dump_fn = self.get_default_db_fn(
fn_template=dump_fn,
dest_dir=dest_dir,
name=name,
site=site,
)
# Dump the database to a snapshot file.
#if not os.path.isfile(os.path.abspath(r.env.dump_fn))):
r.pc('Dumping database snapshot.')
if from_local:
r.local(r.env.dump_command)
elif use_sudo:
r.sudo(r.env.dump_command)
else:
r.run(r.env.dump_command)
# Download the database dump file on the remote host to localhost.
if not from_local and to_local:
r.pc('Downloading database snapshot to localhost.')
r.local('rsync -rvz --progress --recursive --no-p --no-g '
'--rsh "ssh -o StrictHostKeyChecking=no -i {key_filename}" {user}@{host_string}:{dump_fn} {dump_fn}')
# Delete the snapshot file on the remote system.
if int(cleanup):
r.pc('Deleting database snapshot on remote host.')
r.sudo('rm {dump_fn}')
# Move the database snapshot to an archive directory.
if to_local and int(archive):
r.pc('Archiving database snapshot.')
db_fn = r.render_fn(r.env.dump_fn)
r.env.archive_fn = '%s/%s' % (env.db_dump_archive_dir, os.path.split(db_fn)[-1])
r.local('mv %s %s' % (db_fn, env.archive_fn))
return r.env.dump_fn
|
[
"def",
"dump",
"(",
"self",
",",
"dest_dir",
"=",
"None",
",",
"to_local",
"=",
"1",
",",
"from_local",
"=",
"0",
",",
"archive",
"=",
"0",
",",
"dump_fn",
"=",
"None",
",",
"name",
"=",
"None",
",",
"site",
"=",
"None",
",",
"use_sudo",
"=",
"0",
",",
"cleanup",
"=",
"1",
")",
":",
"r",
"=",
"self",
".",
"local_renderer",
"site",
"=",
"site",
"or",
"self",
".",
"genv",
".",
"SITE",
"r",
"=",
"self",
".",
"database_renderer",
"(",
"name",
"=",
"name",
",",
"site",
"=",
"site",
")",
"# Load optional site-specific command, if given.",
"try",
":",
"r",
".",
"env",
".",
"dump_command",
"=",
"self",
".",
"genv",
".",
"sites",
"[",
"site",
"]",
"[",
"'postgresql_dump_command'",
"]",
"except",
"KeyError",
":",
"pass",
"use_sudo",
"=",
"int",
"(",
"use_sudo",
")",
"from_local",
"=",
"int",
"(",
"from_local",
")",
"to_local",
"=",
"int",
"(",
"to_local",
")",
"dump_fn",
"=",
"dump_fn",
"or",
"r",
".",
"env",
".",
"dump_fn_template",
"# Render the snapshot filename.",
"r",
".",
"env",
".",
"dump_fn",
"=",
"self",
".",
"get_default_db_fn",
"(",
"fn_template",
"=",
"dump_fn",
",",
"dest_dir",
"=",
"dest_dir",
",",
"name",
"=",
"name",
",",
"site",
"=",
"site",
",",
")",
"# Dump the database to a snapshot file.",
"#if not os.path.isfile(os.path.abspath(r.env.dump_fn))):",
"r",
".",
"pc",
"(",
"'Dumping database snapshot.'",
")",
"if",
"from_local",
":",
"r",
".",
"local",
"(",
"r",
".",
"env",
".",
"dump_command",
")",
"elif",
"use_sudo",
":",
"r",
".",
"sudo",
"(",
"r",
".",
"env",
".",
"dump_command",
")",
"else",
":",
"r",
".",
"run",
"(",
"r",
".",
"env",
".",
"dump_command",
")",
"# Download the database dump file on the remote host to localhost.",
"if",
"not",
"from_local",
"and",
"to_local",
":",
"r",
".",
"pc",
"(",
"'Downloading database snapshot to localhost.'",
")",
"r",
".",
"local",
"(",
"'rsync -rvz --progress --recursive --no-p --no-g '",
"'--rsh \"ssh -o StrictHostKeyChecking=no -i {key_filename}\" {user}@{host_string}:{dump_fn} {dump_fn}'",
")",
"# Delete the snapshot file on the remote system.",
"if",
"int",
"(",
"cleanup",
")",
":",
"r",
".",
"pc",
"(",
"'Deleting database snapshot on remote host.'",
")",
"r",
".",
"sudo",
"(",
"'rm {dump_fn}'",
")",
"# Move the database snapshot to an archive directory.",
"if",
"to_local",
"and",
"int",
"(",
"archive",
")",
":",
"r",
".",
"pc",
"(",
"'Archiving database snapshot.'",
")",
"db_fn",
"=",
"r",
".",
"render_fn",
"(",
"r",
".",
"env",
".",
"dump_fn",
")",
"r",
".",
"env",
".",
"archive_fn",
"=",
"'%s/%s'",
"%",
"(",
"env",
".",
"db_dump_archive_dir",
",",
"os",
".",
"path",
".",
"split",
"(",
"db_fn",
")",
"[",
"-",
"1",
"]",
")",
"r",
".",
"local",
"(",
"'mv %s %s'",
"%",
"(",
"db_fn",
",",
"env",
".",
"archive_fn",
")",
")",
"return",
"r",
".",
"env",
".",
"dump_fn"
] |
Exports the target database to a single transportable file on the localhost,
appropriate for loading using load().
|
[
"Exports",
"the",
"target",
"database",
"to",
"a",
"single",
"transportable",
"file",
"on",
"the",
"localhost",
"appropriate",
"for",
"loading",
"using",
"load",
"()",
"."
] |
python
|
valid
| 35.322581 |
CiscoDevNet/webexteamssdk
|
examples/bot-example-flask.py
|
https://github.com/CiscoDevNet/webexteamssdk/blob/6fc2cc3557e080ba4b2a380664cb2a0532ae45cd/examples/bot-example-flask.py#L98-L158
|
def webex_teams_webhook_events():
"""Processes incoming requests to the '/events' URI."""
if request.method == 'GET':
return ("""<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Webex Teams Bot served via Flask</title>
</head>
<body>
<p>
<strong>Your Flask web server is up and running!</strong>
</p>
<p>
Here is a nice Cat Fact for you:
</p>
<blockquote>{}</blockquote>
</body>
</html>
""".format(get_catfact()))
elif request.method == 'POST':
"""Respond to inbound webhook JSON HTTP POST from Webex Teams."""
# Get the POST data sent from Webex Teams
json_data = request.json
print("\n")
print("WEBHOOK POST RECEIVED:")
print(json_data)
print("\n")
# Create a Webhook object from the JSON data
webhook_obj = Webhook(json_data)
# Get the room details
room = api.rooms.get(webhook_obj.data.roomId)
# Get the message details
message = api.messages.get(webhook_obj.data.id)
# Get the sender's details
person = api.people.get(message.personId)
print("NEW MESSAGE IN ROOM '{}'".format(room.title))
print("FROM '{}'".format(person.displayName))
print("MESSAGE '{}'\n".format(message.text))
# This is a VERY IMPORTANT loop prevention control step.
# If you respond to all messages... You will respond to the messages
# that the bot posts and thereby create a loop condition.
me = api.people.me()
if message.personId == me.id:
# Message was sent by me (bot); do not respond.
return 'OK'
else:
# Message was sent by someone else; parse message and respond.
if "/CAT" in message.text:
print("FOUND '/CAT'")
# Get a cat fact
cat_fact = get_catfact()
print("SENDING CAT FACT '{}'".format(cat_fact))
# Post the fact to the room where the request was received
api.messages.create(room.id, text=cat_fact)
return 'OK'
|
[
"def",
"webex_teams_webhook_events",
"(",
")",
":",
"if",
"request",
".",
"method",
"==",
"'GET'",
":",
"return",
"(",
"\"\"\"<!DOCTYPE html>\n <html lang=\"en\">\n <head>\n <meta charset=\"UTF-8\">\n <title>Webex Teams Bot served via Flask</title>\n </head>\n <body>\n <p>\n <strong>Your Flask web server is up and running!</strong>\n </p>\n <p>\n Here is a nice Cat Fact for you:\n </p>\n <blockquote>{}</blockquote>\n </body>\n </html>\n \"\"\"",
".",
"format",
"(",
"get_catfact",
"(",
")",
")",
")",
"elif",
"request",
".",
"method",
"==",
"'POST'",
":",
"\"\"\"Respond to inbound webhook JSON HTTP POST from Webex Teams.\"\"\"",
"# Get the POST data sent from Webex Teams",
"json_data",
"=",
"request",
".",
"json",
"print",
"(",
"\"\\n\"",
")",
"print",
"(",
"\"WEBHOOK POST RECEIVED:\"",
")",
"print",
"(",
"json_data",
")",
"print",
"(",
"\"\\n\"",
")",
"# Create a Webhook object from the JSON data",
"webhook_obj",
"=",
"Webhook",
"(",
"json_data",
")",
"# Get the room details",
"room",
"=",
"api",
".",
"rooms",
".",
"get",
"(",
"webhook_obj",
".",
"data",
".",
"roomId",
")",
"# Get the message details",
"message",
"=",
"api",
".",
"messages",
".",
"get",
"(",
"webhook_obj",
".",
"data",
".",
"id",
")",
"# Get the sender's details",
"person",
"=",
"api",
".",
"people",
".",
"get",
"(",
"message",
".",
"personId",
")",
"print",
"(",
"\"NEW MESSAGE IN ROOM '{}'\"",
".",
"format",
"(",
"room",
".",
"title",
")",
")",
"print",
"(",
"\"FROM '{}'\"",
".",
"format",
"(",
"person",
".",
"displayName",
")",
")",
"print",
"(",
"\"MESSAGE '{}'\\n\"",
".",
"format",
"(",
"message",
".",
"text",
")",
")",
"# This is a VERY IMPORTANT loop prevention control step.",
"# If you respond to all messages... You will respond to the messages",
"# that the bot posts and thereby create a loop condition.",
"me",
"=",
"api",
".",
"people",
".",
"me",
"(",
")",
"if",
"message",
".",
"personId",
"==",
"me",
".",
"id",
":",
"# Message was sent by me (bot); do not respond.",
"return",
"'OK'",
"else",
":",
"# Message was sent by someone else; parse message and respond.",
"if",
"\"/CAT\"",
"in",
"message",
".",
"text",
":",
"print",
"(",
"\"FOUND '/CAT'\"",
")",
"# Get a cat fact",
"cat_fact",
"=",
"get_catfact",
"(",
")",
"print",
"(",
"\"SENDING CAT FACT '{}'\"",
".",
"format",
"(",
"cat_fact",
")",
")",
"# Post the fact to the room where the request was received",
"api",
".",
"messages",
".",
"create",
"(",
"room",
".",
"id",
",",
"text",
"=",
"cat_fact",
")",
"return",
"'OK'"
] |
Processes incoming requests to the '/events' URI.
|
[
"Processes",
"incoming",
"requests",
"to",
"the",
"/",
"events",
"URI",
"."
] |
python
|
test
| 38.721311 |
pycontribs/pyrax
|
pyrax/image.py
|
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/image.py#L319-L345
|
def update_image_member(self, img_id, status):
"""
Updates the image whose ID is given with the status specified. This
must be called by the user whose project_id is in the members for the
image. If called by the owner of the image, an InvalidImageMember
exception will be raised.
Valid values for 'status' include:
pending
accepted
rejected
Any other value will result in an InvalidImageMemberStatus exception
being raised.
"""
if status not in ("pending", "accepted", "rejected"):
raise exc.InvalidImageMemberStatus("The status value must be one "
"of 'accepted', 'rejected', or 'pending'. Received: '%s'" %
status)
api = self.api
project_id = api.identity.tenant_id
uri = "/%s/%s/members/%s" % (self.uri_base, img_id, project_id)
body = {"status": status}
try:
resp, resp_body = self.api.method_put(uri, body=body)
except exc.NotFound as e:
raise exc.InvalidImageMember("The update member request could not "
"be completed. No member request for that image was found.")
|
[
"def",
"update_image_member",
"(",
"self",
",",
"img_id",
",",
"status",
")",
":",
"if",
"status",
"not",
"in",
"(",
"\"pending\"",
",",
"\"accepted\"",
",",
"\"rejected\"",
")",
":",
"raise",
"exc",
".",
"InvalidImageMemberStatus",
"(",
"\"The status value must be one \"",
"\"of 'accepted', 'rejected', or 'pending'. Received: '%s'\"",
"%",
"status",
")",
"api",
"=",
"self",
".",
"api",
"project_id",
"=",
"api",
".",
"identity",
".",
"tenant_id",
"uri",
"=",
"\"/%s/%s/members/%s\"",
"%",
"(",
"self",
".",
"uri_base",
",",
"img_id",
",",
"project_id",
")",
"body",
"=",
"{",
"\"status\"",
":",
"status",
"}",
"try",
":",
"resp",
",",
"resp_body",
"=",
"self",
".",
"api",
".",
"method_put",
"(",
"uri",
",",
"body",
"=",
"body",
")",
"except",
"exc",
".",
"NotFound",
"as",
"e",
":",
"raise",
"exc",
".",
"InvalidImageMember",
"(",
"\"The update member request could not \"",
"\"be completed. No member request for that image was found.\"",
")"
] |
Updates the image whose ID is given with the status specified. This
must be called by the user whose project_id is in the members for the
image. If called by the owner of the image, an InvalidImageMember
exception will be raised.
Valid values for 'status' include:
pending
accepted
rejected
Any other value will result in an InvalidImageMemberStatus exception
being raised.
|
[
"Updates",
"the",
"image",
"whose",
"ID",
"is",
"given",
"with",
"the",
"status",
"specified",
".",
"This",
"must",
"be",
"called",
"by",
"the",
"user",
"whose",
"project_id",
"is",
"in",
"the",
"members",
"for",
"the",
"image",
".",
"If",
"called",
"by",
"the",
"owner",
"of",
"the",
"image",
"an",
"InvalidImageMember",
"exception",
"will",
"be",
"raised",
"."
] |
python
|
train
| 44.703704 |
HDI-Project/RDT
|
rdt/transformers/null.py
|
https://github.com/HDI-Project/RDT/blob/b28fdd671a1d7fbd14983eefe0cfbd8d87ded92a/rdt/transformers/null.py#L47-L61
|
def reverse_transform(self, col):
"""Converts data back into original format.
Args:
col(pandas.DataFrame): Data to transform.
Returns:
pandas.DataFrame
"""
output = pd.DataFrame()
new_name = '?' + self.col_name
col.loc[col[new_name] == 0, self.col_name] = np.nan
output[self.col_name] = col[self.col_name]
return output
|
[
"def",
"reverse_transform",
"(",
"self",
",",
"col",
")",
":",
"output",
"=",
"pd",
".",
"DataFrame",
"(",
")",
"new_name",
"=",
"'?'",
"+",
"self",
".",
"col_name",
"col",
".",
"loc",
"[",
"col",
"[",
"new_name",
"]",
"==",
"0",
",",
"self",
".",
"col_name",
"]",
"=",
"np",
".",
"nan",
"output",
"[",
"self",
".",
"col_name",
"]",
"=",
"col",
"[",
"self",
".",
"col_name",
"]",
"return",
"output"
] |
Converts data back into original format.
Args:
col(pandas.DataFrame): Data to transform.
Returns:
pandas.DataFrame
|
[
"Converts",
"data",
"back",
"into",
"original",
"format",
"."
] |
python
|
train
| 26.933333 |
google/dotty
|
efilter/transforms/solve.py
|
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/transforms/solve.py#L463-L475
|
def solve_filter(expr, vars):
"""Filter values on the LHS by evaluating RHS with each value.
Returns any LHS values for which RHS evaluates to a true value.
"""
lhs_values, _ = __solve_for_repeated(expr.lhs, vars)
def lazy_filter():
for lhs_value in repeated.getvalues(lhs_values):
if solve(expr.rhs, __nest_scope(expr.lhs, vars, lhs_value)).value:
yield lhs_value
return Result(repeated.lazy(lazy_filter), ())
|
[
"def",
"solve_filter",
"(",
"expr",
",",
"vars",
")",
":",
"lhs_values",
",",
"_",
"=",
"__solve_for_repeated",
"(",
"expr",
".",
"lhs",
",",
"vars",
")",
"def",
"lazy_filter",
"(",
")",
":",
"for",
"lhs_value",
"in",
"repeated",
".",
"getvalues",
"(",
"lhs_values",
")",
":",
"if",
"solve",
"(",
"expr",
".",
"rhs",
",",
"__nest_scope",
"(",
"expr",
".",
"lhs",
",",
"vars",
",",
"lhs_value",
")",
")",
".",
"value",
":",
"yield",
"lhs_value",
"return",
"Result",
"(",
"repeated",
".",
"lazy",
"(",
"lazy_filter",
")",
",",
"(",
")",
")"
] |
Filter values on the LHS by evaluating RHS with each value.
Returns any LHS values for which RHS evaluates to a true value.
|
[
"Filter",
"values",
"on",
"the",
"LHS",
"by",
"evaluating",
"RHS",
"with",
"each",
"value",
"."
] |
python
|
train
| 35.461538 |
wind-python/windpowerlib
|
windpowerlib/wind_speed.py
|
https://github.com/wind-python/windpowerlib/blob/421b316139743311b7cb68a69f6b53d2665f7e23/windpowerlib/wind_speed.py#L14-L89
|
def logarithmic_profile(wind_speed, wind_speed_height, hub_height,
roughness_length, obstacle_height=0.0):
r"""
Calculates the wind speed at hub height using a logarithmic wind profile.
The logarithmic height equation is used. There is the possibility of
including the height of the surrounding obstacles in the calculation. This
function is carried out when the parameter `wind_speed_model` of an
instance of the :class:`~.modelchain.ModelChain` class is 'logarithmic'.
Parameters
----------
wind_speed : pandas.Series or numpy.array
Wind speed time series.
wind_speed_height : float
Height for which the parameter `wind_speed` applies.
hub_height : float
Hub height of wind turbine.
roughness_length : pandas.Series or numpy.array or float
Roughness length.
obstacle_height : float
Height of obstacles in the surrounding area of the wind turbine. Set
`obstacle_height` to zero for wide spread obstacles. Default: 0.
Returns
-------
pandas.Series or numpy.array
Wind speed at hub height. Data type depends on type of `wind_speed`.
Notes
-----
The following equation is used [1]_, [2]_, [3]_:
.. math:: v_{wind,hub}=v_{wind,data}\cdot
\frac{\ln\left(\frac{h_{hub}-d}{z_{0}}\right)}{\ln\left(
\frac{h_{data}-d}{z_{0}}\right)}
with:
v: wind speed, h: height, :math:`z_{0}`: roughness length,
d: boundary layer offset (estimated by d = 0.7 * `obstacle_height`)
For d = 0 it results in the following equation [2]_, [3]_:
.. math:: v_{wind,hub}=v_{wind,data}\cdot\frac{\ln\left(\frac{h_{hub}}
{z_{0}}\right)}{\ln\left(\frac{h_{data}}{z_{0}}\right)}
:math:`h_{data}` is the height at which the wind speed
:math:`v_{wind,data}` is measured and :math:`v_{wind,hub}` is the wind
speed at hub height :math:`h_{hub}` of the wind turbine.
Parameters `wind_speed_height`, `roughness_length`, `hub_height` and
`obstacle_height` have to be of the same unit.
References
----------
.. [1] Quaschning V.: "Regenerative Energiesysteme". München, Hanser
Verlag, 2011, p. 278
.. [2] Gasch, R., Twele, J.: "Windkraftanlagen". 6. Auflage, Wiesbaden,
Vieweg + Teubner, 2010, p. 129
.. [3] Hau, E.: "Windkraftanlagen - Grundlagen, Technik, Einsatz,
Wirtschaftlichkeit". 4. Auflage, Springer-Verlag, 2008, p. 515
"""
if 0.7 * obstacle_height > wind_speed_height:
raise ValueError("To take an obstacle height of {0} m ".format(
obstacle_height) + "into consideration, wind " +
"speed data of a greater height is needed.")
# Return np.array if wind_speed is np.array
if (isinstance(wind_speed, np.ndarray) and
isinstance(roughness_length, pd.Series)):
roughness_length = np.array(roughness_length)
return (wind_speed * np.log((hub_height - 0.7 * obstacle_height) /
roughness_length) /
np.log((wind_speed_height - 0.7 * obstacle_height) /
roughness_length))
|
[
"def",
"logarithmic_profile",
"(",
"wind_speed",
",",
"wind_speed_height",
",",
"hub_height",
",",
"roughness_length",
",",
"obstacle_height",
"=",
"0.0",
")",
":",
"if",
"0.7",
"*",
"obstacle_height",
">",
"wind_speed_height",
":",
"raise",
"ValueError",
"(",
"\"To take an obstacle height of {0} m \"",
".",
"format",
"(",
"obstacle_height",
")",
"+",
"\"into consideration, wind \"",
"+",
"\"speed data of a greater height is needed.\"",
")",
"# Return np.array if wind_speed is np.array",
"if",
"(",
"isinstance",
"(",
"wind_speed",
",",
"np",
".",
"ndarray",
")",
"and",
"isinstance",
"(",
"roughness_length",
",",
"pd",
".",
"Series",
")",
")",
":",
"roughness_length",
"=",
"np",
".",
"array",
"(",
"roughness_length",
")",
"return",
"(",
"wind_speed",
"*",
"np",
".",
"log",
"(",
"(",
"hub_height",
"-",
"0.7",
"*",
"obstacle_height",
")",
"/",
"roughness_length",
")",
"/",
"np",
".",
"log",
"(",
"(",
"wind_speed_height",
"-",
"0.7",
"*",
"obstacle_height",
")",
"/",
"roughness_length",
")",
")"
] |
r"""
Calculates the wind speed at hub height using a logarithmic wind profile.
The logarithmic height equation is used. There is the possibility of
including the height of the surrounding obstacles in the calculation. This
function is carried out when the parameter `wind_speed_model` of an
instance of the :class:`~.modelchain.ModelChain` class is 'logarithmic'.
Parameters
----------
wind_speed : pandas.Series or numpy.array
Wind speed time series.
wind_speed_height : float
Height for which the parameter `wind_speed` applies.
hub_height : float
Hub height of wind turbine.
roughness_length : pandas.Series or numpy.array or float
Roughness length.
obstacle_height : float
Height of obstacles in the surrounding area of the wind turbine. Set
`obstacle_height` to zero for wide spread obstacles. Default: 0.
Returns
-------
pandas.Series or numpy.array
Wind speed at hub height. Data type depends on type of `wind_speed`.
Notes
-----
The following equation is used [1]_, [2]_, [3]_:
.. math:: v_{wind,hub}=v_{wind,data}\cdot
\frac{\ln\left(\frac{h_{hub}-d}{z_{0}}\right)}{\ln\left(
\frac{h_{data}-d}{z_{0}}\right)}
with:
v: wind speed, h: height, :math:`z_{0}`: roughness length,
d: boundary layer offset (estimated by d = 0.7 * `obstacle_height`)
For d = 0 it results in the following equation [2]_, [3]_:
.. math:: v_{wind,hub}=v_{wind,data}\cdot\frac{\ln\left(\frac{h_{hub}}
{z_{0}}\right)}{\ln\left(\frac{h_{data}}{z_{0}}\right)}
:math:`h_{data}` is the height at which the wind speed
:math:`v_{wind,data}` is measured and :math:`v_{wind,hub}` is the wind
speed at hub height :math:`h_{hub}` of the wind turbine.
Parameters `wind_speed_height`, `roughness_length`, `hub_height` and
`obstacle_height` have to be of the same unit.
References
----------
.. [1] Quaschning V.: "Regenerative Energiesysteme". München, Hanser
Verlag, 2011, p. 278
.. [2] Gasch, R., Twele, J.: "Windkraftanlagen". 6. Auflage, Wiesbaden,
Vieweg + Teubner, 2010, p. 129
.. [3] Hau, E.: "Windkraftanlagen - Grundlagen, Technik, Einsatz,
Wirtschaftlichkeit". 4. Auflage, Springer-Verlag, 2008, p. 515
|
[
"r",
"Calculates",
"the",
"wind",
"speed",
"at",
"hub",
"height",
"using",
"a",
"logarithmic",
"wind",
"profile",
"."
] |
python
|
train
| 41 |
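A numeric sketch of logarithmic_profile from the record above; the measurement height, roughness length, and wind speeds are illustrative values.

import numpy as np
from windpowerlib.wind_speed import logarithmic_profile

v_10m = np.array([4.0, 5.5, 7.2])                     # wind speeds measured at 10 m
v_hub = logarithmic_profile(v_10m, wind_speed_height=10, hub_height=100,
                            roughness_length=0.15)
print(v_hub)                                          # wind speeds extrapolated to hub height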
BD2KOnFHIR/i2b2model
|
i2b2model/shared/tablenames.py
|
https://github.com/BD2KOnFHIR/i2b2model/blob/9d49bb53b0733dd83ab5b716014865e270a3c903/i2b2model/shared/tablenames.py#L46-L52
|
def all_tables(self) -> List[str]:
"""
List of all known tables
:return:
"""
return sorted([k for k in self.__dict__.keys()
if k not in _I2B2Tables._funcs and not k.startswith("_")])
|
[
"def",
"all_tables",
"(",
"self",
")",
"->",
"List",
"[",
"str",
"]",
":",
"return",
"sorted",
"(",
"[",
"k",
"for",
"k",
"in",
"self",
".",
"__dict__",
".",
"keys",
"(",
")",
"if",
"k",
"not",
"in",
"_I2B2Tables",
".",
"_funcs",
"and",
"not",
"k",
".",
"startswith",
"(",
"\"_\"",
")",
"]",
")"
] |
List of all known tables
:return:
|
[
"List",
"of",
"all",
"known",
"tables",
":",
"return",
":"
] |
python
|
train
| 34.142857 |
crunchyroll/ef-open
|
efopen/ef_aws_resolver.py
|
https://github.com/crunchyroll/ef-open/blob/59fff3761af07a59f8f1c1682f2be004bdac15f7/efopen/ef_aws_resolver.py#L53-L66
|
def _elbv2_load_balancer(self, lookup):
"""
Args:
lookup: the friendly name of the V2 elb to look up
Returns:
A dict with the load balancer description
Raises:
botocore.exceptions.ClientError: no such load-balancer
"""
client = EFAwsResolver.__CLIENTS['elbv2']
elbs = client.describe_load_balancers(Names=[lookup])
# getting the first one, since we requested only one lb
elb = elbs['LoadBalancers'][0]
return elb
|
[
"def",
"_elbv2_load_balancer",
"(",
"self",
",",
"lookup",
")",
":",
"client",
"=",
"EFAwsResolver",
".",
"__CLIENTS",
"[",
"'elbv2'",
"]",
"elbs",
"=",
"client",
".",
"describe_load_balancers",
"(",
"Names",
"=",
"[",
"lookup",
"]",
")",
"# getting the first one, since we requested only one lb",
"elb",
"=",
"elbs",
"[",
"'LoadBalancers'",
"]",
"[",
"0",
"]",
"return",
"elb"
] |
Args:
lookup: the friendly name of the V2 elb to look up
Returns:
A dict with the load balancer description
Raises:
botocore.exceptions.ClientError: no such load-balancer
|
[
"Args",
":",
"lookup",
":",
"the",
"friendly",
"name",
"of",
"the",
"V2",
"elb",
"to",
"look",
"up",
"Returns",
":",
"A",
"dict",
"with",
"the",
"load",
"balancer",
"description",
"Raises",
":",
"botocore",
".",
"exceptions",
".",
"ClientError",
":",
"no",
"such",
"load",
"-",
"balancer"
] |
python
|
train
| 32.642857 |
google/grr
|
grr/server/grr_response_server/client_index.py
|
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/client_index.py#L275-L283
|
def AddClient(self, client):
"""Adds a client to the index.
Args:
client: A VFSGRRClient record to add or update.
"""
client_id, keywords = self.AnalyzeClient(client)
self.AddKeywordsForName(client_id, keywords)
|
[
"def",
"AddClient",
"(",
"self",
",",
"client",
")",
":",
"client_id",
",",
"keywords",
"=",
"self",
".",
"AnalyzeClient",
"(",
"client",
")",
"self",
".",
"AddKeywordsForName",
"(",
"client_id",
",",
"keywords",
")"
] |
Adds a client to the index.
Args:
client: A VFSGRRClient record to add or update.
|
[
"Adds",
"a",
"client",
"to",
"the",
"index",
"."
] |
python
|
train
| 25.666667 |
primetang/qrtools
|
src/qrtools.py
|
https://github.com/primetang/qrtools/blob/3263c6136f54f0499b9945bfad593537d436c7a1/src/qrtools.py#L125-L133
|
def data_to_string(self):
"""Returns a UTF8 string with the QR Code's data"""
# FIX-ME: if we don't add the BOM_UTF8 char, QtQR doesn't decode
# correctly; but if we add it, mobile apps don't.-
# Apparently is a zbar bug.
if self.data_type == 'text':
return BOM_UTF8 + self.__class__.data_encode[self.data_type](self.data).encode('utf-8')
else:
return self.__class__.data_encode[self.data_type](self.data).encode('utf-8')
|
[
"def",
"data_to_string",
"(",
"self",
")",
":",
"# FIX-ME: if we don't add the BOM_UTF8 char, QtQR doesn't decode",
"# correctly; but if we add it, mobile apps don't.-",
"# Apparently is a zbar bug.",
"if",
"self",
".",
"data_type",
"==",
"'text'",
":",
"return",
"BOM_UTF8",
"+",
"self",
".",
"__class__",
".",
"data_encode",
"[",
"self",
".",
"data_type",
"]",
"(",
"self",
".",
"data",
")",
".",
"encode",
"(",
"'utf-8'",
")",
"else",
":",
"return",
"self",
".",
"__class__",
".",
"data_encode",
"[",
"self",
".",
"data_type",
"]",
"(",
"self",
".",
"data",
")",
".",
"encode",
"(",
"'utf-8'",
")"
] |
Returns a UTF8 string with the QR Code's data
|
[
"Returns",
"a",
"UTF8",
"string",
"with",
"the",
"QR",
"Code",
"s",
"data"
] |
python
|
train
| 53.888889 |
emc-openstack/storops
|
storops/unity/resource/snap.py
|
https://github.com/emc-openstack/storops/blob/24b4b13bf065c0ef0538dd0b5ebb8f25d24176bd/storops/unity/resource/snap.py#L154-L172
|
def restore(self, backup=None, delete_backup=False):
"""Restore the snapshot to the associated storage resource.
:param backup: name of the backup snapshot
:param delete_backup: Whether to delete the backup snap after a
successful restore.
"""
resp = self._cli.action(self.resource_class, self.get_id(),
'restore', copyName=backup)
resp.raise_if_err()
backup = resp.first_content['backup']
backup_snap = UnitySnap(_id=backup['id'], cli=self._cli)
if delete_backup:
log.info("Deleting the backup snap {} as the restoration "
"succeeded.".format(backup['id']))
backup_snap.delete()
return backup_snap
|
[
"def",
"restore",
"(",
"self",
",",
"backup",
"=",
"None",
",",
"delete_backup",
"=",
"False",
")",
":",
"resp",
"=",
"self",
".",
"_cli",
".",
"action",
"(",
"self",
".",
"resource_class",
",",
"self",
".",
"get_id",
"(",
")",
",",
"'restore'",
",",
"copyName",
"=",
"backup",
")",
"resp",
".",
"raise_if_err",
"(",
")",
"backup",
"=",
"resp",
".",
"first_content",
"[",
"'backup'",
"]",
"backup_snap",
"=",
"UnitySnap",
"(",
"_id",
"=",
"backup",
"[",
"'id'",
"]",
",",
"cli",
"=",
"self",
".",
"_cli",
")",
"if",
"delete_backup",
":",
"log",
".",
"info",
"(",
"\"Deleting the backup snap {} as the restoration \"",
"\"succeeded.\"",
".",
"format",
"(",
"backup",
"[",
"'id'",
"]",
")",
")",
"backup_snap",
".",
"delete",
"(",
")",
"return",
"backup_snap"
] |
Restore the snapshot to the associated storage resource.
:param backup: name of the backup snapshot
:param delete_backup: Whether to delete the backup snap after a
successful restore.
|
[
"Restore",
"the",
"snapshot",
"to",
"the",
"associated",
"storage",
"resource",
"."
] |
python
|
train
| 40.526316 |
edx/edx-enterprise
|
enterprise/tpa_pipeline.py
|
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/tpa_pipeline.py#L77-L90
|
def get_user_from_social_auth(tpa_provider, tpa_username):
"""
Find the LMS user from the LMS model `UserSocialAuth`.
Arguments:
tpa_provider (third_party_auth.provider): third party auth provider object
tpa_username (str): Username returned by the third party auth
"""
user_social_auth = UserSocialAuth.objects.select_related('user').filter(
user__username=tpa_username, provider=tpa_provider.backend_name
).first()
return user_social_auth.user if user_social_auth else None
|
[
"def",
"get_user_from_social_auth",
"(",
"tpa_provider",
",",
"tpa_username",
")",
":",
"user_social_auth",
"=",
"UserSocialAuth",
".",
"objects",
".",
"select_related",
"(",
"'user'",
")",
".",
"filter",
"(",
"user__username",
"=",
"tpa_username",
",",
"provider",
"=",
"tpa_provider",
".",
"backend_name",
")",
".",
"first",
"(",
")",
"return",
"user_social_auth",
".",
"user",
"if",
"user_social_auth",
"else",
"None"
] |
Find the LMS user from the LMS model `UserSocialAuth`.
Arguments:
tpa_provider (third_party_auth.provider): third party auth provider object
tpa_username (str): Username returned by the third party auth
|
[
"Find",
"the",
"LMS",
"user",
"from",
"the",
"LMS",
"model",
"UserSocialAuth",
"."
] |
python
|
valid
| 36.928571 |
apache/airflow
|
airflow/models/taskinstance.py
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/taskinstance.py#L458-L467
|
def clear_xcom_data(self, session=None):
"""
Clears all XCom data from the database for the task instance
"""
session.query(XCom).filter(
XCom.dag_id == self.dag_id,
XCom.task_id == self.task_id,
XCom.execution_date == self.execution_date
).delete()
session.commit()
|
[
"def",
"clear_xcom_data",
"(",
"self",
",",
"session",
"=",
"None",
")",
":",
"session",
".",
"query",
"(",
"XCom",
")",
".",
"filter",
"(",
"XCom",
".",
"dag_id",
"==",
"self",
".",
"dag_id",
",",
"XCom",
".",
"task_id",
"==",
"self",
".",
"task_id",
",",
"XCom",
".",
"execution_date",
"==",
"self",
".",
"execution_date",
")",
".",
"delete",
"(",
")",
"session",
".",
"commit",
"(",
")"
] |
Clears all XCom data from the database for the task instance
|
[
"Clears",
"all",
"XCom",
"data",
"from",
"the",
"database",
"for",
"the",
"task",
"instance"
] |
python
|
test
| 34.1 |
dh1tw/pyhamtools
|
pyhamtools/lookuplib.py
|
https://github.com/dh1tw/pyhamtools/blob/ee7e4b8732e23c298da10e07163748156c16d0fa/pyhamtools/lookuplib.py#L485-L538
|
def lookup_prefix(self, prefix, timestamp=timestamp_now):
"""
Returns lookup data of a Prefix
Args:
            prefix (string): Prefix of an Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
dict: Dictionary containing the country specific data of the Prefix
Raises:
KeyError: No matching Prefix found
APIKeyMissingError: API Key for Clublog missing or incorrect
Example:
The following code shows how to obtain the information for the prefix "DH" from the countryfile.com
database (default database).
>>> from pyhamtools import LookupLib
>>> myLookupLib = LookupLib()
>>> print myLookupLib.lookup_prefix("DH")
{
'adif': 230,
'country': u'Fed. Rep. of Germany',
'longitude': 10.0,
'cqz': 14,
'ituz': 28,
'latitude': 51.0,
'continent': u'EU'
}
Note:
This method is available for
- clublogxml
- countryfile
- redis
"""
prefix = prefix.strip().upper()
if self._lookuptype == "clublogxml" or self._lookuptype == "countryfile":
return self._check_data_for_date(prefix, timestamp, self._prefixes, self._prefixes_index)
elif self._lookuptype == "redis":
data_dict, index = self._get_dicts_from_redis("_prefix_", "_prefix_index_", self._redis_prefix, prefix)
return self._check_data_for_date(prefix, timestamp, data_dict, index)
# no matching case
raise KeyError
|
[
"def",
"lookup_prefix",
"(",
"self",
",",
"prefix",
",",
"timestamp",
"=",
"timestamp_now",
")",
":",
"prefix",
"=",
"prefix",
".",
"strip",
"(",
")",
".",
"upper",
"(",
")",
"if",
"self",
".",
"_lookuptype",
"==",
"\"clublogxml\"",
"or",
"self",
".",
"_lookuptype",
"==",
"\"countryfile\"",
":",
"return",
"self",
".",
"_check_data_for_date",
"(",
"prefix",
",",
"timestamp",
",",
"self",
".",
"_prefixes",
",",
"self",
".",
"_prefixes_index",
")",
"elif",
"self",
".",
"_lookuptype",
"==",
"\"redis\"",
":",
"data_dict",
",",
"index",
"=",
"self",
".",
"_get_dicts_from_redis",
"(",
"\"_prefix_\"",
",",
"\"_prefix_index_\"",
",",
"self",
".",
"_redis_prefix",
",",
"prefix",
")",
"return",
"self",
".",
"_check_data_for_date",
"(",
"prefix",
",",
"timestamp",
",",
"data_dict",
",",
"index",
")",
"# no matching case",
"raise",
"KeyError"
] |
Returns lookup data of a Prefix
Args:
            prefix (string): Prefix of an Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
dict: Dictionary containing the country specific data of the Prefix
Raises:
KeyError: No matching Prefix found
APIKeyMissingError: API Key for Clublog missing or incorrect
Example:
The following code shows how to obtain the information for the prefix "DH" from the countryfile.com
database (default database).
>>> from pyhamtools import LookupLib
>>> myLookupLib = LookupLib()
>>> print myLookupLib.lookup_prefix("DH")
{
'adif': 230,
'country': u'Fed. Rep. of Germany',
'longitude': 10.0,
'cqz': 14,
'ituz': 28,
'latitude': 51.0,
'continent': u'EU'
}
Note:
This method is available for
- clublogxml
- countryfile
- redis
|
[
"Returns",
"lookup",
"data",
"of",
"a",
"Prefix"
] |
python
|
train
| 30.722222 |
hannes-brt/hebel
|
hebel/layers/multitask_top_layer.py
|
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/layers/multitask_top_layer.py#L178-L184
|
def parameters(self):
"""Return a list where each element contains the parameters for a task.
"""
parameters = []
for task in self.tasks:
parameters.extend(task.parameters)
return parameters
|
[
"def",
"parameters",
"(",
"self",
")",
":",
"parameters",
"=",
"[",
"]",
"for",
"task",
"in",
"self",
".",
"tasks",
":",
"parameters",
".",
"extend",
"(",
"task",
".",
"parameters",
")",
"return",
"parameters"
] |
Return a list where each element contains the parameters for a task.
|
[
"Return",
"a",
"list",
"where",
"each",
"element",
"contains",
"the",
"parameters",
"for",
"a",
"task",
"."
] |
python
|
train
| 33.714286 |
inveniosoftware/invenio-formatter
|
invenio_formatter/filters/datetime.py
|
https://github.com/inveniosoftware/invenio-formatter/blob/aa25f36742e809f05e116b52e8255cdb362e5642/invenio_formatter/filters/datetime.py#L29-L39
|
def from_isodatetime(value, strict=False):
"""Convert an ISO formatted datetime into a Date object.
:param value: The ISO formatted datetime.
:param strict: If value is ``None``, then if strict is ``True`` it returns
the Date object of today, otherwise it returns ``None``.
(Default: ``False``)
:returns: The Date object or ``None``.
"""
if value or strict:
return arrow.get(value).datetime
|
[
"def",
"from_isodatetime",
"(",
"value",
",",
"strict",
"=",
"False",
")",
":",
"if",
"value",
"or",
"strict",
":",
"return",
"arrow",
".",
"get",
"(",
"value",
")",
".",
"datetime"
] |
Convert an ISO formatted datetime into a Date object.
:param value: The ISO formatted datetime.
:param strict: If value is ``None``, then if strict is ``True`` it returns
the Date object of today, otherwise it returns ``None``.
(Default: ``False``)
:returns: The Date object or ``None``.
|
[
"Convert",
"an",
"ISO",
"formatted",
"datetime",
"into",
"a",
"Date",
"object",
"."
] |
python
|
train
| 39 |
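A hedged usage sketch of the filter above, assuming the arrow dependency the record already imports; the timestamps are illustrative.

import arrow

def from_isodatetime(value, strict=False):
    # None (or empty) input passes through unless strict is set
    if value or strict:
        return arrow.get(value).datetime
    return None

print(from_isodatetime('2019-05-21T12:00:00+00:00'))  # timezone-aware datetime
print(from_isodatetime(None))                         # None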
metglobal/django-exchange
|
exchange/utils.py
|
https://github.com/metglobal/django-exchange/blob/2133593885e02f42a4ed2ed4be2763c4777a1245/exchange/utils.py#L5-L25
|
def import_class(class_path):
"""imports and returns given class string.
:param class_path: Class path as string
:type class_path: str
:returns: Class that has given path
:rtype: class
:Example:
>>> import_class('collections.OrderedDict').__name__
'OrderedDict'
"""
try:
from django.utils.importlib import import_module
module_name = '.'.join(class_path.split(".")[:-1])
mod = import_module(module_name)
return getattr(mod, class_path.split(".")[-1])
except Exception, detail:
raise ImportError(detail)
|
[
"def",
"import_class",
"(",
"class_path",
")",
":",
"try",
":",
"from",
"django",
".",
"utils",
".",
"importlib",
"import",
"import_module",
"module_name",
"=",
"'.'",
".",
"join",
"(",
"class_path",
".",
"split",
"(",
"\".\"",
")",
"[",
":",
"-",
"1",
"]",
")",
"mod",
"=",
"import_module",
"(",
"module_name",
")",
"return",
"getattr",
"(",
"mod",
",",
"class_path",
".",
"split",
"(",
"\".\"",
")",
"[",
"-",
"1",
"]",
")",
"except",
"Exception",
",",
"detail",
":",
"raise",
"ImportError",
"(",
"detail",
")"
] |
imports and returns given class string.
:param class_path: Class path as string
:type class_path: str
:returns: Class that has given path
:rtype: class
:Example:
>>> import_class('collections.OrderedDict').__name__
'OrderedDict'
|
[
"imports",
"and",
"returns",
"given",
"class",
"string",
"."
] |
python
|
train
| 27.142857 |
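A dependency-free sketch of the same dotted-path import technique, using the standard-library importlib rather than django.utils.importlib (an assumption made only to keep the example self-contained).

from importlib import import_module

def import_class(class_path):
    # split "pkg.module.ClassName" into a module path and an attribute name
    module_name, _, class_name = class_path.rpartition('.')
    try:
        return getattr(import_module(module_name), class_name)
    except Exception as detail:
        raise ImportError(detail)

print(import_class('collections.OrderedDict').__name__)  # 'OrderedDict'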
lrgar/scope
|
scope/scope.py
|
https://github.com/lrgar/scope/blob/f1c5815b0efd6be75ce54370d69e9b7eca854844/scope/scope.py#L232-L238
|
def set_children(self, children):
"""Set children of the span block."""
if isinstance(children, tuple):
self._children = list(children)
else:
self._children = [children]
return self
|
[
"def",
"set_children",
"(",
"self",
",",
"children",
")",
":",
"if",
"isinstance",
"(",
"children",
",",
"tuple",
")",
":",
"self",
".",
"_children",
"=",
"list",
"(",
"children",
")",
"else",
":",
"self",
".",
"_children",
"=",
"[",
"children",
"]",
"return",
"self"
] |
Set children of the span block.
|
[
"Set",
"children",
"of",
"the",
"span",
"block",
"."
] |
python
|
train
| 33 |
spyder-ide/spyder
|
spyder/plugins/console/widgets/shell.py
|
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/console/widgets/shell.py#L523-L528
|
def write_error(self, text):
"""Simulate stderr"""
self.flush()
self.write(text, flush=True, error=True)
if get_debug_level():
STDERR.write(text)
|
[
"def",
"write_error",
"(",
"self",
",",
"text",
")",
":",
"self",
".",
"flush",
"(",
")",
"self",
".",
"write",
"(",
"text",
",",
"flush",
"=",
"True",
",",
"error",
"=",
"True",
")",
"if",
"get_debug_level",
"(",
")",
":",
"STDERR",
".",
"write",
"(",
"text",
")"
] |
Simulate stderr
|
[
"Simulate",
"stderr"
] |
python
|
train
| 31.5 |
vertexproject/synapse
|
synapse/lib/editatom.py
|
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/lib/editatom.py#L55-L67
|
def _notifyDone(self):
'''
Allow any other editatoms waiting on me to complete to resume
'''
if self.notified:
return
self.doneevent.set()
for buid in self.mybldgbuids:
del self.allbldgbuids[buid]
self.notified = True
|
[
"def",
"_notifyDone",
"(",
"self",
")",
":",
"if",
"self",
".",
"notified",
":",
"return",
"self",
".",
"doneevent",
".",
"set",
"(",
")",
"for",
"buid",
"in",
"self",
".",
"mybldgbuids",
":",
"del",
"self",
".",
"allbldgbuids",
"[",
"buid",
"]",
"self",
".",
"notified",
"=",
"True"
] |
Allow any other editatoms waiting on me to complete to resume
|
[
"Allow",
"any",
"other",
"editatoms",
"waiting",
"on",
"me",
"to",
"complete",
"to",
"resume"
] |
python
|
train
| 22.153846 |
mitsei/dlkit
|
dlkit/json_/proxy/sessions.py
|
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/proxy/sessions.py#L45-L65
|
def get_proxy(self, input_):
"""Gets a proxy.
arg: input (osid.proxy.ProxyCondition): a proxy condition
return: (osid.proxy.Proxy) - a proxy
raise: NullArgument - ``input`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``input`` is not of this service
        *compliance: mandatory -- This method must be implemented.*
"""
if input_._http_request is not None:
authentication = Authentication()
authentication.set_django_user(input_._http_request.user)
else:
authentication = None
effective_agent_id = input_._effective_agent_id
# Also need to deal with effective dates and Local
return rules.Proxy(authentication=authentication,
effective_agent_id=effective_agent_id)
|
[
"def",
"get_proxy",
"(",
"self",
",",
"input_",
")",
":",
"if",
"input_",
".",
"_http_request",
"is",
"not",
"None",
":",
"authentication",
"=",
"Authentication",
"(",
")",
"authentication",
".",
"set_django_user",
"(",
"input_",
".",
"_http_request",
".",
"user",
")",
"else",
":",
"authentication",
"=",
"None",
"effective_agent_id",
"=",
"input_",
".",
"_effective_agent_id",
"# Also need to deal with effective dates and Local",
"return",
"rules",
".",
"Proxy",
"(",
"authentication",
"=",
"authentication",
",",
"effective_agent_id",
"=",
"effective_agent_id",
")"
] |
Gets a proxy.
arg: input (osid.proxy.ProxyCondition): a proxy condition
return: (osid.proxy.Proxy) - a proxy
raise: NullArgument - ``input`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``input`` is not of this service
        *compliance: mandatory -- This method must be implemented.*
|
[
"Gets",
"a",
"proxy",
"."
] |
python
|
train
| 43.52381 |
trevisanj/f311
|
f311/filetypes/filesqlitedb.py
|
https://github.com/trevisanj/f311/blob/9e502a3d1e1f74d4290a8a0bae9a34ef8d7b29f7/f311/filetypes/filesqlitedb.py#L179-L201
|
def __get_conn(self, flag_force_new=False, filename=None):
"""Returns connection to database. Tries to return existing connection, unless flag_force_new
Args:
flag_force_new:
filename:
Returns: sqlite3.Connection object
**Note** this is a private method because you can get a connection to any file, so it has to
be used in the right moment
"""
flag_open_new = flag_force_new or not self._conn_is_open()
if flag_open_new:
if filename is None:
filename = self.filename
# funny that __get_conn() calls _get_conn() but that's it
conn = self._get_conn(filename)
self._conn = conn
else:
conn = self._conn
return conn
|
[
"def",
"__get_conn",
"(",
"self",
",",
"flag_force_new",
"=",
"False",
",",
"filename",
"=",
"None",
")",
":",
"flag_open_new",
"=",
"flag_force_new",
"or",
"not",
"self",
".",
"_conn_is_open",
"(",
")",
"if",
"flag_open_new",
":",
"if",
"filename",
"is",
"None",
":",
"filename",
"=",
"self",
".",
"filename",
"# funny that __get_conn() calls _get_conn() but that's it",
"conn",
"=",
"self",
".",
"_get_conn",
"(",
"filename",
")",
"self",
".",
"_conn",
"=",
"conn",
"else",
":",
"conn",
"=",
"self",
".",
"_conn",
"return",
"conn"
] |
Returns connection to database. Tries to return existing connection, unless flag_force_new
Args:
flag_force_new:
filename:
Returns: sqlite3.Connection object
**Note** this is a private method because you can get a connection to any file, so it has to
be used in the right moment
|
[
"Returns",
"connection",
"to",
"database",
".",
"Tries",
"to",
"return",
"existing",
"connection",
"unless",
"flag_force_new"
] |
python
|
train
| 34 |
hobson/aima
|
aima/agents.py
|
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/agents.py#L310-L313
|
def percept(self, agent):
"By default, agent perceives things within a default radius."
return [self.thing_percept(thing, agent)
for thing in self.things_near(agent.location)]
|
[
"def",
"percept",
"(",
"self",
",",
"agent",
")",
":",
"return",
"[",
"self",
".",
"thing_percept",
"(",
"thing",
",",
"agent",
")",
"for",
"thing",
"in",
"self",
".",
"things_near",
"(",
"agent",
".",
"location",
")",
"]"
] |
By default, agent perceives things within a default radius.
|
[
"By",
"default",
"agent",
"perceives",
"things",
"within",
"a",
"default",
"radius",
"."
] |
python
|
valid
| 51 |
minio/minio-py
|
minio/api.py
|
https://github.com/minio/minio-py/blob/7107c84183cf5fb4deff68c0a16ab9f1c0b4c37e/minio/api.py#L1055-L1103
|
def remove_objects(self, bucket_name, objects_iter):
"""
Removes multiple objects from a bucket.
:param bucket_name: Bucket from which to remove objects
:param objects_iter: A list, tuple or iterator that provides
objects names to delete.
:return: An iterator of MultiDeleteError instances for each
object that had a delete error.
"""
is_valid_bucket_name(bucket_name)
if isinstance(objects_iter, basestring):
raise TypeError(
'objects_iter cannot be `str` or `bytes` instance. It must be '
'a list, tuple or iterator of object names'
)
# turn list like objects into an iterator.
objects_iter = itertools.chain(objects_iter)
obj_batch = []
exit_loop = False
while not exit_loop:
try:
object_name = next(objects_iter)
is_non_empty_string(object_name)
except StopIteration:
exit_loop = True
if not exit_loop:
obj_batch.append(object_name)
# if we have 1000 items in the batch, or we have to exit
# the loop, we have to make a request to delete objects.
if len(obj_batch) == 1000 or (exit_loop and len(obj_batch) > 0):
# send request and parse response
errs_result = self._process_remove_objects_batch(
bucket_name, obj_batch
)
# return the delete errors.
for err_result in errs_result:
yield err_result
# clear batch for next set of items
obj_batch = []
|
[
"def",
"remove_objects",
"(",
"self",
",",
"bucket_name",
",",
"objects_iter",
")",
":",
"is_valid_bucket_name",
"(",
"bucket_name",
")",
"if",
"isinstance",
"(",
"objects_iter",
",",
"basestring",
")",
":",
"raise",
"TypeError",
"(",
"'objects_iter cannot be `str` or `bytes` instance. It must be '",
"'a list, tuple or iterator of object names'",
")",
"# turn list like objects into an iterator.",
"objects_iter",
"=",
"itertools",
".",
"chain",
"(",
"objects_iter",
")",
"obj_batch",
"=",
"[",
"]",
"exit_loop",
"=",
"False",
"while",
"not",
"exit_loop",
":",
"try",
":",
"object_name",
"=",
"next",
"(",
"objects_iter",
")",
"is_non_empty_string",
"(",
"object_name",
")",
"except",
"StopIteration",
":",
"exit_loop",
"=",
"True",
"if",
"not",
"exit_loop",
":",
"obj_batch",
".",
"append",
"(",
"object_name",
")",
"# if we have 1000 items in the batch, or we have to exit",
"# the loop, we have to make a request to delete objects.",
"if",
"len",
"(",
"obj_batch",
")",
"==",
"1000",
"or",
"(",
"exit_loop",
"and",
"len",
"(",
"obj_batch",
")",
">",
"0",
")",
":",
"# send request and parse response",
"errs_result",
"=",
"self",
".",
"_process_remove_objects_batch",
"(",
"bucket_name",
",",
"obj_batch",
")",
"# return the delete errors.",
"for",
"err_result",
"in",
"errs_result",
":",
"yield",
"err_result",
"# clear batch for next set of items",
"obj_batch",
"=",
"[",
"]"
] |
Removes multiple objects from a bucket.
:param bucket_name: Bucket from which to remove objects
:param objects_iter: A list, tuple or iterator that provides
objects names to delete.
:return: An iterator of MultiDeleteError instances for each
object that had a delete error.
|
[
"Removes",
"multiple",
"objects",
"from",
"a",
"bucket",
"."
] |
python
|
train
| 34.22449 |
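The heart of the method above is grouping an arbitrary iterator of object names into batches of at most 1000 before each delete request. A self-contained sketch of that batching pattern, with no MinIO calls and invented object names:

import itertools

def batched(names_iter, batch_size=1000):
    # yield lists of at most batch_size items from any iterable
    it = iter(names_iter)
    while True:
        batch = list(itertools.islice(it, batch_size))
        if not batch:
            break
        yield batch

for batch in batched('object-%d' % i for i in range(2500)):
    print(len(batch))  # 1000, 1000, 500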
RediSearch/redisearch-py
|
redisearch/auto_complete.py
|
https://github.com/RediSearch/redisearch-py/blob/f65d1dd078713cbe9b83584e86655a254d0531ab/redisearch/auto_complete.py#L81-L98
|
def add_suggestions(self, *suggestions, **kwargs):
"""
Add suggestion terms to the AutoCompleter engine. Each suggestion has a score and string.
If kwargs['increment'] is true and the terms are already in the server's dictionary, we increment their scores
"""
pipe = self.redis.pipeline()
for sug in suggestions:
args = [AutoCompleter.SUGADD_COMMAND, self.key, sug.string, sug.score]
if kwargs.get('increment'):
args.append(AutoCompleter.INCR)
if sug.payload:
args.append('PAYLOAD')
args.append(sug.payload)
pipe.execute_command(*args)
return pipe.execute()[-1]
|
[
"def",
"add_suggestions",
"(",
"self",
",",
"*",
"suggestions",
",",
"*",
"*",
"kwargs",
")",
":",
"pipe",
"=",
"self",
".",
"redis",
".",
"pipeline",
"(",
")",
"for",
"sug",
"in",
"suggestions",
":",
"args",
"=",
"[",
"AutoCompleter",
".",
"SUGADD_COMMAND",
",",
"self",
".",
"key",
",",
"sug",
".",
"string",
",",
"sug",
".",
"score",
"]",
"if",
"kwargs",
".",
"get",
"(",
"'increment'",
")",
":",
"args",
".",
"append",
"(",
"AutoCompleter",
".",
"INCR",
")",
"if",
"sug",
".",
"payload",
":",
"args",
".",
"append",
"(",
"'PAYLOAD'",
")",
"args",
".",
"append",
"(",
"sug",
".",
"payload",
")",
"pipe",
".",
"execute_command",
"(",
"*",
"args",
")",
"return",
"pipe",
".",
"execute",
"(",
")",
"[",
"-",
"1",
"]"
] |
Add suggestion terms to the AutoCompleter engine. Each suggestion has a score and string.
If kwargs['increment'] is true and the terms are already in the server's dictionary, we increment their scores
|
[
"Add",
"suggestion",
"terms",
"to",
"the",
"AutoCompleter",
"engine",
".",
"Each",
"suggestion",
"has",
"a",
"score",
"and",
"string",
"."
] |
python
|
valid
| 38.888889 |
StackStorm/pybind
|
pybind/slxos/v17s_1_02/protocol/cfm/domain_name/ma_name/cfm_ma_sub_commands/__init__.py
|
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/protocol/cfm/domain_name/ma_name/cfm_ma_sub_commands/__init__.py#L127-L148
|
def _set_mip_policy(self, v, load=False):
"""
Setter method for mip_policy, mapped from YANG variable /protocol/cfm/domain_name/ma_name/cfm_ma_sub_commands/mip_policy (mip-policy-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_mip_policy is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mip_policy() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'default': {'value': 1}, u'explicit': {'value': 2}},), is_leaf=True, yang_name="mip-policy", rest_name="mip-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Set MIP policy', u'cli-full-no': None, u'callpoint': u'setDot1agMipPolicy'}}, namespace='urn:brocade.com:mgmt:brocade-dot1ag', defining_module='brocade-dot1ag', yang_type='mip-policy-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """mip_policy must be of a type compatible with mip-policy-type""",
'defined-type': "brocade-dot1ag:mip-policy-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'default': {'value': 1}, u'explicit': {'value': 2}},), is_leaf=True, yang_name="mip-policy", rest_name="mip-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Set MIP policy', u'cli-full-no': None, u'callpoint': u'setDot1agMipPolicy'}}, namespace='urn:brocade.com:mgmt:brocade-dot1ag', defining_module='brocade-dot1ag', yang_type='mip-policy-type', is_config=True)""",
})
self.__mip_policy = t
if hasattr(self, '_set'):
self._set()
|
[
"def",
"_set_mip_policy",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"RestrictedClassType",
"(",
"base_type",
"=",
"unicode",
",",
"restriction_type",
"=",
"\"dict_key\"",
",",
"restriction_arg",
"=",
"{",
"u'default'",
":",
"{",
"'value'",
":",
"1",
"}",
",",
"u'explicit'",
":",
"{",
"'value'",
":",
"2",
"}",
"}",
",",
")",
",",
"is_leaf",
"=",
"True",
",",
"yang_name",
"=",
"\"mip-policy\"",
",",
"rest_name",
"=",
"\"mip-policy\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'cli-full-command'",
":",
"None",
",",
"u'info'",
":",
"u'Set MIP policy'",
",",
"u'cli-full-no'",
":",
"None",
",",
"u'callpoint'",
":",
"u'setDot1agMipPolicy'",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-dot1ag'",
",",
"defining_module",
"=",
"'brocade-dot1ag'",
",",
"yang_type",
"=",
"'mip-policy-type'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"mip_policy must be of a type compatible with mip-policy-type\"\"\"",
",",
"'defined-type'",
":",
"\"brocade-dot1ag:mip-policy-type\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'default': {'value': 1}, u'explicit': {'value': 2}},), is_leaf=True, yang_name=\"mip-policy\", rest_name=\"mip-policy\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Set MIP policy', u'cli-full-no': None, u'callpoint': u'setDot1agMipPolicy'}}, namespace='urn:brocade.com:mgmt:brocade-dot1ag', defining_module='brocade-dot1ag', yang_type='mip-policy-type', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__mip_policy",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] |
Setter method for mip_policy, mapped from YANG variable /protocol/cfm/domain_name/ma_name/cfm_ma_sub_commands/mip_policy (mip-policy-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_mip_policy is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mip_policy() directly.
|
[
"Setter",
"method",
"for",
"mip_policy",
"mapped",
"from",
"YANG",
"variable",
"/",
"protocol",
"/",
"cfm",
"/",
"domain_name",
"/",
"ma_name",
"/",
"cfm_ma_sub_commands",
"/",
"mip_policy",
"(",
"mip",
"-",
"policy",
"-",
"type",
")",
"If",
"this",
"variable",
"is",
"read",
"-",
"only",
"(",
"config",
":",
"false",
")",
"in",
"the",
"source",
"YANG",
"file",
"then",
"_set_mip_policy",
"is",
"considered",
"as",
"a",
"private",
"method",
".",
"Backends",
"looking",
"to",
"populate",
"this",
"variable",
"should",
"do",
"so",
"via",
"calling",
"thisObj",
".",
"_set_mip_policy",
"()",
"directly",
"."
] |
python
|
train
| 98 |
has2k1/plotnine
|
plotnine/layer.py
|
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/layer.py#L229-L249
|
def generate_data(self, plot_data):
"""
Generate data to be used by this layer
Parameters
----------
plot_data : dataframe
ggplot object data
"""
        # Each layer that does not have data gets a copy
        # of the ggplot.data. If the layer has data it is replaced
        # by a copy so that we do not alter the user's data
if self.data is None:
self.data = plot_data.copy()
elif hasattr(self.data, '__call__'):
self.data = self.data(plot_data)
if not isinstance(self.data, pd.DataFrame):
raise PlotnineError(
"Data function must return a dataframe")
else:
self.data = self.data.copy()
|
[
"def",
"generate_data",
"(",
"self",
",",
"plot_data",
")",
":",
"# Each layer that does not have data gets a copy of",
"# of the ggplot.data. If the has data it is replaced",
"# by copy so that we do not alter the users data",
"if",
"self",
".",
"data",
"is",
"None",
":",
"self",
".",
"data",
"=",
"plot_data",
".",
"copy",
"(",
")",
"elif",
"hasattr",
"(",
"self",
".",
"data",
",",
"'__call__'",
")",
":",
"self",
".",
"data",
"=",
"self",
".",
"data",
"(",
"plot_data",
")",
"if",
"not",
"isinstance",
"(",
"self",
".",
"data",
",",
"pd",
".",
"DataFrame",
")",
":",
"raise",
"PlotnineError",
"(",
"\"Data function must return a dataframe\"",
")",
"else",
":",
"self",
".",
"data",
"=",
"self",
".",
"data",
".",
"copy",
"(",
")"
] |
Generate data to be used by this layer
Parameters
----------
plot_data : dataframe
ggplot object data
|
[
"Generate",
"data",
"to",
"be",
"used",
"by",
"this",
"layer"
] |
python
|
train
| 34.952381 |
AshleySetter/optoanalysis
|
optoanalysis/optoanalysis/optoanalysis.py
|
https://github.com/AshleySetter/optoanalysis/blob/9d390acc834d70024d47b574aea14189a5a5714e/optoanalysis/optoanalysis/optoanalysis.py#L2736-L2760
|
def calc_fft_with_PyCUDA(Signal):
"""
Calculates the FFT of the passed signal by using
    the scikit-cuda library which relies on PyCUDA
Parameters
----------
Signal : ndarray
Signal to be transformed into Fourier space
Returns
-------
Signalfft : ndarray
Array containing the signal's FFT
"""
print("starting fft")
Signal = Signal.astype(_np.float32)
Signal_gpu = _gpuarray.to_gpu(Signal)
Signalfft_gpu = _gpuarray.empty(len(Signal)//2+1,_np.complex64)
plan = _Plan(Signal.shape,_np.float32,_np.complex64)
_fft(Signal_gpu, Signalfft_gpu, plan)
Signalfft = Signalfft_gpu.get() #only 2N+1 long
Signalfft = _np.hstack((Signalfft,_np.conj(_np.flipud(Signalfft[1:len(Signal)//2]))))
print("fft done")
return Signalfft
|
[
"def",
"calc_fft_with_PyCUDA",
"(",
"Signal",
")",
":",
"print",
"(",
"\"starting fft\"",
")",
"Signal",
"=",
"Signal",
".",
"astype",
"(",
"_np",
".",
"float32",
")",
"Signal_gpu",
"=",
"_gpuarray",
".",
"to_gpu",
"(",
"Signal",
")",
"Signalfft_gpu",
"=",
"_gpuarray",
".",
"empty",
"(",
"len",
"(",
"Signal",
")",
"//",
"2",
"+",
"1",
",",
"_np",
".",
"complex64",
")",
"plan",
"=",
"_Plan",
"(",
"Signal",
".",
"shape",
",",
"_np",
".",
"float32",
",",
"_np",
".",
"complex64",
")",
"_fft",
"(",
"Signal_gpu",
",",
"Signalfft_gpu",
",",
"plan",
")",
"Signalfft",
"=",
"Signalfft_gpu",
".",
"get",
"(",
")",
"#only 2N+1 long",
"Signalfft",
"=",
"_np",
".",
"hstack",
"(",
"(",
"Signalfft",
",",
"_np",
".",
"conj",
"(",
"_np",
".",
"flipud",
"(",
"Signalfft",
"[",
"1",
":",
"len",
"(",
"Signal",
")",
"//",
"2",
"]",
")",
")",
")",
")",
"print",
"(",
"\"fft done\"",
")",
"return",
"Signalfft"
] |
Calculates the FFT of the passed signal by using
    the scikit-cuda library which relies on PyCUDA
Parameters
----------
Signal : ndarray
Signal to be transformed into Fourier space
Returns
-------
Signalfft : ndarray
Array containing the signal's FFT
|
[
"Calculates",
"the",
"FFT",
"of",
"the",
"passed",
"signal",
"by",
"using",
"the",
"scikit",
"-",
"cuda",
"libary",
"which",
"relies",
"on",
"PyCUDA"
] |
python
|
train
| 31.32 |
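The hstack line above rebuilds a full complex spectrum from the half-spectrum that a real-to-complex transform returns. A NumPy-only sketch of that reconstruction (no GPU required), valid for even-length real input:

import numpy as np

signal = np.random.rand(8).astype(np.float32)   # even length, like the GPU path assumes
half = np.fft.rfft(signal)                      # N//2 + 1 bins
full = np.hstack((half, np.conj(np.flipud(half[1:len(signal) // 2]))))
print(np.allclose(full, np.fft.fft(signal)))    # True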
geopy/geopy
|
geopy/geocoders/here.py
|
https://github.com/geopy/geopy/blob/02c838d965e76497f3c3d61f53808c86b5c58224/geopy/geocoders/here.py#L96-L202
|
def geocode(
self,
query,
bbox=None,
mapview=None,
exactly_one=True,
maxresults=None,
pageinformation=None,
language=None,
additional_data=False,
timeout=DEFAULT_SENTINEL
):
"""
Return a location point by address.
This implementation supports only a subset of all available parameters.
A list of all parameters of the pure REST API is available here:
https://developer.here.com/documentation/geocoder/topics/resource-geocode.html
:param str query: The address or query you wish to geocode.
For a structured query, provide a dictionary whose keys
are one of: `city`, `county`, `district`, `country`, `state`,
`street`, `housenumber`, or `postalcode`.
:param bbox: A type of spatial filter, limits the search for any other attributes
in the request. Specified by two coordinate (lat/lon)
pairs -- corners of the box. `The bbox search is currently similar
to mapview but it is not extended` (cited from the REST API docs).
Relevant global results are also returned.
Example: ``[Point(22, 180), Point(-22, -180)]``.
:type bbox: list or tuple of 2 items of :class:`geopy.point.Point` or
``(latitude, longitude)`` or ``"%(latitude)s, %(longitude)s"``.
:param mapview: The app's viewport, given as two coordinate pairs, specified
by two lat/lon pairs -- corners of the bounding box,
respectively. Matches from within the set map view plus an extended area
are ranked highest. Relevant global results are also returned.
Example: ``[Point(22, 180), Point(-22, -180)]``.
:type mapview: list or tuple of 2 items of :class:`geopy.point.Point` or
``(latitude, longitude)`` or ``"%(latitude)s, %(longitude)s"``.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int maxresults: Defines the maximum number of items in the
response structure. If not provided and there are multiple results
the HERE API will return 10 results by default. This will be reset
to one if ``exactly_one`` is True.
:param int pageinformation: A key which identifies the page to be returned
when the response is separated into multiple pages. Only useful when
``maxresults`` is also provided.
:param str language: Affects the language of the response,
must be a RFC 4647 language code, e.g. 'en-US'.
:param str additional_data: A string with key-value pairs as described on
https://developer.here.com/documentation/geocoder/topics/resource-params-additional.html.
These will be added as one query parameter to the URL.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
``exactly_one=False``.
"""
if isinstance(query, dict):
params = {
key: val
for key, val
in query.items()
if key in self.structured_query_params
}
params['app_id'] = self.app_id
params['app_code'] = self.app_code
else:
params = {
'searchtext': self.format_string % query,
'app_id': self.app_id,
'app_code': self.app_code
}
if bbox:
params['bbox'] = self._format_bounding_box(
bbox, "%(lat2)s,%(lon1)s;%(lat1)s,%(lon2)s")
if mapview:
params['mapview'] = self._format_bounding_box(
mapview, "%(lat2)s,%(lon1)s;%(lat1)s,%(lon2)s")
if pageinformation:
params['pageinformation'] = pageinformation
if maxresults:
params['maxresults'] = maxresults
if exactly_one:
params['maxresults'] = 1
if language:
params['language'] = language
if additional_data:
params['additionaldata'] = additional_data
url = "?".join((self.api, urlencode(params)))
logger.debug("%s.geocode: %s", self.__class__.__name__, url)
return self._parse_json(
self._call_geocoder(url, timeout=timeout),
exactly_one
)
|
[
"def",
"geocode",
"(",
"self",
",",
"query",
",",
"bbox",
"=",
"None",
",",
"mapview",
"=",
"None",
",",
"exactly_one",
"=",
"True",
",",
"maxresults",
"=",
"None",
",",
"pageinformation",
"=",
"None",
",",
"language",
"=",
"None",
",",
"additional_data",
"=",
"False",
",",
"timeout",
"=",
"DEFAULT_SENTINEL",
")",
":",
"if",
"isinstance",
"(",
"query",
",",
"dict",
")",
":",
"params",
"=",
"{",
"key",
":",
"val",
"for",
"key",
",",
"val",
"in",
"query",
".",
"items",
"(",
")",
"if",
"key",
"in",
"self",
".",
"structured_query_params",
"}",
"params",
"[",
"'app_id'",
"]",
"=",
"self",
".",
"app_id",
"params",
"[",
"'app_code'",
"]",
"=",
"self",
".",
"app_code",
"else",
":",
"params",
"=",
"{",
"'searchtext'",
":",
"self",
".",
"format_string",
"%",
"query",
",",
"'app_id'",
":",
"self",
".",
"app_id",
",",
"'app_code'",
":",
"self",
".",
"app_code",
"}",
"if",
"bbox",
":",
"params",
"[",
"'bbox'",
"]",
"=",
"self",
".",
"_format_bounding_box",
"(",
"bbox",
",",
"\"%(lat2)s,%(lon1)s;%(lat1)s,%(lon2)s\"",
")",
"if",
"mapview",
":",
"params",
"[",
"'mapview'",
"]",
"=",
"self",
".",
"_format_bounding_box",
"(",
"mapview",
",",
"\"%(lat2)s,%(lon1)s;%(lat1)s,%(lon2)s\"",
")",
"if",
"pageinformation",
":",
"params",
"[",
"'pageinformation'",
"]",
"=",
"pageinformation",
"if",
"maxresults",
":",
"params",
"[",
"'maxresults'",
"]",
"=",
"maxresults",
"if",
"exactly_one",
":",
"params",
"[",
"'maxresults'",
"]",
"=",
"1",
"if",
"language",
":",
"params",
"[",
"'language'",
"]",
"=",
"language",
"if",
"additional_data",
":",
"params",
"[",
"'additionaldata'",
"]",
"=",
"additional_data",
"url",
"=",
"\"?\"",
".",
"join",
"(",
"(",
"self",
".",
"api",
",",
"urlencode",
"(",
"params",
")",
")",
")",
"logger",
".",
"debug",
"(",
"\"%s.geocode: %s\"",
",",
"self",
".",
"__class__",
".",
"__name__",
",",
"url",
")",
"return",
"self",
".",
"_parse_json",
"(",
"self",
".",
"_call_geocoder",
"(",
"url",
",",
"timeout",
"=",
"timeout",
")",
",",
"exactly_one",
")"
] |
Return a location point by address.
This implementation supports only a subset of all available parameters.
A list of all parameters of the pure REST API is available here:
https://developer.here.com/documentation/geocoder/topics/resource-geocode.html
:param str query: The address or query you wish to geocode.
For a structured query, provide a dictionary whose keys
are one of: `city`, `county`, `district`, `country`, `state`,
`street`, `housenumber`, or `postalcode`.
:param bbox: A type of spatial filter, limits the search for any other attributes
in the request. Specified by two coordinate (lat/lon)
pairs -- corners of the box. `The bbox search is currently similar
to mapview but it is not extended` (cited from the REST API docs).
Relevant global results are also returned.
Example: ``[Point(22, 180), Point(-22, -180)]``.
:type bbox: list or tuple of 2 items of :class:`geopy.point.Point` or
``(latitude, longitude)`` or ``"%(latitude)s, %(longitude)s"``.
:param mapview: The app's viewport, given as two coordinate pairs, specified
by two lat/lon pairs -- corners of the bounding box,
respectively. Matches from within the set map view plus an extended area
are ranked highest. Relevant global results are also returned.
Example: ``[Point(22, 180), Point(-22, -180)]``.
:type mapview: list or tuple of 2 items of :class:`geopy.point.Point` or
``(latitude, longitude)`` or ``"%(latitude)s, %(longitude)s"``.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int maxresults: Defines the maximum number of items in the
response structure. If not provided and there are multiple results
the HERE API will return 10 results by default. This will be reset
to one if ``exactly_one`` is True.
:param int pageinformation: A key which identifies the page to be returned
when the response is separated into multiple pages. Only useful when
``maxresults`` is also provided.
:param str language: Affects the language of the response,
must be a RFC 4647 language code, e.g. 'en-US'.
:param str additional_data: A string with key-value pairs as described on
https://developer.here.com/documentation/geocoder/topics/resource-params-additional.html.
These will be added as one query parameter to the URL.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
``exactly_one=False``.
|
[
"Return",
"a",
"location",
"point",
"by",
"address",
"."
] |
python
|
train
| 43.429907 |
hollenstein/maspy
|
maspy/core.py
|
https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/core.py#L1071-L1082
|
def _fromJSON(cls, jsonobject):
"""Generates a new instance of :class:`maspy.core.MzmlScan` from a
decoded JSON object (as generated by
:func:`maspy.core.MzmlScan._reprJSON()`).
:param jsonobject: decoded JSON object
:returns: a new instance of :class:`MzmlScan`
"""
scanWindowList = _mzmlListAttribToTuple(jsonobject[0])
params = [tuple(param) for param in jsonobject[1]]
return cls(scanWindowList, params)
|
[
"def",
"_fromJSON",
"(",
"cls",
",",
"jsonobject",
")",
":",
"scanWindowList",
"=",
"_mzmlListAttribToTuple",
"(",
"jsonobject",
"[",
"0",
"]",
")",
"params",
"=",
"[",
"tuple",
"(",
"param",
")",
"for",
"param",
"in",
"jsonobject",
"[",
"1",
"]",
"]",
"return",
"cls",
"(",
"scanWindowList",
",",
"params",
")"
] |
Generates a new instance of :class:`maspy.core.MzmlScan` from a
decoded JSON object (as generated by
:func:`maspy.core.MzmlScan._reprJSON()`).
:param jsonobject: decoded JSON object
:returns: a new instance of :class:`MzmlScan`
|
[
"Generates",
"a",
"new",
"instance",
"of",
":",
"class",
":",
"maspy",
".",
"core",
".",
"MzmlScan",
"from",
"a",
"decoded",
"JSON",
"object",
"(",
"as",
"generated",
"by",
":",
"func",
":",
"maspy",
".",
"core",
".",
"MzmlScan",
".",
"_reprJSON",
"()",
")",
"."
] |
python
|
train
| 39.166667 |
ihmeuw/vivarium
|
src/vivarium/framework/randomness.py
|
https://github.com/ihmeuw/vivarium/blob/c5f5d50f775c8bf337d3aae1ff7c57c025a8e258/src/vivarium/framework/randomness.py#L157-L159
|
def clip_to_seconds(m: Union[int, pd.Series]) -> Union[int, pd.Series]:
"""Clips UTC datetime in nanoseconds to seconds."""
return m // pd.Timedelta(1, unit='s').value
|
[
"def",
"clip_to_seconds",
"(",
"m",
":",
"Union",
"[",
"int",
",",
"pd",
".",
"Series",
"]",
")",
"->",
"Union",
"[",
"int",
",",
"pd",
".",
"Series",
"]",
":",
"return",
"m",
"//",
"pd",
".",
"Timedelta",
"(",
"1",
",",
"unit",
"=",
"'s'",
")",
".",
"value"
] |
Clips UTC datetime in nanoseconds to seconds.
|
[
"Clips",
"UTC",
"datetime",
"in",
"nanoseconds",
"to",
"seconds",
"."
] |
python
|
train
| 60.333333 |
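A small sketch of what the integer division above does: pd.Timedelta(1, unit='s').value is 1_000_000_000, so a nanosecond timestamp is clipped to whole seconds. The timestamp is illustrative.

import pandas as pd

ns = pd.Timestamp('2019-01-01 00:00:01.750').value   # nanoseconds since the epoch
seconds = ns // pd.Timedelta(1, unit='s').value
print(seconds)  # 1546300801 -- the 0.750 s fraction is clipped away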
proycon/pynlpl
|
pynlpl/formats/folia.py
|
https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L2419-L2421
|
def context(self, size, placeholder=None, scope=None):
"""Returns this word in context, {size} words to the left, the current word, and {size} words to the right"""
return self.leftcontext(size, placeholder,scope) + [self] + self.rightcontext(size, placeholder,scope)
|
[
"def",
"context",
"(",
"self",
",",
"size",
",",
"placeholder",
"=",
"None",
",",
"scope",
"=",
"None",
")",
":",
"return",
"self",
".",
"leftcontext",
"(",
"size",
",",
"placeholder",
",",
"scope",
")",
"+",
"[",
"self",
"]",
"+",
"self",
".",
"rightcontext",
"(",
"size",
",",
"placeholder",
",",
"scope",
")"
] |
Returns this word in context, {size} words to the left, the current word, and {size} words to the right
|
[
"Returns",
"this",
"word",
"in",
"context",
"{",
"size",
"}",
"words",
"to",
"the",
"left",
"the",
"current",
"word",
"and",
"{",
"size",
"}",
"words",
"to",
"the",
"right"
] |
python
|
train
| 93.666667 |
PlaidWeb/Publ
|
publ/markdown.py
|
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/markdown.py#L144-L164
|
def _render_image(self, spec, container_args, alt_text=None):
""" Render an image specification into an <img> tag """
try:
path, image_args, title = image.parse_image_spec(spec)
except Exception as err: # pylint: disable=broad-except
logger.exception("Got error on spec %s: %s", spec, err)
return ('<span class="error">Couldn\'t parse image spec: ' +
'<code>{}</code> {}</span>'.format(flask.escape(spec),
flask.escape(str(err))))
composite_args = {**container_args, **image_args}
try:
img = image.get_image(path, self._search_path)
except Exception as err: # pylint: disable=broad-except
logger.exception("Got error on image %s: %s", path, err)
return ('<span class="error">Error loading image {}: {}</span>'.format(
flask.escape(spec), flask.escape(str(err))))
return img.get_img_tag(title, alt_text, **composite_args)
|
[
"def",
"_render_image",
"(",
"self",
",",
"spec",
",",
"container_args",
",",
"alt_text",
"=",
"None",
")",
":",
"try",
":",
"path",
",",
"image_args",
",",
"title",
"=",
"image",
".",
"parse_image_spec",
"(",
"spec",
")",
"except",
"Exception",
"as",
"err",
":",
"# pylint: disable=broad-except",
"logger",
".",
"exception",
"(",
"\"Got error on spec %s: %s\"",
",",
"spec",
",",
"err",
")",
"return",
"(",
"'<span class=\"error\">Couldn\\'t parse image spec: '",
"+",
"'<code>{}</code> {}</span>'",
".",
"format",
"(",
"flask",
".",
"escape",
"(",
"spec",
")",
",",
"flask",
".",
"escape",
"(",
"str",
"(",
"err",
")",
")",
")",
")",
"composite_args",
"=",
"{",
"*",
"*",
"container_args",
",",
"*",
"*",
"image_args",
"}",
"try",
":",
"img",
"=",
"image",
".",
"get_image",
"(",
"path",
",",
"self",
".",
"_search_path",
")",
"except",
"Exception",
"as",
"err",
":",
"# pylint: disable=broad-except",
"logger",
".",
"exception",
"(",
"\"Got error on image %s: %s\"",
",",
"path",
",",
"err",
")",
"return",
"(",
"'<span class=\"error\">Error loading image {}: {}</span>'",
".",
"format",
"(",
"flask",
".",
"escape",
"(",
"spec",
")",
",",
"flask",
".",
"escape",
"(",
"str",
"(",
"err",
")",
")",
")",
")",
"return",
"img",
".",
"get_img_tag",
"(",
"title",
",",
"alt_text",
",",
"*",
"*",
"composite_args",
")"
] |
Render an image specification into an <img> tag
|
[
"Render",
"an",
"image",
"specification",
"into",
"an",
"<img",
">",
"tag"
] |
python
|
train
| 48.809524 |
soxofaan/dahuffman
|
dahuffman/huffmancodec.py
|
https://github.com/soxofaan/dahuffman/blob/e6e1cf6ab3f6cb29f21e642fbcdd63084e5d63c2/dahuffman/huffmancodec.py#L42-L49
|
def _guess_concat(data):
"""
Guess concat function from given data
"""
return {
type(u''): u''.join,
type(b''): concat_bytes,
}.get(type(data), list)
|
[
"def",
"_guess_concat",
"(",
"data",
")",
":",
"return",
"{",
"type",
"(",
"u''",
")",
":",
"u''",
".",
"join",
",",
"type",
"(",
"b''",
")",
":",
"concat_bytes",
",",
"}",
".",
"get",
"(",
"type",
"(",
"data",
")",
",",
"list",
")"
] |
Guess concat function from given data
|
[
"Guess",
"concat",
"function",
"from",
"given",
"data"
] |
python
|
train
| 22.25 |
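A sketch of the dispatch-on-sample-type idea: text joins back to text, bytes concatenate to bytes, and anything else is collected into a list. The bytes join stands in for the record's concat_bytes helper (an assumption to keep the example self-contained).

def guess_concat(data):
    # pick a "concatenate decoded symbols" function based on a sample of the data
    return {
        str: ''.join,
        bytes: b''.join,
    }.get(type(data), list)

print(guess_concat('hello')(['a', 'b']))     # 'ab'
print(guess_concat(b'hello')([b'a', b'b']))  # b'ab'
print(guess_concat(42)(['a', 'b']))          # ['a', 'b']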
xapple/plumbing
|
plumbing/runner.py
|
https://github.com/xapple/plumbing/blob/4a7706c7722f5996d0ca366f191aff9ac145880a/plumbing/runner.py#L108-L113
|
def logs(self):
"""Find the log directory and return all the logs sorted."""
if not self.parent.loaded: self.parent.load()
logs = self.parent.p.logs_dir.flat_directories
logs.sort(key=lambda x: x.mod_time)
return logs
|
[
"def",
"logs",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"parent",
".",
"loaded",
":",
"self",
".",
"parent",
".",
"load",
"(",
")",
"logs",
"=",
"self",
".",
"parent",
".",
"p",
".",
"logs_dir",
".",
"flat_directories",
"logs",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
".",
"mod_time",
")",
"return",
"logs"
] |
Find the log directory and return all the logs sorted.
|
[
"Find",
"the",
"log",
"directory",
"and",
"return",
"all",
"the",
"logs",
"sorted",
"."
] |
python
|
train
| 42 |
mitsei/dlkit
|
dlkit/json_/grading/searches.py
|
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/grading/searches.py#L97-L108
|
def get_grade_systems(self):
"""Gets the grade system list resulting from the search.
return: (osid.grading.GradeSystemList) - the grade system list
raise: IllegalState - list already retrieved
*compliance: mandatory -- This method must be implemented.*
"""
if self.retrieved:
raise errors.IllegalState('List has already been retrieved.')
self.retrieved = True
return objects.GradeSystemList(self._results, runtime=self._runtime)
|
[
"def",
"get_grade_systems",
"(",
"self",
")",
":",
"if",
"self",
".",
"retrieved",
":",
"raise",
"errors",
".",
"IllegalState",
"(",
"'List has already been retrieved.'",
")",
"self",
".",
"retrieved",
"=",
"True",
"return",
"objects",
".",
"GradeSystemList",
"(",
"self",
".",
"_results",
",",
"runtime",
"=",
"self",
".",
"_runtime",
")"
] |
Gets the grade system list resulting from the search.
return: (osid.grading.GradeSystemList) - the grade system list
raise: IllegalState - list already retrieved
*compliance: mandatory -- This method must be implemented.*
|
[
"Gets",
"the",
"grade",
"system",
"list",
"resulting",
"from",
"the",
"search",
"."
] |
python
|
train
| 41.416667 |
samstav/requests-chef
|
requests_chef/mixlib_auth.py
|
https://github.com/samstav/requests-chef/blob/a0bf013b925abd0cf76eeaf6300cf32659632773/requests_chef/mixlib_auth.py#L127-L137
|
def canonical_request(self, method, path, content, timestamp):
"""Return the canonical request string."""
request = collections.OrderedDict([
('Method', method.upper()),
('Hashed Path', path),
('X-Ops-Content-Hash', content),
('X-Ops-Timestamp', timestamp),
('X-Ops-UserId', self.user_id),
])
return '\n'.join(['%s:%s' % (key, value)
for key, value in request.items()])
|
[
"def",
"canonical_request",
"(",
"self",
",",
"method",
",",
"path",
",",
"content",
",",
"timestamp",
")",
":",
"request",
"=",
"collections",
".",
"OrderedDict",
"(",
"[",
"(",
"'Method'",
",",
"method",
".",
"upper",
"(",
")",
")",
",",
"(",
"'Hashed Path'",
",",
"path",
")",
",",
"(",
"'X-Ops-Content-Hash'",
",",
"content",
")",
",",
"(",
"'X-Ops-Timestamp'",
",",
"timestamp",
")",
",",
"(",
"'X-Ops-UserId'",
",",
"self",
".",
"user_id",
")",
",",
"]",
")",
"return",
"'\\n'",
".",
"join",
"(",
"[",
"'%s:%s'",
"%",
"(",
"key",
",",
"value",
")",
"for",
"key",
",",
"value",
"in",
"request",
".",
"items",
"(",
")",
"]",
")"
] |
Return the canonical request string.
|
[
"Return",
"the",
"canonical",
"request",
"string",
"."
] |
python
|
train
| 43.363636 |
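A self-contained sketch of assembling the newline-separated Key:Value canonical string that the signer later hashes; the header values below are placeholders, not real Chef credentials.

import collections

def canonical_request(method, hashed_path, content_hash, timestamp, user_id):
    request = collections.OrderedDict([
        ('Method', method.upper()),
        ('Hashed Path', hashed_path),
        ('X-Ops-Content-Hash', content_hash),
        ('X-Ops-Timestamp', timestamp),
        ('X-Ops-UserId', user_id),
    ])
    return '\n'.join('%s:%s' % (key, value) for key, value in request.items())

print(canonical_request('get', 'HASHED/PATH', 'CONTENT-HASH', '2019-01-01T00:00:00Z', 'alice'))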
MartinThoma/hwrt
|
hwrt/handwritten_data.py
|
https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/handwritten_data.py#L221-L233
|
def feature_extraction(self, algorithms):
"""Get a list of features.
Every algorithm has to return the features as a list."""
assert type(algorithms) is list
features = []
for algorithm in algorithms:
new_features = algorithm(self)
assert len(new_features) == algorithm.get_dimension(), \
"Expected %i features from algorithm %s, got %i features" % \
(algorithm.get_dimension(), str(algorithm), len(new_features))
features += new_features
return features
|
[
"def",
"feature_extraction",
"(",
"self",
",",
"algorithms",
")",
":",
"assert",
"type",
"(",
"algorithms",
")",
"is",
"list",
"features",
"=",
"[",
"]",
"for",
"algorithm",
"in",
"algorithms",
":",
"new_features",
"=",
"algorithm",
"(",
"self",
")",
"assert",
"len",
"(",
"new_features",
")",
"==",
"algorithm",
".",
"get_dimension",
"(",
")",
",",
"\"Expected %i features from algorithm %s, got %i features\"",
"%",
"(",
"algorithm",
".",
"get_dimension",
"(",
")",
",",
"str",
"(",
"algorithm",
")",
",",
"len",
"(",
"new_features",
")",
")",
"features",
"+=",
"new_features",
"return",
"features"
] |
Get a list of features.
Every algorithm has to return the features as a list.
|
[
"Get",
"a",
"list",
"of",
"features",
"."
] |
python
|
train
| 43 |
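A toy sketch of the composition pattern above: each feature algorithm is a callable that also declares its output dimension, and the extractor verifies the declared length. The two feature classes are invented for illustration.

class MeanFeature:
    def get_dimension(self):
        return 1
    def __call__(self, values):
        return [sum(values) / len(values)]

class MinMaxFeature:
    def get_dimension(self):
        return 2
    def __call__(self, values):
        return [min(values), max(values)]

def feature_extraction(values, algorithms):
    features = []
    for algorithm in algorithms:
        new_features = algorithm(values)
        assert len(new_features) == algorithm.get_dimension()
        features += new_features
    return features

print(feature_extraction([1.0, 2.0, 3.0], [MeanFeature(), MinMaxFeature()]))  # [2.0, 1.0, 3.0]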
coursera/courseraoauth2client
|
courseraoauth2client/oauth2.py
|
https://github.com/coursera/courseraoauth2client/blob/4edd991defe26bfc768ab28a30368cace40baf44/courseraoauth2client/oauth2.py#L417-L434
|
def _exchange_refresh_tokens(self):
'Exchanges a refresh token for an access token'
if self.token_cache is not None and 'refresh' in self.token_cache:
# Attempt to use the refresh token to get a new access token.
refresh_form = {
'grant_type': 'refresh_token',
'refresh_token': self.token_cache['refresh'],
'client_id': self.client_id,
'client_secret': self.client_secret,
}
try:
tokens = self._request_tokens_from_token_endpoint(refresh_form)
tokens['refresh'] = self.token_cache['refresh']
return tokens
except OAuth2Exception:
logging.exception(
'Encountered an exception during refresh token flow.')
return None
|
[
"def",
"_exchange_refresh_tokens",
"(",
"self",
")",
":",
"if",
"self",
".",
"token_cache",
"is",
"not",
"None",
"and",
"'refresh'",
"in",
"self",
".",
"token_cache",
":",
"# Attempt to use the refresh token to get a new access token.",
"refresh_form",
"=",
"{",
"'grant_type'",
":",
"'refresh_token'",
",",
"'refresh_token'",
":",
"self",
".",
"token_cache",
"[",
"'refresh'",
"]",
",",
"'client_id'",
":",
"self",
".",
"client_id",
",",
"'client_secret'",
":",
"self",
".",
"client_secret",
",",
"}",
"try",
":",
"tokens",
"=",
"self",
".",
"_request_tokens_from_token_endpoint",
"(",
"refresh_form",
")",
"tokens",
"[",
"'refresh'",
"]",
"=",
"self",
".",
"token_cache",
"[",
"'refresh'",
"]",
"return",
"tokens",
"except",
"OAuth2Exception",
":",
"logging",
".",
"exception",
"(",
"'Encountered an exception during refresh token flow.'",
")",
"return",
"None"
] |
Exchanges a refresh token for an access token
|
[
"Exchanges",
"a",
"refresh",
"token",
"for",
"an",
"access",
"token"
] |
python
|
train
| 46.111111 |
h2oai/h2o-3
|
h2o-docs/src/product/sphinxext/apigen.py
|
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-docs/src/product/sphinxext/apigen.py#L154-L159
|
def _path2uri(self, dirpath):
''' Convert directory path to uri '''
relpath = dirpath.replace(self.root_path, self.package_name)
if relpath.startswith(os.path.sep):
relpath = relpath[1:]
return relpath.replace(os.path.sep, '.')
|
[
"def",
"_path2uri",
"(",
"self",
",",
"dirpath",
")",
":",
"relpath",
"=",
"dirpath",
".",
"replace",
"(",
"self",
".",
"root_path",
",",
"self",
".",
"package_name",
")",
"if",
"relpath",
".",
"startswith",
"(",
"os",
".",
"path",
".",
"sep",
")",
":",
"relpath",
"=",
"relpath",
"[",
"1",
":",
"]",
"return",
"relpath",
".",
"replace",
"(",
"os",
".",
"path",
".",
"sep",
",",
"'.'",
")"
] |
Convert directory path to uri
|
[
"Convert",
"directory",
"path",
"to",
"uri"
] |
python
|
test
| 44.333333 |
tensorflow/hub
|
tensorflow_hub/native_module.py
|
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/native_module.py#L758-L766
|
def check_collections_are_supported(saved_model_handler, supported):
"""Checks that SavedModelHandler only uses supported collections."""
for meta_graph in saved_model_handler.meta_graphs:
used_collection_keys = set(meta_graph.collection_def.keys())
unsupported = used_collection_keys - supported
if unsupported:
raise ValueError("Unsupported collections in graph: %s\n"
"Use hub.create_module_spec(..., drop_collections=[...])"
" as appropriate." % list(unsupported))
|
[
"def",
"check_collections_are_supported",
"(",
"saved_model_handler",
",",
"supported",
")",
":",
"for",
"meta_graph",
"in",
"saved_model_handler",
".",
"meta_graphs",
":",
"used_collection_keys",
"=",
"set",
"(",
"meta_graph",
".",
"collection_def",
".",
"keys",
"(",
")",
")",
"unsupported",
"=",
"used_collection_keys",
"-",
"supported",
"if",
"unsupported",
":",
"raise",
"ValueError",
"(",
"\"Unsupported collections in graph: %s\\n\"",
"\"Use hub.create_module_spec(..., drop_collections=[...])\"",
"\" as appropriate.\"",
"%",
"list",
"(",
"unsupported",
")",
")"
] |
Checks that SavedModelHandler only uses supported collections.
|
[
"Checks",
"that",
"SavedModelHandler",
"only",
"uses",
"supported",
"collections",
"."
] |
python
|
train
| 58.666667 |
quantopian/zipline
|
zipline/algorithm.py
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L1077-L1105
|
def symbols(self, *args, **kwargs):
"""Lookup multuple Equities as a list.
Parameters
----------
*args : iterable[str]
The ticker symbols to lookup.
country_code : str or None, optional
A country to limit symbol searches to.
Returns
-------
equities : list[Equity]
The equities that held the given ticker symbols on the current
symbol lookup date.
Raises
------
SymbolNotFound
Raised when one of the symbols was not held on the current
lookup date.
See Also
--------
:func:`zipline.api.set_symbol_lookup_date`
"""
return [self.symbol(identifier, **kwargs) for identifier in args]
|
[
"def",
"symbols",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"[",
"self",
".",
"symbol",
"(",
"identifier",
",",
"*",
"*",
"kwargs",
")",
"for",
"identifier",
"in",
"args",
"]"
] |
Lookup multuple Equities as a list.
Parameters
----------
*args : iterable[str]
The ticker symbols to lookup.
country_code : str or None, optional
A country to limit symbol searches to.
Returns
-------
equities : list[Equity]
The equities that held the given ticker symbols on the current
symbol lookup date.
Raises
------
SymbolNotFound
Raised when one of the symbols was not held on the current
lookup date.
See Also
--------
:func:`zipline.api.set_symbol_lookup_date`
|
[
"Lookup",
"multuple",
"Equities",
"as",
"a",
"list",
"."
] |
python
|
train
| 26.103448 |
mikedh/trimesh
|
trimesh/path/traversal.py
|
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/path/traversal.py#L145-L185
|
def closed_paths(entities, vertices):
"""
Paths are lists of entity indices.
We first generate vertex paths using graph cycle algorithms,
and then convert them to entity paths.
This will also change the ordering of entity.points in place
so a path may be traversed without having to reverse the entity.
Parameters
-------------
entities : (n,) entity objects
Entity objects
vertices : (m, dimension) float
Vertex points in space
Returns
-------------
entity_paths : sequence of (n,) int
Ordered traversals of entities
"""
# get a networkx graph of entities
graph, closed = vertex_graph(entities)
# add entities that are closed as single- entity paths
entity_paths = np.reshape(closed, (-1, 1)).tolist()
# look for cycles in the graph, or closed loops
vertex_paths = np.array(nx.cycles.cycle_basis(graph))
# loop through every vertex cycle
for vertex_path in vertex_paths:
# a path has no length if it has fewer than 2 vertices
if len(vertex_path) < 2:
continue
# convert vertex indices to entity indices
entity_paths.append(
vertex_to_entity_path(vertex_path,
graph,
entities,
vertices))
entity_paths = np.array(entity_paths)
return entity_paths
|
[
"def",
"closed_paths",
"(",
"entities",
",",
"vertices",
")",
":",
"# get a networkx graph of entities",
"graph",
",",
"closed",
"=",
"vertex_graph",
"(",
"entities",
")",
"# add entities that are closed as single- entity paths",
"entity_paths",
"=",
"np",
".",
"reshape",
"(",
"closed",
",",
"(",
"-",
"1",
",",
"1",
")",
")",
".",
"tolist",
"(",
")",
"# look for cycles in the graph, or closed loops",
"vertex_paths",
"=",
"np",
".",
"array",
"(",
"nx",
".",
"cycles",
".",
"cycle_basis",
"(",
"graph",
")",
")",
"# loop through every vertex cycle",
"for",
"vertex_path",
"in",
"vertex_paths",
":",
"# a path has no length if it has fewer than 2 vertices",
"if",
"len",
"(",
"vertex_path",
")",
"<",
"2",
":",
"continue",
"# convert vertex indices to entity indices",
"entity_paths",
".",
"append",
"(",
"vertex_to_entity_path",
"(",
"vertex_path",
",",
"graph",
",",
"entities",
",",
"vertices",
")",
")",
"entity_paths",
"=",
"np",
".",
"array",
"(",
"entity_paths",
")",
"return",
"entity_paths"
] |
Paths are lists of entity indices.
We first generate vertex paths using graph cycle algorithms,
and then convert them to entity paths.
This will also change the ordering of entity.points in place
so a path may be traversed without having to reverse the entity.
Parameters
-------------
entities : (n,) entity objects
Entity objects
vertices : (m, dimension) float
Vertex points in space
Returns
-------------
entity_paths : sequence of (n,) int
Ordered traversals of entities
|
[
"Paths",
"are",
"lists",
"of",
"entity",
"indices",
".",
"We",
"first",
"generate",
"vertex",
"paths",
"using",
"graph",
"cycle",
"algorithms",
"and",
"then",
"convert",
"them",
"to",
"entity",
"paths",
"."
] |
python
|
train
| 33.829268 |
jameshilliard/hlk-sw16
|
hlk_sw16/protocol.py
|
https://github.com/jameshilliard/hlk-sw16/blob/4f0c5a7b76b42167f4dc9d2aa6312c7518a8cd56/hlk_sw16/protocol.py#L138-L143
|
def format_packet(command):
"""Format packet to be sent."""
frame_header = b"\xaa"
verify = b"\x0b"
send_delim = b"\xbb"
return frame_header + command.ljust(17, b"\x00") + verify + send_delim
|
[
"def",
"format_packet",
"(",
"command",
")",
":",
"frame_header",
"=",
"b\"\\xaa\"",
"verify",
"=",
"b\"\\x0b\"",
"send_delim",
"=",
"b\"\\xbb\"",
"return",
"frame_header",
"+",
"command",
".",
"ljust",
"(",
"17",
",",
"b\"\\x00\"",
")",
"+",
"verify",
"+",
"send_delim"
] |
Format packet to be sent.
|
[
"Format",
"packet",
"to",
"be",
"sent",
"."
] |
python
|
train
| 37.666667 |
tensorflow/tensor2tensor
|
tensor2tensor/models/research/moe_experiments.py
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/moe_experiments.py#L356-L361
|
def xmoe2_v1_l4k_local_only():
"""With sequence length 4096."""
hparams = xmoe2_v1_l4k()
hparams.decoder_layers = [
"local_att" if l == "att" else l for l in hparams.decoder_layers]
return hparams
|
[
"def",
"xmoe2_v1_l4k_local_only",
"(",
")",
":",
"hparams",
"=",
"xmoe2_v1_l4k",
"(",
")",
"hparams",
".",
"decoder_layers",
"=",
"[",
"\"local_att\"",
"if",
"l",
"==",
"\"att\"",
"else",
"l",
"for",
"l",
"in",
"hparams",
".",
"decoder_layers",
"]",
"return",
"hparams"
] |
With sequence length 4096.
|
[
"With",
"sequence",
"length",
"4096",
"."
] |
python
|
train
| 34.166667 |
locationlabs/mockredis
|
mockredis/client.py
|
https://github.com/locationlabs/mockredis/blob/fd4e3117066ff0c24e86ebca007853a8092e3254/mockredis/client.py#L552-L557
|
def hmget(self, hashkey, keys, *args):
"""Emulate hmget."""
redis_hash = self._get_hash(hashkey, 'HMGET')
attributes = self._list_or_args(keys, args)
return [redis_hash.get(self._encode(attribute)) for attribute in attributes]
|
[
"def",
"hmget",
"(",
"self",
",",
"hashkey",
",",
"keys",
",",
"*",
"args",
")",
":",
"redis_hash",
"=",
"self",
".",
"_get_hash",
"(",
"hashkey",
",",
"'HMGET'",
")",
"attributes",
"=",
"self",
".",
"_list_or_args",
"(",
"keys",
",",
"args",
")",
"return",
"[",
"redis_hash",
".",
"get",
"(",
"self",
".",
"_encode",
"(",
"attribute",
")",
")",
"for",
"attribute",
"in",
"attributes",
"]"
] |
Emulate hmget.
|
[
"Emulate",
"hmget",
"."
] |
python
|
train
| 42.333333 |
nion-software/nionswift
|
nion/swift/Application.py
|
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/Application.py#L176-L229
|
def start(self, skip_choose=False, fixed_workspace_dir=None):
"""
Start the application.
Looks for workspace_location persistent string. If it doesn't find it, uses a default
workspace location.
Then checks to see if that workspace exists. If not and if skip_choose has not been
set to True, asks the user for a workspace location. User may choose new folder or
existing location. This works by putting up the dialog which will either call start
again or exit.
Creates workspace in location if it doesn't exist.
Migrates database to latest version.
Creates document model, resources path, etc.
"""
logging.getLogger("migration").setLevel(logging.INFO)
if fixed_workspace_dir:
workspace_dir = fixed_workspace_dir
else:
documents_dir = self.ui.get_document_location()
workspace_dir = os.path.join(documents_dir, "Nion Swift Libraries")
workspace_dir = self.ui.get_persistent_string("workspace_location", workspace_dir)
welcome_message_enabled = fixed_workspace_dir is None
profile, is_created = Profile.create_profile(pathlib.Path(workspace_dir), welcome_message_enabled, skip_choose)
if not profile:
self.choose_library()
return True
self.workspace_dir = workspace_dir
DocumentModel.DocumentModel.computation_min_period = 0.1
document_model = DocumentModel.DocumentModel(profile=profile)
document_model.create_default_data_groups()
document_model.start_dispatcher()
# parse the hardware aliases file
alias_path = os.path.join(self.workspace_dir, "aliases.ini")
HardwareSource.parse_hardware_aliases_config_file(alias_path)
# create the document controller
document_controller = self.create_document_controller(document_model, "library")
if self.__resources_path is not None:
document_model.create_sample_images(self.__resources_path)
workspace_history = self.ui.get_persistent_object("workspace_history", list())
if workspace_dir in workspace_history:
workspace_history.remove(workspace_dir)
workspace_history.insert(0, workspace_dir)
self.ui.set_persistent_object("workspace_history", workspace_history)
self.ui.set_persistent_string("workspace_location", workspace_dir)
if welcome_message_enabled:
logging.info("Welcome to Nion Swift.")
if is_created and len(document_model.display_items) > 0:
document_controller.selected_display_panel.set_display_panel_display_item(document_model.display_items[0])
document_controller.selected_display_panel.perform_action("set_fill_mode")
return True
|
[
"def",
"start",
"(",
"self",
",",
"skip_choose",
"=",
"False",
",",
"fixed_workspace_dir",
"=",
"None",
")",
":",
"logging",
".",
"getLogger",
"(",
"\"migration\"",
")",
".",
"setLevel",
"(",
"logging",
".",
"INFO",
")",
"if",
"fixed_workspace_dir",
":",
"workspace_dir",
"=",
"fixed_workspace_dir",
"else",
":",
"documents_dir",
"=",
"self",
".",
"ui",
".",
"get_document_location",
"(",
")",
"workspace_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"documents_dir",
",",
"\"Nion Swift Libraries\"",
")",
"workspace_dir",
"=",
"self",
".",
"ui",
".",
"get_persistent_string",
"(",
"\"workspace_location\"",
",",
"workspace_dir",
")",
"welcome_message_enabled",
"=",
"fixed_workspace_dir",
"is",
"None",
"profile",
",",
"is_created",
"=",
"Profile",
".",
"create_profile",
"(",
"pathlib",
".",
"Path",
"(",
"workspace_dir",
")",
",",
"welcome_message_enabled",
",",
"skip_choose",
")",
"if",
"not",
"profile",
":",
"self",
".",
"choose_library",
"(",
")",
"return",
"True",
"self",
".",
"workspace_dir",
"=",
"workspace_dir",
"DocumentModel",
".",
"DocumentModel",
".",
"computation_min_period",
"=",
"0.1",
"document_model",
"=",
"DocumentModel",
".",
"DocumentModel",
"(",
"profile",
"=",
"profile",
")",
"document_model",
".",
"create_default_data_groups",
"(",
")",
"document_model",
".",
"start_dispatcher",
"(",
")",
"# parse the hardware aliases file",
"alias_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"workspace_dir",
",",
"\"aliases.ini\"",
")",
"HardwareSource",
".",
"parse_hardware_aliases_config_file",
"(",
"alias_path",
")",
"# create the document controller",
"document_controller",
"=",
"self",
".",
"create_document_controller",
"(",
"document_model",
",",
"\"library\"",
")",
"if",
"self",
".",
"__resources_path",
"is",
"not",
"None",
":",
"document_model",
".",
"create_sample_images",
"(",
"self",
".",
"__resources_path",
")",
"workspace_history",
"=",
"self",
".",
"ui",
".",
"get_persistent_object",
"(",
"\"workspace_history\"",
",",
"list",
"(",
")",
")",
"if",
"workspace_dir",
"in",
"workspace_history",
":",
"workspace_history",
".",
"remove",
"(",
"workspace_dir",
")",
"workspace_history",
".",
"insert",
"(",
"0",
",",
"workspace_dir",
")",
"self",
".",
"ui",
".",
"set_persistent_object",
"(",
"\"workspace_history\"",
",",
"workspace_history",
")",
"self",
".",
"ui",
".",
"set_persistent_string",
"(",
"\"workspace_location\"",
",",
"workspace_dir",
")",
"if",
"welcome_message_enabled",
":",
"logging",
".",
"info",
"(",
"\"Welcome to Nion Swift.\"",
")",
"if",
"is_created",
"and",
"len",
"(",
"document_model",
".",
"display_items",
")",
">",
"0",
":",
"document_controller",
".",
"selected_display_panel",
".",
"set_display_panel_display_item",
"(",
"document_model",
".",
"display_items",
"[",
"0",
"]",
")",
"document_controller",
".",
"selected_display_panel",
".",
"perform_action",
"(",
"\"set_fill_mode\"",
")",
"return",
"True"
] |
Start the application.
Looks for workspace_location persistent string. If it doesn't find it, uses a default
workspace location.
Then checks to see if that workspace exists. If not and if skip_choose has not been
set to True, asks the user for a workspace location. User may choose new folder or
existing location. This works by putting up the dialog which will either call start
again or exit.
Creates workspace in location if it doesn't exist.
Migrates database to latest version.
Creates document model, resources path, etc.
|
[
"Start",
"the",
"application",
"."
] |
python
|
train
| 52 |
bwohlberg/sporco
|
sporco/admm/rpca.py
|
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/admm/rpca.py#L215-L227
|
def eval_objfn(self):
"""Compute components of objective function as well as total
contribution to objective function.
"""
if self.opt['fEvalX']:
rnn = np.sum(self.ss)
else:
rnn = sp.norm_nuclear(self.obfn_fvar())
rl1 = np.sum(np.abs(self.obfn_gvar()))
cns = np.linalg.norm(self.X + self.Y - self.S)
obj = rnn + self.lmbda*rl1
return (obj, rnn, rl1, cns)
|
[
"def",
"eval_objfn",
"(",
"self",
")",
":",
"if",
"self",
".",
"opt",
"[",
"'fEvalX'",
"]",
":",
"rnn",
"=",
"np",
".",
"sum",
"(",
"self",
".",
"ss",
")",
"else",
":",
"rnn",
"=",
"sp",
".",
"norm_nuclear",
"(",
"self",
".",
"obfn_fvar",
"(",
")",
")",
"rl1",
"=",
"np",
".",
"sum",
"(",
"np",
".",
"abs",
"(",
"self",
".",
"obfn_gvar",
"(",
")",
")",
")",
"cns",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"self",
".",
"X",
"+",
"self",
".",
"Y",
"-",
"self",
".",
"S",
")",
"obj",
"=",
"rnn",
"+",
"self",
".",
"lmbda",
"*",
"rl1",
"return",
"(",
"obj",
",",
"rnn",
",",
"rl1",
",",
"cns",
")"
] |
Compute components of objective function as well as total
contribution to objective function.
|
[
"Compute",
"components",
"of",
"objective",
"function",
"as",
"well",
"as",
"total",
"contribution",
"to",
"objective",
"function",
"."
] |
python
|
train
| 33.769231 |
tensorflow/tensorboard
|
tensorboard/plugins/audio/audio_plugin.py
|
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/audio/audio_plugin.py#L176-L198
|
def _query_for_individual_audio(self, run, tag, sample, index):
"""Builds a URL for accessing the specified audio.
This should be kept in sync with _serve_audio_metadata. Note that the URL is
*not* guaranteed to always return the same audio, since audio may be
unloaded from the reservoir as new audio entries come in.
Args:
run: The name of the run.
tag: The tag.
index: The index of the audio entry. Negative values are OK.
Returns:
A string representation of a URL that will load the index-th sampled audio
in the given run with the given tag.
"""
query_string = urllib.parse.urlencode({
'run': run,
'tag': tag,
'sample': sample,
'index': index,
})
return query_string
|
[
"def",
"_query_for_individual_audio",
"(",
"self",
",",
"run",
",",
"tag",
",",
"sample",
",",
"index",
")",
":",
"query_string",
"=",
"urllib",
".",
"parse",
".",
"urlencode",
"(",
"{",
"'run'",
":",
"run",
",",
"'tag'",
":",
"tag",
",",
"'sample'",
":",
"sample",
",",
"'index'",
":",
"index",
",",
"}",
")",
"return",
"query_string"
] |
Builds a URL for accessing the specified audio.
This should be kept in sync with _serve_audio_metadata. Note that the URL is
*not* guaranteed to always return the same audio, since audio may be
unloaded from the reservoir as new audio entries come in.
Args:
run: The name of the run.
tag: The tag.
index: The index of the audio entry. Negative values are OK.
Returns:
A string representation of a URL that will load the index-th sampled audio
in the given run with the given tag.
|
[
"Builds",
"a",
"URL",
"for",
"accessing",
"the",
"specified",
"audio",
"."
] |
python
|
train
| 32.782609 |
DataDog/integrations-core
|
datadog_checks_base/datadog_checks/base/checks/openmetrics/mixins.py
|
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/datadog_checks_base/datadog_checks/base/checks/openmetrics/mixins.py#L225-L241
|
def parse_metric_family(self, response, scraper_config):
"""
Parse the MetricFamily from a valid requests.Response object to provide a MetricFamily object (see [0])
The text format uses iter_lines() generator.
:param response: requests.Response
:return: core.Metric
"""
input_gen = response.iter_lines(chunk_size=self.REQUESTS_CHUNK_SIZE, decode_unicode=True)
if scraper_config['_text_filter_blacklist']:
input_gen = self._text_filter_input(input_gen, scraper_config)
for metric in text_fd_to_metric_families(input_gen):
metric.type = scraper_config['type_overrides'].get(metric.name, metric.type)
if metric.type not in self.METRIC_TYPES:
continue
metric.name = self._remove_metric_prefix(metric.name, scraper_config)
yield metric
|
[
"def",
"parse_metric_family",
"(",
"self",
",",
"response",
",",
"scraper_config",
")",
":",
"input_gen",
"=",
"response",
".",
"iter_lines",
"(",
"chunk_size",
"=",
"self",
".",
"REQUESTS_CHUNK_SIZE",
",",
"decode_unicode",
"=",
"True",
")",
"if",
"scraper_config",
"[",
"'_text_filter_blacklist'",
"]",
":",
"input_gen",
"=",
"self",
".",
"_text_filter_input",
"(",
"input_gen",
",",
"scraper_config",
")",
"for",
"metric",
"in",
"text_fd_to_metric_families",
"(",
"input_gen",
")",
":",
"metric",
".",
"type",
"=",
"scraper_config",
"[",
"'type_overrides'",
"]",
".",
"get",
"(",
"metric",
".",
"name",
",",
"metric",
".",
"type",
")",
"if",
"metric",
".",
"type",
"not",
"in",
"self",
".",
"METRIC_TYPES",
":",
"continue",
"metric",
".",
"name",
"=",
"self",
".",
"_remove_metric_prefix",
"(",
"metric",
".",
"name",
",",
"scraper_config",
")",
"yield",
"metric"
] |
Parse the MetricFamily from a valid requests.Response object to provide a MetricFamily object (see [0])
The text format uses iter_lines() generator.
:param response: requests.Response
:return: core.Metric
|
[
"Parse",
"the",
"MetricFamily",
"from",
"a",
"valid",
"requests",
".",
"Response",
"object",
"to",
"provide",
"a",
"MetricFamily",
"object",
"(",
"see",
"[",
"0",
"]",
")",
"The",
"text",
"format",
"uses",
"iter_lines",
"()",
"generator",
".",
":",
"param",
"response",
":",
"requests",
".",
"Response",
":",
"return",
":",
"core",
".",
"Metric"
] |
python
|
train
| 50.764706 |
Chilipp/docrep
|
docrep/__init__.py
|
https://github.com/Chilipp/docrep/blob/637971f76e1a6e1c70e36dcd1b02bbc37ba02487/docrep/__init__.py#L915-L943
|
def get_extended_summary(self, s, base=None):
"""Get the extended summary from a docstring
This here is the extended summary
Parameters
----------
s: str
The docstring to use
base: str or None
A key under which the summary shall be stored in the :attr:`params`
attribute. If not None, the summary will be stored in
``base + '.summary_ext'``. Otherwise, it will not be stored at
all
Returns
-------
str
The extracted extended summary"""
# Remove the summary and dedent
s = self._remove_summary(s)
ret = ''
if not self._all_sections_patt.match(s):
m = self._extended_summary_patt.match(s)
if m is not None:
ret = m.group().strip()
if base is not None:
self.params[base + '.summary_ext'] = ret
return ret
|
[
"def",
"get_extended_summary",
"(",
"self",
",",
"s",
",",
"base",
"=",
"None",
")",
":",
"# Remove the summary and dedent",
"s",
"=",
"self",
".",
"_remove_summary",
"(",
"s",
")",
"ret",
"=",
"''",
"if",
"not",
"self",
".",
"_all_sections_patt",
".",
"match",
"(",
"s",
")",
":",
"m",
"=",
"self",
".",
"_extended_summary_patt",
".",
"match",
"(",
"s",
")",
"if",
"m",
"is",
"not",
"None",
":",
"ret",
"=",
"m",
".",
"group",
"(",
")",
".",
"strip",
"(",
")",
"if",
"base",
"is",
"not",
"None",
":",
"self",
".",
"params",
"[",
"base",
"+",
"'.summary_ext'",
"]",
"=",
"ret",
"return",
"ret"
] |
Get the extended summary from a docstring
This here is the extended summary
Parameters
----------
s: str
The docstring to use
base: str or None
A key under which the summary shall be stored in the :attr:`params`
attribute. If not None, the summary will be stored in
``base + '.summary_ext'``. Otherwise, it will not be stored at
all
Returns
-------
str
The extracted extended summary
|
[
"Get",
"the",
"extended",
"summary",
"from",
"a",
"docstring"
] |
python
|
train
| 31.724138 |
PmagPy/PmagPy
|
pmagpy/mapping/map_magic.py
|
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/mapping/map_magic.py#L368-L382
|
def convert_meas(direction, Rec):
"""
converts measurments tables from magic 2 to 3 (direction=magic3)
or from model 3 to 2.5 (direction=magic2) [not available]
"""
if direction == 'magic3':
columns = meas_magic2_2_magic3_map
MeasRec = {}
for key in columns:
if key in list(Rec.keys()):
# transfer info and change column name to data model 3.0
MeasRec[columns[key]] = Rec[key]
return MeasRec
else: # haven't added this way yet
pass
|
[
"def",
"convert_meas",
"(",
"direction",
",",
"Rec",
")",
":",
"if",
"direction",
"==",
"'magic3'",
":",
"columns",
"=",
"meas_magic2_2_magic3_map",
"MeasRec",
"=",
"{",
"}",
"for",
"key",
"in",
"columns",
":",
"if",
"key",
"in",
"list",
"(",
"Rec",
".",
"keys",
"(",
")",
")",
":",
"# transfer info and change column name to data model 3.0",
"MeasRec",
"[",
"columns",
"[",
"key",
"]",
"]",
"=",
"Rec",
"[",
"key",
"]",
"return",
"MeasRec",
"else",
":",
"# haven't added this way yet",
"pass"
] |
converts measurments tables from magic 2 to 3 (direction=magic3)
or from model 3 to 2.5 (direction=magic2) [not available]
|
[
"converts",
"measurments",
"tables",
"from",
"magic",
"2",
"to",
"3",
"(",
"direction",
"=",
"magic3",
")",
"or",
"from",
"model",
"3",
"to",
"2",
".",
"5",
"(",
"direction",
"=",
"magic2",
")",
"[",
"not",
"available",
"]"
] |
python
|
train
| 35.066667 |
EUDAT-B2SAFE/B2HANDLE
|
b2handle/util/logutils.py
|
https://github.com/EUDAT-B2SAFE/B2HANDLE/blob/a6d216d459644e01fbdfd5b318a535950bc5cdbb/b2handle/util/logutils.py#L31-L58
|
def log_instantiation(LOGGER, classname, args, forbidden, with_date=False):
'''
Log the instantiation of an object to the given logger.
:LOGGER: A logger to log to. Please see module "logging".
:classname: The name of the class that is being
instantiated.
:args: A dictionary of arguments passed to the instantiation,
which will be logged on debug level.
:forbidden: A list of arguments whose values should not be
logged, e.g. "password".
:with_date: Optional. Boolean. Indicated whether the initiation
date and time should be logged.
'''
# Info:
if with_date:
LOGGER.info('Instantiating '+classname+' at '+datetime.datetime.now().strftime('%Y-%m-%d_%H:%M'))
else:
LOGGER.info('Instantiating '+classname)
# Debug:
for argname in args:
if args[argname] is not None:
if argname in forbidden:
LOGGER.debug('Param '+argname+'*******')
else:
LOGGER.debug('Param '+argname+'='+str(args[argname]))
|
[
"def",
"log_instantiation",
"(",
"LOGGER",
",",
"classname",
",",
"args",
",",
"forbidden",
",",
"with_date",
"=",
"False",
")",
":",
"# Info:",
"if",
"with_date",
":",
"LOGGER",
".",
"info",
"(",
"'Instantiating '",
"+",
"classname",
"+",
"' at '",
"+",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"'%Y-%m-%d_%H:%M'",
")",
")",
"else",
":",
"LOGGER",
".",
"info",
"(",
"'Instantiating '",
"+",
"classname",
")",
"# Debug:",
"for",
"argname",
"in",
"args",
":",
"if",
"args",
"[",
"argname",
"]",
"is",
"not",
"None",
":",
"if",
"argname",
"in",
"forbidden",
":",
"LOGGER",
".",
"debug",
"(",
"'Param '",
"+",
"argname",
"+",
"'*******'",
")",
"else",
":",
"LOGGER",
".",
"debug",
"(",
"'Param '",
"+",
"argname",
"+",
"'='",
"+",
"str",
"(",
"args",
"[",
"argname",
"]",
")",
")"
] |
Log the instantiation of an object to the given logger.
:LOGGER: A logger to log to. Please see module "logging".
:classname: The name of the class that is being
instantiated.
:args: A dictionary of arguments passed to the instantiation,
which will be logged on debug level.
:forbidden: A list of arguments whose values should not be
logged, e.g. "password".
:with_date: Optional. Boolean. Indicated whether the initiation
date and time should be logged.
|
[
"Log",
"the",
"instantiation",
"of",
"an",
"object",
"to",
"the",
"given",
"logger",
"."
] |
python
|
train
| 36.928571 |
globocom/GloboNetworkAPI-client-python
|
networkapiclient/Vlan.py
|
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/Vlan.py#L713-L752
|
def listar_permissao(self, nome_equipamento, nome_interface):
"""List all VLANS having communication permission to trunk from a port in switch.
Run script 'configurador'.
::
The value of 'stdout' key of return dictionary can have a list of numbers or
number intervals of VLAN´s, comma separated. Examples of possible returns of 'stdout' below:
- 100,103,111,...
- 100-110,...
- 100-110,112,115,...
- 100,103,105-111,113,115-118,...
:param nome_equipamento: Equipment name.
:param nome_interface: Interface name.
:return: Following dictionary:
::
{‘sucesso’: {‘codigo’: < codigo >,
‘descricao’: {'stdout':< stdout >, 'stderr':< stderr >}}}
:raise InvalidParameterError: Equipment name and/or interface name is invalid or none.
:raise EquipamentoNaoExisteError: Equipment does not exist.
:raise LigacaoFrontInterfaceNaoExisteError: There is no interface on front link of informed interface.
:raise InterfaceNaoExisteError: Interface does not exist or is not associated to equipment.
:raise LigacaoFrontNaoTerminaSwitchError: Interface does not have switch connected.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
:raise ScriptError: Failed to run the script.
"""
vlan_map = dict()
vlan_map['nome'] = nome_equipamento
vlan_map['nome_interface'] = nome_interface
code, xml = self.submit({'equipamento': vlan_map}, 'PUT', 'vlan/list/')
return self.response(code, xml)
|
[
"def",
"listar_permissao",
"(",
"self",
",",
"nome_equipamento",
",",
"nome_interface",
")",
":",
"vlan_map",
"=",
"dict",
"(",
")",
"vlan_map",
"[",
"'nome'",
"]",
"=",
"nome_equipamento",
"vlan_map",
"[",
"'nome_interface'",
"]",
"=",
"nome_interface",
"code",
",",
"xml",
"=",
"self",
".",
"submit",
"(",
"{",
"'equipamento'",
":",
"vlan_map",
"}",
",",
"'PUT'",
",",
"'vlan/list/'",
")",
"return",
"self",
".",
"response",
"(",
"code",
",",
"xml",
")"
] |
List all VLANS having communication permission to trunk from a port in switch.
Run script 'configurador'.
::
The value of 'stdout' key of return dictionary can have a list of numbers or
number intervals of VLAN´s, comma separated. Examples of possible returns of 'stdout' below:
- 100,103,111,...
- 100-110,...
- 100-110,112,115,...
- 100,103,105-111,113,115-118,...
:param nome_equipamento: Equipment name.
:param nome_interface: Interface name.
:return: Following dictionary:
::
{‘sucesso’: {‘codigo’: < codigo >,
‘descricao’: {'stdout':< stdout >, 'stderr':< stderr >}}}
:raise InvalidParameterError: Equipment name and/or interface name is invalid or none.
:raise EquipamentoNaoExisteError: Equipment does not exist.
:raise LigacaoFrontInterfaceNaoExisteError: There is no interface on front link of informed interface.
:raise InterfaceNaoExisteError: Interface does not exist or is not associated to equipment.
:raise LigacaoFrontNaoTerminaSwitchError: Interface does not have switch connected.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
:raise ScriptError: Failed to run the script.
|
[
"List",
"all",
"VLANS",
"having",
"communication",
"permission",
"to",
"trunk",
"from",
"a",
"port",
"in",
"switch",
"."
] |
python
|
train
| 42 |
aboSamoor/polyglot
|
polyglot/load.py
|
https://github.com/aboSamoor/polyglot/blob/d0d2aa8d06cec4e03bd96618ae960030f7069a17/polyglot/load.py#L89-L103
|
def load_ner_model(lang="en", version="2"):
"""Return a named entity extractor parameters for `lang` and of version `version`
Args:
lang (string): language code.
version (string): version of the parameters to be used.
"""
src_dir = "ner{}".format(version)
p = locate_resource(src_dir, lang)
fh = _open(p)
try:
return pickle.load(fh)
except UnicodeDecodeError:
fh.seek(0)
return pickle.load(fh, encoding='latin1')
|
[
"def",
"load_ner_model",
"(",
"lang",
"=",
"\"en\"",
",",
"version",
"=",
"\"2\"",
")",
":",
"src_dir",
"=",
"\"ner{}\"",
".",
"format",
"(",
"version",
")",
"p",
"=",
"locate_resource",
"(",
"src_dir",
",",
"lang",
")",
"fh",
"=",
"_open",
"(",
"p",
")",
"try",
":",
"return",
"pickle",
".",
"load",
"(",
"fh",
")",
"except",
"UnicodeDecodeError",
":",
"fh",
".",
"seek",
"(",
"0",
")",
"return",
"pickle",
".",
"load",
"(",
"fh",
",",
"encoding",
"=",
"'latin1'",
")"
] |
Return a named entity extractor parameters for `lang` and of version `version`
Args:
lang (string): language code.
version (string): version of the parameters to be used.
|
[
"Return",
"a",
"named",
"entity",
"extractor",
"parameters",
"for",
"lang",
"and",
"of",
"version",
"version"
] |
python
|
train
| 29 |
numenta/htmresearch
|
projects/sdr_paper/poirazi_neuron_model/run_correlation_false_positive_experiment.py
|
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/sdr_paper/poirazi_neuron_model/run_correlation_false_positive_experiment.py#L33-L95
|
def run_false_positive_experiment_correlation(seed,
num_neurons = 1,
a = 32,
dim = 4000,
num_samples = 20000,
num_dendrites = 500,
dendrite_length = 20,
num_trials = 1000,
nonlinearity = threshold_nonlinearity(10)):
"""
Run an experiment to test the false positive rate based on the correlation
between bits. Correlation is measured as the average pairwise correlation
between bits for each pattern in the data (across all of the data).
To generate the results shown in the false positive vs. correlation figure,
we used the parameter settings:
1. a = 32, dim = 2000
2. a = 32, dim = 4000
3. a = 64, dim = 4000
In each case, we ran approximately 4000 trials in parallel, and then binned
the results based on correlation. The code for binning can be found in
plot_effect_of_correlation.py. Note that your results may not match ours
exactly, as the number of different seeds used depends on how many processes
are created, but the general trend of the results should be very stable due
to the large number of data points.
"""
numpy.random.seed(seed)
possible_cluster_sizes = range(2, 10)
for trial in range(num_trials):
num_cluster_sizes = numpy.random.choice([1, 1, 2] + range(1, 8), 1)
cluster_sizes = numpy.random.choice(possible_cluster_sizes, num_cluster_sizes, replace = False)
num_cells_per_cluster_size = [numpy.random.randint(dim, 3*dim) for i in range(num_cluster_sizes)]
data = generate_correlated_data(dim = dim,
num_active = a,
num_samples = num_samples,
num_cells_per_cluster_size =
num_cells_per_cluster_size,
cluster_sizes = cluster_sizes)
correlation = get_pattern_correlations(data)
closest_correlations = get_biased_correlations(data, threshold = 10)
print "Generated {} samples with total average pattern correlation {}, biased threshold-10 correlation {}, using cluster sizes {} with cells per cluster size of {}".format(num_samples, correlation, closest_correlations, cluster_sizes, num_cells_per_cluster_size)
fps = []
fns = []
errors = []
for i in range((num_samples/2)/num_dendrites):
current_data = data.getSlice(i*(num_dendrites*2), (i+1)*(num_dendrites*2), 0, dim)
neuron = Neuron(size = dendrite_length*num_dendrites, num_dendrites = num_dendrites, dendrite_length = dendrite_length, dim = dim, nonlinearity = nonlinearity)
labels = numpy.asarray([1 for i in range(num_dendrites)] + [-1 for i in range(num_dendrites)])
neuron.HTM_style_initialize_on_data(current_data, labels)
error, fp, fn = get_error(current_data, labels, [neuron])
fps.append(fp)
fns.append(fn)
errors.append(error)
print "Error at r = {} is {}, with {} false positives out of {} samples".format(correlation, numpy.mean(errors), sum(fps), num_samples/2)
with open("correlation_results_a{}_n{}_s{}.txt".format(a, dim, dendrite_length), "a") as f:
f.write(str(correlation) + ", " + str(sum(fps)) + ", " + str(num_samples/2) + "\n")
|
[
"def",
"run_false_positive_experiment_correlation",
"(",
"seed",
",",
"num_neurons",
"=",
"1",
",",
"a",
"=",
"32",
",",
"dim",
"=",
"4000",
",",
"num_samples",
"=",
"20000",
",",
"num_dendrites",
"=",
"500",
",",
"dendrite_length",
"=",
"20",
",",
"num_trials",
"=",
"1000",
",",
"nonlinearity",
"=",
"threshold_nonlinearity",
"(",
"10",
")",
")",
":",
"numpy",
".",
"random",
".",
"seed",
"(",
"seed",
")",
"possible_cluster_sizes",
"=",
"range",
"(",
"2",
",",
"10",
")",
"for",
"trial",
"in",
"range",
"(",
"num_trials",
")",
":",
"num_cluster_sizes",
"=",
"numpy",
".",
"random",
".",
"choice",
"(",
"[",
"1",
",",
"1",
",",
"2",
"]",
"+",
"range",
"(",
"1",
",",
"8",
")",
",",
"1",
")",
"cluster_sizes",
"=",
"numpy",
".",
"random",
".",
"choice",
"(",
"possible_cluster_sizes",
",",
"num_cluster_sizes",
",",
"replace",
"=",
"False",
")",
"num_cells_per_cluster_size",
"=",
"[",
"numpy",
".",
"random",
".",
"randint",
"(",
"dim",
",",
"3",
"*",
"dim",
")",
"for",
"i",
"in",
"range",
"(",
"num_cluster_sizes",
")",
"]",
"data",
"=",
"generate_correlated_data",
"(",
"dim",
"=",
"dim",
",",
"num_active",
"=",
"a",
",",
"num_samples",
"=",
"num_samples",
",",
"num_cells_per_cluster_size",
"=",
"num_cells_per_cluster_size",
",",
"cluster_sizes",
"=",
"cluster_sizes",
")",
"correlation",
"=",
"get_pattern_correlations",
"(",
"data",
")",
"closest_correlations",
"=",
"get_biased_correlations",
"(",
"data",
",",
"threshold",
"=",
"10",
")",
"print",
"\"Generated {} samples with total average pattern correlation {}, biased threshold-10 correlation {}, using cluster sizes {} with cells per cluster size of {}\"",
".",
"format",
"(",
"num_samples",
",",
"correlation",
",",
"closest_correlations",
",",
"cluster_sizes",
",",
"num_cells_per_cluster_size",
")",
"fps",
"=",
"[",
"]",
"fns",
"=",
"[",
"]",
"errors",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"(",
"num_samples",
"/",
"2",
")",
"/",
"num_dendrites",
")",
":",
"current_data",
"=",
"data",
".",
"getSlice",
"(",
"i",
"*",
"(",
"num_dendrites",
"*",
"2",
")",
",",
"(",
"i",
"+",
"1",
")",
"*",
"(",
"num_dendrites",
"*",
"2",
")",
",",
"0",
",",
"dim",
")",
"neuron",
"=",
"Neuron",
"(",
"size",
"=",
"dendrite_length",
"*",
"num_dendrites",
",",
"num_dendrites",
"=",
"num_dendrites",
",",
"dendrite_length",
"=",
"dendrite_length",
",",
"dim",
"=",
"dim",
",",
"nonlinearity",
"=",
"nonlinearity",
")",
"labels",
"=",
"numpy",
".",
"asarray",
"(",
"[",
"1",
"for",
"i",
"in",
"range",
"(",
"num_dendrites",
")",
"]",
"+",
"[",
"-",
"1",
"for",
"i",
"in",
"range",
"(",
"num_dendrites",
")",
"]",
")",
"neuron",
".",
"HTM_style_initialize_on_data",
"(",
"current_data",
",",
"labels",
")",
"error",
",",
"fp",
",",
"fn",
"=",
"get_error",
"(",
"current_data",
",",
"labels",
",",
"[",
"neuron",
"]",
")",
"fps",
".",
"append",
"(",
"fp",
")",
"fns",
".",
"append",
"(",
"fn",
")",
"errors",
".",
"append",
"(",
"error",
")",
"print",
"\"Error at r = {} is {}, with {} false positives out of {} samples\"",
".",
"format",
"(",
"correlation",
",",
"numpy",
".",
"mean",
"(",
"errors",
")",
",",
"sum",
"(",
"fps",
")",
",",
"num_samples",
"/",
"2",
")",
"with",
"open",
"(",
"\"correlation_results_a{}_n{}_s{}.txt\"",
".",
"format",
"(",
"a",
",",
"dim",
",",
"dendrite_length",
")",
",",
"\"a\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"str",
"(",
"correlation",
")",
"+",
"\", \"",
"+",
"str",
"(",
"sum",
"(",
"fps",
")",
")",
"+",
"\", \"",
"+",
"str",
"(",
"num_samples",
"/",
"2",
")",
"+",
"\"\\n\"",
")"
] |
Run an experiment to test the false positive rate based on the correlation
between bits. Correlation is measured as the average pairwise correlation
between bits for each pattern in the data (across all of the data).
To generate the results shown in the false positive vs. correlation figure,
we used the parameter settings:
1. a = 32, dim = 2000
2. a = 32, dim = 4000
3. a = 64, dim = 4000
In each case, we ran approximately 4000 trials in parallel, and then binned
the results based on correlation. The code for binning can be found in
plot_effect_of_correlation.py. Note that your results may not match ours
exactly, as the number of different seeds used depends on how many processes
are created, but the general trend of the results should be very stable due
to the large number of data points.
|
[
"Run",
"an",
"experiment",
"to",
"test",
"the",
"false",
"positive",
"rate",
"based",
"on",
"the",
"correlation",
"between",
"bits",
".",
"Correlation",
"is",
"measured",
"as",
"the",
"average",
"pairwise",
"correlation",
"between",
"bits",
"for",
"each",
"pattern",
"in",
"the",
"data",
"(",
"across",
"all",
"of",
"the",
"data",
")",
"."
] |
python
|
train
| 55.142857 |
dmlc/gluon-nlp
|
src/gluonnlp/data/transforms.py
|
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/data/transforms.py#L758-L778
|
def _tokenize(self, text):
"""Tokenizes a piece of text."""
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
text = self._tokenize_chinese_chars(text)
orig_tokens = self._whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.lower:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = self._whitespace_tokenize(' '.join(split_tokens))
return output_tokens
|
[
"def",
"_tokenize",
"(",
"self",
",",
"text",
")",
":",
"text",
"=",
"self",
".",
"_clean_text",
"(",
"text",
")",
"# This was added on November 1st, 2018 for the multilingual and Chinese",
"# models. This is also applied to the English models now, but it doesn't",
"# matter since the English models were not trained on any Chinese data",
"# and generally don't have any Chinese data in them (there are Chinese",
"# characters in the vocabulary because Wikipedia does have some Chinese",
"# words in the English Wikipedia.).",
"text",
"=",
"self",
".",
"_tokenize_chinese_chars",
"(",
"text",
")",
"orig_tokens",
"=",
"self",
".",
"_whitespace_tokenize",
"(",
"text",
")",
"split_tokens",
"=",
"[",
"]",
"for",
"token",
"in",
"orig_tokens",
":",
"if",
"self",
".",
"lower",
":",
"token",
"=",
"token",
".",
"lower",
"(",
")",
"token",
"=",
"self",
".",
"_run_strip_accents",
"(",
"token",
")",
"split_tokens",
".",
"extend",
"(",
"self",
".",
"_run_split_on_punc",
"(",
"token",
")",
")",
"output_tokens",
"=",
"self",
".",
"_whitespace_tokenize",
"(",
"' '",
".",
"join",
"(",
"split_tokens",
")",
")",
"return",
"output_tokens"
] |
Tokenizes a piece of text.
|
[
"Tokenizes",
"a",
"piece",
"of",
"text",
"."
] |
python
|
train
| 46.714286 |
bpannier/simpletr64
|
simpletr64/actions/lan.py
|
https://github.com/bpannier/simpletr64/blob/31081139f4e6c85084a56de1617df73927135466/simpletr64/actions/lan.py#L108-L124
|
def getHostDetailsByIndex(self, index, lanInterfaceId=1, timeout=1):
"""Execute GetGenericHostEntry action to get detailed information's of a connected host.
:param index: the index of the host
:param int lanInterfaceId: the id of the LAN interface
:param float timeout: the timeout to wait for the action to be executed
:return: the detailed information's of a connected host.
:rtype: HostDetails
.. seealso:: :meth:`~simpletr64.actions.Lan.getAmountOfHostsConnected`
"""
namespace = Lan.getServiceType("getHostDetailsByIndex") + str(lanInterfaceId)
uri = self.getControlURL(namespace)
results = self.execute(uri, namespace, "GetGenericHostEntry", timeout=timeout, NewIndex=index)
return HostDetails(results)
|
[
"def",
"getHostDetailsByIndex",
"(",
"self",
",",
"index",
",",
"lanInterfaceId",
"=",
"1",
",",
"timeout",
"=",
"1",
")",
":",
"namespace",
"=",
"Lan",
".",
"getServiceType",
"(",
"\"getHostDetailsByIndex\"",
")",
"+",
"str",
"(",
"lanInterfaceId",
")",
"uri",
"=",
"self",
".",
"getControlURL",
"(",
"namespace",
")",
"results",
"=",
"self",
".",
"execute",
"(",
"uri",
",",
"namespace",
",",
"\"GetGenericHostEntry\"",
",",
"timeout",
"=",
"timeout",
",",
"NewIndex",
"=",
"index",
")",
"return",
"HostDetails",
"(",
"results",
")"
] |
Execute GetGenericHostEntry action to get detailed information's of a connected host.
:param index: the index of the host
:param int lanInterfaceId: the id of the LAN interface
:param float timeout: the timeout to wait for the action to be executed
:return: the detailed information's of a connected host.
:rtype: HostDetails
.. seealso:: :meth:`~simpletr64.actions.Lan.getAmountOfHostsConnected`
|
[
"Execute",
"GetGenericHostEntry",
"action",
"to",
"get",
"detailed",
"information",
"s",
"of",
"a",
"connected",
"host",
"."
] |
python
|
train
| 46.647059 |
Murali-group/halp
|
halp/algorithms/directed_random_walk.py
|
https://github.com/Murali-group/halp/blob/6eb27466ba84e2281e18f93b62aae5efb21ef8b3/halp/algorithms/directed_random_walk.py#L16-L64
|
def stationary_distribution(H, pi=None, P=None):
"""Computes the stationary distribution of a random walk on the given
hypergraph using the iterative approach explained in the paper:
Aurelien Ducournau, Alain Bretto, Random walks in directed hypergraphs and
application to semi-supervised image segmentation,
Computer Vision and Image Understanding, Volume 120, March 2014,
Pages 91-102, ISSN 1077-3142, http://dx.doi.org/10.1016/j.cviu.2013.10.012.
(http://www.sciencedirect.com/science/article/pii/S1077314213002038)
:param H: the hypergraph to find the 'Stationary Distribution'
algorithm on.
:param pi: the initial distribution over the nodes. If not provided,
it will be created with a random distribution.
:param P: the transition matrix for the hypergraph. If not provided,
it will be created.
:returns: list -- list of the stationary probabilities for all nodes
in the hypergraph.
:raises: TypeError -- Algorithm only applicable to undirected hypergraphs
:raises: AssertionError -- Each node must have at least 1 outgoing
hyperedge (even if it's only a self-loop).
"""
if not isinstance(H, DirectedHypergraph):
raise TypeError("Algorithm only applicable to undirected hypergraphs")
for node in H.node_iterator():
if len(H.get_forward_star(node)) == 0:
raise AssertionError("Each node must have at least 1 outgoing \
hyperedge (even if it's only a self-loop).")
indices_to_nodes, nodes_to_indices = \
dmat.get_node_mapping(H)
indices_to_hyperedge_ids, hyperedge_ids_to_indices = \
dmat.get_hyperedge_id_mapping(H)
if P is None:
P = _compute_transition_matrix(H,
nodes_to_indices,
hyperedge_ids_to_indices)
node_count = len(H.get_node_set())
if pi is None:
pi = _create_random_starter(node_count)
pi_star = _create_random_starter(node_count)
while not _has_converged(pi_star, pi):
pi = pi_star
pi_star = pi * P
return pi
|
[
"def",
"stationary_distribution",
"(",
"H",
",",
"pi",
"=",
"None",
",",
"P",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"H",
",",
"DirectedHypergraph",
")",
":",
"raise",
"TypeError",
"(",
"\"Algorithm only applicable to undirected hypergraphs\"",
")",
"for",
"node",
"in",
"H",
".",
"node_iterator",
"(",
")",
":",
"if",
"len",
"(",
"H",
".",
"get_forward_star",
"(",
"node",
")",
")",
"==",
"0",
":",
"raise",
"AssertionError",
"(",
"\"Each node must have at least 1 outgoing \\\n hyperedge (even if it's only a self-loop).\"",
")",
"indices_to_nodes",
",",
"nodes_to_indices",
"=",
"dmat",
".",
"get_node_mapping",
"(",
"H",
")",
"indices_to_hyperedge_ids",
",",
"hyperedge_ids_to_indices",
"=",
"dmat",
".",
"get_hyperedge_id_mapping",
"(",
"H",
")",
"if",
"P",
"is",
"None",
":",
"P",
"=",
"_compute_transition_matrix",
"(",
"H",
",",
"nodes_to_indices",
",",
"hyperedge_ids_to_indices",
")",
"node_count",
"=",
"len",
"(",
"H",
".",
"get_node_set",
"(",
")",
")",
"if",
"pi",
"is",
"None",
":",
"pi",
"=",
"_create_random_starter",
"(",
"node_count",
")",
"pi_star",
"=",
"_create_random_starter",
"(",
"node_count",
")",
"while",
"not",
"_has_converged",
"(",
"pi_star",
",",
"pi",
")",
":",
"pi",
"=",
"pi_star",
"pi_star",
"=",
"pi",
"*",
"P",
"return",
"pi"
] |
Computes the stationary distribution of a random walk on the given
hypergraph using the iterative approach explained in the paper:
Aurelien Ducournau, Alain Bretto, Random walks in directed hypergraphs and
application to semi-supervised image segmentation,
Computer Vision and Image Understanding, Volume 120, March 2014,
Pages 91-102, ISSN 1077-3142, http://dx.doi.org/10.1016/j.cviu.2013.10.012.
(http://www.sciencedirect.com/science/article/pii/S1077314213002038)
:param H: the hypergraph to find the 'Stationary Distribution'
algorithm on.
:param pi: the initial distribution over the nodes. If not provided,
it will be created with a random distribution.
:param P: the transition matrix for the hypergraph. If not provided,
it will be created.
:returns: list -- list of the stationary probabilities for all nodes
in the hypergraph.
:raises: TypeError -- Algorithm only applicable to undirected hypergraphs
:raises: AssertionError -- Each node must have at least 1 outgoing
hyperedge (even if it's only a self-loop).
|
[
"Computes",
"the",
"stationary",
"distribution",
"of",
"a",
"random",
"walk",
"on",
"the",
"given",
"hypergraph",
"using",
"the",
"iterative",
"approach",
"explained",
"in",
"the",
"paper",
":",
"Aurelien",
"Ducournau",
"Alain",
"Bretto",
"Random",
"walks",
"in",
"directed",
"hypergraphs",
"and",
"application",
"to",
"semi",
"-",
"supervised",
"image",
"segmentation",
"Computer",
"Vision",
"and",
"Image",
"Understanding",
"Volume",
"120",
"March",
"2014",
"Pages",
"91",
"-",
"102",
"ISSN",
"1077",
"-",
"3142",
"http",
":",
"//",
"dx",
".",
"doi",
".",
"org",
"/",
"10",
".",
"1016",
"/",
"j",
".",
"cviu",
".",
"2013",
".",
"10",
".",
"012",
".",
"(",
"http",
":",
"//",
"www",
".",
"sciencedirect",
".",
"com",
"/",
"science",
"/",
"article",
"/",
"pii",
"/",
"S1077314213002038",
")"
] |
python
|
train
| 43.469388 |
pudo-attic/loadkit
|
loadkit/logger.py
|
https://github.com/pudo-attic/loadkit/blob/1fb17e69e2ffaf3dac4f40b574c3b7afb2198b7c/loadkit/logger.py#L49-L77
|
def load(package, prefix, offset=0, limit=1000):
""" Load lines from the log file with pagination support. """
logs = package.all(LogFile, unicode(prefix))
logs = sorted(logs, key=lambda l: l.name, reverse=True)
seen = 0
record = None
tmp = tempfile.NamedTemporaryFile(suffix='.log')
for log in logs:
shutil.copyfileobj(log.fh(), tmp)
tmp.seek(0)
for line in reversed(list(tmp)):
seen += 1
if seen < offset:
continue
if seen > limit:
tmp.close()
return
try:
d, mo, l, m = line.split(' %s ' % SEP, 4)
if record is not None:
yield record
record = {'time': d, 'module': mo, 'level': l, 'message': m}
except ValueError:
if record is not None:
record['message'] += '\n' + line
tmp.seek(0)
tmp.close()
if record is not None:
yield record
|
[
"def",
"load",
"(",
"package",
",",
"prefix",
",",
"offset",
"=",
"0",
",",
"limit",
"=",
"1000",
")",
":",
"logs",
"=",
"package",
".",
"all",
"(",
"LogFile",
",",
"unicode",
"(",
"prefix",
")",
")",
"logs",
"=",
"sorted",
"(",
"logs",
",",
"key",
"=",
"lambda",
"l",
":",
"l",
".",
"name",
",",
"reverse",
"=",
"True",
")",
"seen",
"=",
"0",
"record",
"=",
"None",
"tmp",
"=",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"suffix",
"=",
"'.log'",
")",
"for",
"log",
"in",
"logs",
":",
"shutil",
".",
"copyfileobj",
"(",
"log",
".",
"fh",
"(",
")",
",",
"tmp",
")",
"tmp",
".",
"seek",
"(",
"0",
")",
"for",
"line",
"in",
"reversed",
"(",
"list",
"(",
"tmp",
")",
")",
":",
"seen",
"+=",
"1",
"if",
"seen",
"<",
"offset",
":",
"continue",
"if",
"seen",
">",
"limit",
":",
"tmp",
".",
"close",
"(",
")",
"return",
"try",
":",
"d",
",",
"mo",
",",
"l",
",",
"m",
"=",
"line",
".",
"split",
"(",
"' %s '",
"%",
"SEP",
",",
"4",
")",
"if",
"record",
"is",
"not",
"None",
":",
"yield",
"record",
"record",
"=",
"{",
"'time'",
":",
"d",
",",
"'module'",
":",
"mo",
",",
"'level'",
":",
"l",
",",
"'message'",
":",
"m",
"}",
"except",
"ValueError",
":",
"if",
"record",
"is",
"not",
"None",
":",
"record",
"[",
"'message'",
"]",
"+=",
"'\\n'",
"+",
"line",
"tmp",
".",
"seek",
"(",
"0",
")",
"tmp",
".",
"close",
"(",
")",
"if",
"record",
"is",
"not",
"None",
":",
"yield",
"record"
] |
Load lines from the log file with pagination support.
|
[
"Load",
"lines",
"from",
"the",
"log",
"file",
"with",
"pagination",
"support",
"."
] |
python
|
train
| 34.172414 |
AnalogJ/lexicon
|
lexicon/providers/route53.py
|
https://github.com/AnalogJ/lexicon/blob/9330b871988753cad44fe2876a217b4c67b1fa0e/lexicon/providers/route53.py#L122-L134
|
def _authenticate(self):
"""Determine the hosted zone id for the domain."""
try:
hosted_zones = self.r53_client.list_hosted_zones_by_name()[
'HostedZones'
]
hosted_zone = next(
hz for hz in hosted_zones
if self.filter_zone(hz)
)
self.domain_id = hosted_zone['Id']
except StopIteration:
raise Exception('No domain found')
|
[
"def",
"_authenticate",
"(",
"self",
")",
":",
"try",
":",
"hosted_zones",
"=",
"self",
".",
"r53_client",
".",
"list_hosted_zones_by_name",
"(",
")",
"[",
"'HostedZones'",
"]",
"hosted_zone",
"=",
"next",
"(",
"hz",
"for",
"hz",
"in",
"hosted_zones",
"if",
"self",
".",
"filter_zone",
"(",
"hz",
")",
")",
"self",
".",
"domain_id",
"=",
"hosted_zone",
"[",
"'Id'",
"]",
"except",
"StopIteration",
":",
"raise",
"Exception",
"(",
"'No domain found'",
")"
] |
Determine the hosted zone id for the domain.
|
[
"Determine",
"the",
"hosted",
"zone",
"id",
"for",
"the",
"domain",
"."
] |
python
|
train
| 34.769231 |
NASA-AMMOS/AIT-Core
|
ait/core/bsc.py
|
https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/bsc.py#L603-L613
|
def rotate_capture_handler_log(self, name):
''' Force a rotation of a handler's log file
Args:
name:
The name of the handler who's log file should be rotated.
'''
for sc_key, sc in self._stream_capturers.iteritems():
for h in sc[0].capture_handlers:
if h['name'] == name:
sc[0]._rotate_log(h)
|
[
"def",
"rotate_capture_handler_log",
"(",
"self",
",",
"name",
")",
":",
"for",
"sc_key",
",",
"sc",
"in",
"self",
".",
"_stream_capturers",
".",
"iteritems",
"(",
")",
":",
"for",
"h",
"in",
"sc",
"[",
"0",
"]",
".",
"capture_handlers",
":",
"if",
"h",
"[",
"'name'",
"]",
"==",
"name",
":",
"sc",
"[",
"0",
"]",
".",
"_rotate_log",
"(",
"h",
")"
] |
Force a rotation of a handler's log file
Args:
name:
The name of the handler who's log file should be rotated.
|
[
"Force",
"a",
"rotation",
"of",
"a",
"handler",
"s",
"log",
"file"
] |
python
|
train
| 35.545455 |