repo (string, 7-54 chars) | path (string, 4-192 chars) | url (string, 87-284 chars) | code (string, 78-104k chars) | language (1 value) | partition (3 values)
---|---|---|---|---|---
heuer/segno | segno/helpers.py | https://github.com/heuer/segno/blob/64d912a2bd17d0b5ff3e8b5d37098edfc663c2b3/segno/helpers.py#L102-L115 | def make_wifi(ssid, password, security, hidden=False):
    """\
    Creates a WIFI configuration QR Code.

    :param str ssid: The SSID of the network.
    :param str|None password: The password.
    :param str|None security: Authentication type; the value should
            be "WEP" or "WPA". Set to ``None`` to omit the value.
            "nopass" is equivalent to setting the value to ``None`` but in
            the former case, the value is not omitted.
    :param bool hidden: Indicates if the network is hidden (default: ``False``)
    :rtype: segno.QRCode
    """
    return segno.make_qr(make_wifi_data(ssid, password, security, hidden))
"def",
"make_wifi",
"(",
"ssid",
",",
"password",
",",
"security",
",",
"hidden",
"=",
"False",
")",
":",
"return",
"segno",
".",
"make_qr",
"(",
"make_wifi_data",
"(",
"ssid",
",",
"password",
",",
"security",
",",
"hidden",
")",
")"
]
| \
Creates a WIFI configuration QR Code.
:param str ssid: The SSID of the network.
:param str|None password: The password.
:param str|None security: Authentication type; the value should
be "WEP" or "WPA". Set to ``None`` to omit the value.
"nopass" is equivalent to setting the value to ``None`` but in
the former case, the value is not omitted.
:param bool hidden: Indicates if the network is hidden (default: ``False``)
:rtype: segno.QRCode | [
"\\",
"Creates",
"a",
"WIFI",
"configuration",
"QR",
"Code",
"."
]
| python | train |
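A quick usage sketch for the helper above. It assumes ``segno`` is installed; the SSID, password, and output filename are placeholders:

```python
# Hypothetical usage of segno.helpers.make_wifi; 'HomeNet'/'s3cret' are made-up
# credentials, and save() infers the image format from the file extension.
from segno import helpers

qrcode = helpers.make_wifi(ssid='HomeNet', password='s3cret', security='WPA')
qrcode.save('wifi-access.png')
```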
saltstack/salt | salt/modules/grains.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/grains.py#L492-L600 | def filter_by(lookup_dict, grain='os_family', merge=None, default='default', base=None):
    '''
    .. versionadded:: 0.17.0

    Look up the given grain in a given dictionary for the current OS and return
    the result.

    Although this may occasionally be useful at the CLI, the primary intent of
    this function is for use in Jinja to make short work of creating lookup
    tables for OS-specific data. For example:

    .. code-block:: jinja

        {% set apache = salt['grains.filter_by']({
            'Debian': {'pkg': 'apache2', 'srv': 'apache2'},
            'RedHat': {'pkg': 'httpd', 'srv': 'httpd'},
        }, default='Debian') %}

        myapache:
          pkg.installed:
            - name: {{ apache.pkg }}
          service.running:
            - name: {{ apache.srv }}

    Values in the lookup table may be overridden by values in Pillar. An
    example Pillar to override values in the example above could be as follows:

    .. code-block:: yaml

        apache:
          lookup:
            pkg: apache_13
            srv: apache

    The call to ``filter_by()`` would be modified as follows to reference those
    Pillar values:

    .. code-block:: jinja

        {% set apache = salt['grains.filter_by']({
            ...
        }, merge=salt['pillar.get']('apache:lookup')) %}

    :param lookup_dict: A dictionary, keyed by a grain, containing a value or
        values relevant to systems matching that grain. For example, a key
        could be the grain for an OS and the value could be the name of a
        package on that particular OS.

        .. versionchanged:: 2016.11.0

            The dictionary key could be a globbing pattern. The function will
            return the corresponding ``lookup_dict`` value where the grain
            value matches the pattern. For example:

            .. code-block:: bash

                # this will render 'got some salt' if Minion ID begins with 'salt'
                salt '*' grains.filter_by '{salt*: got some salt, default: salt is not here}' id

    :param grain: The name of a grain to match with the current system's
        grains. For example, the value of the "os_family" grain for the current
        system could be used to pull values from the ``lookup_dict``
        dictionary.

        .. versionchanged:: 2016.11.0

            The grain value could be a list. The function will return the
            ``lookup_dict`` value for the first item in the list matching
            one of the ``lookup_dict`` keys.

    :param merge: A dictionary to merge with the results of the grain selection
        from ``lookup_dict``. This allows Pillar to override the values in the
        ``lookup_dict``. This could be useful, for example, to override the
        values for non-standard package names such as when using a different
        Python version from the default Python version provided by the OS
        (e.g., ``python26-mysql`` instead of ``python-mysql``).

    :param default: default lookup_dict's key used if the grain does not exist
        or if the grain value has no match in lookup_dict. If unspecified
        the value is "default".

        .. versionadded:: 2014.1.0

    :param base: A lookup_dict key to use for a base dictionary. The
        grain-selected ``lookup_dict`` is merged over this and then finally
        the ``merge`` dictionary is merged. This allows common values for
        each case to be collected in the base and overridden by the grain
        selection dictionary and the merge dictionary. Default is unset.

        .. versionadded:: 2015.5.0

    CLI Example:

    .. code-block:: bash

        salt '*' grains.filter_by '{Debian: Debheads rule, RedHat: I love my hat}'

        # this one will render {D: {E: I, G: H}, J: K}
        salt '*' grains.filter_by '{A: B, C: {D: {E: F, G: H}}}' 'xxx' '{D: {E: I}, J: K}' 'C'

        # next one renders {A: {B: G}, D: J}
        salt '*' grains.filter_by '{default: {A: {B: C}, D: E}, F: {A: {B: G}}, H: {D: I}}' 'xxx' '{D: J}' 'F' 'default'

        # next same as above when default='H' instead of 'F' renders {A: {B: C}, D: J}
    '''
    return salt.utils.data.filter_by(lookup_dict=lookup_dict,
                                     lookup=grain,
                                     traverse=__grains__,
                                     merge=merge,
                                     default=default,
                                     base=base)
"def",
"filter_by",
"(",
"lookup_dict",
",",
"grain",
"=",
"'os_family'",
",",
"merge",
"=",
"None",
",",
"default",
"=",
"'default'",
",",
"base",
"=",
"None",
")",
":",
"return",
"salt",
".",
"utils",
".",
"data",
".",
"filter_by",
"(",
"lookup_dict",
"=",
"lookup_dict",
",",
"lookup",
"=",
"grain",
",",
"traverse",
"=",
"__grains__",
",",
"merge",
"=",
"merge",
",",
"default",
"=",
"default",
",",
"base",
"=",
"base",
")"
]
| .. versionadded:: 0.17.0
Look up the given grain in a given dictionary for the current OS and return
the result
Although this may occasionally be useful at the CLI, the primary intent of
this function is for use in Jinja to make short work of creating lookup
tables for OS-specific data. For example:
.. code-block:: jinja
{% set apache = salt['grains.filter_by']({
'Debian': {'pkg': 'apache2', 'srv': 'apache2'},
'RedHat': {'pkg': 'httpd', 'srv': 'httpd'},
}, default='Debian') %}
myapache:
pkg.installed:
- name: {{ apache.pkg }}
service.running:
- name: {{ apache.srv }}
Values in the lookup table may be overridden by values in Pillar. An
example Pillar to override values in the example above could be as follows:
.. code-block:: yaml
apache:
lookup:
pkg: apache_13
srv: apache
The call to ``filter_by()`` would be modified as follows to reference those
Pillar values:
.. code-block:: jinja
{% set apache = salt['grains.filter_by']({
...
}, merge=salt['pillar.get']('apache:lookup')) %}
:param lookup_dict: A dictionary, keyed by a grain, containing a value or
values relevant to systems matching that grain. For example, a key
could be the grain for an OS and the value could the name of a package
on that particular OS.
.. versionchanged:: 2016.11.0
The dictionary key could be a globbing pattern. The function will
return the corresponding ``lookup_dict`` value where grain value
matches the pattern. For example:
.. code-block:: bash
# this will render 'got some salt' if Minion ID begins from 'salt'
salt '*' grains.filter_by '{salt*: got some salt, default: salt is not here}' id
:param grain: The name of a grain to match with the current system's
grains. For example, the value of the "os_family" grain for the current
system could be used to pull values from the ``lookup_dict``
dictionary.
.. versionchanged:: 2016.11.0
The grain value could be a list. The function will return the
``lookup_dict`` value for a first found item in the list matching
one of the ``lookup_dict`` keys.
:param merge: A dictionary to merge with the results of the grain selection
from ``lookup_dict``. This allows Pillar to override the values in the
``lookup_dict``. This could be useful, for example, to override the
values for non-standard package names such as when using a different
Python version from the default Python version provided by the OS
(e.g., ``python26-mysql`` instead of ``python-mysql``).
:param default: default lookup_dict's key used if the grain does not exists
or if the grain value has no match on lookup_dict. If unspecified
the value is "default".
.. versionadded:: 2014.1.0
:param base: A lookup_dict key to use for a base dictionary. The
grain-selected ``lookup_dict`` is merged over this and then finally
the ``merge`` dictionary is merged. This allows common values for
each case to be collected in the base and overridden by the grain
selection dictionary and the merge dictionary. Default is unset.
.. versionadded:: 2015.5.0
CLI Example:
.. code-block:: bash
salt '*' grains.filter_by '{Debian: Debheads rule, RedHat: I love my hat}'
# this one will render {D: {E: I, G: H}, J: K}
salt '*' grains.filter_by '{A: B, C: {D: {E: F, G: H}}}' 'xxx' '{D: {E: I}, J: K}' 'C'
# next one renders {A: {B: G}, D: J}
salt '*' grains.filter_by '{default: {A: {B: C}, D: E}, F: {A: {B: G}}, H: {D: I}}' 'xxx' '{D: J}' 'F' 'default'
# next same as above when default='H' instead of 'F' renders {A: {B: C}, D: J} | [
"..",
"versionadded",
"::",
"0",
".",
"17",
".",
"0"
]
| python | train |
concordusapps/python-shield | shield/utils.py | https://github.com/concordusapps/python-shield/blob/3c08d483eaec1ebaa814e31c7de5daf82234b8f7/shield/utils.py#L15-L61 | def filter_(*permissions, **kwargs):
    """
    Constructs a clause to filter all bearers or targets for a given
    bearer or target.
    """
    bearer = kwargs['bearer']
    target = kwargs.get('target')
    bearer_cls = type_for(bearer)

    # We need a query object. There are many ways to get one. Either we can
    # be passed one, or we can make one from the session. We can either be
    # passed the session, or we can grab the session from the bearer passed.
    if 'query' in kwargs:
        query = kwargs['query']
    elif 'session' in kwargs:
        query = kwargs['session'].query(target)
    else:
        query = object_session(bearer).query(target)

    getter = functools.partial(
        registry.retrieve,
        bearer=bearer_cls,
        target=target)

    try:
        # Generate a hash of {rulefn: permission} that we can use later
        # to collect all of the rules.
        if len(permissions):
            rules = {getter(permission=x): x for x in permissions}
        else:
            rules = {getter(): None}
    except KeyError:
        # No rules defined. Default to no permission.
        return query.filter(sql.false())

    # Invoke all the rules and collect the results.
    # Abusing reduce here to invoke each rule and send the return value (query)
    # from one rule to the next one. In this way the query becomes
    # increasingly decorated as it marches through the system.
    #   q == query
    #   r == (rulefn, permission)
    reducer = lambda q, r: r[0](permission=r[1], query=q, bearer=bearer)
    return reduce(reducer, six.iteritems(rules), query)
"def",
"filter_",
"(",
"*",
"permissions",
",",
"*",
"*",
"kwargs",
")",
":",
"bearer",
"=",
"kwargs",
"[",
"'bearer'",
"]",
"target",
"=",
"kwargs",
".",
"get",
"(",
"'target'",
")",
"bearer_cls",
"=",
"type_for",
"(",
"bearer",
")",
"# We need a query object. There are many ways to get one, Either we can",
"# be passed one, or we can make one from the session. We can either be",
"# passed the session, or we can grab the session from the bearer passed.",
"if",
"'query'",
"in",
"kwargs",
":",
"query",
"=",
"kwargs",
"[",
"'query'",
"]",
"elif",
"'session'",
"in",
"kwargs",
":",
"query",
"=",
"kwargs",
"[",
"'session'",
"]",
".",
"query",
"(",
"target",
")",
"else",
":",
"query",
"=",
"object_session",
"(",
"bearer",
")",
".",
"query",
"(",
"target",
")",
"getter",
"=",
"functools",
".",
"partial",
"(",
"registry",
".",
"retrieve",
",",
"bearer",
"=",
"bearer_cls",
",",
"target",
"=",
"target",
")",
"try",
":",
"# Generate a hash of {rulefn: permission} that we can use later",
"# to collect all of the rules.",
"if",
"len",
"(",
"permissions",
")",
":",
"rules",
"=",
"{",
"getter",
"(",
"permission",
"=",
"x",
")",
":",
"x",
"for",
"x",
"in",
"permissions",
"}",
"else",
":",
"rules",
"=",
"{",
"getter",
"(",
")",
":",
"None",
"}",
"except",
"KeyError",
":",
"# No rules defined. Default to no permission.",
"return",
"query",
".",
"filter",
"(",
"sql",
".",
"false",
"(",
")",
")",
"# Invoke all the rules and collect the results",
"# Abusing reduce here to invoke each rule and send the return value (query)",
"# from one rule to the next one. In this way the query becomes",
"# increasingly decorated as it marches through the system.",
"# q == query",
"# r = (rulefn, permission)",
"reducer",
"=",
"lambda",
"q",
",",
"r",
":",
"r",
"[",
"0",
"]",
"(",
"permission",
"=",
"r",
"[",
"1",
"]",
",",
"query",
"=",
"q",
",",
"bearer",
"=",
"bearer",
")",
"return",
"reduce",
"(",
"reducer",
",",
"six",
".",
"iteritems",
"(",
"rules",
")",
",",
"query",
")"
]
| Constructs a clause to filter all bearers or targets for a given
berarer or target. | [
"Constructs",
"a",
"clause",
"to",
"filter",
"all",
"bearers",
"or",
"targets",
"for",
"a",
"given",
"berarer",
"or",
"target",
"."
]
| python | train |
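A hedged call sketch for ``filter_`` above. ``user``, ``Post``, and the ``'read'`` permission are invented; they presume matching rules were registered with shield's ``registry`` beforehand:

```python
# Illustrative only: `user` is a SQLAlchemy-mapped bearer instance and Post is
# a target class for which a 'read' rule has been registered with shield.
query = filter_('read', bearer=user, target=Post)
readable_posts = query.all()  # the decorated query runs like any SQLAlchemy query
```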
decryptus/sonicprobe | sonicprobe/libs/xys.py | https://github.com/decryptus/sonicprobe/blob/72f73f3a40d2982d79ad68686e36aa31d94b76f8/sonicprobe/libs/xys.py#L350-L374 | def add_parameterized_validator(param_validator, base_tag, tag_prefix=None):
    """
    Add a parameterized validator for the given tag prefix.
    If tag_prefix is None, it is automatically constructed as
    u'!~%s(' % param_validator.__name__

    A parameterized validator is a function that accepts a document node
    (in the form of a Python object), a schema node (also a Python
    object), and other parameters (integer or string) that directly come
    from its complete YAML name in the schema. It returns True if the
    document node is valid according to the schema node. Note that the
    validator function does not have to recurse in sub-nodes, because
    XYS already does that.
    """
    # pylint: disable-msg=C0111,W0621
    if not tag_prefix:
        tag_prefix = u'!~%s(' % param_validator.__name__

    def multi_constructor(loader, tag_suffix, node):
        def temp_validator(node, schema):
            return param_validator(node, schema, *_split_params(tag_prefix, tag_suffix))
        temp_validator.__name__ = str(tag_prefix + tag_suffix)
        return ContructorValidatorNode(base_tag,
                                       base_tag,
                                       temp_validator)(loader, node)

    yaml.add_multi_constructor(tag_prefix, multi_constructor)
"def",
"add_parameterized_validator",
"(",
"param_validator",
",",
"base_tag",
",",
"tag_prefix",
"=",
"None",
")",
":",
"# pylint: disable-msg=C0111,W0621",
"if",
"not",
"tag_prefix",
":",
"tag_prefix",
"=",
"u'!~%s('",
"%",
"param_validator",
".",
"__name__",
"def",
"multi_constructor",
"(",
"loader",
",",
"tag_suffix",
",",
"node",
")",
":",
"def",
"temp_validator",
"(",
"node",
",",
"schema",
")",
":",
"return",
"param_validator",
"(",
"node",
",",
"schema",
",",
"*",
"_split_params",
"(",
"tag_prefix",
",",
"tag_suffix",
")",
")",
"temp_validator",
".",
"__name__",
"=",
"str",
"(",
"tag_prefix",
"+",
"tag_suffix",
")",
"return",
"ContructorValidatorNode",
"(",
"base_tag",
",",
"base_tag",
",",
"temp_validator",
")",
"(",
"loader",
",",
"node",
")",
"yaml",
".",
"add_multi_constructor",
"(",
"tag_prefix",
",",
"multi_constructor",
")"
]
| Add a parameterized validator for the given tag prefix.
If tag_prefix is None, it is automatically constructed as
u'!~%s(' % param_validator.__name__
A parametrized validator is a function that accepts a document node
(in the form of a Python object), a schema node (also a Python
object), and other parameters (integer or string) that directly come
from its complete YAML name in the schema. It returns True if the
document node is valid according to the schema node. Note that the
validator function does not have to recurse in sub-nodes, because
XYS already does that. | [
"Add",
"a",
"parameterized",
"validator",
"for",
"the",
"given",
"tag",
"prefix",
".",
"If",
"tag_prefix",
"is",
"None",
"it",
"is",
"automatically",
"constructed",
"as",
"u",
"!~%s",
"(",
"%",
"param_validator",
".",
"__name__",
"A",
"parametrized",
"validator",
"is",
"a",
"function",
"that",
"accepts",
"a",
"document",
"node",
"(",
"in",
"the",
"form",
"of",
"a",
"Python",
"object",
")",
"a",
"schema",
"node",
"(",
"also",
"a",
"Python",
"object",
")",
"and",
"other",
"parameters",
"(",
"integer",
"or",
"string",
")",
"that",
"directly",
"come",
"from",
"its",
"complete",
"YAML",
"name",
"in",
"the",
"schema",
".",
"It",
"returns",
"True",
"if",
"the",
"document",
"node",
"is",
"valid",
"according",
"to",
"the",
"schema",
"node",
".",
"Note",
"that",
"the",
"validator",
"function",
"does",
"not",
"have",
"to",
"recurse",
"in",
"sub",
"-",
"nodes",
"because",
"XYS",
"already",
"does",
"that",
"."
]
| python | train |
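A minimal registration sketch for the function above. The ``between`` validator and its bounds are invented for illustration; per the docstring, the document node arrives as a plain Python object:

```python
# Illustrative parameterized validator, usable in a schema as `!~between(1,10)`.
def between(node, schema, low, high):
    # low and high come straight from the parameters in the YAML tag name.
    return low <= node <= high

add_parameterized_validator(between, base_tag=u'tag:yaml.org,2002:int')
```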
westurner/pyrpo | pyrpo/pyrpo.py | https://github.com/westurner/pyrpo/blob/2a910af055dc405b761571a52ef87842397ddadf/pyrpo/pyrpo.py#L1701-L1761 | def search_upwards(self, fpath=None, repodirname='.svn', upwards={}):
    """
    Traverse filesystem upwards, searching for .svn directories
    with matching UUIDs (Recursive)

    Args:
        fpath (str): file path to search upwards from
        repodirname (str): directory name to search for (``.svn``)
        upwards (dict): dict of already-searched directories

    example::

        repo/.svn
        repo/dir1/.svn
        repo/dir1/dir2/.svn

        >> search_upwards('repo/')
        << 'repo/'
        >> search_upwards('repo/dir1')
        << 'repo/'
        >> search_upwards('repo/dir1/dir2')
        << 'repo/'

        repo/.svn
        repo/dirA/
        repo/dirA/dirB/.svn

        >> search_upwards('repo/dirA')
        << 'repo/'
        >> search_upwards('repo/dirA/dirB')
        >> 'repo/dirB')
    """
    fpath = fpath or self.fpath
    uuid = self.unique_id
    last_path = self
    path_comp = fpath.split(os.path.sep)
    # [0:-1], [0:-2], [0:-1*len(path_comp)]
    for n in xrange(1, len(path_comp) - 1):
        checkpath = os.path.join(*path_comp[0:-1 * n])
        repodir = os.path.join(checkpath, repodirname)
        upw_uuid = upwards.get(repodir)
        if upw_uuid:
            if upw_uuid == uuid:
                last_path = SvnRepository(checkpath)
                continue
            else:
                break
        elif os.path.exists(repodir):
            repo = SvnRepository(checkpath)
            upw_uuid = repo.unique_id
            upwards[repodir] = upw_uuid
            # TODO: match on REVISION too
            if upw_uuid == uuid:
                last_path = repo
                continue
            else:
                break
    return last_path
"def",
"search_upwards",
"(",
"self",
",",
"fpath",
"=",
"None",
",",
"repodirname",
"=",
"'.svn'",
",",
"upwards",
"=",
"{",
"}",
")",
":",
"fpath",
"=",
"fpath",
"or",
"self",
".",
"fpath",
"uuid",
"=",
"self",
".",
"unique_id",
"last_path",
"=",
"self",
"path_comp",
"=",
"fpath",
".",
"split",
"(",
"os",
".",
"path",
".",
"sep",
")",
"# [0:-1], [0:-2], [0:-1*len(path_comp)]",
"for",
"n",
"in",
"xrange",
"(",
"1",
",",
"len",
"(",
"path_comp",
")",
"-",
"1",
")",
":",
"checkpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"*",
"path_comp",
"[",
"0",
":",
"-",
"1",
"*",
"n",
"]",
")",
"repodir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"checkpath",
",",
"repodirname",
")",
"upw_uuid",
"=",
"upwards",
".",
"get",
"(",
"repodir",
")",
"if",
"upw_uuid",
":",
"if",
"upw_uuid",
"==",
"uuid",
":",
"last_path",
"=",
"SvnRepository",
"(",
"checkpath",
")",
"continue",
"else",
":",
"break",
"elif",
"os",
".",
"path",
".",
"exists",
"(",
"repodir",
")",
":",
"repo",
"=",
"SvnRepository",
"(",
"checkpath",
")",
"upw_uuid",
"=",
"repo",
".",
"unique_id",
"upwards",
"[",
"repodir",
"]",
"=",
"upw_uuid",
"# TODO: match on REVISION too",
"if",
"upw_uuid",
"==",
"uuid",
":",
"last_path",
"=",
"repo",
"continue",
"else",
":",
"break",
"return",
"last_path"
]
| Traverse filesystem upwards, searching for .svn directories
with matching UUIDs (Recursive)
Args:
fpath (str): file path to search upwards from
repodirname (str): directory name to search for (``.svn``)
upwards (dict): dict of already-searched directories
example::
repo/.svn
repo/dir1/.svn
repo/dir1/dir2/.svn
>> search_upwards('repo/')
<< 'repo/'
>> search_upwards('repo/dir1')
<< 'repo/'
>> search_upwards('repo/dir1/dir2')
<< 'repo/'
repo/.svn
repo/dirA/
repo/dirA/dirB/.svn
>> search_upwards('repo/dirA')
<< 'repo/'
>> search_upwards('repo/dirA/dirB')
>> 'repo/dirB') | [
"Traverse",
"filesystem",
"upwards",
"searching",
"for",
".",
"svn",
"directories",
"with",
"matching",
"UUIDs",
"(",
"Recursive",
")"
]
| python | train |
minhhoit/yacms | yacms/core/models.py | https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/core/models.py#L440-L449 | def delete(self, *args, **kwargs):
    """
    Update the ordering values for siblings.
    """
    lookup = self.with_respect_to()
    lookup["_order__gte"] = self._order
    concrete_model = base_concrete_model(Orderable, self)
    after = concrete_model.objects.filter(**lookup)
    after.update(_order=models.F("_order") - 1)
    super(Orderable, self).delete(*args, **kwargs)
"def",
"delete",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"lookup",
"=",
"self",
".",
"with_respect_to",
"(",
")",
"lookup",
"[",
"\"_order__gte\"",
"]",
"=",
"self",
".",
"_order",
"concrete_model",
"=",
"base_concrete_model",
"(",
"Orderable",
",",
"self",
")",
"after",
"=",
"concrete_model",
".",
"objects",
".",
"filter",
"(",
"*",
"*",
"lookup",
")",
"after",
".",
"update",
"(",
"_order",
"=",
"models",
".",
"F",
"(",
"\"_order\"",
")",
"-",
"1",
")",
"super",
"(",
"Orderable",
",",
"self",
")",
".",
"delete",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
]
| Update the ordering values for siblings. | [
"Update",
"the",
"ordering",
"values",
"for",
"siblings",
"."
]
| python | train |
jssimporter/python-jss | jss/jamf_software_server.py | https://github.com/jssimporter/python-jss/blob/b95185d74e0c0531b0b563f280d4129e21d5fe5d/jss/jamf_software_server.py#L743-L745 | def Policy(self, data=None, subset=None):
    """{dynamic_docstring}"""
    return self.factory.get_object(jssobjects.Policy, data, subset)
"def",
"Policy",
"(",
"self",
",",
"data",
"=",
"None",
",",
"subset",
"=",
"None",
")",
":",
"return",
"self",
".",
"factory",
".",
"get_object",
"(",
"jssobjects",
".",
"Policy",
",",
"data",
",",
"subset",
")"
]
| {dynamic_docstring} | [
"{",
"dynamic_docstring",
"}"
]
| python | train |
glue-viz/glue-vispy-viewers | glue_vispy_viewers/extern/vispy/gloo/glir.py | https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/gloo/glir.py#L235-L293 | def convert_shaders(convert, shaders):
    """ Modify shading code so that we can write code once
    and make it run "everywhere".
    """
    # New version of the shaders
    out = []

    if convert == 'es2':
        for isfragment, shader in enumerate(shaders):
            has_version = False
            has_prec_float = False
            has_prec_int = False
            lines = []
            # Iterate over lines
            for line in shader.lstrip().splitlines():
                if line.startswith('#version'):
                    has_version = True
                    continue
                if line.startswith('precision '):
                    has_prec_float = has_prec_float or 'float' in line
                    has_prec_int = has_prec_int or 'int' in line
                lines.append(line.rstrip())
            # Write
            # BUG: fails on WebGL (Chrome)
            # if True:
            #     lines.insert(has_version, '#line 0')
            if not has_prec_float:
                lines.insert(has_version, 'precision highp float;')
            if not has_prec_int:
                lines.insert(has_version, 'precision highp int;')
            # BUG: fails on WebGL (Chrome)
            # if not has_version:
            #     lines.insert(has_version, '#version 100')
            out.append('\n'.join(lines))

    elif convert == 'desktop':
        for isfragment, shader in enumerate(shaders):
            has_version = False
            lines = []
            # Iterate over lines
            for line in shader.lstrip().splitlines():
                has_version = has_version or line.startswith('#version')
                if line.startswith('precision '):
                    line = ''
                for prec in (' highp ', ' mediump ', ' lowp '):
                    line = line.replace(prec, ' ')
                lines.append(line.rstrip())
            # Write
            if not has_version:
                lines.insert(0, '#version 120\n')
            out.append('\n'.join(lines))

    else:
        raise ValueError('Cannot convert shaders to %r.' % convert)

    return tuple(out)
"def",
"convert_shaders",
"(",
"convert",
",",
"shaders",
")",
":",
"# New version of the shaders",
"out",
"=",
"[",
"]",
"if",
"convert",
"==",
"'es2'",
":",
"for",
"isfragment",
",",
"shader",
"in",
"enumerate",
"(",
"shaders",
")",
":",
"has_version",
"=",
"False",
"has_prec_float",
"=",
"False",
"has_prec_int",
"=",
"False",
"lines",
"=",
"[",
"]",
"# Iterate over lines",
"for",
"line",
"in",
"shader",
".",
"lstrip",
"(",
")",
".",
"splitlines",
"(",
")",
":",
"if",
"line",
".",
"startswith",
"(",
"'#version'",
")",
":",
"has_version",
"=",
"True",
"continue",
"if",
"line",
".",
"startswith",
"(",
"'precision '",
")",
":",
"has_prec_float",
"=",
"has_prec_float",
"or",
"'float'",
"in",
"line",
"has_prec_int",
"=",
"has_prec_int",
"or",
"'int'",
"in",
"line",
"lines",
".",
"append",
"(",
"line",
".",
"rstrip",
"(",
")",
")",
"# Write",
"# BUG: fails on WebGL (Chrome)",
"# if True:",
"# lines.insert(has_version, '#line 0')",
"if",
"not",
"has_prec_float",
":",
"lines",
".",
"insert",
"(",
"has_version",
",",
"'precision highp float;'",
")",
"if",
"not",
"has_prec_int",
":",
"lines",
".",
"insert",
"(",
"has_version",
",",
"'precision highp int;'",
")",
"# BUG: fails on WebGL (Chrome)",
"# if not has_version:",
"# lines.insert(has_version, '#version 100')",
"out",
".",
"append",
"(",
"'\\n'",
".",
"join",
"(",
"lines",
")",
")",
"elif",
"convert",
"==",
"'desktop'",
":",
"for",
"isfragment",
",",
"shader",
"in",
"enumerate",
"(",
"shaders",
")",
":",
"has_version",
"=",
"False",
"lines",
"=",
"[",
"]",
"# Iterate over lines",
"for",
"line",
"in",
"shader",
".",
"lstrip",
"(",
")",
".",
"splitlines",
"(",
")",
":",
"has_version",
"=",
"has_version",
"or",
"line",
".",
"startswith",
"(",
"'#version'",
")",
"if",
"line",
".",
"startswith",
"(",
"'precision '",
")",
":",
"line",
"=",
"''",
"for",
"prec",
"in",
"(",
"' highp '",
",",
"' mediump '",
",",
"' lowp '",
")",
":",
"line",
"=",
"line",
".",
"replace",
"(",
"prec",
",",
"' '",
")",
"lines",
".",
"append",
"(",
"line",
".",
"rstrip",
"(",
")",
")",
"# Write",
"if",
"not",
"has_version",
":",
"lines",
".",
"insert",
"(",
"0",
",",
"'#version 120\\n'",
")",
"out",
".",
"append",
"(",
"'\\n'",
".",
"join",
"(",
"lines",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Cannot convert shaders to %r.'",
"%",
"convert",
")",
"return",
"tuple",
"(",
"out",
")"
]
| Modify shading code so that we can write code once
and make it run "everywhere". | [
"Modify",
"shading",
"code",
"so",
"that",
"we",
"can",
"write",
"code",
"once",
"and",
"make",
"it",
"run",
"everywhere",
"."
]
| python | train |
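A small sketch exercising the converter above in both directions; the GLSL snippets are placeholders:

```python
# Convert an ES2-style pair to desktop GL (adds '#version 120' and strips
# precision qualifiers), then back ('es2' injects default precision lines).
vertex = "attribute vec2 a_position;\nvoid main() { gl_Position = vec4(a_position, 0.0, 1.0); }"
fragment = "precision mediump float;\nvoid main() { gl_FragColor = vec4(1.0); }"

desktop_vert, desktop_frag = convert_shaders('desktop', (vertex, fragment))
es2_vert, es2_frag = convert_shaders('es2', (desktop_vert, desktop_frag))
```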
twilio/twilio-python | twilio/rest/messaging/v1/session/__init__.py | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/messaging/v1/session/__init__.py#L312-L321 | def participants(self):
    """
    Access the participants

    :returns: twilio.rest.messaging.v1.session.participant.ParticipantList
    :rtype: twilio.rest.messaging.v1.session.participant.ParticipantList
    """
    if self._participants is None:
        self._participants = ParticipantList(self._version, session_sid=self._solution['sid'], )
    return self._participants
"def",
"participants",
"(",
"self",
")",
":",
"if",
"self",
".",
"_participants",
"is",
"None",
":",
"self",
".",
"_participants",
"=",
"ParticipantList",
"(",
"self",
".",
"_version",
",",
"session_sid",
"=",
"self",
".",
"_solution",
"[",
"'sid'",
"]",
",",
")",
"return",
"self",
".",
"_participants"
]
| Access the participants
:returns: twilio.rest.messaging.v1.session.participant.ParticipantList
:rtype: twilio.rest.messaging.v1.session.participant.ParticipantList | [
"Access",
"the",
"participants"
]
| python | train |
QUANTAXIS/QUANTAXIS | QUANTAXIS/QAData/base_datastruct.py | https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAData/base_datastruct.py#L447-L451 | def price_diff(self):
    'Return the first-order difference of DataStruct.price'
    res = self.price.groupby(level=1).apply(lambda x: x.diff(1))
    res.name = 'price_diff'
    return res
"def",
"price_diff",
"(",
"self",
")",
":",
"res",
"=",
"self",
".",
"price",
".",
"groupby",
"(",
"level",
"=",
"1",
")",
".",
"apply",
"(",
"lambda",
"x",
":",
"x",
".",
"diff",
"(",
"1",
")",
")",
"res",
".",
"name",
"=",
"'price_diff'",
"return",
"res"
]
| 返回DataStruct.price的一阶差分 | [
"返回DataStruct",
".",
"price的一阶差分"
]
| python | train |
pmacosta/peng | peng/wave_functions.py | https://github.com/pmacosta/peng/blob/976935377adaa3de26fc5677aceb2cdfbd6f93a7/peng/wave_functions.py#L67-L79 | def _build_units(indep_units, dep_units, op):
    """Build unit math operations."""
    if (not dep_units) and (not indep_units):
        return ""
    if dep_units and (not indep_units):
        return dep_units
    if (not dep_units) and indep_units:
        return (
            remove_extra_delims("1{0}({1})".format(op, indep_units))
            if op == "/"
            else remove_extra_delims("({0})".format(indep_units))
        )
    return remove_extra_delims("({0}){1}({2})".format(dep_units, op, indep_units))
"def",
"_build_units",
"(",
"indep_units",
",",
"dep_units",
",",
"op",
")",
":",
"if",
"(",
"not",
"dep_units",
")",
"and",
"(",
"not",
"indep_units",
")",
":",
"return",
"\"\"",
"if",
"dep_units",
"and",
"(",
"not",
"indep_units",
")",
":",
"return",
"dep_units",
"if",
"(",
"not",
"dep_units",
")",
"and",
"indep_units",
":",
"return",
"(",
"remove_extra_delims",
"(",
"\"1{0}({1})\"",
".",
"format",
"(",
"op",
",",
"indep_units",
")",
")",
"if",
"op",
"==",
"\"/\"",
"else",
"remove_extra_delims",
"(",
"\"({0})\"",
".",
"format",
"(",
"indep_units",
")",
")",
")",
"return",
"remove_extra_delims",
"(",
"\"({0}){1}({2})\"",
".",
"format",
"(",
"dep_units",
",",
"op",
",",
"indep_units",
")",
")"
]
| Build unit math operations. | [
"Build",
"unit",
"math",
"operations",
"."
]
| python | test |
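Worked examples for the four branches above. The arrow comments show the raw templates; the wrapping ``remove_extra_delims`` call may simplify redundant parentheses, so exact outputs are not asserted:

```python
# Branch-by-branch sketch (results shown before any delimiter cleanup):
_build_units('', '', '/')           # -> ''               neither side has units
_build_units('', 'Volts', '/')      # -> 'Volts'          dependent units only
_build_units('Sec', '', '/')        # -> '1/(Sec)'        independent units, division
_build_units('Sec', 'Volts', '/')   # -> '(Volts)/(Sec)'  both sides present
```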
Jajcus/pyxmpp2 | pyxmpp2/cache.py | https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/cache.py#L591-L605 | def tick(self):
    """Do the regular cache maintenance.

    Must be called from time to time for timeouts and cache old items
    purging to work."""
    self._lock.acquire()
    try:
        now = datetime.utcnow()
        for t, f in list(self._active_fetchers):
            if t > now:
                break
            f.timeout()
        self.purge_items()
    finally:
        self._lock.release()
"def",
"tick",
"(",
"self",
")",
":",
"self",
".",
"_lock",
".",
"acquire",
"(",
")",
"try",
":",
"now",
"=",
"datetime",
".",
"utcnow",
"(",
")",
"for",
"t",
",",
"f",
"in",
"list",
"(",
"self",
".",
"_active_fetchers",
")",
":",
"if",
"t",
">",
"now",
":",
"break",
"f",
".",
"timeout",
"(",
")",
"self",
".",
"purge_items",
"(",
")",
"finally",
":",
"self",
".",
"_lock",
".",
"release",
"(",
")"
]
| Do the regular cache maintenance.
Must be called from time to time for timeouts and cache old items
purging to work. | [
"Do",
"the",
"regular",
"cache",
"maintenance",
"."
]
| python | valid |
kwikteam/phy | phy/plot/panzoom.py | https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/plot/panzoom.py#L435-L452 | def on_key_press(self, event):
    """Pan and zoom with the keyboard."""
    # Zooming with the keyboard.
    key = event.key
    if event.modifiers:
        return

    # Pan.
    if self.enable_keyboard_pan and key in self._arrows:
        self._pan_keyboard(key)

    # Zoom.
    if key in self._pm:
        self._zoom_keyboard(key)

    # Reset with 'R'.
    if key == 'R':
        self.reset()
"def",
"on_key_press",
"(",
"self",
",",
"event",
")",
":",
"# Zooming with the keyboard.",
"key",
"=",
"event",
".",
"key",
"if",
"event",
".",
"modifiers",
":",
"return",
"# Pan.",
"if",
"self",
".",
"enable_keyboard_pan",
"and",
"key",
"in",
"self",
".",
"_arrows",
":",
"self",
".",
"_pan_keyboard",
"(",
"key",
")",
"# Zoom.",
"if",
"key",
"in",
"self",
".",
"_pm",
":",
"self",
".",
"_zoom_keyboard",
"(",
"key",
")",
"# Reset with 'R'.",
"if",
"key",
"==",
"'R'",
":",
"self",
".",
"reset",
"(",
")"
]
| Pan and zoom with the keyboard. | [
"Pan",
"and",
"zoom",
"with",
"the",
"keyboard",
"."
]
| python | train |
manns/pyspread | pyspread/src/gui/_grid.py | https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_grid.py#L1084-L1090 | def OnRefreshSelectedCells(self, event):
    """Event handler for refreshing the selected cells via menu"""

    self.grid.actions.refresh_selected_frozen_cells()
    self.grid.ForceRefresh()

    event.Skip()
"def",
"OnRefreshSelectedCells",
"(",
"self",
",",
"event",
")",
":",
"self",
".",
"grid",
".",
"actions",
".",
"refresh_selected_frozen_cells",
"(",
")",
"self",
".",
"grid",
".",
"ForceRefresh",
"(",
")",
"event",
".",
"Skip",
"(",
")"
]
| Event handler for refreshing the selected cells via menu | [
"Event",
"handler",
"for",
"refreshing",
"the",
"selected",
"cells",
"via",
"menu"
]
| python | train |
spyder-ide/spyder | spyder/plugins/editor/widgets/editor.py | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/widgets/editor.py#L384-L389 | def keyPressEvent(self, event):
    """Reimplement Qt method to allow cyclic behavior."""
    if event.key() == Qt.Key_Down:
        self.select_row(1)
    elif event.key() == Qt.Key_Up:
        self.select_row(-1)
"def",
"keyPressEvent",
"(",
"self",
",",
"event",
")",
":",
"if",
"event",
".",
"key",
"(",
")",
"==",
"Qt",
".",
"Key_Down",
":",
"self",
".",
"select_row",
"(",
"1",
")",
"elif",
"event",
".",
"key",
"(",
")",
"==",
"Qt",
".",
"Key_Up",
":",
"self",
".",
"select_row",
"(",
"-",
"1",
")"
]
| Reimplement Qt method to allow cyclic behavior. | [
"Reimplement",
"Qt",
"method",
"to",
"allow",
"cyclic",
"behavior",
"."
]
| python | train |
cltk/cltk | cltk/corpus/greek/tlg/parse_tlg_indices.py | https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/corpus/greek/tlg/parse_tlg_indices.py#L148-L154 | def get_date_of_author(_id):
    """Pass author id and return the name of its associated date."""
    _dict = get_date_author()
    for date, ids in _dict.items():
        if _id in ids:
            return date
    return None
"def",
"get_date_of_author",
"(",
"_id",
")",
":",
"_dict",
"=",
"get_date_author",
"(",
")",
"for",
"date",
",",
"ids",
"in",
"_dict",
".",
"items",
"(",
")",
":",
"if",
"_id",
"in",
"ids",
":",
"return",
"date",
"return",
"None"
]
| Pass author id and return the name of its associated date. | [
"Pass",
"author",
"id",
"and",
"return",
"the",
"name",
"of",
"its",
"associated",
"date",
"."
]
| python | train |
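A lookup sketch; TLG id ``'0012'`` (Homer) is used for illustration, and the exact date label returned depends on the packaged index:

```python
# Returns the date bucket for the given TLG author id, or None if the id
# is absent from the index (illustrative id shown).
date_label = get_date_of_author('0012')
```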
insightindustry/validator-collection | validator_collection/validators.py | https://github.com/insightindustry/validator-collection/blob/8c8047a0fa36cc88a021771279898278c4cc98e3/validator_collection/validators.py#L2517-L2552 | def ipv6(value,
         allow_empty = False,
         **kwargs):
    """Validate that ``value`` is a valid IP address version 6.

    :param value: The value to validate.

    :param allow_empty: If ``True``, returns :obj:`None <python:None>` if
      ``value`` is empty. If ``False``, raises a
      :class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
      if ``value`` is empty. Defaults to ``False``.
    :type allow_empty: :class:`bool <python:bool>`

    :returns: ``value`` / :obj:`None <python:None>`

    :raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False``
    :raises InvalidIPAddressError: if ``value`` is not a valid IP version 6 address or
      is empty and ``allow_empty`` is not set to ``True``
    """
    if not value and allow_empty is False:
        raise errors.EmptyValueError('value (%s) was empty' % value)
    elif not value:
        return None

    if not isinstance(value, str):
        raise errors.InvalidIPAddressError('value (%s) is not a valid ipv6' % value)

    value = value.lower().strip()

    is_valid = IPV6_REGEX.match(value)

    if not is_valid:
        raise errors.InvalidIPAddressError('value (%s) is not a valid ipv6' % value)

    return value
"def",
"ipv6",
"(",
"value",
",",
"allow_empty",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"value",
"and",
"allow_empty",
"is",
"False",
":",
"raise",
"errors",
".",
"EmptyValueError",
"(",
"'value (%s) was empty'",
"%",
"value",
")",
"elif",
"not",
"value",
":",
"return",
"None",
"if",
"not",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"raise",
"errors",
".",
"InvalidIPAddressError",
"(",
"'value (%s) is not a valid ipv6'",
"%",
"value",
")",
"value",
"=",
"value",
".",
"lower",
"(",
")",
".",
"strip",
"(",
")",
"is_valid",
"=",
"IPV6_REGEX",
".",
"match",
"(",
"value",
")",
"if",
"not",
"is_valid",
":",
"raise",
"errors",
".",
"InvalidIPAddressError",
"(",
"'value (%s) is not a valid ipv6'",
"%",
"value",
")",
"return",
"value"
]
| Validate that ``value`` is a valid IP address version 6.
:param value: The value to validate.
:param allow_empty: If ``True``, returns :obj:`None <python:None>` if
``value`` is empty. If ``False``, raises a
:class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
if ``value`` is empty. Defaults to ``False``.
:type allow_empty: :class:`bool <python:bool>`
:returns: ``value`` / :obj:`None <python:None>`
:raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False``
:raises InvalidIPAddressError: if ``value`` is not a valid IP version 6 address or
empty with ``allow_empty`` is not set to ``True`` | [
"Validate",
"that",
"value",
"is",
"a",
"valid",
"IP",
"address",
"version",
"6",
"."
]
| python | train |
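A usage sketch for the validator above, matching its documented raise behavior:

```python
from validator_collection import validators, errors

value = validators.ipv6('2001:db8::1')   # returns the lower-cased, stripped string
try:
    validators.ipv6('not-an-address')
except errors.InvalidIPAddressError:
    pass                                  # invalid input raises, per the docstring
```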
pneff/wsgiservice | wsgiservice/resource.py | https://github.com/pneff/wsgiservice/blob/03c064ac2e8c53a1aac9c7b99970f23cf79e20f4/wsgiservice/resource.py#L243-L256 | def handle_ignored_resources(self):
    """Ignore robots.txt and favicon.ico GET requests based on a list of
    absolute paths in :attr:`IGNORED_PATHS`. Aborts the request with a 404
    status code.

    This is mostly a usability issue to avoid extra log entries for
    resources we are not interested in.

    :raises: :class:`webob.exceptions.ResponseException` of status 404 if
        the resource is ignored.
    """
    if (self.method in ('GET', 'HEAD') and
            self.request.path_qs in self.IGNORED_PATHS):
        raise_404(self)
"def",
"handle_ignored_resources",
"(",
"self",
")",
":",
"if",
"(",
"self",
".",
"method",
"in",
"(",
"'GET'",
",",
"'HEAD'",
")",
"and",
"self",
".",
"request",
".",
"path_qs",
"in",
"self",
".",
"IGNORED_PATHS",
")",
":",
"raise_404",
"(",
"self",
")"
]
| Ignore robots.txt and favicon.ico GET requests based on a list of
absolute paths in :attr:`IGNORED_PATHS`. Aborts the request with a 404
status code.
This is mostly a usability issue to avoid extra log entries for
resources we are not interested in.
:raises: :class:`webob.exceptions.ResponseException` of status 404 if
the resource is ignored. | [
"Ignore",
"robots",
".",
"txt",
"and",
"favicon",
".",
"ico",
"GET",
"requests",
"based",
"on",
"a",
"list",
"of",
"absolute",
"paths",
"in",
":",
"attr",
":",
"IGNORED_PATHS",
".",
"Aborts",
"the",
"request",
"with",
"a",
"404",
"status",
"code",
"."
]
| python | train |
lmjohns3/theanets | examples/utils.py | https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/examples/utils.py#L18-L30 | def find(dataset, url):
    '''Find the location of a dataset on disk, downloading if needed.'''
    fn = os.path.join(DATASETS, dataset)
    dn = os.path.dirname(fn)
    if not os.path.exists(dn):
        # Note: interpolate with % rather than passing dn as a second print
        # argument, which would print a tuple instead of a formatted message.
        print('creating dataset directory: %s' % dn)
        os.makedirs(dn)
    if not os.path.exists(fn):
        if sys.version_info < (3, ):
            urllib.urlretrieve(url, fn)
        else:
            urllib.request.urlretrieve(url, fn)
    return fn
"def",
"find",
"(",
"dataset",
",",
"url",
")",
":",
"fn",
"=",
"os",
".",
"path",
".",
"join",
"(",
"DATASETS",
",",
"dataset",
")",
"dn",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"fn",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"dn",
")",
":",
"print",
"(",
"'creating dataset directory: %s'",
",",
"dn",
")",
"os",
".",
"makedirs",
"(",
"dn",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"fn",
")",
":",
"if",
"sys",
".",
"version_info",
"<",
"(",
"3",
",",
")",
":",
"urllib",
".",
"urlretrieve",
"(",
"url",
",",
"fn",
")",
"else",
":",
"urllib",
".",
"request",
".",
"urlretrieve",
"(",
"url",
",",
"fn",
")",
"return",
"fn"
]
| Find the location of a dataset on disk, downloading if needed. | [
"Find",
"the",
"location",
"of",
"a",
"dataset",
"on",
"disk",
"downloading",
"if",
"needed",
"."
]
| python | test |
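A call sketch; the MNIST URL is illustrative of the datasets these example scripts fetch:

```python
# Downloads on first use, then returns the cached path under DATASETS.
path = find('mnist.pkl.gz', 'http://deeplearning.net/data/mnist/mnist.pkl.gz')
```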
Erotemic/utool | utool/util_path.py | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L1726-L1816 | def sedfile(fpath, regexpr, repl, force=False, verbose=True, veryverbose=False):
    """
    Executes sed on a specific file

    Args:
        fpath (str): file path string
        regexpr (str):
        repl (str):
        force (bool): (default = False)
        verbose (bool): verbosity flag (default = True)
        veryverbose (bool): (default = False)

    Returns:
        list: changed_lines

    CommandLine:
        python -m utool.util_path --exec-sedfile --show

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_path import *  # NOQA
        >>> import utool as ut
        >>> fpath = ut.get_modpath(ut.util_path)
        >>> regexpr = 'sedfile'
        >>> repl = 'saidfile'
        >>> force = False
        >>> verbose = True
        >>> veryverbose = False
        >>> changed_lines = sedfile(fpath, regexpr, repl, force, verbose, veryverbose)
        >>> result = ('changed_lines = %s' % (ut.repr3(changed_lines),))
        >>> print(result)
    """
    # TODO: move to util_edit
    path, name = split(fpath)
    new_file_lines = []

    if veryverbose:
        print('[sedfile] fpath=%r' % fpath)
        print('[sedfile] regexpr=%r' % regexpr)
        print('[sedfile] repl=%r' % repl)
        print('[sedfile] force=%r' % force)

    import utool as ut
    file_lines = ut.readfrom(fpath, aslines=True, verbose=False)
    # with open(fpath, 'r') as file:
    #     file_lines = file.readlines()

    # Search each line for the desired regexpr
    new_file_lines = [re.sub(regexpr, repl, line) for line in file_lines]

    changed_lines = [(newline, line)
                     for newline, line in zip(new_file_lines, file_lines)
                     if newline != line]
    n_changed = len(changed_lines)
    if n_changed > 0:
        rel_fpath = relpath(fpath, os.getcwd())
        print(' * %s changed %d lines in %r ' %
              (['(dry-run)', '(real-run)'][force], n_changed, rel_fpath))
        print(' * --------------------')

        new_file_lines = ut.lmap(ut.ensure_unicode, new_file_lines)
        new_file = ''.join(new_file_lines)

        if verbose:
            if True:
                old_file = ut.ensure_unicode(
                    ''.join(ut.lmap(ut.ensure_unicode, file_lines)))
                ut.print_difftext(old_file, new_file)
            else:
                changed_new, changed_old = zip(*changed_lines)
                prefixold = ' * old (%d, %r): \n | ' % (n_changed, name)
                prefixnew = ' * new (%d, %r): \n | ' % (n_changed, name)
                print(prefixold + (' | '.join(changed_old)).strip('\n'))
                print(' * ____________________')
                print(prefixnew + (' | '.join(changed_new)).strip('\n'))
                print(' * --------------------')
                print(' * =====================================================')

        # Write back to file
        if force:
            print(' ! WRITING CHANGES')
            ut.writeto(fpath, new_file)
        else:
            print(' dry run')
        return changed_lines
    # elif verbose:
    #     print('Nothing changed')
    return None
"def",
"sedfile",
"(",
"fpath",
",",
"regexpr",
",",
"repl",
",",
"force",
"=",
"False",
",",
"verbose",
"=",
"True",
",",
"veryverbose",
"=",
"False",
")",
":",
"# TODO: move to util_edit",
"path",
",",
"name",
"=",
"split",
"(",
"fpath",
")",
"new_file_lines",
"=",
"[",
"]",
"if",
"veryverbose",
":",
"print",
"(",
"'[sedfile] fpath=%r'",
"%",
"fpath",
")",
"print",
"(",
"'[sedfile] regexpr=%r'",
"%",
"regexpr",
")",
"print",
"(",
"'[sedfile] repl=%r'",
"%",
"repl",
")",
"print",
"(",
"'[sedfile] force=%r'",
"%",
"force",
")",
"import",
"utool",
"as",
"ut",
"file_lines",
"=",
"ut",
".",
"readfrom",
"(",
"fpath",
",",
"aslines",
"=",
"True",
",",
"verbose",
"=",
"False",
")",
"# with open(fpath, 'r') as file:",
"# import utool",
"# with utool.embed_on_exception_context:",
"# file_lines = file.readlines()",
"# Search each line for the desired regexpr",
"new_file_lines",
"=",
"[",
"re",
".",
"sub",
"(",
"regexpr",
",",
"repl",
",",
"line",
")",
"for",
"line",
"in",
"file_lines",
"]",
"changed_lines",
"=",
"[",
"(",
"newline",
",",
"line",
")",
"for",
"newline",
",",
"line",
"in",
"zip",
"(",
"new_file_lines",
",",
"file_lines",
")",
"if",
"newline",
"!=",
"line",
"]",
"n_changed",
"=",
"len",
"(",
"changed_lines",
")",
"if",
"n_changed",
">",
"0",
":",
"rel_fpath",
"=",
"relpath",
"(",
"fpath",
",",
"os",
".",
"getcwd",
"(",
")",
")",
"print",
"(",
"' * %s changed %d lines in %r '",
"%",
"(",
"[",
"'(dry-run)'",
",",
"'(real-run)'",
"]",
"[",
"force",
"]",
",",
"n_changed",
",",
"rel_fpath",
")",
")",
"print",
"(",
"' * --------------------'",
")",
"import",
"utool",
"as",
"ut",
"new_file_lines",
"=",
"ut",
".",
"lmap",
"(",
"ut",
".",
"ensure_unicode",
",",
"new_file_lines",
")",
"new_file",
"=",
"''",
".",
"join",
"(",
"new_file_lines",
")",
"#print(new_file.replace('\\n','\\n))",
"if",
"verbose",
":",
"if",
"True",
":",
"import",
"utool",
"as",
"ut",
"old_file",
"=",
"ut",
".",
"ensure_unicode",
"(",
"''",
".",
"join",
"(",
"ut",
".",
"lmap",
"(",
"ut",
".",
"ensure_unicode",
",",
"file_lines",
")",
")",
")",
"ut",
".",
"print_difftext",
"(",
"old_file",
",",
"new_file",
")",
"else",
":",
"changed_new",
",",
"changed_old",
"=",
"zip",
"(",
"*",
"changed_lines",
")",
"prefixold",
"=",
"' * old (%d, %r): \\n | '",
"%",
"(",
"n_changed",
",",
"name",
")",
"prefixnew",
"=",
"' * new (%d, %r): \\n | '",
"%",
"(",
"n_changed",
",",
"name",
")",
"print",
"(",
"prefixold",
"+",
"(",
"' | '",
".",
"join",
"(",
"changed_old",
")",
")",
".",
"strip",
"(",
"'\\n'",
")",
")",
"print",
"(",
"' * ____________________'",
")",
"print",
"(",
"prefixnew",
"+",
"(",
"' | '",
".",
"join",
"(",
"changed_new",
")",
")",
".",
"strip",
"(",
"'\\n'",
")",
")",
"print",
"(",
"' * --------------------'",
")",
"print",
"(",
"' * ====================================================='",
")",
"# Write back to file",
"if",
"force",
":",
"print",
"(",
"' ! WRITING CHANGES'",
")",
"ut",
".",
"writeto",
"(",
"fpath",
",",
"new_file",
")",
"# with open(fpath, 'w') as file:",
"# file.write(new_file.encode('utf8'))",
"else",
":",
"print",
"(",
"' dry run'",
")",
"return",
"changed_lines",
"#elif verbose:",
"# print('Nothing changed')",
"return",
"None"
]
| Executes sed on a specific file
Args:
fpath (str): file path string
regexpr (str):
repl (str):
force (bool): (default = False)
verbose (bool): verbosity flag(default = True)
veryverbose (bool): (default = False)
Returns:
list: changed_lines
CommandLine:
python -m utool.util_path --exec-sedfile --show
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> fpath = ut.get_modpath(ut.util_path)
>>> regexpr = 'sedfile'
>>> repl = 'saidfile'
>>> force = False
>>> verbose = True
>>> veryverbose = False
>>> changed_lines = sedfile(fpath, regexpr, repl, force, verbose, veryverbose)
>>> result = ('changed_lines = %s' % (ut.repr3(changed_lines),))
>>> print(result) | [
"Executes",
"sed",
"on",
"a",
"specific",
"file"
]
| python | train |
brutasse/graphite-api | graphite_api/functions.py | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L3499-L3519 | def countSeries(requestContext, *seriesLists):
    """
    Draws a horizontal line representing the number of nodes found in the
    seriesList.

    Example::

        &target=countSeries(carbon.agents.*.*)

    """
    if not seriesLists or not any(seriesLists):
        series = constantLine(requestContext, 0).pop()
        series.pathExpression = "countSeries()"
    else:
        seriesList, start, end, step = normalize(seriesLists)
        name = "countSeries(%s)" % formatPathExpressions(seriesList)
        values = (int(len(row)) for row in zip_longest(*seriesList))
        series = TimeSeries(name, start, end, step, values)
        series.pathExpression = name

    return [series]
"def",
"countSeries",
"(",
"requestContext",
",",
"*",
"seriesLists",
")",
":",
"if",
"not",
"seriesLists",
"or",
"not",
"any",
"(",
"seriesLists",
")",
":",
"series",
"=",
"constantLine",
"(",
"requestContext",
",",
"0",
")",
".",
"pop",
"(",
")",
"series",
".",
"pathExpression",
"=",
"\"countSeries()\"",
"else",
":",
"seriesList",
",",
"start",
",",
"end",
",",
"step",
"=",
"normalize",
"(",
"seriesLists",
")",
"name",
"=",
"\"countSeries(%s)\"",
"%",
"formatPathExpressions",
"(",
"seriesList",
")",
"values",
"=",
"(",
"int",
"(",
"len",
"(",
"row",
")",
")",
"for",
"row",
"in",
"zip_longest",
"(",
"*",
"seriesList",
")",
")",
"series",
"=",
"TimeSeries",
"(",
"name",
",",
"start",
",",
"end",
",",
"step",
",",
"values",
")",
"series",
".",
"pathExpression",
"=",
"name",
"return",
"[",
"series",
"]"
]
| Draws a horizontal line representing the number of nodes found in the
seriesList.
Example::
&target=countSeries(carbon.agents.*.*) | [
"Draws",
"a",
"horizontal",
"line",
"representing",
"the",
"number",
"of",
"nodes",
"found",
"in",
"the",
"seriesList",
"."
]
| python | train |
manns/pyspread | pyspread/src/gui/_widgets.py | https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_widgets.py#L363-L392 | def expand(self, line, do_expand, force=False, vislevels=0, level=-1):
    """Multi-purpose expand method from original STC class"""

    lastchild = self.GetLastChild(line, level)
    line += 1

    while line <= lastchild:
        if force:
            if vislevels > 0:
                self.ShowLines(line, line)
            else:
                self.HideLines(line, line)
        elif do_expand:
            self.ShowLines(line, line)

        if level == -1:
            level = self.GetFoldLevel(line)

        if level & stc.STC_FOLDLEVELHEADERFLAG:
            if force:
                self.SetFoldExpanded(line, vislevels - 1)
                line = self.expand(line, do_expand, force, vislevels - 1)
            else:
                expandsub = do_expand and self.GetFoldExpanded(line)
                line = self.expand(line, expandsub, force, vislevels - 1)
        else:
            line += 1

    return line
"def",
"expand",
"(",
"self",
",",
"line",
",",
"do_expand",
",",
"force",
"=",
"False",
",",
"vislevels",
"=",
"0",
",",
"level",
"=",
"-",
"1",
")",
":",
"lastchild",
"=",
"self",
".",
"GetLastChild",
"(",
"line",
",",
"level",
")",
"line",
"+=",
"1",
"while",
"line",
"<=",
"lastchild",
":",
"if",
"force",
":",
"if",
"vislevels",
">",
"0",
":",
"self",
".",
"ShowLines",
"(",
"line",
",",
"line",
")",
"else",
":",
"self",
".",
"HideLines",
"(",
"line",
",",
"line",
")",
"elif",
"do_expand",
":",
"self",
".",
"ShowLines",
"(",
"line",
",",
"line",
")",
"if",
"level",
"==",
"-",
"1",
":",
"level",
"=",
"self",
".",
"GetFoldLevel",
"(",
"line",
")",
"if",
"level",
"&",
"stc",
".",
"STC_FOLDLEVELHEADERFLAG",
":",
"if",
"force",
":",
"self",
".",
"SetFoldExpanded",
"(",
"line",
",",
"vislevels",
"-",
"1",
")",
"line",
"=",
"self",
".",
"expand",
"(",
"line",
",",
"do_expand",
",",
"force",
",",
"vislevels",
"-",
"1",
")",
"else",
":",
"expandsub",
"=",
"do_expand",
"and",
"self",
".",
"GetFoldExpanded",
"(",
"line",
")",
"line",
"=",
"self",
".",
"expand",
"(",
"line",
",",
"expandsub",
",",
"force",
",",
"vislevels",
"-",
"1",
")",
"else",
":",
"line",
"+=",
"1",
"return",
"line"
]
| Multi-purpose expand method from original STC class | [
"Multi",
"-",
"purpose",
"expand",
"method",
"from",
"original",
"STC",
"class"
]
| python | train |
phoebe-project/phoebe2 | phoebe/parameters/parameters.py | https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/parameters/parameters.py#L5024-L5036 | def get_orbits(self):
    """
    get 'component' of all orbits in order primary -> secondary
    """
    #~ l = re.findall(r"[\w']+", self.get_value())
    # now search for indices of orbit and take the next entry from this flat list
    #~ return [l[i+1] for i,s in enumerate(l) if s=='orbit']
    orbits = []
    for star in self.get_stars():
        parent = self.get_parent_of(star)
        if parent not in orbits and parent != 'component' and parent is not None:
            orbits.append(parent)
    return orbits
"def",
"get_orbits",
"(",
"self",
")",
":",
"#~ l = re.findall(r\"[\\w']+\", self.get_value())",
"# now search for indices of orbit and take the next entry from this flat list",
"#~ return [l[i+1] for i,s in enumerate(l) if s=='orbit']",
"orbits",
"=",
"[",
"]",
"for",
"star",
"in",
"self",
".",
"get_stars",
"(",
")",
":",
"parent",
"=",
"self",
".",
"get_parent_of",
"(",
"star",
")",
"if",
"parent",
"not",
"in",
"orbits",
"and",
"parent",
"!=",
"'component'",
"and",
"parent",
"is",
"not",
"None",
":",
"orbits",
".",
"append",
"(",
"parent",
")",
"return",
"orbits"
]
| get 'component' of all orbits in order primary -> secondary | [
"get",
"component",
"of",
"all",
"orbits",
"in",
"order",
"primary",
"-",
">",
"secondary"
]
| python | train |
dailymuse/oz | oz/blinks/middleware.py | https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/blinks/middleware.py#L14-L20 | def get_blink_cookie(self, name):
    """Gets a blink cookie value"""
    value = self.get_cookie(name)

    if value != None:
        self.clear_cookie(name)
        return escape.url_unescape(value)
"def",
"get_blink_cookie",
"(",
"self",
",",
"name",
")",
":",
"value",
"=",
"self",
".",
"get_cookie",
"(",
"name",
")",
"if",
"value",
"!=",
"None",
":",
"self",
".",
"clear_cookie",
"(",
"name",
")",
"return",
"escape",
".",
"url_unescape",
"(",
"value",
")"
]
| Gets a blink cookie value | [
"Gets",
"a",
"blink",
"cookie",
"value"
]
| python | train |
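A sketch of the read-and-clear ("blink") pattern the method implements; ``set_blink_cookie`` is assumed to be the companion setter in the same middleware:

```python
# Request 1 (e.g. just before a redirect): stash a one-shot message.
self.set_blink_cookie('flash', 'Profile saved')   # assumed companion setter

# Request 2: reading the cookie also clears it, so it renders exactly once.
message = self.get_blink_cookie('flash')
```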
nerandell/cauldron | cauldron/sql.py | https://github.com/nerandell/cauldron/blob/d363bac763781bb2da18debfa0fdd4be28288b92/cauldron/sql.py#L285-L304 | def delete(cls, cur, table: str, where_keys: list):
    """
    Creates a delete query with where keys
    Supports multiple where clauses joined with AND, OR, or both

    Args:
        table: a string indicating the name of the table
        where_keys: list of dictionaries
            example of where keys: [{'name': ('>', 'cip'), 'url': ('=', 'cip.com')},
                                    {'type': ('<=', 'manufacturer')}]
            where_clause will look like ((name>%s and url=%s) or (type<=%s))
            items within each dictionary get 'AND'-ed and dictionaries themselves get 'OR'-ed

    Returns:
        an integer indicating count of rows deleted
    """
    where_clause, values = cls._get_where_clause_with_values(where_keys)
    query = cls._delete_query.format(table, where_clause)
    yield from cur.execute(query, values)
    return cur.rowcount
"def",
"delete",
"(",
"cls",
",",
"cur",
",",
"table",
":",
"str",
",",
"where_keys",
":",
"list",
")",
":",
"where_clause",
",",
"values",
"=",
"cls",
".",
"_get_where_clause_with_values",
"(",
"where_keys",
")",
"query",
"=",
"cls",
".",
"_delete_query",
".",
"format",
"(",
"table",
",",
"where_clause",
")",
"yield",
"from",
"cur",
".",
"execute",
"(",
"query",
",",
"values",
")",
"return",
"cur",
".",
"rowcount"
]
| Creates a delete query with where keys
Supports multiple where clause with and or or both
Args:
table: a string indicating the name of the table
where_keys: list of dictionary
example of where keys: [{'name':('>', 'cip'),'url':('=', 'cip.com'},{'type':{'<=', 'manufacturer'}}]
where_clause will look like ((name>%s and url=%s) or (type <= %s))
items within each dictionary get 'AND'-ed and dictionaries themselves get 'OR'-ed
Returns:
an integer indicating count of rows deleted | [
"Creates",
"a",
"delete",
"query",
"with",
"where",
"keys",
"Supports",
"multiple",
"where",
"clause",
"with",
"and",
"or",
"or",
"both"
]
| python | valid |
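A hedged coroutine sketch mirroring the documented ``where_keys`` format; ``Store`` stands in for whatever class provides this classmethod, and ``cur`` is an open database cursor:

```python
import asyncio

@asyncio.coroutine
def purge(cur):
    # Deletes rows where (name='cip' AND url='cip.com') OR (type<='manufacturer').
    count = yield from Store.delete(
        cur, 'users',
        where_keys=[{'name': ('=', 'cip'), 'url': ('=', 'cip.com')},
                    {'type': ('<=', 'manufacturer')}])
    return count
```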
spyder-ide/spyder | spyder/app/mainwindow.py | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/app/mainwindow.py#L2711-L2728 | def open_external_console(self, fname, wdir, args, interact, debug, python,
                          python_args, systerm, post_mortem=False):
    """Open external console"""
    if systerm:
        # Running script in an external system terminal
        try:
            if CONF.get('main_interpreter', 'default'):
                executable = get_python_executable()
            else:
                executable = CONF.get('main_interpreter', 'executable')
            programs.run_python_script_in_terminal(
                    fname, wdir, args, interact, debug, python_args,
                    executable)
        except NotImplementedError:
            QMessageBox.critical(self, _("Run"),
                                 _("Running an external system terminal "
                                   "is not supported on platform %s."
                                   ) % os.name)
"def",
"open_external_console",
"(",
"self",
",",
"fname",
",",
"wdir",
",",
"args",
",",
"interact",
",",
"debug",
",",
"python",
",",
"python_args",
",",
"systerm",
",",
"post_mortem",
"=",
"False",
")",
":",
"if",
"systerm",
":",
"# Running script in an external system terminal\r",
"try",
":",
"if",
"CONF",
".",
"get",
"(",
"'main_interpreter'",
",",
"'default'",
")",
":",
"executable",
"=",
"get_python_executable",
"(",
")",
"else",
":",
"executable",
"=",
"CONF",
".",
"get",
"(",
"'main_interpreter'",
",",
"'executable'",
")",
"programs",
".",
"run_python_script_in_terminal",
"(",
"fname",
",",
"wdir",
",",
"args",
",",
"interact",
",",
"debug",
",",
"python_args",
",",
"executable",
")",
"except",
"NotImplementedError",
":",
"QMessageBox",
".",
"critical",
"(",
"self",
",",
"_",
"(",
"\"Run\"",
")",
",",
"_",
"(",
"\"Running an external system terminal \"",
"\"is not supported on platform %s.\"",
")",
"%",
"os",
".",
"name",
")"
]
| Open external console | [
"Open",
"external",
"console"
]
| python | train |
Capitains/MyCapytain | MyCapytain/resources/texts/local/capitains/cts.py | https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/texts/local/capitains/cts.py#L111-L145 | def _getSimplePassage(self, reference=None):
    """ Retrieve a single node representing the passage.

    .. warning:: Range support is awkward.

    :param reference: Identifier of the subreference / passages
    :type reference: list, reference
    :returns: Asked passage
    :rtype: CapitainsCtsPassage
    """
    if reference is None:
        return _SimplePassage(
            resource=self.resource,
            reference=None,
            urn=self.urn,
            citation=self.citation.root,
            text=self
        )

    subcitation = self.citation.root[reference.depth - 1]
    resource = self.resource.xpath(
        subcitation.fill(reference),
        namespaces=XPATH_NAMESPACES
    )

    if len(resource) != 1:
        raise InvalidURN

    return _SimplePassage(
        resource[0],
        reference=reference,
        urn=self.urn,
        citation=subcitation,
        text=self.textObject
    )
"def",
"_getSimplePassage",
"(",
"self",
",",
"reference",
"=",
"None",
")",
":",
"if",
"reference",
"is",
"None",
":",
"return",
"_SimplePassage",
"(",
"resource",
"=",
"self",
".",
"resource",
",",
"reference",
"=",
"None",
",",
"urn",
"=",
"self",
".",
"urn",
",",
"citation",
"=",
"self",
".",
"citation",
".",
"root",
",",
"text",
"=",
"self",
")",
"subcitation",
"=",
"self",
".",
"citation",
".",
"root",
"[",
"reference",
".",
"depth",
"-",
"1",
"]",
"resource",
"=",
"self",
".",
"resource",
".",
"xpath",
"(",
"subcitation",
".",
"fill",
"(",
"reference",
")",
",",
"namespaces",
"=",
"XPATH_NAMESPACES",
")",
"if",
"len",
"(",
"resource",
")",
"!=",
"1",
":",
"raise",
"InvalidURN",
"return",
"_SimplePassage",
"(",
"resource",
"[",
"0",
"]",
",",
"reference",
"=",
"reference",
",",
"urn",
"=",
"self",
".",
"urn",
",",
"citation",
"=",
"subcitation",
",",
"text",
"=",
"self",
".",
"textObject",
")"
]
| Retrieve a single node representing the passage.
.. warning:: Range support is awkward.
:param reference: Identifier of the subreference / passages
:type reference: list, reference
:returns: Asked passage
:rtype: CapitainsCtsPassage | [
"Retrieve",
"a",
"single",
"node",
"representing",
"the",
"passage",
"."
]
| python | train |
synw/dataswim | dataswim/data/export.py | https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/data/export.py#L58-L71 | def to_markdown_(self) -> str:
"""Convert the main dataframe to markdown
:return: markdown data
:rtype: str
:example: ``ds.to_markdown_()``
"""
try:
renderer = pytablewriter.MarkdownTableWriter
data = self._build_export(renderer)
return data
except Exception as e:
self.err(e, "Can not convert data to markdown") | [
"def",
"to_markdown_",
"(",
"self",
")",
"->",
"str",
":",
"try",
":",
"renderer",
"=",
"pytablewriter",
".",
"MarkdownTableWriter",
"data",
"=",
"self",
".",
"_build_export",
"(",
"renderer",
")",
"return",
"data",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"err",
"(",
"e",
",",
"\"Can not convert data to markdown\"",
")"
]
| Convert the main dataframe to markdown
:return: markdown data
:rtype: str
:example: ``ds.to_markdown_()`` | [
"Convert",
"the",
"main",
"dataframe",
"to",
"markdown"
]
| python | train |
casacore/python-casacore | casacore/images/coordinates.py | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/images/coordinates.py#L71-L87 | def _get_coordinatenames(self):
"""Create ordered list of coordinate names
"""
validnames = ("direction", "spectral", "linear", "stokes", "tabular")
self._names = [""] * len(validnames)
n = 0
for key in self._csys.keys():
for name in validnames:
if key.startswith(name):
idx = int(key[len(name):])
self._names[idx] = name
n += 1
# reverse as we are c order in python
self._names = self._names[:n][::-1]
if len(self._names) == 0:
raise LookupError("Coordinate record doesn't contain valid coordinates") | [
"def",
"_get_coordinatenames",
"(",
"self",
")",
":",
"validnames",
"=",
"(",
"\"direction\"",
",",
"\"spectral\"",
",",
"\"linear\"",
",",
"\"stokes\"",
",",
"\"tabular\"",
")",
"self",
".",
"_names",
"=",
"[",
"\"\"",
"]",
"*",
"len",
"(",
"validnames",
")",
"n",
"=",
"0",
"for",
"key",
"in",
"self",
".",
"_csys",
".",
"keys",
"(",
")",
":",
"for",
"name",
"in",
"validnames",
":",
"if",
"key",
".",
"startswith",
"(",
"name",
")",
":",
"idx",
"=",
"int",
"(",
"key",
"[",
"len",
"(",
"name",
")",
":",
"]",
")",
"self",
".",
"_names",
"[",
"idx",
"]",
"=",
"name",
"n",
"+=",
"1",
"# reverse as we are c order in python",
"self",
".",
"_names",
"=",
"self",
".",
"_names",
"[",
":",
"n",
"]",
"[",
":",
":",
"-",
"1",
"]",
"if",
"len",
"(",
"self",
".",
"_names",
")",
"==",
"0",
":",
"raise",
"LookupError",
"(",
"\"Coordinate record doesn't contain valid coordinates\"",
")"
]
| Create ordered list of coordinate names | [
"Create",
"ordered",
"list",
"of",
"coordinate",
"names"
]
| python | train |
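A standalone walk-through of the ordering logic above; the coordinate record below is hypothetical and only its key names matter:

validnames = ('direction', 'spectral', 'linear', 'stokes', 'tabular')
csys = {'direction0': {}, 'spectral1': {}, 'stokes2': {}, 'telescope': 'X'}

names = [''] * len(validnames)
n = 0
for key in csys:
    for name in validnames:
        if key.startswith(name):
            idx = int(key[len(name):])   # trailing digit is the axis index
            names[idx] = name
            n += 1
names = names[:n][::-1]                  # reversed: python arrays are C-order
print(names)                             # ['stokes', 'spectral', 'direction']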
brocade/pynos | pynos/versions/ver_6/ver_6_0_1/yang/brocade_mac_address_table.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_mac_address_table.py#L155-L165 | def mac_address_table_aging_time_conversational_time_out(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
mac_address_table = ET.SubElement(config, "mac-address-table", xmlns="urn:brocade.com:mgmt:brocade-mac-address-table")
aging_time = ET.SubElement(mac_address_table, "aging-time")
conversational_time_out = ET.SubElement(aging_time, "conversational-time-out")
conversational_time_out.text = kwargs.pop('conversational_time_out')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"mac_address_table_aging_time_conversational_time_out",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"mac_address_table",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"mac-address-table\"",
",",
"xmlns",
"=",
"\"urn:brocade.com:mgmt:brocade-mac-address-table\"",
")",
"aging_time",
"=",
"ET",
".",
"SubElement",
"(",
"mac_address_table",
",",
"\"aging-time\"",
")",
"conversational_time_out",
"=",
"ET",
".",
"SubElement",
"(",
"aging_time",
",",
"\"conversational-time-out\"",
")",
"conversational_time_out",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'conversational_time_out'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
]
| Auto Generated Code | [
"Auto",
"Generated",
"Code"
]
| python | train |
knipknap/exscript | Exscript/protocols/protocol.py | https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/protocols/protocol.py#L652-L678 | def login(self, account=None, app_account=None, flush=True):
"""
Log into the connected host using the best method available.
If an account is not given, default to the account that was
used during the last call to login(). If a previous call was not
made, use the account that was passed to the constructor. If that
also fails, raise a TypeError.
The app_account is passed to :class:`app_authenticate()` and
:class:`app_authorize()`.
If app_account is not given, default to the value of the account
argument.
:type account: Account
:param account: The account for protocol level authentication.
:type app_account: Account
:param app_account: The account for app level authentication.
:type flush: bool
:param flush: Whether to flush the last prompt from the buffer.
"""
with self._get_account(account) as account:
if app_account is None:
app_account = account
self.authenticate(account, flush=False)
if self.get_driver().supports_auto_authorize():
self.expect_prompt()
self.auto_app_authorize(app_account, flush=flush) | [
"def",
"login",
"(",
"self",
",",
"account",
"=",
"None",
",",
"app_account",
"=",
"None",
",",
"flush",
"=",
"True",
")",
":",
"with",
"self",
".",
"_get_account",
"(",
"account",
")",
"as",
"account",
":",
"if",
"app_account",
"is",
"None",
":",
"app_account",
"=",
"account",
"self",
".",
"authenticate",
"(",
"account",
",",
"flush",
"=",
"False",
")",
"if",
"self",
".",
"get_driver",
"(",
")",
".",
"supports_auto_authorize",
"(",
")",
":",
"self",
".",
"expect_prompt",
"(",
")",
"self",
".",
"auto_app_authorize",
"(",
"app_account",
",",
"flush",
"=",
"flush",
")"
]
| Log into the connected host using the best method available.
If an account is not given, default to the account that was
used during the last call to login(). If a previous call was not
made, use the account that was passed to the constructor. If that
also fails, raise a TypeError.
The app_account is passed to :class:`app_authenticate()` and
:class:`app_authorize()`.
If app_account is not given, default to the value of the account
argument.
:type account: Account
:param account: The account for protocol level authentication.
:type app_account: Account
:param app_account: The account for app level authentication.
:type flush: bool
:param flush: Whether to flush the last prompt from the buffer. | [
"Log",
"into",
"the",
"connected",
"host",
"using",
"the",
"best",
"method",
"available",
".",
"If",
"an",
"account",
"is",
"not",
"given",
"default",
"to",
"the",
"account",
"that",
"was",
"used",
"during",
"the",
"last",
"call",
"to",
"login",
"()",
".",
"If",
"a",
"previous",
"call",
"was",
"not",
"made",
"use",
"the",
"account",
"that",
"was",
"passed",
"to",
"the",
"constructor",
".",
"If",
"that",
"also",
"fails",
"raise",
"a",
"TypeError",
"."
]
| python | train |
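A hedged usage sketch of login(); the hostname and credentials are placeholders, and the imports follow Exscript's documented layout:

from Exscript import Account
from Exscript.protocols import SSH2

conn = SSH2()
conn.connect('router.example.com')       # placeholder host
conn.login(Account('admin', 'secret'))   # protocol auth, then auto app-authorize
conn.execute('show version')
conn.close()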
JIC-CSB/jicimagelib | jicimagelib/geometry.py | https://github.com/JIC-CSB/jicimagelib/blob/fbd67accb2e6d55969c6d4ed7e8b4bb4ab65cd44/jicimagelib/geometry.py#L70-L87 | def _set_types(self):
"""Make sure that x, y have consistent types and set dtype."""
# If we are given something that is not an int or a float we raise
# a RuntimeError as we do not want to have to guess if the given
# input should be interpreted as an int or a float, for example the
# interpretation of the string "1" vs the interpretation of the string
# "1.0".
for c in (self.x, self.y):
if not ( isinstance(c, int) or isinstance(c, float) ):
raise(RuntimeError('x, y coords should be int or float'))
if isinstance(self.x, int) and isinstance(self.y, int):
self._dtype = "int"
else:
# At least one value is a float so promote both to float.
self.x = float(self.x)
self.y = float(self.y)
self._dtype = "float" | [
"def",
"_set_types",
"(",
"self",
")",
":",
"# If we given something that is not an int or a float we raise",
"# a RuntimeError as we do not want to have to guess if the given",
"# input should be interpreted as an int or a float, for example the",
"# interpretation of the string \"1\" vs the interpretation of the string",
"# \"1.0\".",
"for",
"c",
"in",
"(",
"self",
".",
"x",
",",
"self",
".",
"y",
")",
":",
"if",
"not",
"(",
"isinstance",
"(",
"c",
",",
"int",
")",
"or",
"isinstance",
"(",
"c",
",",
"float",
")",
")",
":",
"raise",
"(",
"RuntimeError",
"(",
"'x, y coords should be int or float'",
")",
")",
"if",
"isinstance",
"(",
"self",
".",
"x",
",",
"int",
")",
"and",
"isinstance",
"(",
"self",
".",
"y",
",",
"int",
")",
":",
"self",
".",
"_dtype",
"=",
"\"int\"",
"else",
":",
"# At least one value is a float so promote both to float.",
"self",
".",
"x",
"=",
"float",
"(",
"self",
".",
"x",
")",
"self",
".",
"y",
"=",
"float",
"(",
"self",
".",
"y",
")",
"self",
".",
"_dtype",
"=",
"\"float\""
]
| Make sure that x, y have consistent types and set dtype. | [
"Make",
"sure",
"that",
"x",
"y",
"have",
"consistent",
"types",
"and",
"set",
"dtype",
"."
]
| python | train |
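A minimal sketch of the same promotion rule outside the class, to make the int-vs-float behavior concrete:

def promote(x, y):
    for c in (x, y):
        if not (isinstance(c, int) or isinstance(c, float)):
            raise RuntimeError('x, y coords should be int or float')
    if isinstance(x, int) and isinstance(y, int):
        return x, y, 'int'
    return float(x), float(y), 'float'   # at least one float promotes both

print(promote(3, 4))      # (3, 4, 'int')
print(promote(3, 4.5))    # (3.0, 4.5, 'float')
# promote('1', 2)         # RuntimeError: no guessing whether "1" means 1 or 1.0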
blockadeio/analyst_toolbench | blockade/cli/aws_serverless.py | https://github.com/blockadeio/analyst_toolbench/blob/159b6f8cf8a91c5ff050f1579636ea90ab269863/blockade/cli/aws_serverless.py#L642-L677 | def generate_lambda_functions():
"""Create the Blockade lambda functions."""
logger.debug("[#] Setting up the Lambda functions")
aws_lambda = boto3.client('lambda', region_name=PRIMARY_REGION)
functions = aws_lambda.list_functions().get('Functions')
existing_funcs = [x['FunctionName'] for x in functions]
iam = boto3.resource('iam')
account_id = iam.CurrentUser().arn.split(':')[4]
responses = list()
for label in LAMBDA_FUNCTIONS:
if label in existing_funcs:
logger.debug("[*] Lambda function %s already exists" % (label))
continue
dir_path = os.path.dirname(os.path.realpath(__file__))
dir_path = dir_path.replace('/cli', '/aws')
kwargs = {
'Runtime': 'python2.7',
'Role': 'arn:aws:iam::{0}:role/{1}'.format(account_id, BLOCKADE_ROLE),
'Timeout': 3,
'MemorySize': 128,
'Publish': True,
'Code': {
'ZipFile': open("{0}/lambda-zips/{1}.zip".format(dir_path, label), 'rb').read()
}
}
kwargs.update(LAMBDA_SCHEMA[label])
logger.debug("[#] Setting up the %s Lambda function" % (label))
response = aws_lambda.create_function(**kwargs)
responses.append(response)
logger.debug("[#] Successfully setup Lambda function %s" % (label))
logger.info("[#] Successfully setup Lambda functions")
return responses | [
"def",
"generate_lambda_functions",
"(",
")",
":",
"logger",
".",
"debug",
"(",
"\"[#] Setting up the Lambda functions\"",
")",
"aws_lambda",
"=",
"boto3",
".",
"client",
"(",
"'lambda'",
",",
"region_name",
"=",
"PRIMARY_REGION",
")",
"functions",
"=",
"aws_lambda",
".",
"list_functions",
"(",
")",
".",
"get",
"(",
"'Functions'",
")",
"existing_funcs",
"=",
"[",
"x",
"[",
"'FunctionName'",
"]",
"for",
"x",
"in",
"functions",
"]",
"iam",
"=",
"boto3",
".",
"resource",
"(",
"'iam'",
")",
"account_id",
"=",
"iam",
".",
"CurrentUser",
"(",
")",
".",
"arn",
".",
"split",
"(",
"':'",
")",
"[",
"4",
"]",
"responses",
"=",
"list",
"(",
")",
"for",
"label",
"in",
"LAMBDA_FUNCTIONS",
":",
"if",
"label",
"in",
"existing_funcs",
":",
"logger",
".",
"debug",
"(",
"\"[*] Lambda function %s already exists\"",
"%",
"(",
"label",
")",
")",
"continue",
"dir_path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"__file__",
")",
")",
"dir_path",
"=",
"dir_path",
".",
"replace",
"(",
"'/cli'",
",",
"'/aws'",
")",
"kwargs",
"=",
"{",
"'Runtime'",
":",
"'python2.7'",
",",
"'Role'",
":",
"'arn:aws:iam::{0}:role/{1}'",
".",
"format",
"(",
"account_id",
",",
"BLOCKADE_ROLE",
")",
",",
"'Timeout'",
":",
"3",
",",
"'MemorySize'",
":",
"128",
",",
"'Publish'",
":",
"True",
",",
"'Code'",
":",
"{",
"'ZipFile'",
":",
"open",
"(",
"\"{0}/lambda-zips/{1}.zip\"",
".",
"format",
"(",
"dir_path",
",",
"label",
")",
",",
"'rb'",
")",
".",
"read",
"(",
")",
"}",
"}",
"kwargs",
".",
"update",
"(",
"LAMBDA_SCHEMA",
"[",
"label",
"]",
")",
"logger",
".",
"debug",
"(",
"\"[#] Setting up the %s Lambda function\"",
"%",
"(",
"label",
")",
")",
"response",
"=",
"aws_lambda",
".",
"create_function",
"(",
"*",
"*",
"kwargs",
")",
"responses",
".",
"append",
"(",
"response",
")",
"logger",
".",
"debug",
"(",
"\"[#] Successfully setup Lambda function %s\"",
"%",
"(",
"label",
")",
")",
"logger",
".",
"info",
"(",
"\"[#] Successfully setup Lambda functions\"",
")",
"return",
"responses"
]
| Create the Blockade lambda functions. | [
"Create",
"the",
"Blockade",
"lambda",
"functions",
"."
]
| python | train |
evansherlock/nytimesarticle | nytimesarticle.py | https://github.com/evansherlock/nytimesarticle/blob/89f551699ffb11f71b47271246d350a1043e9326/nytimesarticle.py#L91-L115 | def search(self,
response_format = None,
key = None,
**kwargs):
"""
Calls the API and returns a dictionary of the search results
:param response_format: the format that the API uses for its response,
includes JSON (.json) and JSONP (.jsonp).
Defaults to '.json'.
:param key: a developer key. Defaults to key given when the articleAPI class was initialized.
"""
if response_format is None:
response_format = self.response_format
if key is None:
key = self.key
url = '%s%s?%sapi-key=%s' % (
API_ROOT, response_format, self._options(**kwargs), key
)
r = requests.get(url)
return r.json() | [
"def",
"search",
"(",
"self",
",",
"response_format",
"=",
"None",
",",
"key",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"response_format",
"is",
"None",
":",
"response_format",
"=",
"self",
".",
"response_format",
"if",
"key",
"is",
"None",
":",
"key",
"=",
"self",
".",
"key",
"url",
"=",
"'%s%s?%sapi-key=%s'",
"%",
"(",
"API_ROOT",
",",
"response_format",
",",
"self",
".",
"_options",
"(",
"*",
"*",
"kwargs",
")",
",",
"key",
")",
"r",
"=",
"requests",
".",
"get",
"(",
"url",
")",
"return",
"r",
".",
"json",
"(",
")"
]
| Calls the API and returns a dictionary of the search results
:param response_format: the format that the API uses for its response,
includes JSON (.json) and JSONP (.jsonp).
Defaults to '.json'.
:param key: a developer key. Defaults to key given when the articleAPI class was initialized. | [
"Calls",
"the",
"API",
"and",
"returns",
"a",
"dictionary",
"of",
"the",
"search",
"results",
":",
"param",
"response_format",
":",
"the",
"format",
"that",
"the",
"API",
"uses",
"for",
"its",
"response",
"includes",
"JSON",
"(",
".",
"json",
")",
"and",
"JSONP",
"(",
".",
"jsonp",
")",
".",
"Defaults",
"to",
".",
"json",
".",
":",
"param",
"key",
":",
"a",
"developer",
"key",
".",
"Defaults",
"to",
"key",
"given",
"when",
"the",
"articleAPI",
"class",
"was",
"initialized",
"."
]
| python | train |
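A hedged usage sketch; the enclosing class is assumed to be articleAPI from this package, and the key, query, and response layout (NYT Article Search API v2) are placeholders and assumptions:

from nytimesarticle import articleAPI

api = articleAPI('YOUR_API_KEY')
# keyword arguments are serialized into the query string by self._options()
results = api.search(q='electric cars', begin_date=20230101, page=0)
for doc in results['response']['docs']:
    print(doc['headline']['main'])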
singularityhub/singularity-cli | spython/main/parse/docker.py | https://github.com/singularityhub/singularity-cli/blob/cb36b4504812ca87e29c6a40b222a545d1865799/spython/main/parse/docker.py#L190-L216 | def _add_files(self, source, dest):
'''add files is the underlying function called to add files to the
list, whether originally called from the functions to parse archives,
or https. We make sure that any local references are changed to
actual file locations before adding to the files list.
Parameters
==========
source: the source
dest: the destination
'''
# Create data structure to iterate over
paths = {'source': source,
'dest': dest}
for pathtype, path in paths.items():
if path == ".":
paths[pathtype] = os.getcwd()
# Warning if doesn't exist
if not os.path.exists(path):
bot.warning("%s doesn't exist, ensure exists for build" %path)
# The pair is added to the files as a list
self.files.append([paths['source'], paths['dest']]) | [
"def",
"_add_files",
"(",
"self",
",",
"source",
",",
"dest",
")",
":",
"# Create data structure to iterate over",
"paths",
"=",
"{",
"'source'",
":",
"source",
",",
"'dest'",
":",
"dest",
"}",
"for",
"pathtype",
",",
"path",
"in",
"paths",
".",
"items",
"(",
")",
":",
"if",
"path",
"==",
"\".\"",
":",
"paths",
"[",
"pathtype",
"]",
"=",
"os",
".",
"getcwd",
"(",
")",
"# Warning if doesn't exist",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"bot",
".",
"warning",
"(",
"\"%s doesn't exist, ensure exists for build\"",
"%",
"path",
")",
"# The pair is added to the files as a list",
"self",
".",
"files",
".",
"append",
"(",
"[",
"paths",
"[",
"'source'",
"]",
",",
"paths",
"[",
"'dest'",
"]",
"]",
")"
]
| add files is the underlying function called to add files to the
list, whether originally called from the functions to parse archives,
or https. We make sure that any local references are changed to
actual file locations before adding to the files list.
Parameters
==========
source: the source
dest: the destination | [
"add",
"files",
"is",
"the",
"underlying",
"function",
"called",
"to",
"add",
"files",
"to",
"the",
"list",
"whether",
"originally",
"called",
"from",
"the",
"functions",
"to",
"parse",
"archives",
"or",
"https",
".",
"We",
"make",
"sure",
"that",
"any",
"local",
"references",
"are",
"changed",
"to",
"actual",
"file",
"locations",
"before",
"adding",
"to",
"the",
"files",
"list",
".",
"Parameters",
"==========",
"source",
":",
"the",
"source",
"dest",
":",
"the",
"destiation"
]
| python | train |
PythonCharmers/python-future | src/future/backports/http/server.py | https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/http/server.py#L958-L979 | def is_cgi(self):
"""Test whether self.path corresponds to a CGI script.
Returns True and updates the cgi_info attribute to the tuple
(dir, rest) if self.path requires running a CGI script.
Returns False otherwise.
If any exception is raised, the caller should assume that
self.path was rejected as invalid and act accordingly.
The default implementation tests whether the normalized url
path begins with one of the strings in self.cgi_directories
(and the next character is a '/' or the end of the string).
"""
collapsed_path = _url_collapse_path(self.path)
dir_sep = collapsed_path.find('/', 1)
head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep+1:]
if head in self.cgi_directories:
self.cgi_info = head, tail
return True
return False | [
"def",
"is_cgi",
"(",
"self",
")",
":",
"collapsed_path",
"=",
"_url_collapse_path",
"(",
"self",
".",
"path",
")",
"dir_sep",
"=",
"collapsed_path",
".",
"find",
"(",
"'/'",
",",
"1",
")",
"head",
",",
"tail",
"=",
"collapsed_path",
"[",
":",
"dir_sep",
"]",
",",
"collapsed_path",
"[",
"dir_sep",
"+",
"1",
":",
"]",
"if",
"head",
"in",
"self",
".",
"cgi_directories",
":",
"self",
".",
"cgi_info",
"=",
"head",
",",
"tail",
"return",
"True",
"return",
"False"
]
| Test whether self.path corresponds to a CGI script.
Returns True and updates the cgi_info attribute to the tuple
(dir, rest) if self.path requires running a CGI script.
Returns False otherwise.
If any exception is raised, the caller should assume that
self.path was rejected as invalid and act accordingly.
The default implementation tests whether the normalized url
path begins with one of the strings in self.cgi_directories
(and the next character is a '/' or the end of the string). | [
"Test",
"whether",
"self",
".",
"path",
"corresponds",
"to",
"a",
"CGI",
"script",
"."
]
| python | train |
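A standalone trace of the head/tail split above, with the collapsed path written out literally instead of calling _url_collapse_path:

collapsed_path = '/cgi-bin/script.py/extra'
dir_sep = collapsed_path.find('/', 1)
head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep + 1:]
print(head, tail)   # /cgi-bin script.py/extra
# head is then tested against self.cgi_directories,
# which defaults to ['/cgi-bin', '/htbin'] in this handler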
Hackerfleet/hfos | hfos/tool/rbac.py | https://github.com/Hackerfleet/hfos/blob/b6df14eacaffb6be5c844108873ff8763ec7f0c9/hfos/tool/rbac.py#L78-L94 | def add_action_role(ctx):
"""Adds a role to an action on objects"""
objects = ctx.obj['objects']
action = ctx.obj['action']
role = ctx.obj['role']
if action is None or role is None:
log('You need to specify an action or role to the RBAC command group for this to work.', lvl=warn)
return
for item in objects:
if role not in item.perms[action]:
item.perms[action].append(role)
item.save()
log("Done") | [
"def",
"add_action_role",
"(",
"ctx",
")",
":",
"objects",
"=",
"ctx",
".",
"obj",
"[",
"'objects'",
"]",
"action",
"=",
"ctx",
".",
"obj",
"[",
"'action'",
"]",
"role",
"=",
"ctx",
".",
"obj",
"[",
"'role'",
"]",
"if",
"action",
"is",
"None",
"or",
"role",
"is",
"None",
":",
"log",
"(",
"'You need to specify an action or role to the RBAC command group for this to work.'",
",",
"lvl",
"=",
"warn",
")",
"return",
"for",
"item",
"in",
"objects",
":",
"if",
"role",
"not",
"in",
"item",
".",
"perms",
"[",
"action",
"]",
":",
"item",
".",
"perms",
"[",
"action",
"]",
".",
"append",
"(",
"role",
")",
"item",
".",
"save",
"(",
")",
"log",
"(",
"\"Done\"",
")"
]
| Adds a role to an action on objects | [
"Adds",
"a",
"role",
"to",
"an",
"action",
"on",
"objects"
]
| python | train |
TomasTomecek/sen | sen/tui/ui.py | https://github.com/TomasTomecek/sen/blob/239b4868125814e8bf5527708119fc08b35f6cc0/sen/tui/ui.py#L82-L90 | def quit(self):
"""
This could be called from another thread, so let's do this via alarm
"""
def q(*args):
raise urwid.ExitMainLoop()
self.worker.shutdown(wait=False)
self.ui_worker.shutdown(wait=False)
self.loop.set_alarm_in(0, q) | [
"def",
"quit",
"(",
"self",
")",
":",
"def",
"q",
"(",
"*",
"args",
")",
":",
"raise",
"urwid",
".",
"ExitMainLoop",
"(",
")",
"self",
".",
"worker",
".",
"shutdown",
"(",
"wait",
"=",
"False",
")",
"self",
".",
"ui_worker",
".",
"shutdown",
"(",
"wait",
"=",
"False",
")",
"self",
".",
"loop",
".",
"set_alarm_in",
"(",
"0",
",",
"q",
")"
]
| This could be called from another thread, so let's do this via alarm | [
"This",
"could",
"be",
"called",
"from",
"another",
"thread",
"so",
"let",
"s",
"do",
"this",
"via",
"alarm"
]
| python | train |
ethereum/web3.py | web3/_utils/encoding.py | https://github.com/ethereum/web3.py/blob/71b8bf03dc6d332dd97d8902a38ffab6f8b5a5ab/web3/_utils/encoding.py#L204-L222 | def hexstr_if_str(to_type, hexstr_or_primitive):
"""
Convert to a type, assuming that strings can be only hexstr (not unicode text)
@param to_type is a function that takes the arguments (primitive, hexstr=hexstr, text=text),
e.g. to_bytes, to_text, to_hex, to_int, etc
@param hexstr_or_primitive: value in bytes, str, or int.
"""
if isinstance(hexstr_or_primitive, str):
(primitive, hexstr) = (None, hexstr_or_primitive)
if remove_0x_prefix(hexstr) and not is_hex(hexstr):
raise ValueError(
"when sending a str, it must be a hex string. Got: {0!r}".format(
hexstr_or_primitive,
)
)
else:
(primitive, hexstr) = (hexstr_or_primitive, None)
return to_type(primitive, hexstr=hexstr) | [
"def",
"hexstr_if_str",
"(",
"to_type",
",",
"hexstr_or_primitive",
")",
":",
"if",
"isinstance",
"(",
"hexstr_or_primitive",
",",
"str",
")",
":",
"(",
"primitive",
",",
"hexstr",
")",
"=",
"(",
"None",
",",
"hexstr_or_primitive",
")",
"if",
"remove_0x_prefix",
"(",
"hexstr",
")",
"and",
"not",
"is_hex",
"(",
"hexstr",
")",
":",
"raise",
"ValueError",
"(",
"\"when sending a str, it must be a hex string. Got: {0!r}\"",
".",
"format",
"(",
"hexstr_or_primitive",
",",
")",
")",
"else",
":",
"(",
"primitive",
",",
"hexstr",
")",
"=",
"(",
"hexstr_or_primitive",
",",
"None",
")",
"return",
"to_type",
"(",
"primitive",
",",
"hexstr",
"=",
"hexstr",
")"
]
| Convert to a type, assuming that strings can be only hexstr (not unicode text)
@param to_type is a function that takes the arguments (primitive, hexstr=hexstr, text=text),
e.g. to_bytes, to_text, to_hex, to_int, etc
@param hexstr_or_primitive: value in bytes, str, or int. | [
"Convert",
"to",
"a",
"type",
"assuming",
"that",
"strings",
"can",
"be",
"only",
"hexstr",
"(",
"not",
"unicode",
"text",
")"
]
| python | train |
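A hedged usage sketch, assuming hexstr_if_str above is in scope and pairing it with eth-utils converters, which take the (primitive, hexstr=..., text=...) signature the docstring describes:

from eth_utils import to_bytes, to_int

print(hexstr_if_str(to_int, '0x10'))       # 16 -- str is treated as a hex string
print(hexstr_if_str(to_int, 16))           # 16 -- non-str is passed as primitive
print(hexstr_if_str(to_bytes, '0xdead'))   # b'\xde\xad'
# hexstr_if_str(to_int, 'hello')           # ValueError: not a hex string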
dpmcmlxxvi/pixelscan | pixelscan/pixelscan.py | https://github.com/dpmcmlxxvi/pixelscan/blob/d641207b13a8fc5bf7ac9964b982971652bb0a7e/pixelscan/pixelscan.py#L26-L37 | def chebyshev(point1, point2):
"""Computes distance between 2D points using chebyshev metric
:param point1: 1st point
:type point1: list
:param point2: 2nd point
:type point2: list
:returns: Distance between point1 and point2
:rtype: float
"""
return max(abs(point1[0] - point2[0]), abs(point1[1] - point2[1])) | [
"def",
"chebyshev",
"(",
"point1",
",",
"point2",
")",
":",
"return",
"max",
"(",
"abs",
"(",
"point1",
"[",
"0",
"]",
"-",
"point2",
"[",
"0",
"]",
")",
",",
"abs",
"(",
"point1",
"[",
"1",
"]",
"-",
"point2",
"[",
"1",
"]",
")",
")"
]
| Computes distance between 2D points using chebyshev metric
:param point1: 1st point
:type point1: list
:param point2: 2nd point
:type point2: list
:returns: Distance between point1 and point2
:rtype: float | [
"Computes",
"distance",
"between",
"2D",
"points",
"using",
"chebyshev",
"metric"
]
| python | train |
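The function above is self-contained, so a direct check doubles as an example:

assert chebyshev((0, 0), (3, 5)) == 5   # max(|0 - 3|, |0 - 5|)
assert chebyshev((2, 2), (4, 1)) == 2   # max(2, 1)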
google/grr | grr/core/grr_response_core/lib/rdfvalues/structs.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/rdfvalues/structs.py#L278-L320 | def ReadIntoObject(buff, index, value_obj, length=0):
"""Reads all tags until the next end group and store in the value_obj."""
raw_data = value_obj.GetRawData()
count = 0
# Split the buffer into tags and wire_format representations, then collect
# these into the raw data cache.
for (encoded_tag, encoded_length, encoded_field) in SplitBuffer(
buff, index=index, length=length):
type_info_obj = value_obj.type_infos_by_encoded_tag.get(encoded_tag)
# Internal format to store parsed fields.
wire_format = (encoded_tag, encoded_length, encoded_field)
# If the tag is not found we need to skip it. Skipped fields are
# inaccessible to this actual object, because they have no type info
# describing them, however they are still stored in the raw data
# representation because they will be re-serialized back. This way
# programs which simply read protobufs and write them back do not need to
# know all the fields, some of which were defined in a later version of
# the application. In order to avoid having to worry about repeated fields
# here, we just insert them into the raw data dict with a key which should
# be unique.
if type_info_obj is None:
# Record an unknown field. The key is unique and ensures we do not collide
# the dict on repeated fields of the encoded tag. Note that this field is
# not really accessible using Get() and does not have a python format
# representation. It will be written back using the same wire format it
# was read with, therefore does not require a type descriptor at all.
raw_data[count] = (None, wire_format, None)
count += 1
# Repeated fields are handled specially.
elif type_info_obj.__class__ is ProtoList:
value_obj.Get(type_info_obj.name).wrapped_list.append((None, wire_format))
else:
# Set the python_format as None so it gets converted lazily on access.
raw_data[type_info_obj.name] = (None, wire_format, type_info_obj)
value_obj.SetRawData(raw_data) | [
"def",
"ReadIntoObject",
"(",
"buff",
",",
"index",
",",
"value_obj",
",",
"length",
"=",
"0",
")",
":",
"raw_data",
"=",
"value_obj",
".",
"GetRawData",
"(",
")",
"count",
"=",
"0",
"# Split the buffer into tags and wire_format representations, then collect",
"# these into the raw data cache.",
"for",
"(",
"encoded_tag",
",",
"encoded_length",
",",
"encoded_field",
")",
"in",
"SplitBuffer",
"(",
"buff",
",",
"index",
"=",
"index",
",",
"length",
"=",
"length",
")",
":",
"type_info_obj",
"=",
"value_obj",
".",
"type_infos_by_encoded_tag",
".",
"get",
"(",
"encoded_tag",
")",
"# Internal format to store parsed fields.",
"wire_format",
"=",
"(",
"encoded_tag",
",",
"encoded_length",
",",
"encoded_field",
")",
"# If the tag is not found we need to skip it. Skipped fields are",
"# inaccessible to this actual object, because they have no type info",
"# describing them, however they are still stored in the raw data",
"# representation because they will be re-serialized back. This way",
"# programs which simply read protobufs and write them back do not need to",
"# know all the fields, some of which were defined in a later version of",
"# the application. In order to avoid having to worry about repeated fields",
"# here, we just insert them into the raw data dict with a key which should",
"# be unique.",
"if",
"type_info_obj",
"is",
"None",
":",
"# Record an unknown field. The key is unique and ensures we do not collide",
"# the dict on repeated fields of the encoded tag. Note that this field is",
"# not really accessible using Get() and does not have a python format",
"# representation. It will be written back using the same wire format it",
"# was read with, therefore does not require a type descriptor at all.",
"raw_data",
"[",
"count",
"]",
"=",
"(",
"None",
",",
"wire_format",
",",
"None",
")",
"count",
"+=",
"1",
"# Repeated fields are handled especially.",
"elif",
"type_info_obj",
".",
"__class__",
"is",
"ProtoList",
":",
"value_obj",
".",
"Get",
"(",
"type_info_obj",
".",
"name",
")",
".",
"wrapped_list",
".",
"append",
"(",
"(",
"None",
",",
"wire_format",
")",
")",
"else",
":",
"# Set the python_format as None so it gets converted lazily on access.",
"raw_data",
"[",
"type_info_obj",
".",
"name",
"]",
"=",
"(",
"None",
",",
"wire_format",
",",
"type_info_obj",
")",
"value_obj",
".",
"SetRawData",
"(",
"raw_data",
")"
]
| Reads all tags until the next end group and stores in the value_obj. | [
"Reads",
"all",
"tags",
"until",
"the",
"next",
"end",
"group",
"and",
"store",
"in",
"the",
"value_obj",
"."
]
| python | train |
log2timeline/dfvfs | dfvfs/file_io/encrypted_stream_io.py | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/file_io/encrypted_stream_io.py#L172-L192 | def _ReadEncryptedData(self, read_size):
"""Reads encrypted data from the file-like object.
Args:
read_size (int): number of bytes of encrypted data to read.
Returns:
int: number of bytes of encrypted data read.
"""
encrypted_data = self._file_object.read(read_size)
read_count = len(encrypted_data)
self._encrypted_data = b''.join([self._encrypted_data, encrypted_data])
self._decrypted_data, self._encrypted_data = (
self._decrypter.Decrypt(self._encrypted_data))
self._decrypted_data_size = len(self._decrypted_data)
return read_count | [
"def",
"_ReadEncryptedData",
"(",
"self",
",",
"read_size",
")",
":",
"encrypted_data",
"=",
"self",
".",
"_file_object",
".",
"read",
"(",
"read_size",
")",
"read_count",
"=",
"len",
"(",
"encrypted_data",
")",
"self",
".",
"_encrypted_data",
"=",
"b''",
".",
"join",
"(",
"[",
"self",
".",
"_encrypted_data",
",",
"encrypted_data",
"]",
")",
"self",
".",
"_decrypted_data",
",",
"self",
".",
"_encrypted_data",
"=",
"(",
"self",
".",
"_decrypter",
".",
"Decrypt",
"(",
"self",
".",
"_encrypted_data",
")",
")",
"self",
".",
"_decrypted_data_size",
"=",
"len",
"(",
"self",
".",
"_decrypted_data",
")",
"return",
"read_count"
]
| Reads encrypted data from the file-like object.
Args:
read_size (int): number of bytes of encrypted data to read.
Returns:
int: number of bytes of encrypted data read. | [
"Reads",
"encrypted",
"data",
"from",
"the",
"file",
"-",
"like",
"object",
"."
]
| python | train |
gmr/tinman | tinman/handlers/mixins.py | https://github.com/gmr/tinman/blob/98f0acd15a228d752caa1864cdf02aaa3d492a9f/tinman/handlers/mixins.py#L166-L172 | def _redis_connection_settings(self):
"""Return a dictionary of redis connection settings.
"""
return {config.HOST: self.settings.get(config.HOST, self._REDIS_HOST),
config.PORT: self.settings.get(config.PORT, self._REDIS_PORT),
'selected_db': self.settings.get(config.DB, self._REDIS_DB)} | [
"def",
"_redis_connection_settings",
"(",
"self",
")",
":",
"return",
"{",
"config",
".",
"HOST",
":",
"self",
".",
"settings",
".",
"get",
"(",
"config",
".",
"HOST",
",",
"self",
".",
"_REDIS_HOST",
")",
",",
"config",
".",
"PORT",
":",
"self",
".",
"settings",
".",
"get",
"(",
"config",
".",
"PORT",
",",
"self",
".",
"_REDIS_PORT",
")",
",",
"'selected_db'",
":",
"self",
".",
"settings",
".",
"get",
"(",
"config",
".",
"DB",
",",
"self",
".",
"_REDIS_DB",
")",
"}"
]
| Return a dictionary of redis connection settings. | [
"Return",
"a",
"dictionary",
"of",
"redis",
"connection",
"settings",
"."
]
| python | train |
pinterest/pymemcache | pymemcache/client/base.py | https://github.com/pinterest/pymemcache/blob/f3a348f4ce2248cce8b398e93e08d984fb9100e5/pymemcache/client/base.py#L285-L294 | def close(self):
"""Close the connection to memcached, if it is open. The next call to a
method that requires a connection will re-open it."""
if self.sock is not None:
try:
self.sock.close()
except Exception:
pass
finally:
self.sock = None | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"sock",
"is",
"not",
"None",
":",
"try",
":",
"self",
".",
"sock",
".",
"close",
"(",
")",
"except",
"Exception",
":",
"pass",
"finally",
":",
"self",
".",
"sock",
"=",
"None"
]
| Close the connection to memcached, if it is open. The next call to a
method that requires a connection will re-open it. | [
"Close",
"the",
"connection",
"to",
"memcached",
"if",
"it",
"is",
"open",
".",
"The",
"next",
"call",
"to",
"a",
"method",
"that",
"requires",
"a",
"connection",
"will",
"re",
"-",
"open",
"it",
"."
]
| python | train |
Nic30/hwt | hwt/serializer/resourceAnalyzer/analyzer.py | https://github.com/Nic30/hwt/blob/8cbb399e326da3b22c233b98188a9d08dec057e6/hwt/serializer/resourceAnalyzer/analyzer.py#L113-L163 | def HWProcess(cls, proc: HWProcess, ctx: ResourceContext) -> None:
"""
Guess resource usage by HWProcess
"""
seen = ctx.seen
for stm in proc.statements:
encl = stm._enclosed_for
full_ev_dep = stm._is_completly_event_dependent
now_ev_dep = stm._now_is_event_dependent
ev_dep = full_ev_dep or now_ev_dep
out_mux_dim = count_mux_inputs_for_outputs(stm)
for o in stm._outputs:
if o in seen:
continue
i = out_mux_dim[o]
if isinstance(o._dtype, HArray):
assert i == 1, (o, i, " only one ram port per HWProcess")
for a in walk_assignments(stm, o):
assert len(a.indexes) == 1, "one address per RAM port"
addr = a.indexes[0]
ctx.registerRAM_write_port(o, addr, ev_dep)
elif ev_dep:
ctx.registerFF(o)
if i > 1:
ctx.registerMUX(stm, o, i)
elif o not in encl:
ctx.registerLatch(o)
if i > 1:
ctx.registerMUX(stm, o, i)
elif i > 1:
ctx.registerMUX(stm, o, i)
else:
# just a connection
continue
if isinstance(stm, SwitchContainer):
caseEqs = set([stm.switchOn._eq(c[0]) for c in stm.cases])
inputs = chain(
[sig for sig in stm._inputs if sig not in caseEqs], [stm.switchOn])
else:
inputs = stm._inputs
for i in inputs:
# discover only internal signals in this statements for
# operators
if not i.hidden or i in seen:
continue
cls.HWProcess_operators(i, ctx, ev_dep) | [
"def",
"HWProcess",
"(",
"cls",
",",
"proc",
":",
"HWProcess",
",",
"ctx",
":",
"ResourceContext",
")",
"->",
"None",
":",
"seen",
"=",
"ctx",
".",
"seen",
"for",
"stm",
"in",
"proc",
".",
"statements",
":",
"encl",
"=",
"stm",
".",
"_enclosed_for",
"full_ev_dep",
"=",
"stm",
".",
"_is_completly_event_dependent",
"now_ev_dep",
"=",
"stm",
".",
"_now_is_event_dependent",
"ev_dep",
"=",
"full_ev_dep",
"or",
"now_ev_dep",
"out_mux_dim",
"=",
"count_mux_inputs_for_outputs",
"(",
"stm",
")",
"for",
"o",
"in",
"stm",
".",
"_outputs",
":",
"if",
"o",
"in",
"seen",
":",
"continue",
"i",
"=",
"out_mux_dim",
"[",
"o",
"]",
"if",
"isinstance",
"(",
"o",
".",
"_dtype",
",",
"HArray",
")",
":",
"assert",
"i",
"==",
"1",
",",
"(",
"o",
",",
"i",
",",
"\" only one ram port per HWProcess\"",
")",
"for",
"a",
"in",
"walk_assignments",
"(",
"stm",
",",
"o",
")",
":",
"assert",
"len",
"(",
"a",
".",
"indexes",
")",
"==",
"1",
",",
"\"one address per RAM port\"",
"addr",
"=",
"a",
".",
"indexes",
"[",
"0",
"]",
"ctx",
".",
"registerRAM_write_port",
"(",
"o",
",",
"addr",
",",
"ev_dep",
")",
"elif",
"ev_dep",
":",
"ctx",
".",
"registerFF",
"(",
"o",
")",
"if",
"i",
">",
"1",
":",
"ctx",
".",
"registerMUX",
"(",
"stm",
",",
"o",
",",
"i",
")",
"elif",
"o",
"not",
"in",
"encl",
":",
"ctx",
".",
"registerLatch",
"(",
"o",
")",
"if",
"i",
">",
"1",
":",
"ctx",
".",
"registerMUX",
"(",
"stm",
",",
"o",
",",
"i",
")",
"elif",
"i",
">",
"1",
":",
"ctx",
".",
"registerMUX",
"(",
"stm",
",",
"o",
",",
"i",
")",
"else",
":",
"# just a connection",
"continue",
"if",
"isinstance",
"(",
"stm",
",",
"SwitchContainer",
")",
":",
"caseEqs",
"=",
"set",
"(",
"[",
"stm",
".",
"switchOn",
".",
"_eq",
"(",
"c",
"[",
"0",
"]",
")",
"for",
"c",
"in",
"stm",
".",
"cases",
"]",
")",
"inputs",
"=",
"chain",
"(",
"[",
"sig",
"for",
"sig",
"in",
"stm",
".",
"_inputs",
"if",
"sig",
"not",
"in",
"caseEqs",
"]",
",",
"[",
"stm",
".",
"switchOn",
"]",
")",
"else",
":",
"inputs",
"=",
"stm",
".",
"_inputs",
"for",
"i",
"in",
"inputs",
":",
"# discover only internal signals in this statements for",
"# operators",
"if",
"not",
"i",
".",
"hidden",
"or",
"i",
"in",
"seen",
":",
"continue",
"cls",
".",
"HWProcess_operators",
"(",
"i",
",",
"ctx",
",",
"ev_dep",
")"
]
| Guess resource usage by HWProcess | [
"Gues",
"resource",
"usage",
"by",
"HWProcess"
]
| python | test |
pypa/pipenv | pipenv/vendor/requirementslib/models/dependencies.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/requirementslib/models/dependencies.py#L411-L445 | def get_dependencies_from_cache(ireq):
"""Retrieves dependencies for the given install requirement from the dependency cache.
:param ireq: A single InstallRequirement
:type ireq: :class:`~pip._internal.req.req_install.InstallRequirement`
:return: A set of dependency lines for generating new InstallRequirements.
:rtype: set(str) or None
"""
if ireq.editable or not is_pinned_requirement(ireq):
return
if ireq not in DEPENDENCY_CACHE:
return
cached = set(DEPENDENCY_CACHE[ireq])
# Preserving sanity: Run through the cache and make sure every entry if
# valid. If this fails, something is wrong with the cache. Drop it.
try:
broken = False
for line in cached:
dep_ireq = pip_shims.shims.InstallRequirement.from_line(line)
name = canonicalize_name(dep_ireq.name)
if _marker_contains_extra(dep_ireq):
broken = True # The "extra =" marker breaks everything.
elif name == canonicalize_name(ireq.name):
broken = True # A package cannot depend on itself.
if broken:
break
except Exception:
broken = True
if broken:
del DEPENDENCY_CACHE[ireq]
return
return cached | [
"def",
"get_dependencies_from_cache",
"(",
"ireq",
")",
":",
"if",
"ireq",
".",
"editable",
"or",
"not",
"is_pinned_requirement",
"(",
"ireq",
")",
":",
"return",
"if",
"ireq",
"not",
"in",
"DEPENDENCY_CACHE",
":",
"return",
"cached",
"=",
"set",
"(",
"DEPENDENCY_CACHE",
"[",
"ireq",
"]",
")",
"# Preserving sanity: Run through the cache and make sure every entry if",
"# valid. If this fails, something is wrong with the cache. Drop it.",
"try",
":",
"broken",
"=",
"False",
"for",
"line",
"in",
"cached",
":",
"dep_ireq",
"=",
"pip_shims",
".",
"shims",
".",
"InstallRequirement",
".",
"from_line",
"(",
"line",
")",
"name",
"=",
"canonicalize_name",
"(",
"dep_ireq",
".",
"name",
")",
"if",
"_marker_contains_extra",
"(",
"dep_ireq",
")",
":",
"broken",
"=",
"True",
"# The \"extra =\" marker breaks everything.",
"elif",
"name",
"==",
"canonicalize_name",
"(",
"ireq",
".",
"name",
")",
":",
"broken",
"=",
"True",
"# A package cannot depend on itself.",
"if",
"broken",
":",
"break",
"except",
"Exception",
":",
"broken",
"=",
"True",
"if",
"broken",
":",
"del",
"DEPENDENCY_CACHE",
"[",
"ireq",
"]",
"return",
"return",
"cached"
]
| Retrieves dependencies for the given install requirement from the dependency cache.
:param ireq: A single InstallRequirement
:type ireq: :class:`~pip._internal.req.req_install.InstallRequirement`
:return: A set of dependency lines for generating new InstallRequirements.
:rtype: set(str) or None | [
"Retrieves",
"dependencies",
"for",
"the",
"given",
"install",
"requirement",
"from",
"the",
"dependency",
"cache",
"."
]
| python | train |
dade-ai/snipy | snipy/iterflow.py | https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/iterflow.py#L327-L337 | def batchzip(size, iterable=None, rest=False):
"""
todo : add example
:param size:
:param iterable:
:param rest:
:return:
"""
fn = ibatch(size, rest=rest) >> zipflow
return fn if iterable is None else fn(iterable) | [
"def",
"batchzip",
"(",
"size",
",",
"iterable",
"=",
"None",
",",
"rest",
"=",
"False",
")",
":",
"fn",
"=",
"ibatch",
"(",
"size",
",",
"rest",
"=",
"rest",
")",
">>",
"zipflow",
"return",
"fn",
"if",
"iterable",
"is",
"None",
"else",
"fn",
"(",
"iterable",
")"
]
| todo : add example
:param size:
:param iterable:
:param rest:
:return: | [
"todo",
":",
"add",
"example",
":",
"param",
"size",
":",
":",
"param",
"iterable",
":",
":",
"param",
"rest",
":",
":",
"return",
":"
]
| python | valid |
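The docstring above leaves its example as a todo. Here is one plausible reading, stated as an assumption: ibatch(size) yields consecutive chunks and zipflow zips those chunks together (a transpose); both semantics are guesses from the names:

import itertools

def ibatch_sketch(size, iterable):        # stand-in for ibatch(size, rest=False)
    it = iter(iterable)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if len(chunk) < size:             # rest=False: drop a short tail
            return
        yield chunk

batches = ibatch_sketch(2, [1, 2, 3, 4, 5, 6])   # (1, 2), (3, 4), (5, 6)
print(list(zip(*batches)))                       # [(1, 3, 5), (2, 4, 6)]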
bhmm/bhmm | bhmm/estimators/_tmatrix_disconnected.py | https://github.com/bhmm/bhmm/blob/9804d18c2ddb684fb4d90b544cc209617a89ca9a/bhmm/estimators/_tmatrix_disconnected.py#L229-L251 | def stationary_distribution(P, C=None, mincount_connectivity=0):
""" Simple estimator for stationary distribution for multiple strongly connected sets """
# can be replaced by msmtools.analysis.stationary_distribution in next msmtools release
from msmtools.analysis.dense.stationary_vector import stationary_distribution as msmstatdist
if C is None:
if is_connected(P, strong=True):
return msmstatdist(P)
else:
raise ValueError('Computing stationary distribution for disconnected matrix. Need count matrix.')
# disconnected sets
n = np.shape(C)[0]
ctot = np.sum(C)
pi = np.zeros(n)
# treat each weakly connected set separately
sets = connected_sets(C, mincount_connectivity=mincount_connectivity, strong=False)
for s in sets:
# compute weight
w = np.sum(C[s, :]) / ctot
pi[s] = w * msmstatdist(P[s, :][:, s])
# reinforce normalization
pi /= np.sum(pi)
return pi | [
"def",
"stationary_distribution",
"(",
"P",
",",
"C",
"=",
"None",
",",
"mincount_connectivity",
"=",
"0",
")",
":",
"# can be replaced by msmtools.analysis.stationary_distribution in next msmtools release",
"from",
"msmtools",
".",
"analysis",
".",
"dense",
".",
"stationary_vector",
"import",
"stationary_distribution",
"as",
"msmstatdist",
"if",
"C",
"is",
"None",
":",
"if",
"is_connected",
"(",
"P",
",",
"strong",
"=",
"True",
")",
":",
"return",
"msmstatdist",
"(",
"P",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Computing stationary distribution for disconnected matrix. Need count matrix.'",
")",
"# disconnected sets",
"n",
"=",
"np",
".",
"shape",
"(",
"C",
")",
"[",
"0",
"]",
"ctot",
"=",
"np",
".",
"sum",
"(",
"C",
")",
"pi",
"=",
"np",
".",
"zeros",
"(",
"n",
")",
"# treat each weakly connected set separately",
"sets",
"=",
"connected_sets",
"(",
"C",
",",
"mincount_connectivity",
"=",
"mincount_connectivity",
",",
"strong",
"=",
"False",
")",
"for",
"s",
"in",
"sets",
":",
"# compute weight",
"w",
"=",
"np",
".",
"sum",
"(",
"C",
"[",
"s",
",",
":",
"]",
")",
"/",
"ctot",
"pi",
"[",
"s",
"]",
"=",
"w",
"*",
"msmstatdist",
"(",
"P",
"[",
"s",
",",
":",
"]",
"[",
":",
",",
"s",
"]",
")",
"# reinforce normalization",
"pi",
"/=",
"np",
".",
"sum",
"(",
"pi",
")",
"return",
"pi"
]
| Simple estimator for stationary distribution for multiple strongly connected sets | [
"Simple",
"estimator",
"for",
"stationary",
"distribution",
"for",
"multiple",
"strongly",
"connected",
"sets"
]
| python | train |
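A self-contained numeric illustration of the count-weighted combination above for a hypothetical block-diagonal chain; msmtools' solver is replaced by a plain eigenvector computation:

import numpy as np

P = np.array([[0.9, 0.1, 0.0],    # states {0, 1} and {2} are disconnected
              [0.1, 0.9, 0.0],
              [0.0, 0.0, 1.0]])
C = np.array([[45, 5, 0],
              [5, 45, 0],
              [0, 0, 100]])       # each set holds half of the total counts

def statdist(P):
    w, v = np.linalg.eig(P.T)
    pi = np.real(v[:, np.argmax(np.real(w))])
    return pi / pi.sum()

pi = np.zeros(3)
for s in ([0, 1], [2]):
    weight = C[s, :].sum() / C.sum()
    pi[s] = weight * statdist(P[np.ix_(s, s)])
print(pi)   # [0.25 0.25 0.5 ]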
ethereum/pyethereum | ethereum/pow/consensus.py | https://github.com/ethereum/pyethereum/blob/b704a5c6577863edc539a1ec3d2620a443b950fb/ethereum/pow/consensus.py#L110-L132 | def finalize(state, block):
"""Apply rewards and commit."""
if state.is_METROPOLIS():
br = state.config['BYZANTIUM_BLOCK_REWARD']
nr = state.config['BYZANTIUM_NEPHEW_REWARD']
else:
br = state.config['BLOCK_REWARD']
nr = state.config['NEPHEW_REWARD']
delta = int(br + nr * len(block.uncles))
state.delta_balance(state.block_coinbase, delta)
udpf = state.config['UNCLE_DEPTH_PENALTY_FACTOR']
for uncle in block.uncles:
r = int(br * (udpf + uncle.number - state.block_number) // udpf)
state.delta_balance(uncle.coinbase, r)
if state.block_number - \
state.config['MAX_UNCLE_DEPTH'] in state.recent_uncles:
del state.recent_uncles[state.block_number -
state.config['MAX_UNCLE_DEPTH']] | [
"def",
"finalize",
"(",
"state",
",",
"block",
")",
":",
"if",
"state",
".",
"is_METROPOLIS",
"(",
")",
":",
"br",
"=",
"state",
".",
"config",
"[",
"'BYZANTIUM_BLOCK_REWARD'",
"]",
"nr",
"=",
"state",
".",
"config",
"[",
"'BYZANTIUM_NEPHEW_REWARD'",
"]",
"else",
":",
"br",
"=",
"state",
".",
"config",
"[",
"'BLOCK_REWARD'",
"]",
"nr",
"=",
"state",
".",
"config",
"[",
"'NEPHEW_REWARD'",
"]",
"delta",
"=",
"int",
"(",
"br",
"+",
"nr",
"*",
"len",
"(",
"block",
".",
"uncles",
")",
")",
"state",
".",
"delta_balance",
"(",
"state",
".",
"block_coinbase",
",",
"delta",
")",
"udpf",
"=",
"state",
".",
"config",
"[",
"'UNCLE_DEPTH_PENALTY_FACTOR'",
"]",
"for",
"uncle",
"in",
"block",
".",
"uncles",
":",
"r",
"=",
"int",
"(",
"br",
"*",
"(",
"udpf",
"+",
"uncle",
".",
"number",
"-",
"state",
".",
"block_number",
")",
"//",
"udpf",
")",
"state",
".",
"delta_balance",
"(",
"uncle",
".",
"coinbase",
",",
"r",
")",
"if",
"state",
".",
"block_number",
"-",
"state",
".",
"config",
"[",
"'MAX_UNCLE_DEPTH'",
"]",
"in",
"state",
".",
"recent_uncles",
":",
"del",
"state",
".",
"recent_uncles",
"[",
"state",
".",
"block_number",
"-",
"state",
".",
"config",
"[",
"'MAX_UNCLE_DEPTH'",
"]",
"]"
]
| Apply rewards and commit. | [
"Apply",
"rewards",
"and",
"commit",
"."
]
| python | train |
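Worked arithmetic for the uncle-reward line above, using Byzantium-era constants (3 ETH block reward, UNCLE_DEPTH_PENALTY_FACTOR = 8) as assumed config values:

br = 3 * 10**18              # assumed BYZANTIUM_BLOCK_REWARD in wei
udpf = 8
block_number = 100
for uncle_number in (99, 98, 94):
    r = int(br * (udpf + uncle_number - block_number) // udpf)
    print(block_number - uncle_number, r / 10**18)
# depth 1 -> 7/8 of br = 2.625 ETH
# depth 2 -> 6/8 of br = 2.25 ETH
# depth 6 -> 2/8 of br = 0.75 ETH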
pandas-dev/pandas | pandas/core/reshape/tile.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/reshape/tile.py#L430-L450 | def _convert_bin_to_datelike_type(bins, dtype):
"""
Convert bins to a DatetimeIndex or TimedeltaIndex if the original dtype is
datelike
Parameters
----------
bins : list-like of bins
dtype : dtype of data
Returns
-------
bins : Array-like of bins, DatetimeIndex or TimedeltaIndex if dtype is
datelike
"""
if is_datetime64tz_dtype(dtype):
bins = to_datetime(bins.astype(np.int64),
utc=True).tz_convert(dtype.tz)
elif is_datetime_or_timedelta_dtype(dtype):
bins = Index(bins.astype(np.int64), dtype=dtype)
return bins | [
"def",
"_convert_bin_to_datelike_type",
"(",
"bins",
",",
"dtype",
")",
":",
"if",
"is_datetime64tz_dtype",
"(",
"dtype",
")",
":",
"bins",
"=",
"to_datetime",
"(",
"bins",
".",
"astype",
"(",
"np",
".",
"int64",
")",
",",
"utc",
"=",
"True",
")",
".",
"tz_convert",
"(",
"dtype",
".",
"tz",
")",
"elif",
"is_datetime_or_timedelta_dtype",
"(",
"dtype",
")",
":",
"bins",
"=",
"Index",
"(",
"bins",
".",
"astype",
"(",
"np",
".",
"int64",
")",
",",
"dtype",
"=",
"dtype",
")",
"return",
"bins"
]
| Convert bins to a DatetimeIndex or TimedeltaIndex if the original dtype is
datelike
Parameters
----------
bins : list-like of bins
dtype : dtype of data
Returns
-------
bins : Array-like of bins, DatetimeIndex or TimedeltaIndex if dtype is
datelike | [
"Convert",
"bins",
"to",
"a",
"DatetimeIndex",
"or",
"TimedeltaIndex",
"if",
"the",
"orginal",
"dtype",
"is",
"datelike"
]
| python | train |
Kozea/pygal | pygal/graph/time.py | https://github.com/Kozea/pygal/blob/5e25c98a59a0642eecd9fcc5dbfeeb2190fbb5e7/pygal/graph/time.py#L78-L88 | def seconds_to_time(x):
"""Convert a number of second into a time"""
t = int(x * 10**6)
ms = t % 10**6
t = t // 10**6
s = t % 60
t = t // 60
m = t % 60
t = t // 60
h = t
return time(h, m, s, ms) | [
"def",
"seconds_to_time",
"(",
"x",
")",
":",
"t",
"=",
"int",
"(",
"x",
"*",
"10",
"**",
"6",
")",
"ms",
"=",
"t",
"%",
"10",
"**",
"6",
"t",
"=",
"t",
"//",
"10",
"**",
"6",
"s",
"=",
"t",
"%",
"60",
"t",
"=",
"t",
"//",
"60",
"m",
"=",
"t",
"%",
"60",
"t",
"=",
"t",
"//",
"60",
"h",
"=",
"t",
"return",
"time",
"(",
"h",
",",
"m",
",",
"s",
",",
"ms",
")"
]
| Convert a number of seconds into a time | [
"Convert",
"a",
"number",
"of",
"second",
"into",
"a",
"time"
]
| python | train |
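Tracing the digit arithmetic above for x = 3661.5 seconds, assuming the function and datetime.time are in scope:

# t  = int(3661.5 * 10**6) = 3661500000 microseconds
# ms = 500000; t -> 3661 total seconds; s = 1; t -> 61 minutes; m = 1; h = 1
print(seconds_to_time(3661.5))   # 01:01:01.500000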
wmayner/pyphi | pyphi/node.py | https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/node.py#L156-L177 | def generate_nodes(tpm, cm, network_state, indices, node_labels=None):
"""Generate |Node| objects for a subsystem.
Args:
tpm (np.ndarray): The system's TPM
cm (np.ndarray): The corresponding CM.
network_state (tuple): The state of the network.
indices (tuple[int]): Indices to generate nodes for.
Keyword Args:
node_labels (|NodeLabels|): Textual labels for each node.
Returns:
tuple[Node]: The nodes of the system.
"""
if node_labels is None:
node_labels = NodeLabels(None, indices)
node_state = utils.state_of(indices, network_state)
return tuple(Node(tpm, cm, index, state, node_labels)
for index, state in zip(indices, node_state)) | [
"def",
"generate_nodes",
"(",
"tpm",
",",
"cm",
",",
"network_state",
",",
"indices",
",",
"node_labels",
"=",
"None",
")",
":",
"if",
"node_labels",
"is",
"None",
":",
"node_labels",
"=",
"NodeLabels",
"(",
"None",
",",
"indices",
")",
"node_state",
"=",
"utils",
".",
"state_of",
"(",
"indices",
",",
"network_state",
")",
"return",
"tuple",
"(",
"Node",
"(",
"tpm",
",",
"cm",
",",
"index",
",",
"state",
",",
"node_labels",
")",
"for",
"index",
",",
"state",
"in",
"zip",
"(",
"indices",
",",
"node_state",
")",
")"
]
| Generate |Node| objects for a subsystem.
Args:
tpm (np.ndarray): The system's TPM
cm (np.ndarray): The corresponding CM.
network_state (tuple): The state of the network.
indices (tuple[int]): Indices to generate nodes for.
Keyword Args:
node_labels (|NodeLabels|): Textual labels for each node.
Returns:
tuple[Node]: The nodes of the system. | [
"Generate",
"|Node|",
"objects",
"for",
"a",
"subsystem",
"."
]
| python | train |
NoviceLive/intellicoder | intellicoder/transformers.py | https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/transformers.py#L180-L192 | def build_strings(strings, prefix):
"""Construct string definitions according to
the previously maintained table.
"""
strings = [
(
make_c_str(prefix + str(number), value),
reloc_ptr(
prefix + str(number), 'reloc_delta', 'char *'
)
) for value, number in sort_values(strings)
]
return [i[0] for i in strings], [i[1] for i in strings] | [
"def",
"build_strings",
"(",
"strings",
",",
"prefix",
")",
":",
"strings",
"=",
"[",
"(",
"make_c_str",
"(",
"prefix",
"+",
"str",
"(",
"number",
")",
",",
"value",
")",
",",
"reloc_ptr",
"(",
"prefix",
"+",
"str",
"(",
"number",
")",
",",
"'reloc_delta'",
",",
"'char *'",
")",
")",
"for",
"value",
",",
"number",
"in",
"sort_values",
"(",
"strings",
")",
"]",
"return",
"[",
"i",
"[",
"0",
"]",
"for",
"i",
"in",
"strings",
"]",
",",
"[",
"i",
"[",
"1",
"]",
"for",
"i",
"in",
"strings",
"]"
]
| Construct string definitions according to
the previously maintained table. | [
"Construct",
"string",
"definitions",
"according",
"to",
"the",
"previously",
"maintained",
"table",
"."
]
| python | train |
jessamynsmith/paragres | paragres/command.py | https://github.com/jessamynsmith/paragres/blob/4e068cbfcafbe8f1b010741d38fb65d40de2c6aa/paragres/command.py#L135-L144 | def download_file_from_url(self, source_app, url):
""" Download file from source app or url, and return local filename. """
if source_app:
source_name = source_app
else:
source_name = urlparse.urlparse(url).netloc.replace('.', '_')
filename = self.create_file_name(source_name)
self.download_file(url, filename)
return filename | [
"def",
"download_file_from_url",
"(",
"self",
",",
"source_app",
",",
"url",
")",
":",
"if",
"source_app",
":",
"source_name",
"=",
"source_app",
"else",
":",
"source_name",
"=",
"urlparse",
".",
"urlparse",
"(",
"url",
")",
".",
"netloc",
".",
"replace",
"(",
"'.'",
",",
"'_'",
")",
"filename",
"=",
"self",
".",
"create_file_name",
"(",
"source_name",
")",
"self",
".",
"download_file",
"(",
"url",
",",
"filename",
")",
"return",
"filename"
]
| Download file from source app or url, and return local filename. | [
"Download",
"file",
"from",
"source",
"app",
"or",
"url",
"and",
"return",
"local",
"filename",
"."
]
| python | train |
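The fallback source name is just the URL host with dots replaced by underscores; a quick check (py3 spelling of the urlparse import used above):

from urllib.parse import urlparse

url = 'https://db.example.com/backups/latest.dump'   # placeholder URL
print(urlparse(url).netloc.replace('.', '_'))        # db_example_com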
eaton-lab/toytree | toytree/Drawing.py | https://github.com/eaton-lab/toytree/blob/0347ed2098acc5f707fadf52a0ecd411a6d1859c/toytree/Drawing.py#L624-L665 | def get_tip_label_coords(self):
"""
Get starting position of tip labels text based on locations of the
leaf nodes on the tree and style offset and align options. Node
positions are found using the .verts attribute of coords and are
already oriented for the tree face direction.
"""
# number of tips
ns = self.ttree.ntips
# x-coordinate of tips assuming down-face
tip_xpos = self.coords.verts[:ns, 0]
tip_ypos = self.coords.verts[:ns, 1]
align_edges = None
align_verts = None
# handle orientations
if self.style.orient in (0, 'down'):
# align tips at zero
if self.style.tip_labels_align:
tip_yend = np.zeros(ns)
align_edges = np.array([
(i + len(tip_ypos), i) for i in range(len(tip_ypos))
])
align_verts = np.array(
list(zip(tip_xpos, tip_ypos)) + \
list(zip(tip_xpos, tip_yend))
)
tip_ypos = tip_yend
else:
# tip labels align finds the zero axis for orientation...
if self.style.tip_labels_align:
tip_xend = np.zeros(ns)
align_edges = np.array([
(i + len(tip_xpos), i) for i in range(len(tip_xpos))
])
align_verts = np.array(
list(zip(tip_xpos, tip_ypos)) + \
list(zip(tip_xend, tip_ypos))
)
tip_xpos = tip_xend
return tip_xpos, tip_ypos, align_edges, align_verts | [
"def",
"get_tip_label_coords",
"(",
"self",
")",
":",
"# number of tips",
"ns",
"=",
"self",
".",
"ttree",
".",
"ntips",
"# x-coordinate of tips assuming down-face",
"tip_xpos",
"=",
"self",
".",
"coords",
".",
"verts",
"[",
":",
"ns",
",",
"0",
"]",
"tip_ypos",
"=",
"self",
".",
"coords",
".",
"verts",
"[",
":",
"ns",
",",
"1",
"]",
"align_edges",
"=",
"None",
"align_verts",
"=",
"None",
"# handle orientations",
"if",
"self",
".",
"style",
".",
"orient",
"in",
"(",
"0",
",",
"'down'",
")",
":",
"# align tips at zero",
"if",
"self",
".",
"style",
".",
"tip_labels_align",
":",
"tip_yend",
"=",
"np",
".",
"zeros",
"(",
"ns",
")",
"align_edges",
"=",
"np",
".",
"array",
"(",
"[",
"(",
"i",
"+",
"len",
"(",
"tip_ypos",
")",
",",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"tip_ypos",
")",
")",
"]",
")",
"align_verts",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"zip",
"(",
"tip_xpos",
",",
"tip_ypos",
")",
")",
"+",
"list",
"(",
"zip",
"(",
"tip_xpos",
",",
"tip_yend",
")",
")",
")",
"tip_ypos",
"=",
"tip_yend",
"else",
":",
"# tip labels align finds the zero axis for orientation...",
"if",
"self",
".",
"style",
".",
"tip_labels_align",
":",
"tip_xend",
"=",
"np",
".",
"zeros",
"(",
"ns",
")",
"align_edges",
"=",
"np",
".",
"array",
"(",
"[",
"(",
"i",
"+",
"len",
"(",
"tip_xpos",
")",
",",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"tip_xpos",
")",
")",
"]",
")",
"align_verts",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"zip",
"(",
"tip_xpos",
",",
"tip_ypos",
")",
")",
"+",
"list",
"(",
"zip",
"(",
"tip_xend",
",",
"tip_ypos",
")",
")",
")",
"tip_xpos",
"=",
"tip_xend",
"return",
"tip_xpos",
",",
"tip_ypos",
",",
"align_edges",
",",
"align_verts"
]
| Get starting position of tip labels text based on locations of the
leaf nodes on the tree and style offset and align options. Node
positions are found using the .verts attribute of coords and is
already oriented for the tree face direction. | [
"Get",
"starting",
"position",
"of",
"tip",
"labels",
"text",
"based",
"on",
"locations",
"of",
"the",
"leaf",
"nodes",
"on",
"the",
"tree",
"and",
"style",
"offset",
"and",
"align",
"options",
".",
"Node",
"positions",
"are",
"found",
"using",
"the",
".",
"verts",
"attribute",
"of",
"coords",
"and",
"is",
"already",
"oriented",
"for",
"the",
"tree",
"face",
"direction",
"."
]
| python | train |
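A minimal numpy sketch of the tip-alignment bookkeeping in the row above; the coordinate arrays are hypothetical. Each tip is projected onto the zero axis, and an edge links the projected point (index ntips + i) back to tip i:

```python
import numpy as np

tip_xpos = np.array([0.5, 1.5, 2.5])     # hypothetical tip x-coordinates
tip_ypos = np.array([-1.0, -2.0, -1.5])  # hypothetical tip heights

# project tips to zero and pair each projection with its tip
tip_yend = np.zeros(len(tip_ypos))
align_edges = np.array([(i + len(tip_ypos), i) for i in range(len(tip_ypos))])
align_verts = np.array(list(zip(tip_xpos, tip_ypos)) +
                       list(zip(tip_xpos, tip_yend)))

print(align_edges)  # [[3 0], [4 1], [5 2]]: projection i+3 connects to tip i
print(align_verts)  # original tip points followed by their projections
```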
tweekmonster/moult | moult/utils.py | https://github.com/tweekmonster/moult/blob/38d3a3b9002336219897ebe263ca1d8dcadbecf5/moult/utils.py#L88-L107 | def file_containing_import(import_path, import_root):
'''Finds the file that might contain the import_path.
'''
if not _import_paths:
load_stdlib()
if os.path.isfile(import_root):
import_root = os.path.dirname(import_root)
search_paths = [import_root] + _import_paths
module_parts = import_path.split('.')
for i in range(len(module_parts), 0, -1):
module_path = os.path.join(*module_parts[:i])
for sp in search_paths:
p = os.path.join(sp, module_path)
if os.path.isdir(p):
return os.path.join(p, '__init__.py')
elif os.path.isfile(p + '.py'):
return p + '.py'
return None | [
"def",
"file_containing_import",
"(",
"import_path",
",",
"import_root",
")",
":",
"if",
"not",
"_import_paths",
":",
"load_stdlib",
"(",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"import_root",
")",
":",
"import_root",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"import_root",
")",
"search_paths",
"=",
"[",
"import_root",
"]",
"+",
"_import_paths",
"module_parts",
"=",
"import_path",
".",
"split",
"(",
"'.'",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"module_parts",
")",
",",
"0",
",",
"-",
"1",
")",
":",
"module_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"*",
"module_parts",
"[",
":",
"i",
"]",
")",
"for",
"sp",
"in",
"search_paths",
":",
"p",
"=",
"os",
".",
"path",
".",
"join",
"(",
"sp",
",",
"module_path",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"p",
")",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"p",
",",
"'__init__.py'",
")",
"elif",
"os",
".",
"path",
".",
"isfile",
"(",
"p",
"+",
"'.py'",
")",
":",
"return",
"p",
"+",
"'.py'",
"return",
"None"
]
| Finds the file that might contain the import_path. | [
"Finds",
"the",
"file",
"that",
"might",
"contain",
"the",
"import_path",
"."
]
| python | train |
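A standalone sketch of the resolution strategy used by file_containing_import above, with the module's internal search-path state replaced by an explicit argument:

```python
import os

def resolve_import(import_path, search_paths):
    # walk the dotted path from most to least specific, checking each
    # search path for a package directory or a plain module file
    parts = import_path.split('.')
    for i in range(len(parts), 0, -1):
        module_path = os.path.join(*parts[:i])
        for sp in search_paths:
            p = os.path.join(sp, module_path)
            if os.path.isdir(p):
                return os.path.join(p, '__init__.py')
            if os.path.isfile(p + '.py'):
                return p + '.py'
    return None

# hypothetical search path; returns a file path if found, else None
print(resolve_import('json.decoder', ['/usr/lib/python3.10']))
```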
acutesoftware/AIKIF | aikif/toolbox/audio_tools.py | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/audio_tools.py#L31-L63 | def get_audio_metadata(fname):
""" collects basic MP3 metadata
Works, once you use mutagenx (buried deep in issues page)
['Angels']
['Red Back Fever']
['Red Back Fever']
{'album': ['Red Back Fever'], 'title': ['Red Back Fever'], 'artist': ['Angels']}
"""
from mutagenx.easyid3 import EasyID3
audio = EasyID3(fname)
audio_dict = {}
try:
artist = audio["artist"]
except KeyError:
artist = ''
try:
title = audio["title"]
except KeyError:
print("Cant get title")
title = ''
try:
album = audio["album"]
except KeyError:
album = ''
audio_dict['album'] = album
audio_dict['title'] = title
audio_dict['artist'] = artist
return audio_dict | [
"def",
"get_audio_metadata",
"(",
"fname",
")",
":",
"from",
"mutagenx",
".",
"easyid3",
"import",
"EasyID3",
"audio",
"=",
"EasyID3",
"(",
"fname",
")",
"audio_dict",
"=",
"{",
"}",
"try",
":",
"artist",
"=",
"audio",
"[",
"\"artist\"",
"]",
"except",
"KeyError",
":",
"artist",
"=",
"''",
"try",
":",
"title",
"=",
"audio",
"[",
"\"title\"",
"]",
"except",
"KeyError",
":",
"print",
"(",
"\"Cant get title\"",
")",
"try",
":",
"album",
"=",
"audio",
"[",
"\"album\"",
"]",
"except",
"KeyError",
":",
"album",
"=",
"''",
"audio_dict",
"[",
"'album'",
"]",
"=",
"album",
"audio_dict",
"[",
"'title'",
"]",
"=",
"title",
"audio_dict",
"[",
"'artist'",
"]",
"=",
"artist",
"return",
"audio_dict"
]
| collects basic MP3 metadata
Works, once you use mutagenx (buried deep in issues page)
['Angels']
['Red Back Fever']
['Red Back Fever']
{'album': ['Red Back Fever'], 'title': ['Red Back Fever'], 'artist': ['Angels']} | [
"collects",
"basic",
"MP3",
"metadata",
"Works",
"once",
"you",
"use",
"mutagenx",
"(",
"buried",
"deep",
"in",
"issues",
"page",
")",
"[",
"Angels",
"]",
"[",
"Red",
"Back",
"Fever",
"]",
"[",
"Red",
"Back",
"Fever",
"]",
"{",
"album",
":",
"[",
"Red",
"Back",
"Fever",
"]",
"title",
":",
"[",
"Red",
"Back",
"Fever",
"]",
"artist",
":",
"[",
"Angels",
"]",
"}"
]
| python | train |
maweigert/gputools | gputools/core/ocldevice.py | https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/core/ocldevice.py#L43-L50 | def device_priority(cls, device_with_type_tuple):
"""used to sort devices
device_with_type_tuple = (device, device_type)
"""
device, device_type = device_with_type_tuple
return (device_type is pyopencl.device_type.GPU,
device.get_info(pyopencl.device_info.GLOBAL_MEM_SIZE),
) | [
"def",
"device_priority",
"(",
"cls",
",",
"device_with_type_tuple",
")",
":",
"device",
",",
"device_type",
"=",
"device_with_type_tuple",
"return",
"(",
"device_type",
"is",
"pyopencl",
".",
"device_type",
".",
"GPU",
",",
"device",
".",
"get_info",
"(",
"pyopencl",
".",
"device_info",
".",
"GLOBAL_MEM_SIZE",
")",
",",
")"
]
| used to sort devices
device_with_type_tuple = (device, device_type) | [
"used",
"to",
"sort",
"devices",
"device_with_type_tuple",
"=",
"(",
"device",
"device_type",
")"
]
| python | train |
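Since Python compares tuples lexicographically, the (is_gpu, global_mem) key above prefers GPUs first and breaks ties by memory size; a standalone sketch with hypothetical devices:

```python
# each device carries a priority tuple like the one computed above
devices = [
    ('cpu-0',     (False, 16 * 2**30)),
    ('gpu-small', (True,   4 * 2**30)),
    ('gpu-big',   (True,  12 * 2**30)),
]

best = max(devices, key=lambda d: d[1])
print(best[0])  # gpu-big: GPU beats CPU, larger memory breaks the tie
```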
ronhanson/python-tbx | fabfile/virtualenv.py | https://github.com/ronhanson/python-tbx/blob/87f72ae0cadecafbcd144f1e930181fba77f6b83/fabfile/virtualenv.py#L9-L18 | def init():
"""Execute init tasks for all components (virtualenv, pip)."""
if not os.path.isdir('venv'):
print(cyan('\nCreating the virtual env...'))
local('pyvenv-3.4 venv')
print(green('Virtual env created.'))
print(green('Virtual Environment ready.')) | [
"def",
"init",
"(",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"'venv'",
")",
":",
"print",
"(",
"cyan",
"(",
"'\\nCreating the virtual env...'",
")",
")",
"local",
"(",
"'pyvenv-3.4 venv'",
")",
"print",
"(",
"green",
"(",
"'Virtual env created.'",
")",
")",
"print",
"(",
"green",
"(",
"'Virtual Environment ready.'",
")",
")"
]
| Execute init tasks for all components (virtualenv, pip). | [
"Execute",
"init",
"tasks",
"for",
"all",
"components",
"(",
"virtualenv",
"pip",
")",
"."
]
| python | train |
Clinical-Genomics/scout | scout/parse/hpo.py | https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/parse/hpo.py#L172-L216 | def parse_hpo_genes(hpo_lines):
"""Parse HPO gene information
Args:
hpo_lines(iterable(str))
Returns:
genes(dict): A dictionary with hgnc symbols as keys
"""
LOG.info("Parsing HPO genes ...")
genes = {}
for index, line in enumerate(hpo_lines):
# First line is header
if index == 0:
continue
if len(line) < 5:
continue
gene_info = parse_hpo_gene(line)
hgnc_symbol = gene_info['hgnc_symbol']
description = gene_info['description']
if hgnc_symbol not in genes:
genes[hgnc_symbol] = {
'hgnc_symbol': hgnc_symbol
}
gene = genes[hgnc_symbol]
if description == 'Incomplete penetrance':
gene['incomplete_penetrance'] = True
if description == 'Autosomal dominant inheritance':
gene['ad'] = True
if description == 'Autosomal recessive inheritance':
gene['ar'] = True
if description == 'Mitochondrial inheritance':
gene['mt'] = True
if description == 'X-linked dominant inheritance':
gene['xd'] = True
if description == 'X-linked recessive inheritance':
gene['xr'] = True
if description == 'Y-linked inheritance':
gene['y'] = True
if description == 'X-linked inheritance':
gene['x'] = True
LOG.info("Parsing done.")
return genes | [
"def",
"parse_hpo_genes",
"(",
"hpo_lines",
")",
":",
"LOG",
".",
"info",
"(",
"\"Parsing HPO genes ...\"",
")",
"genes",
"=",
"{",
"}",
"for",
"index",
",",
"line",
"in",
"enumerate",
"(",
"hpo_lines",
")",
":",
"# First line is header",
"if",
"index",
"==",
"0",
":",
"continue",
"if",
"len",
"(",
"line",
")",
"<",
"5",
":",
"continue",
"gene_info",
"=",
"parse_hpo_gene",
"(",
"line",
")",
"hgnc_symbol",
"=",
"gene_info",
"[",
"'hgnc_symbol'",
"]",
"description",
"=",
"gene_info",
"[",
"'description'",
"]",
"if",
"hgnc_symbol",
"not",
"in",
"genes",
":",
"genes",
"[",
"hgnc_symbol",
"]",
"=",
"{",
"'hgnc_symbol'",
":",
"hgnc_symbol",
"}",
"gene",
"=",
"genes",
"[",
"hgnc_symbol",
"]",
"if",
"description",
"==",
"'Incomplete penetrance'",
":",
"gene",
"[",
"'incomplete_penetrance'",
"]",
"=",
"True",
"if",
"description",
"==",
"'Autosomal dominant inheritance'",
":",
"gene",
"[",
"'ad'",
"]",
"=",
"True",
"if",
"description",
"==",
"'Autosomal recessive inheritance'",
":",
"gene",
"[",
"'ar'",
"]",
"=",
"True",
"if",
"description",
"==",
"'Mithochondrial inheritance'",
":",
"gene",
"[",
"'mt'",
"]",
"=",
"True",
"if",
"description",
"==",
"'X-linked dominant inheritance'",
":",
"gene",
"[",
"'xd'",
"]",
"=",
"True",
"if",
"description",
"==",
"'X-linked recessive inheritance'",
":",
"gene",
"[",
"'xr'",
"]",
"=",
"True",
"if",
"description",
"==",
"'Y-linked inheritance'",
":",
"gene",
"[",
"'x'",
"]",
"=",
"True",
"if",
"description",
"==",
"'X-linked inheritance'",
":",
"gene",
"[",
"'y'",
"]",
"=",
"True",
"LOG",
".",
"info",
"(",
"\"Parsing done.\"",
")",
"return",
"genes"
]
| Parse HPO gene information
Args:
hpo_lines(iterable(str))
Returns:
genes(dict): A dictionary with hgnc symbols as keys | [
"Parse",
"HPO",
"gene",
"information",
"Args",
":",
"hpo_lines",
"(",
"iterable",
"(",
"str",
"))",
"Returns",
":",
"diseases",
"(",
"dict",
")",
":",
"A",
"dictionary",
"with",
"hgnc",
"symbols",
"as",
"keys"
]
| python | test |
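A sketch of the accumulation pattern in parse_hpo_genes above, using hypothetical pre-parsed (symbol, description) pairs in place of parse_hpo_gene(line), which is not shown in this row:

```python
records = [
    ('BRCA1', 'Autosomal dominant inheritance'),
    ('BRCA1', 'Incomplete penetrance'),
    ('DMD', 'X-linked recessive inheritance'),
]

genes = {}
for hgnc_symbol, description in records:
    # create the per-gene dict on first sight, then flip flags
    gene = genes.setdefault(hgnc_symbol, {'hgnc_symbol': hgnc_symbol})
    if description == 'Incomplete penetrance':
        gene['incomplete_penetrance'] = True
    if description == 'Autosomal dominant inheritance':
        gene['ad'] = True
    if description == 'X-linked recessive inheritance':
        gene['xr'] = True

print(genes['BRCA1'])
# {'hgnc_symbol': 'BRCA1', 'ad': True, 'incomplete_penetrance': True}
```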
ipfs/py-ipfs-api | ipfsapi/client.py | https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/client.py#L1506-L1533 | def dht_query(self, peer_id, *peer_ids, **kwargs):
"""Finds the closest Peer IDs to a given Peer ID by querying the DHT.
.. code-block:: python
>>> c.dht_query("/ip4/104.131.131.82/tcp/4001/ipfs/QmaCpDM … uvuJ")
[{'ID': 'QmPkFbxAQ7DeKD5VGSh9HQrdS574pyNzDmxJeGrRJxoucF',
'Extra': '', 'Type': 2, 'Responses': None},
{'ID': 'QmR1MhHVLJSLt9ZthsNNhudb1ny1WdhY4FPW21ZYFWec4f',
'Extra': '', 'Type': 2, 'Responses': None},
{'ID': 'Qmcwx1K5aVme45ab6NYWb52K2TFBeABgCLccC7ntUeDsAs',
'Extra': '', 'Type': 2, 'Responses': None},
…
{'ID': 'QmYYy8L3YD1nsF4xtt4xmsc14yqvAAnKksjo3F3iZs5jPv',
'Extra': '', 'Type': 1, 'Responses': []}]
Parameters
----------
peer_id : str
The peerID to run the query against
Returns
-------
dict : List of peers IDs
"""
args = (peer_id,) + peer_ids
return self._client.request('/dht/query', args,
decoder='json', **kwargs) | [
"def",
"dht_query",
"(",
"self",
",",
"peer_id",
",",
"*",
"peer_ids",
",",
"*",
"*",
"kwargs",
")",
":",
"args",
"=",
"(",
"peer_id",
",",
")",
"+",
"peer_ids",
"return",
"self",
".",
"_client",
".",
"request",
"(",
"'/dht/query'",
",",
"args",
",",
"decoder",
"=",
"'json'",
",",
"*",
"*",
"kwargs",
")"
]
| Finds the closest Peer IDs to a given Peer ID by querying the DHT.
.. code-block:: python
>>> c.dht_query("/ip4/104.131.131.82/tcp/4001/ipfs/QmaCpDM … uvuJ")
[{'ID': 'QmPkFbxAQ7DeKD5VGSh9HQrdS574pyNzDmxJeGrRJxoucF',
'Extra': '', 'Type': 2, 'Responses': None},
{'ID': 'QmR1MhHVLJSLt9ZthsNNhudb1ny1WdhY4FPW21ZYFWec4f',
'Extra': '', 'Type': 2, 'Responses': None},
{'ID': 'Qmcwx1K5aVme45ab6NYWb52K2TFBeABgCLccC7ntUeDsAs',
'Extra': '', 'Type': 2, 'Responses': None},
…
{'ID': 'QmYYy8L3YD1nsF4xtt4xmsc14yqvAAnKksjo3F3iZs5jPv',
'Extra': '', 'Type': 1, 'Responses': []}]
Parameters
----------
peer_id : str
The peerID to run the query against
Returns
-------
dict : List of peers IDs | [
"Finds",
"the",
"closest",
"Peer",
"IDs",
"to",
"a",
"given",
"Peer",
"ID",
"by",
"querying",
"the",
"DHT",
"."
]
| python | train |
ofir123/py-printer | pyprinter/printer.py | https://github.com/ofir123/py-printer/blob/876c83b32120f3b6a7b06989b2cd9b86915d1a50/pyprinter/printer.py#L368-L382 | def _in_qtconsole() -> bool:
"""
A small utility function which determines if we're running in QTConsole's context.
"""
try:
from IPython import get_ipython
try:
from ipykernel.zmqshell import ZMQInteractiveShell
shell_object = ZMQInteractiveShell
except ImportError:
from IPython.kernel.zmq import zmqshell
shell_object = zmqshell.ZMQInteractiveShell
return isinstance(get_ipython(), shell_object)
except Exception:
return False | [
"def",
"_in_qtconsole",
"(",
")",
"->",
"bool",
":",
"try",
":",
"from",
"IPython",
"import",
"get_ipython",
"try",
":",
"from",
"ipykernel",
".",
"zmqshell",
"import",
"ZMQInteractiveShell",
"shell_object",
"=",
"ZMQInteractiveShell",
"except",
"ImportError",
":",
"from",
"IPython",
".",
"kernel",
".",
"zmq",
"import",
"zmqshell",
"shell_object",
"=",
"zmqshell",
".",
"ZMQInteractiveShell",
"return",
"isinstance",
"(",
"get_ipython",
"(",
")",
",",
"shell_object",
")",
"except",
"Exception",
":",
"return",
"False"
]
| A small utility function which determines if we're running in QTConsole's context. | [
"A",
"small",
"utility",
"function",
"which",
"determines",
"if",
"we",
"re",
"running",
"in",
"QTConsole",
"s",
"context",
"."
]
| python | train |
Neurita/boyle | boyle/utils/cache_mixin.py | https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/utils/cache_mixin.py#L93-L160 | def cache(func, memory, func_memory_level=None, memory_level=None,
**kwargs):
""" Return a joblib.Memory object.
The memory_level determines the level above which the wrapped
function output is cached. By specifying a numeric value for
this level, the user can control the amount of cache memory
used. This function will cache the function call or not
depending on the cache level.
Parameters
----------
func: function
The function which output is to be cached.
memory: instance of joblib.Memory or string
Used to cache the function call.
func_memory_level: int, optional
The memory_level from which caching must be enabled for the wrapped
function.
memory_level: int, optional
The memory_level used to determine if function call must
be cached or not (if user_memory_level is equal to or greater than
func_memory_level the function is cached)
kwargs: keyword arguments
The keyword arguments passed to memory.cache
Returns
-------
mem: joblib.MemorizedFunc
object that wraps the function func. This object may be
a no-op, if the requested level is lower than the value given
to _cache()). For consistency, a joblib.Memory object is always
returned.
"""
verbose = kwargs.get('verbose', 0)
# memory_level and func_memory_level must be both None or both integers.
memory_levels = [memory_level, func_memory_level]
both_params_integers = all(isinstance(lvl, int) for lvl in memory_levels)
both_params_none = all(lvl is None for lvl in memory_levels)
if not (both_params_integers or both_params_none):
raise ValueError('Reference and user memory levels must be both None '
'or both integers.')
if memory is not None and (func_memory_level is None or
memory_level >= func_memory_level):
if isinstance(memory, _basestring):
memory = Memory(cachedir=memory, verbose=verbose)
if not isinstance(memory, MEMORY_CLASSES):
raise TypeError("'memory' argument must be a string or a "
"joblib.Memory object. "
"%s %s was given." % (memory, type(memory)))
if (memory.cachedir is None and memory_level is not None
and memory_level > 1):
warnings.warn("Caching has been enabled (memory_level = %d) "
"but no Memory object or path has been provided"
" (parameter memory). Caching deactivated for "
"function %s." %
(memory_level, func.__name__),
stacklevel=2)
else:
memory = Memory(cachedir=None, verbose=verbose)
return _safe_cache(memory, func, **kwargs) | [
"def",
"cache",
"(",
"func",
",",
"memory",
",",
"func_memory_level",
"=",
"None",
",",
"memory_level",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"verbose",
"=",
"kwargs",
".",
"get",
"(",
"'verbose'",
",",
"0",
")",
"# memory_level and func_memory_level must be both None or both integers.",
"memory_levels",
"=",
"[",
"memory_level",
",",
"func_memory_level",
"]",
"both_params_integers",
"=",
"all",
"(",
"isinstance",
"(",
"lvl",
",",
"int",
")",
"for",
"lvl",
"in",
"memory_levels",
")",
"both_params_none",
"=",
"all",
"(",
"lvl",
"is",
"None",
"for",
"lvl",
"in",
"memory_levels",
")",
"if",
"not",
"(",
"both_params_integers",
"or",
"both_params_none",
")",
":",
"raise",
"ValueError",
"(",
"'Reference and user memory levels must be both None '",
"'or both integers.'",
")",
"if",
"memory",
"is",
"not",
"None",
"and",
"(",
"func_memory_level",
"is",
"None",
"or",
"memory_level",
">=",
"func_memory_level",
")",
":",
"if",
"isinstance",
"(",
"memory",
",",
"_basestring",
")",
":",
"memory",
"=",
"Memory",
"(",
"cachedir",
"=",
"memory",
",",
"verbose",
"=",
"verbose",
")",
"if",
"not",
"isinstance",
"(",
"memory",
",",
"MEMORY_CLASSES",
")",
":",
"raise",
"TypeError",
"(",
"\"'memory' argument must be a string or a \"",
"\"joblib.Memory object. \"",
"\"%s %s was given.\"",
"%",
"(",
"memory",
",",
"type",
"(",
"memory",
")",
")",
")",
"if",
"(",
"memory",
".",
"cachedir",
"is",
"None",
"and",
"memory_level",
"is",
"not",
"None",
"and",
"memory_level",
">",
"1",
")",
":",
"warnings",
".",
"warn",
"(",
"\"Caching has been enabled (memory_level = %d) \"",
"\"but no Memory object or path has been provided\"",
"\" (parameter memory). Caching deactivated for \"",
"\"function %s.\"",
"%",
"(",
"memory_level",
",",
"func",
".",
"__name__",
")",
",",
"stacklevel",
"=",
"2",
")",
"else",
":",
"memory",
"=",
"Memory",
"(",
"cachedir",
"=",
"None",
",",
"verbose",
"=",
"verbose",
")",
"return",
"_safe_cache",
"(",
"memory",
",",
"func",
",",
"*",
"*",
"kwargs",
")"
]
| Return a joblib.Memory object.
The memory_level determines the level above which the wrapped
function output is cached. By specifying a numeric value for
this level, the user can control the amount of cache memory
used. This function will cache the function call or not
depending on the cache level.
Parameters
----------
func: function
The function which output is to be cached.
memory: instance of joblib.Memory or string
Used to cache the function call.
func_memory_level: int, optional
The memory_level from which caching must be enabled for the wrapped
function.
memory_level: int, optional
The memory_level used to determine if function call must
be cached or not (if user_memory_level is equal to or greater than
func_memory_level the function is cached)
kwargs: keyword arguments
The keyword arguments passed to memory.cache
Returns
-------
mem: joblib.MemorizedFunc
object that wraps the function func. This object may be
a no-op, if the requested level is lower than the value given
to _cache()). For consistency, a joblib.Memory object is always
returned. | [
"Return",
"a",
"joblib",
".",
"Memory",
"object",
"."
]
| python | valid |
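A reduced sketch of the level gate in cache above: memoization applies only when a Memory object is supplied and the user's memory_level reaches the function's threshold (older joblib cachedir= API, matching the code in this row):

```python
from joblib import Memory

def gated_cache(func, memory, func_memory_level, memory_level):
    if memory is not None and memory_level >= func_memory_level:
        return memory.cache(func)  # memoized on disk
    return func                    # no-op fall-through

def square(x):
    return x ** 2

mem = Memory(cachedir='/tmp/demo_cache', verbose=0)
print(gated_cache(square, mem, 1, 2)(4))  # 16, cached (2 >= 1)
print(gated_cache(square, mem, 3, 2)(4))  # 16, uncached (2 < 3)
```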
log2timeline/plaso | plaso/cli/helpers/storage_format.py | https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/cli/helpers/storage_format.py#L20-L39 | def AddArguments(cls, argument_group):
"""Adds command line arguments to an argument group.
This function takes an argument parser or an argument group object and adds
to it all the command line arguments this helper supports.
Args:
argument_group (argparse._ArgumentGroup|argparse.ArgumentParser):
argparse group.
"""
storage_formats = sorted(definitions.STORAGE_FORMATS)
argument_group.add_argument(
'--storage_format', '--storage-format', action='store',
choices=storage_formats, dest='storage_format', type=str,
metavar='FORMAT', default=definitions.DEFAULT_STORAGE_FORMAT, help=(
'Format of the storage file, the default is: {0:s}. Supported '
'options: {1:s}'.format(
definitions.DEFAULT_STORAGE_FORMAT,
', '.join(storage_formats)))) | [
"def",
"AddArguments",
"(",
"cls",
",",
"argument_group",
")",
":",
"storage_formats",
"=",
"sorted",
"(",
"definitions",
".",
"STORAGE_FORMATS",
")",
"argument_group",
".",
"add_argument",
"(",
"'--storage_format'",
",",
"'--storage-format'",
",",
"action",
"=",
"'store'",
",",
"choices",
"=",
"storage_formats",
",",
"dest",
"=",
"'storage_format'",
",",
"type",
"=",
"str",
",",
"metavar",
"=",
"'FORMAT'",
",",
"default",
"=",
"definitions",
".",
"DEFAULT_STORAGE_FORMAT",
",",
"help",
"=",
"(",
"'Format of the storage file, the default is: {0:s}. Supported '",
"'options: {1:s}'",
".",
"format",
"(",
"definitions",
".",
"DEFAULT_STORAGE_FORMAT",
",",
"', '",
".",
"join",
"(",
"storage_formats",
")",
")",
")",
")"
]
| Adds command line arguments to an argument group.
This function takes an argument parser or an argument group object and adds
to it all the command line arguments this helper supports.
Args:
argument_group (argparse._ArgumentGroup|argparse.ArgumentParser):
argparse group. | [
"Adds",
"command",
"line",
"arguments",
"to",
"an",
"argument",
"group",
"."
]
| python | train |
Erotemic/utool | utool/util_regex.py | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_regex.py#L91-L101 | def extend_regex2(regexpr, reflags=0):
"""
also preprocesses flags
"""
regexpr = extend_regex(regexpr)
IGNORE_CASE_PREF = '\\c'
if regexpr.startswith(IGNORE_CASE_PREF):
# hack for vim-like ignore case
regexpr = regexpr[len(IGNORE_CASE_PREF):]
reflags = reflags | re.IGNORECASE
return regexpr, reflags | [
"def",
"extend_regex2",
"(",
"regexpr",
",",
"reflags",
"=",
"0",
")",
":",
"regexpr",
"=",
"extend_regex",
"(",
"regexpr",
")",
"IGNORE_CASE_PREF",
"=",
"'\\\\c'",
"if",
"regexpr",
".",
"startswith",
"(",
"IGNORE_CASE_PREF",
")",
":",
"# hack for vim-like ignore case",
"regexpr",
"=",
"regexpr",
"[",
"len",
"(",
"IGNORE_CASE_PREF",
")",
":",
"]",
"reflags",
"=",
"reflags",
"|",
"re",
".",
"IGNORECASE",
"return",
"regexpr",
",",
"reflags"
]
| also preprocesses flags | [
"also",
"preprocesses",
"flags"
]
| python | train |
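The vim-style \c prefix handling above in action, with the extend_regex preprocessing step stubbed out as the identity function:

```python
import re

def extend_regex2(regexpr, reflags=0):
    IGNORE_CASE_PREF = '\\c'
    if regexpr.startswith(IGNORE_CASE_PREF):
        # strip the prefix and fold case-insensitivity into the flags
        regexpr = regexpr[len(IGNORE_CASE_PREF):]
        reflags = reflags | re.IGNORECASE
    return regexpr, reflags

pattern, flags = extend_regex2(r'\cfoo.*bar')
print(pattern)                      # foo.*bar
print(bool(flags & re.IGNORECASE))  # True
```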
gholt/swiftly | swiftly/cli/delete.py | https://github.com/gholt/swiftly/blob/5bcc1c65323b1caf1f85adbefd9fc4988c072149/swiftly/cli/delete.py#L83-L137 | def cli_empty_container(context, path, until_empty=False):
"""
Deletes all objects in the container.
By default, this will perform one pass at deleting all objects in
the container; so if objects revert to previous versions or if new
objects otherwise arise during the process, the container may not be
empty once done.
Set `until_empty` to True if you want multiple passes to keep trying
to fully empty the container. Note until_empty=True could run
forever if something else is making new objects faster than they're
being deleted.
See :py:mod:`swiftly.cli.delete` for context usage information.
See :py:class:`CLIDelete` for more information.
"""
path = path.rstrip('/').decode('utf8')
conc = Concurrency(context.concurrency)
def check_conc():
for (exc_type, exc_value, exc_tb, result) in \
six.itervalues(conc.get_results()):
if exc_value:
with context.io_manager.with_stderr() as fp:
fp.write(str(exc_value))
fp.write('\n')
fp.flush()
marker = None
while True:
with context.client_manager.with_client() as client:
status, reason, headers, contents = client.get_container(
path, marker=marker, headers=context.headers,
query=context.query, cdn=context.cdn)
if status // 100 != 2:
if status == 404 and context.ignore_404:
return
raise ReturnCode(
'listing container %r: %s %s' % (path, status, reason))
if not contents:
if until_empty and marker:
marker = None
continue
break
for item in contents:
newpath = '%s/%s' % (path, item['name'])
new_context = context.copy()
new_context.ignore_404 = True
check_conc()
conc.spawn(newpath, cli_delete, new_context, newpath)
marker = item['name']
conc.join()
check_conc() | [
"def",
"cli_empty_container",
"(",
"context",
",",
"path",
",",
"until_empty",
"=",
"False",
")",
":",
"path",
"=",
"path",
".",
"rstrip",
"(",
"'/'",
")",
".",
"decode",
"(",
"'utf8'",
")",
"conc",
"=",
"Concurrency",
"(",
"context",
".",
"concurrency",
")",
"def",
"check_conc",
"(",
")",
":",
"for",
"(",
"exc_type",
",",
"exc_value",
",",
"exc_tb",
",",
"result",
")",
"in",
"six",
".",
"itervalues",
"(",
"conc",
".",
"get_results",
"(",
")",
")",
":",
"if",
"exc_value",
":",
"with",
"context",
".",
"io_manager",
".",
"with_stderr",
"(",
")",
"as",
"fp",
":",
"fp",
".",
"write",
"(",
"str",
"(",
"exc_value",
")",
")",
"fp",
".",
"write",
"(",
"'\\n'",
")",
"fp",
".",
"flush",
"(",
")",
"marker",
"=",
"None",
"while",
"True",
":",
"with",
"context",
".",
"client_manager",
".",
"with_client",
"(",
")",
"as",
"client",
":",
"status",
",",
"reason",
",",
"headers",
",",
"contents",
"=",
"client",
".",
"get_container",
"(",
"path",
",",
"marker",
"=",
"marker",
",",
"headers",
"=",
"context",
".",
"headers",
",",
"query",
"=",
"context",
".",
"query",
",",
"cdn",
"=",
"context",
".",
"cdn",
")",
"if",
"status",
"//",
"100",
"!=",
"2",
":",
"if",
"status",
"==",
"404",
"and",
"context",
".",
"ignore_404",
":",
"return",
"raise",
"ReturnCode",
"(",
"'listing container %r: %s %s'",
"%",
"(",
"path",
",",
"status",
",",
"reason",
")",
")",
"if",
"not",
"contents",
":",
"if",
"until_empty",
"and",
"marker",
":",
"marker",
"=",
"None",
"continue",
"break",
"for",
"item",
"in",
"contents",
":",
"newpath",
"=",
"'%s/%s'",
"%",
"(",
"path",
",",
"item",
"[",
"'name'",
"]",
")",
"new_context",
"=",
"context",
".",
"copy",
"(",
")",
"new_context",
".",
"ignore_404",
"=",
"True",
"check_conc",
"(",
")",
"conc",
".",
"spawn",
"(",
"newpath",
",",
"cli_delete",
",",
"new_context",
",",
"newpath",
")",
"marker",
"=",
"item",
"[",
"'name'",
"]",
"conc",
".",
"join",
"(",
")",
"check_conc",
"(",
")"
]
| Deletes all objects in the container.
By default, this will perform one pass at deleting all objects in
the container; so if objects revert to previous versions or if new
objects otherwise arise during the process, the container may not be
empty once done.
Set `until_empty` to True if you want multiple passes to keep trying
to fully empty the container. Note until_empty=True could run
forever if something else is making new objects faster than they're
being deleted.
See :py:mod:`swiftly.cli.delete` for context usage information.
See :py:class:`CLIDelete` for more information. | [
"Deletes",
"all",
"objects",
"in",
"the",
"container",
"."
]
| python | test |
tek/ribosome | ribosome/rpc/comm.py | https://github.com/tek/ribosome/blob/b2ce9e118faa46d93506cbbb5f27ecfbd4e8a1cc/ribosome/rpc/comm.py#L105-L114 | def exclusive_ns(guard: StateGuard[A], desc: str, thunk: Callable[..., NS[A, B]], *a: Any) -> Do:
'''this is the central unsafe function, using a lock and updating the state in `guard` in-place.
'''
yield guard.acquire()
log.debug2(lambda: f'exclusive: {desc}')
state, response = yield N.ensure_failure(thunk(*a).run(guard.state), guard.release)
yield N.delay(lambda v: unsafe_update_state(guard, state))
yield guard.release()
log.debug2(lambda: f'release: {desc}')
yield N.pure(response) | [
"def",
"exclusive_ns",
"(",
"guard",
":",
"StateGuard",
"[",
"A",
"]",
",",
"desc",
":",
"str",
",",
"thunk",
":",
"Callable",
"[",
"...",
",",
"NS",
"[",
"A",
",",
"B",
"]",
"]",
",",
"*",
"a",
":",
"Any",
")",
"->",
"Do",
":",
"yield",
"guard",
".",
"acquire",
"(",
")",
"log",
".",
"debug2",
"(",
"lambda",
":",
"f'exclusive: {desc}'",
")",
"state",
",",
"response",
"=",
"yield",
"N",
".",
"ensure_failure",
"(",
"thunk",
"(",
"*",
"a",
")",
".",
"run",
"(",
"guard",
".",
"state",
")",
",",
"guard",
".",
"release",
")",
"yield",
"N",
".",
"delay",
"(",
"lambda",
"v",
":",
"unsafe_update_state",
"(",
"guard",
",",
"state",
")",
")",
"yield",
"guard",
".",
"release",
"(",
")",
"log",
".",
"debug2",
"(",
"lambda",
":",
"f'release: {desc}'",
")",
"yield",
"N",
".",
"pure",
"(",
"response",
")"
]
| this is the central unsafe function, using a lock and updating the state in `guard` in-place. | [
"this",
"is",
"the",
"central",
"unsafe",
"function",
"using",
"a",
"lock",
"and",
"updating",
"the",
"state",
"in",
"guard",
"in",
"-",
"place",
"."
]
| python | test |
Azure/azure-multiapi-storage-python | azure/multiapi/storage/v2015_04_05/table/_deserialization.py | https://github.com/Azure/azure-multiapi-storage-python/blob/bd5482547f993c6eb56fd09070e15c2e9616e440/azure/multiapi/storage/v2015_04_05/table/_deserialization.py#L236-L243 | def _extract_etag(response):
''' Extracts the etag from the response headers. '''
if response and response.headers:
for name, value in response.headers:
if name.lower() == 'etag':
return value
return None | [
"def",
"_extract_etag",
"(",
"response",
")",
":",
"if",
"response",
"and",
"response",
".",
"headers",
":",
"for",
"name",
",",
"value",
"in",
"response",
".",
"headers",
":",
"if",
"name",
".",
"lower",
"(",
")",
"==",
"'etag'",
":",
"return",
"value",
"return",
"None"
]
| Extracts the etag from the response headers. | [
"Extracts",
"the",
"etag",
"from",
"the",
"response",
"headers",
"."
]
| python | train |
pandas-dev/pandas | pandas/util/_validators.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/util/_validators.py#L114-L127 | def _check_for_invalid_keys(fname, kwargs, compat_args):
"""
Checks whether 'kwargs' contains any keys that are not
in 'compat_args' and raises a TypeError if there is one.
"""
# set(dict) --> set of the dictionary's keys
diff = set(kwargs) - set(compat_args)
if diff:
bad_arg = list(diff)[0]
raise TypeError(("{fname}() got an unexpected "
"keyword argument '{arg}'".
format(fname=fname, arg=bad_arg))) | [
"def",
"_check_for_invalid_keys",
"(",
"fname",
",",
"kwargs",
",",
"compat_args",
")",
":",
"# set(dict) --> set of the dictionary's keys",
"diff",
"=",
"set",
"(",
"kwargs",
")",
"-",
"set",
"(",
"compat_args",
")",
"if",
"diff",
":",
"bad_arg",
"=",
"list",
"(",
"diff",
")",
"[",
"0",
"]",
"raise",
"TypeError",
"(",
"(",
"\"{fname}() got an unexpected \"",
"\"keyword argument '{arg}'\"",
".",
"format",
"(",
"fname",
"=",
"fname",
",",
"arg",
"=",
"bad_arg",
")",
")",
")"
]
| Checks whether 'kwargs' contains any keys that are not
in 'compat_args' and raises a TypeError if there is one. | [
"Checks",
"whether",
"kwargs",
"contains",
"any",
"keys",
"that",
"are",
"not",
"in",
"compat_args",
"and",
"raises",
"a",
"TypeError",
"if",
"there",
"is",
"one",
"."
]
| python | train |
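A standalone sketch of the whitelist check above: any keyword not in compat_args raises a TypeError naming one offender:

```python
def check_for_invalid_keys(fname, kwargs, compat_args):
    diff = set(kwargs) - set(compat_args)
    if diff:
        bad_arg = list(diff)[0]
        raise TypeError("{fname}() got an unexpected "
                        "keyword argument '{arg}'".format(fname=fname,
                                                          arg=bad_arg))

check_for_invalid_keys('concat', {'axis': 0}, {'axis', 'join'})  # passes
try:
    check_for_invalid_keys('concat', {'axes': 0}, {'axis', 'join'})
except TypeError as err:
    print(err)  # concat() got an unexpected keyword argument 'axes'
```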
learningequality/ricecooker | ricecooker/utils/metadata_provider.py | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/metadata_provider.py#L822-L836 | def channeldir_node_to_row(self, path_tuple):
"""
Return a dict with keys corresponding to Content.csv columns.
"""
row = dict()
for key in CONTENT_INFO_HEADER:
row[key] = None
row[CONTENT_PATH_KEY] = "/".join(path_tuple) # use / in .csv on Windows and UNIX
title = path_tuple[-1].replace('_', ' ')
for ext in content_kinds.MAPPING.keys():
if title.endswith(ext):
title = title.replace('.'+ext, '')
row[CONTENT_TITLE_KEY] = title
row[CONTENT_SOURCEID_KEY] = path_tuple[-1]
return row | [
"def",
"channeldir_node_to_row",
"(",
"self",
",",
"path_tuple",
")",
":",
"row",
"=",
"dict",
"(",
")",
"for",
"key",
"in",
"CONTENT_INFO_HEADER",
":",
"row",
"[",
"key",
"]",
"=",
"None",
"row",
"[",
"CONTENT_PATH_KEY",
"]",
"=",
"\"/\"",
".",
"join",
"(",
"path_tuple",
")",
"# use / in .csv on Windows and UNIX",
"title",
"=",
"path_tuple",
"[",
"-",
"1",
"]",
".",
"replace",
"(",
"'_'",
",",
"' '",
")",
"for",
"ext",
"in",
"content_kinds",
".",
"MAPPING",
".",
"keys",
"(",
")",
":",
"if",
"title",
".",
"endswith",
"(",
"ext",
")",
":",
"title",
"=",
"title",
".",
"replace",
"(",
"'.'",
"+",
"ext",
",",
"''",
")",
"row",
"[",
"CONTENT_TITLE_KEY",
"]",
"=",
"title",
"row",
"[",
"CONTENT_SOURCEID_KEY",
"]",
"=",
"path_tuple",
"[",
"-",
"1",
"]",
"return",
"row"
]
| Return a dict with keys corresponding to Content.csv columns. | [
"Return",
"a",
"dict",
"with",
"keys",
"corresponding",
"to",
"Content",
".",
"csv",
"columns",
"."
]
| python | train |
ianmiell/shutit | shutit_class.py | https://github.com/ianmiell/shutit/blob/19cd64cdfb23515b106b40213dccff4101617076/shutit_class.py#L1061-L1096 | def send_file(self,
path,
contents,
shutit_pexpect_child=None,
truncate=False,
note=None,
user=None,
echo=False,
group=None,
loglevel=logging.INFO,
encoding=None):
"""Sends the passed-in string as a file to the passed-in path on the
target.
@param path: Target location of file on target.
@param contents: Contents of file as a string.
@param shutit_pexpect_child: See send()
@param note: See send()
@param user: Set ownership to this user (defaults to whoami)
@param group: Set group to this user (defaults to first group in groups)
@type path: string
@type contents: string
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
return shutit_pexpect_session.send_file(path,
contents,
truncate=truncate,
note=note,
echo=echo,
user=user,
group=group,
loglevel=loglevel,
encoding=encoding) | [
"def",
"send_file",
"(",
"self",
",",
"path",
",",
"contents",
",",
"shutit_pexpect_child",
"=",
"None",
",",
"truncate",
"=",
"False",
",",
"note",
"=",
"None",
",",
"user",
"=",
"None",
",",
"echo",
"=",
"False",
",",
"group",
"=",
"None",
",",
"loglevel",
"=",
"logging",
".",
"INFO",
",",
"encoding",
"=",
"None",
")",
":",
"shutit_global",
".",
"shutit_global_object",
".",
"yield_to_draw",
"(",
")",
"shutit_pexpect_child",
"=",
"shutit_pexpect_child",
"or",
"self",
".",
"get_current_shutit_pexpect_session",
"(",
")",
".",
"pexpect_child",
"shutit_pexpect_session",
"=",
"self",
".",
"get_shutit_pexpect_session_from_child",
"(",
"shutit_pexpect_child",
")",
"return",
"shutit_pexpect_session",
".",
"send_file",
"(",
"path",
",",
"contents",
",",
"truncate",
"=",
"truncate",
",",
"note",
"=",
"note",
",",
"echo",
"=",
"echo",
",",
"user",
"=",
"user",
",",
"group",
"=",
"group",
",",
"loglevel",
"=",
"loglevel",
",",
"encoding",
"=",
"encoding",
")"
]
| Sends the passed-in string as a file to the passed-in path on the
target.
@param path: Target location of file on target.
@param contents: Contents of file as a string.
@param shutit_pexpect_child: See send()
@param note: See send()
@param user: Set ownership to this user (defaults to whoami)
@param group: Set group to this user (defaults to first group in groups)
@type path: string
@type contents: string | [
"Sends",
"the",
"passed",
"-",
"in",
"string",
"as",
"a",
"file",
"to",
"the",
"passed",
"-",
"in",
"path",
"on",
"the",
"target",
"."
]
| python | train |
kevinconway/daemons | daemons/pid/simple.py | https://github.com/kevinconway/daemons/blob/b0fe0db5821171a35aa9078596d19d630c570b38/daemons/pid/simple.py#L88-L101 | def pid(self):
"""Stop managing the current pid."""
try:
os.remove(self.pidfile)
except IOError:
if not os.path.isfile(self.pidfile):
return None
LOG.exception("Failed to clear pidfile {0}).".format(self.pidfile))
sys.exit(exit.PIDFILE_INACCESSIBLE) | [
"def",
"pid",
"(",
"self",
")",
":",
"try",
":",
"os",
".",
"remove",
"(",
"self",
".",
"pidfile",
")",
"except",
"IOError",
":",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"self",
".",
"pidfile",
")",
":",
"return",
"None",
"LOG",
".",
"exception",
"(",
"\"Failed to clear pidfile {0}).\"",
".",
"format",
"(",
"self",
".",
"pidfile",
")",
")",
"sys",
".",
"exit",
"(",
"exit",
".",
"PIDFILE_INACCESSIBLE",
")"
]
| Stop managing the current pid. | [
"Stop",
"managing",
"the",
"current",
"pid",
"."
]
| python | train |
tanghaibao/goatools | goatools/obo_parser.py | https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/obo_parser.py#L370-L379 | def _populate_relationships(self, rec_curr):
"""Convert GO IDs in relationships to GO Term record objects. Populate children."""
for relationship_type, goids in rec_curr.relationship.items():
parent_recs = set([self[goid] for goid in goids])
rec_curr.relationship[relationship_type] = parent_recs
for parent_rec in parent_recs:
if relationship_type not in parent_rec.relationship_rev:
parent_rec.relationship_rev[relationship_type] = set([rec_curr])
else:
parent_rec.relationship_rev[relationship_type].add(rec_curr) | [
"def",
"_populate_relationships",
"(",
"self",
",",
"rec_curr",
")",
":",
"for",
"relationship_type",
",",
"goids",
"in",
"rec_curr",
".",
"relationship",
".",
"items",
"(",
")",
":",
"parent_recs",
"=",
"set",
"(",
"[",
"self",
"[",
"goid",
"]",
"for",
"goid",
"in",
"goids",
"]",
")",
"rec_curr",
".",
"relationship",
"[",
"relationship_type",
"]",
"=",
"parent_recs",
"for",
"parent_rec",
"in",
"parent_recs",
":",
"if",
"relationship_type",
"not",
"in",
"parent_rec",
".",
"relationship_rev",
":",
"parent_rec",
".",
"relationship_rev",
"[",
"relationship_type",
"]",
"=",
"set",
"(",
"[",
"rec_curr",
"]",
")",
"else",
":",
"parent_rec",
".",
"relationship_rev",
"[",
"relationship_type",
"]",
".",
"add",
"(",
"rec_curr",
")"
]
| Convert GO IDs in relationships to GO Term record objects. Populate children. | [
"Convert",
"GO",
"IDs",
"in",
"relationships",
"to",
"GO",
"Term",
"record",
"objects",
".",
"Populate",
"children",
"."
]
| python | train |
supercoderz/pyzmq-wrapper | zmqwrapper/producers.py | https://github.com/supercoderz/pyzmq-wrapper/blob/b16c0313dd10febd5060ee0589285025a09fa26a/zmqwrapper/producers.py#L29-L38 | def push(self,message,message_type):
"""
Send a reply message of the given type
Args:
- message: the message to publish
- message_type: the type of message being sent
"""
super(Producer,self).send(message,message_type) | [
"def",
"push",
"(",
"self",
",",
"message",
",",
"message_type",
")",
":",
"super",
"(",
"Producer",
",",
"self",
")",
".",
"send",
"(",
"message",
",",
"message_type",
")"
]
| Send a reply message of the given type
Args:
- message: the message to publish
- message_type: the type of message being sent | [
"Send",
"a",
"reply",
"message",
"of",
"the",
"given",
"type",
"Args",
":",
"-",
"message",
":",
"the",
"message",
"to",
"publish",
"-",
"message_type",
":",
"the",
"type",
"of",
"message",
"being",
"sent"
]
| python | train |
pybel/pybel | src/pybel/struct/query/query.py | https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/struct/query/query.py#L132-L144 | def to_json(self) -> Dict:
"""Return this query as a JSON object."""
rv = {
'network_ids': self.network_ids,
}
if self.seeding:
rv['seeding'] = self.seeding.to_json()
if self.pipeline:
rv['pipeline'] = self.pipeline.to_json()
return rv | [
"def",
"to_json",
"(",
"self",
")",
"->",
"Dict",
":",
"rv",
"=",
"{",
"'network_ids'",
":",
"self",
".",
"network_ids",
",",
"}",
"if",
"self",
".",
"seeding",
":",
"rv",
"[",
"'seeding'",
"]",
"=",
"self",
".",
"seeding",
".",
"to_json",
"(",
")",
"if",
"self",
".",
"pipeline",
":",
"rv",
"[",
"'pipeline'",
"]",
"=",
"self",
".",
"pipeline",
".",
"to_json",
"(",
")",
"return",
"rv"
]
| Return this query as a JSON object. | [
"Return",
"this",
"query",
"as",
"a",
"JSON",
"object",
"."
]
| python | train |
saltstack/salt | salt/cloud/clouds/digitalocean.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/digitalocean.py#L791-L801 | def get_keyid(keyname):
'''
Return the ID of the keyname
'''
if not keyname:
return None
keypairs = list_keypairs(call='function')
keyid = keypairs[keyname]['id']
if keyid:
return keyid
raise SaltCloudNotFound('The specified ssh key could not be found.') | [
"def",
"get_keyid",
"(",
"keyname",
")",
":",
"if",
"not",
"keyname",
":",
"return",
"None",
"keypairs",
"=",
"list_keypairs",
"(",
"call",
"=",
"'function'",
")",
"keyid",
"=",
"keypairs",
"[",
"keyname",
"]",
"[",
"'id'",
"]",
"if",
"keyid",
":",
"return",
"keyid",
"raise",
"SaltCloudNotFound",
"(",
"'The specified ssh key could not be found.'",
")"
]
| Return the ID of the keyname | [
"Return",
"the",
"ID",
"of",
"the",
"keyname"
]
| python | train |
gwastro/pycbc | pycbc/waveform/utils.py | https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/waveform/utils.py#L407-L450 | def td_taper(out, start, end, beta=8, side='left'):
"""Applies a taper to the given TimeSeries.
A half-kaiser window is used for the roll-off.
Parameters
----------
out : TimeSeries
The ``TimeSeries`` to taper.
start : float
The time (in s) to start the taper window.
end : float
The time (in s) to end the taper window.
beta : int, optional
The beta parameter to use for the Kaiser window. See
``scipy.signal.kaiser`` for details. Default is 8.
side : {'left', 'right'}
The side to apply the taper to. If ``'left'`` (``'right'``), the taper
will roll up (down) between ``start`` and ``end``, with all values
before ``start`` (after ``end``) set to zero. Default is ``'left'``.
Returns
-------
TimeSeries
The tapered time series.
"""
out = out.copy()
width = end - start
winlen = 2 * int(width / out.delta_t)
window = Array(signal.get_window(('kaiser', beta), winlen))
xmin = int((start - out.start_time) / out.delta_t)
xmax = xmin + winlen//2
if side == 'left':
out[xmin:xmax] *= window[:winlen//2]
if xmin > 0:
out[:xmin].clear()
elif side == 'right':
out[xmin:xmax] *= window[winlen//2:]
if xmax < len(out):
out[xmax:].clear()
else:
raise ValueError("unrecognized side argument {}".format(side))
return out | [
"def",
"td_taper",
"(",
"out",
",",
"start",
",",
"end",
",",
"beta",
"=",
"8",
",",
"side",
"=",
"'left'",
")",
":",
"out",
"=",
"out",
".",
"copy",
"(",
")",
"width",
"=",
"end",
"-",
"start",
"winlen",
"=",
"2",
"*",
"int",
"(",
"width",
"/",
"out",
".",
"delta_t",
")",
"window",
"=",
"Array",
"(",
"signal",
".",
"get_window",
"(",
"(",
"'kaiser'",
",",
"beta",
")",
",",
"winlen",
")",
")",
"xmin",
"=",
"int",
"(",
"(",
"start",
"-",
"out",
".",
"start_time",
")",
"/",
"out",
".",
"delta_t",
")",
"xmax",
"=",
"xmin",
"+",
"winlen",
"//",
"2",
"if",
"side",
"==",
"'left'",
":",
"out",
"[",
"xmin",
":",
"xmax",
"]",
"*=",
"window",
"[",
":",
"winlen",
"//",
"2",
"]",
"if",
"xmin",
">",
"0",
":",
"out",
"[",
":",
"xmin",
"]",
".",
"clear",
"(",
")",
"elif",
"side",
"==",
"'right'",
":",
"out",
"[",
"xmin",
":",
"xmax",
"]",
"*=",
"window",
"[",
"winlen",
"//",
"2",
":",
"]",
"if",
"xmax",
"<",
"len",
"(",
"out",
")",
":",
"out",
"[",
"xmax",
":",
"]",
".",
"clear",
"(",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"unrecognized side argument {}\"",
".",
"format",
"(",
"side",
")",
")",
"return",
"out"
]
| Applies a taper to the given TimeSeries.
A half-kaiser window is used for the roll-off.
Parameters
----------
out : TimeSeries
The ``TimeSeries`` to taper.
start : float
The time (in s) to start the taper window.
end : float
The time (in s) to end the taper window.
beta : int, optional
The beta parameter to use for the Kaiser window. See
``scipy.signal.kaiser`` for details. Default is 8.
side : {'left', 'right'}
The side to apply the taper to. If ``'left'`` (``'right'``), the taper
will roll up (down) between ``start`` and ``end``, with all values
before ``start`` (after ``end``) set to zero. Default is ``'left'``.
Returns
-------
TimeSeries
The tapered time series. | [
"Applies",
"a",
"taper",
"to",
"the",
"given",
"TimeSeries",
"."
]
| python | train |
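A standalone numpy/scipy sketch of the side='left' branch above: a symmetric Kaiser window of length 2*N is built, only its rising half is applied, and everything before the taper start is zeroed (the sample values here are hypothetical):

```python
import numpy as np
from scipy import signal

delta_t = 1.0 / 64
data = np.ones(128)               # stand-in for the time series values
start, end, beta = 0.25, 0.75, 8  # taper window in seconds

winlen = 2 * int((end - start) / delta_t)
window = signal.get_window(('kaiser', beta), winlen)
xmin = int(start / delta_t)       # assumes the series starts at t = 0
xmax = xmin + winlen // 2

data[xmin:xmax] *= window[:winlen // 2]  # rising half of the window
data[:xmin] = 0.0                        # clear everything before it
print(data[xmin], data[xmax])            # ~0.002 at the taper start, 1.0 after
```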
cdgriffith/Reusables | reusables/string_manipulation.py | https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/string_manipulation.py#L72-L94 | def int_to_roman(integer):
"""
Convert an integer into a string of roman numbers.
.. code: python
reusables.int_to_roman(445)
# 'CDXLV'
:param integer:
:return: roman string
"""
if not isinstance(integer, int):
raise ValueError("Input integer must be of type int")
output = []
while integer > 0:
for r, i in sorted(_roman_dict.items(),
key=lambda x: x[1], reverse=True):
while integer >= i:
output.append(r)
integer -= i
return "".join(output) | [
"def",
"int_to_roman",
"(",
"integer",
")",
":",
"if",
"not",
"isinstance",
"(",
"integer",
",",
"int",
")",
":",
"raise",
"ValueError",
"(",
"\"Input integer must be of type int\"",
")",
"output",
"=",
"[",
"]",
"while",
"integer",
">",
"0",
":",
"for",
"r",
",",
"i",
"in",
"sorted",
"(",
"_roman_dict",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"1",
"]",
",",
"reverse",
"=",
"True",
")",
":",
"while",
"integer",
">=",
"i",
":",
"output",
".",
"append",
"(",
"r",
")",
"integer",
"-=",
"i",
"return",
"\"\"",
".",
"join",
"(",
"output",
")"
]
| Convert an integer into a string of roman numbers.
.. code: python
reusables.int_to_roman(445)
# 'CDXLV'
:param integer:
:return: roman string | [
"Convert",
"an",
"integer",
"into",
"a",
"string",
"of",
"roman",
"numbers",
"."
]
| python | train |
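A usage sketch for the greedy conversion above; _roman_dict is not shown in this row, so the usual numeral values (including subtractive pairs such as 'CD' = 400) are assumed:

```python
_roman_dict = {'M': 1000, 'CM': 900, 'D': 500, 'CD': 400, 'C': 100,
               'XC': 90, 'L': 50, 'XL': 40, 'X': 10, 'IX': 9,
               'V': 5, 'IV': 4, 'I': 1}

def int_to_roman(integer):
    output = []
    while integer > 0:
        # greedily take the largest numeral that still fits
        for r, i in sorted(_roman_dict.items(),
                           key=lambda x: x[1], reverse=True):
            while integer >= i:
                output.append(r)
                integer -= i
    return "".join(output)

print(int_to_roman(445))   # CDXLV
print(int_to_roman(1994))  # MCMXCIV
```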
google/grumpy | third_party/stdlib/urlparse.py | https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/urlparse.py#L486-L510 | def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
"""Parse a query given as a string argument.
Arguments:
qs: percent-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
percent-encoded queries should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
"""
dict = {}
for name, value in parse_qsl(qs, keep_blank_values, strict_parsing):
if name in dict:
dict[name].append(value)
else:
dict[name] = [value]
return dict | [
"def",
"parse_qs",
"(",
"qs",
",",
"keep_blank_values",
"=",
"0",
",",
"strict_parsing",
"=",
"0",
")",
":",
"dict",
"=",
"{",
"}",
"for",
"name",
",",
"value",
"in",
"parse_qsl",
"(",
"qs",
",",
"keep_blank_values",
",",
"strict_parsing",
")",
":",
"if",
"name",
"in",
"dict",
":",
"dict",
"[",
"name",
"]",
".",
"append",
"(",
"value",
")",
"else",
":",
"dict",
"[",
"name",
"]",
"=",
"[",
"value",
"]",
"return",
"dict"
]
| Parse a query given as a string argument.
Arguments:
qs: percent-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
percent-encoded queries should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception. | [
"Parse",
"a",
"query",
"given",
"as",
"a",
"string",
"argument",
"."
]
| python | valid |
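The dict-of-lists behaviour above in action (this row is Python 2 stdlib code; on Python 3 the same function lives in urllib.parse):

```python
from urlparse import parse_qs  # Python 2, as in this row

print(parse_qs('a=1&a=2&b=3'))
# {'a': ['1', '2'], 'b': ['3']}: repeated keys append to the list

print(parse_qs('a=1&b=', keep_blank_values=1))
# {'a': ['1'], 'b': ['']}: blanks kept only when requested
```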
mrtazz/notifo.py | bin/notifo_cli.py | https://github.com/mrtazz/notifo.py/blob/26079db3b40c26661155af20a9f16a0eca06dbde/bin/notifo_cli.py#L10-L32 | def init_parser():
""" function to init option parser """
usage = "usage: %prog -u user -s secret -n name [-l label] \
[-t title] [-c callback] [TEXT]"
parser = OptionParser(usage, version="%prog " + notifo.__version__)
parser.add_option("-u", "--user", action="store", dest="user",
help="your notifo username")
parser.add_option("-s", "--secret", action="store", dest="secret",
help="your notifo API secret")
parser.add_option("-n", "--name", action="store", dest="name",
help="recipient for the notification")
parser.add_option("-l", "--label", action="store", dest="label",
help="label for the notification")
parser.add_option("-t", "--title", action="store", dest="title",
help="title of the notification")
parser.add_option("-c", "--callback", action="store", dest="callback",
help="callback URL to call")
parser.add_option("-m", "--message", action="store_true", dest="message",
default=False, help="send message instead of notification")
(options, args) = parser.parse_args()
return (parser, options, args) | [
"def",
"init_parser",
"(",
")",
":",
"usage",
"=",
"\"usage: %prog -u user -s secret -n name [-l label] \\\n[-t title] [-c callback] [TEXT]\"",
"parser",
"=",
"OptionParser",
"(",
"usage",
",",
"version",
"=",
"\"%prog \"",
"+",
"notifo",
".",
"__version__",
")",
"parser",
".",
"add_option",
"(",
"\"-u\"",
",",
"\"--user\"",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"user\"",
",",
"help",
"=",
"\"your notifo username\"",
")",
"parser",
".",
"add_option",
"(",
"\"-s\"",
",",
"\"--secret\"",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"secret\"",
",",
"help",
"=",
"\"your notifo API secret\"",
")",
"parser",
".",
"add_option",
"(",
"\"-n\"",
",",
"\"--name\"",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"name\"",
",",
"help",
"=",
"\"recipient for the notification\"",
")",
"parser",
".",
"add_option",
"(",
"\"-l\"",
",",
"\"--label\"",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"label\"",
",",
"help",
"=",
"\"label for the notification\"",
")",
"parser",
".",
"add_option",
"(",
"\"-t\"",
",",
"\"--title\"",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"title\"",
",",
"help",
"=",
"\"title of the notification\"",
")",
"parser",
".",
"add_option",
"(",
"\"-c\"",
",",
"\"--callback\"",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"callback\"",
",",
"help",
"=",
"\"callback URL to call\"",
")",
"parser",
".",
"add_option",
"(",
"\"-m\"",
",",
"\"--message\"",
",",
"action",
"=",
"\"store_true\"",
",",
"dest",
"=",
"\"message\"",
",",
"default",
"=",
"False",
",",
"help",
"=",
"\"send message instead of notification\"",
")",
"(",
"options",
",",
"args",
")",
"=",
"parser",
".",
"parse_args",
"(",
")",
"return",
"(",
"parser",
",",
"options",
",",
"args",
")"
]
| function to init option parser | [
"function",
"to",
"init",
"option",
"parser"
]
| python | test |
cds-astro/mocpy | mocpy/tmoc/tmoc.py | https://github.com/cds-astro/mocpy/blob/09472cabe537f6bfdb049eeea64d3ea57b391c21/mocpy/tmoc/tmoc.py#L84-L95 | def add_neighbours(self):
"""
Add all the pixels at max order in the neighbourhood of the moc
"""
time_delta = 1 << (2*(IntervalSet.HPY_MAX_ORDER - self.max_order))
intervals_arr = self._interval_set._intervals
intervals_arr[:, 0] = np.maximum(intervals_arr[:, 0] - time_delta, 0)
intervals_arr[:, 1] = np.minimum(intervals_arr[:, 1] + time_delta, (1 << 58) - 1)
self._interval_set = IntervalSet(intervals_arr) | [
"def",
"add_neighbours",
"(",
"self",
")",
":",
"time_delta",
"=",
"1",
"<<",
"(",
"2",
"*",
"(",
"IntervalSet",
".",
"HPY_MAX_ORDER",
"-",
"self",
".",
"max_order",
")",
")",
"intervals_arr",
"=",
"self",
".",
"_interval_set",
".",
"_intervals",
"intervals_arr",
"[",
":",
",",
"0",
"]",
"=",
"np",
".",
"maximum",
"(",
"intervals_arr",
"[",
":",
",",
"0",
"]",
"-",
"time_delta",
",",
"0",
")",
"intervals_arr",
"[",
":",
",",
"1",
"]",
"=",
"np",
".",
"minimum",
"(",
"intervals_arr",
"[",
":",
",",
"1",
"]",
"+",
"time_delta",
",",
"(",
"1",
"<<",
"58",
")",
"-",
"1",
")",
"self",
".",
"_interval_set",
"=",
"IntervalSet",
"(",
"intervals_arr",
")"
]
| Add all the pixels at max order in the neighbourhood of the moc | [
"Add",
"all",
"the",
"pixels",
"at",
"max",
"order",
"in",
"the",
"neighbourhood",
"of",
"the",
"moc"
]
| python | train |
QuantEcon/QuantEcon.py | quantecon/graph_tools.py | https://github.com/QuantEcon/QuantEcon.py/blob/26a66c552f2a73967d7efb6e1f4b4c4985a12643/quantecon/graph_tools.py#L334-L359 | def subgraph(self, nodes):
"""
Return the subgraph consisting of the given nodes and edges
between these nodes.
Parameters
----------
nodes : array_like(int, ndim=1)
Array of node indices.
Returns
-------
DiGraph
A DiGraph representing the subgraph.
"""
adj_matrix = self.csgraph[np.ix_(nodes, nodes)]
weighted = True # To copy the dtype
if self.node_labels is not None:
node_labels = self.node_labels[nodes]
else:
node_labels = None
return DiGraph(adj_matrix, weighted=weighted, node_labels=node_labels) | [
"def",
"subgraph",
"(",
"self",
",",
"nodes",
")",
":",
"adj_matrix",
"=",
"self",
".",
"csgraph",
"[",
"np",
".",
"ix_",
"(",
"nodes",
",",
"nodes",
")",
"]",
"weighted",
"=",
"True",
"# To copy the dtype",
"if",
"self",
".",
"node_labels",
"is",
"not",
"None",
":",
"node_labels",
"=",
"self",
".",
"node_labels",
"[",
"nodes",
"]",
"else",
":",
"node_labels",
"=",
"None",
"return",
"DiGraph",
"(",
"adj_matrix",
",",
"weighted",
"=",
"weighted",
",",
"node_labels",
"=",
"node_labels",
")"
]
| Return the subgraph consisting of the given nodes and edges
between thses nodes.
Parameters
----------
nodes : array_like(int, ndim=1)
Array of node indices.
Returns
-------
DiGraph
A DiGraph representing the subgraph. | [
"Return",
"the",
"subgraph",
"consisting",
"of",
"the",
"given",
"nodes",
"and",
"edges",
"between",
"thses",
"nodes",
"."
]
| python | train |
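A standalone numpy sketch of the np.ix_ selection that subgraph uses above: it extracts the node-induced adjacency sub-matrix and the matching labels in one indexing step each:

```python
import numpy as np

adj = np.array([[0, 1, 0],
                [0, 0, 1],
                [1, 0, 0]])
labels = np.array(['a', 'b', 'c'])

nodes = [0, 2]                       # keep nodes 'a' and 'c'
sub_adj = adj[np.ix_(nodes, nodes)]  # rows and columns of kept nodes
sub_labels = labels[nodes]

print(sub_adj)     # [[0 0], [1 0]]: only the c -> a edge survives
print(sub_labels)  # ['a' 'c']
```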
pypa/pipenv | pipenv/vendor/jinja2/nodes.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/jinja2/nodes.py#L148-L162 | def iter_fields(self, exclude=None, only=None):
"""This method iterates over all fields that are defined and yields
``(key, value)`` tuples. Per default all fields are returned, but
it's possible to limit that to some fields by providing the `only`
parameter or to exclude some using the `exclude` parameter. Both
should be sets or tuples of field names.
"""
for name in self.fields:
if (exclude is only is None) or \
(exclude is not None and name not in exclude) or \
(only is not None and name in only):
try:
yield name, getattr(self, name)
except AttributeError:
pass | [
"def",
"iter_fields",
"(",
"self",
",",
"exclude",
"=",
"None",
",",
"only",
"=",
"None",
")",
":",
"for",
"name",
"in",
"self",
".",
"fields",
":",
"if",
"(",
"exclude",
"is",
"only",
"is",
"None",
")",
"or",
"(",
"exclude",
"is",
"not",
"None",
"and",
"name",
"not",
"in",
"exclude",
")",
"or",
"(",
"only",
"is",
"not",
"None",
"and",
"name",
"in",
"only",
")",
":",
"try",
":",
"yield",
"name",
",",
"getattr",
"(",
"self",
",",
"name",
")",
"except",
"AttributeError",
":",
"pass"
]
| This method iterates over all fields that are defined and yields
``(key, value)`` tuples. Per default all fields are returned, but
it's possible to limit that to some fields by providing the `only`
parameter or to exclude some using the `exclude` parameter. Both
should be sets or tuples of field names. | [
"This",
"method",
"iterates",
"over",
"all",
"fields",
"that",
"are",
"defined",
"and",
"yields",
"(",
"key",
"value",
")",
"tuples",
".",
"Per",
"default",
"all",
"fields",
"are",
"returned",
"but",
"it",
"s",
"possible",
"to",
"limit",
"that",
"to",
"some",
"fields",
"by",
"providing",
"the",
"only",
"parameter",
"or",
"to",
"exclude",
"some",
"using",
"the",
"exclude",
"parameter",
".",
"Both",
"should",
"be",
"sets",
"or",
"tuples",
"of",
"field",
"names",
"."
]
| python | train |
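The include/exclude filtering above in action on a concrete jinja2 node (Const has the single field 'value'):

```python
from jinja2 import nodes

n = nodes.Const(42)
print(list(n.iter_fields()))                   # [('value', 42)]
print(list(n.iter_fields(exclude={'value'})))  # []
print(list(n.iter_fields(only={'value'})))     # [('value', 42)]
```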
callowayproject/Transmogrify | transmogrify/images2gif.py | https://github.com/callowayproject/Transmogrify/blob/f1f891b8b923b3a1ede5eac7f60531c1c472379e/transmogrify/images2gif.py#L166-L197 | def get_image_descriptor(self, im, xy=None):
""" get_image_descriptor(im, xy=None)
Used for the local color table properties per image.
Otherwise global color table applies to all frames irrespective of
whether additional colors comes in play that require a redefined
palette. Still a maximum of 256 color per frame, obviously.
Written by Ant1 on 2010-08-22
Modified by Alex Robinson in January 2011 to implement subrectangles.
"""
# Defaule use full image and place at upper left
if xy is None:
xy = (0, 0)
# Image separator,
bb = '\x2C'
# Image position and size
bb += int_to_bin(xy[0]) # Left position
bb += int_to_bin(xy[1]) # Top position
bb += int_to_bin(im.size[0]) # image width
bb += int_to_bin(im.size[1]) # image height
# packed field: local color table flag1, interlace0, sorted table0,
# reserved00, lct size111=7=2^(7+1)=256.
bb += '\x87'
# LZW minimum size code now comes later, beginning of [image data] blocks
return bb | [
"def",
"get_image_descriptor",
"(",
"self",
",",
"im",
",",
"xy",
"=",
"None",
")",
":",
"# Defaule use full image and place at upper left",
"if",
"xy",
"is",
"None",
":",
"xy",
"=",
"(",
"0",
",",
"0",
")",
"# Image separator,",
"bb",
"=",
"'\\x2C'",
"# Image position and size",
"bb",
"+=",
"int_to_bin",
"(",
"xy",
"[",
"0",
"]",
")",
"# Left position",
"bb",
"+=",
"int_to_bin",
"(",
"xy",
"[",
"1",
"]",
")",
"# Top position",
"bb",
"+=",
"int_to_bin",
"(",
"im",
".",
"size",
"[",
"0",
"]",
")",
"# image width",
"bb",
"+=",
"int_to_bin",
"(",
"im",
".",
"size",
"[",
"1",
"]",
")",
"# image height",
"# packed field: local color table flag1, interlace0, sorted table0,",
"# reserved00, lct size111=7=2^(7+1)=256.",
"bb",
"+=",
"'\\x87'",
"# LZW minimum size code now comes later, begining of [image data] blocks",
"return",
"bb"
]
| get_image_descriptor(im, xy=None)
Used for the local color table properties per image.
Otherwise global color table applies to all frames irrespective of
whether additional colors come into play that require a redefined
palette. Still a maximum of 256 colors per frame, obviously.
Written by Ant1 on 2010-08-22
Modified by Alex Robinson in January 2011 to implement subrectangles. | [
"get_image_descriptor",
"(",
"im",
"xy",
"=",
"None",
")"
]
| python | train |
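A sketch of how the descriptor bytes come together; int_to_bin is assumed here to pack a 16-bit little-endian integer, which is what the GIF format and its use above imply (bytes replace the method's Python 2 str concatenation):

import struct

def int_to_bin(i):
    # assumed helper: 16-bit little-endian, as GIF descriptors require
    return struct.pack('<H', i)

bb = b'\x2C'          # image separator
bb += int_to_bin(0)   # left position
bb += int_to_bin(0)   # top position
bb += int_to_bin(64)  # image width
bb += int_to_bin(48)  # image height
bb += b'\x87'         # packed field: local color table, 256 entries
print(bb.hex())       # 2c000000004000300087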
senaite/senaite.core | bika/lims/jsonapi/update.py | https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/jsonapi/update.py#L167-L172 | def require(self, fieldname, allow_blank=False):
"""fieldname is required"""
if self.request.form and fieldname not in self.request.form.keys():
raise Exception("Required field not found in request: %s" % fieldname)
if self.request.form and (not self.request.form[fieldname] and not allow_blank):
raise Exception("Required field %s may not have blank value" % fieldname)
"def",
"require",
"(",
"self",
",",
"fieldname",
",",
"allow_blank",
"=",
"False",
")",
":",
"if",
"self",
".",
"request",
".",
"form",
"and",
"fieldname",
"not",
"in",
"self",
".",
"request",
".",
"form",
".",
"keys",
"(",
")",
":",
"raise",
"Exception",
"(",
"\"Required field not found in request: %s\"",
"%",
"fieldname",
")",
"if",
"self",
".",
"request",
".",
"form",
"and",
"(",
"not",
"self",
".",
"request",
".",
"form",
"[",
"fieldname",
"]",
"or",
"allow_blank",
")",
":",
"raise",
"Exception",
"(",
"\"Required field %s may not have blank value\"",
")"
]
| fieldname is required | [
"fieldname",
"is",
"required"
]
| python | train |
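A quick check of the contract, with a hypothetical stand-in for the request object:

class FakeRequest(object):
    # hypothetical stand-in for the real request
    def __init__(self, form):
        self.form = form

class Handler(object):
    def __init__(self, request):
        self.request = request

    def require(self, fieldname, allow_blank=False):
        # same logic as the method above
        if self.request.form and fieldname not in self.request.form.keys():
            raise Exception("Required field not found in request: %s" % fieldname)
        if self.request.form and (not self.request.form[fieldname] and not allow_blank):
            raise Exception("Required field %s may not have blank value" % fieldname)

Handler(FakeRequest({'title': 'Sample'})).require('title')              # ok
Handler(FakeRequest({'title': ''})).require('title', allow_blank=True)  # ok: blank allowed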
ooici/elasticpy | elasticpy/filter.py | https://github.com/ooici/elasticpy/blob/ec221800a80c39e80d8c31667c5b138da39219f2/elasticpy/filter.py#L88-L102 | def geo_distance(cls, field, center, distance, distance_type=None):
'''
http://www.elasticsearch.org/guide/reference/query-dsl/geo-distance-filter.html
Filters documents that include only hits that exist within a specific distance from a geo point.
field - Field name
center - Center point (Geo point)
distance - String for the distance
distance_type - (arc | plane) How to compute the distance. Can either be arc (better precision) or plane (faster). Defaults to arc
> bounds = ElasticFilter().geo_distance('pin.location', [40.73, -74.1], '300km')
'''
instance = cls(geo_distance={'distance': distance, field: center})
if distance_type is not None:
instance['geo_distance']['distance_type'] = distance_type
return instance | [
"def",
"geo_distance",
"(",
"cls",
",",
"field",
",",
"center",
",",
"distance",
",",
"distance_type",
"=",
"None",
")",
":",
"instance",
"=",
"cls",
"(",
"geo_distance",
"=",
"{",
"'distance'",
":",
"distance",
",",
"field",
":",
"center",
"}",
")",
"if",
"distance_type",
"is",
"not",
"None",
":",
"instance",
"[",
"'geo_distance'",
"]",
"[",
"'distance_type'",
"]",
"=",
"distance_type",
"return",
"instance"
]
| http://www.elasticsearch.org/guide/reference/query-dsl/geo-distance-filter.html
Filters documents that include only hits that exist within a specific distance from a geo point.
field - Field name
center - Center point (Geo point)
distance - String for the distance
distance_type - (arc | plane) How to compute the distance. Can either be arc (better precision) or plane (faster). Defaults to arc
> bounds = ElasticFilter().geo_distance('pin.location', [40.73, -74.1], '300km') | [
"http",
":",
"//",
"www",
".",
"elasticsearch",
".",
"org",
"/",
"guide",
"/",
"reference",
"/",
"query",
"-",
"dsl",
"/",
"geo",
"-",
"distance",
"-",
"filter",
".",
"html",
"Filters",
"documents",
"that",
"include",
"only",
"hits",
"that",
"exists",
"within",
"a",
"specific",
"distance",
"from",
"a",
"geo",
"point",
".",
"field",
"-",
"Field",
"name",
"center",
"-",
"Center",
"point",
"(",
"Geo",
"point",
")",
"distance",
"-",
"String",
"for",
"the",
"distance",
"distance_type",
"-",
"(",
"arc",
"|",
"plane",
")",
"How",
"to",
"compute",
"the",
"distance",
".",
"Can",
"either",
"be",
"arc",
"(",
"better",
"precision",
")",
"or",
"plane",
"(",
"faster",
")",
".",
"Defaults",
"to",
"arc",
">",
"bounds",
"=",
"ElasticFilter",
"()",
".",
"geo_distance",
"(",
"pin",
".",
"location",
"[",
"40",
".",
"73",
"-",
"74",
".",
"1",
"]",
"300km",
")"
]
| python | train |
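A usage sketch matching the docstring's example; the import path is assumed from the quoted module:

from elasticpy.filter import ElasticFilter  # assumed import path

bounds = ElasticFilter().geo_distance('pin.location', [40.73, -74.1], '300km',
                                      distance_type='plane')
print(bounds)
# expected shape: {'geo_distance': {'distance': '300km',
#                                   'pin.location': [40.73, -74.1],
#                                   'distance_type': 'plane'}}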
ray-project/ray | python/ray/experimental/array/distributed/linalg.py | https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/experimental/array/distributed/linalg.py#L91-L125 | def modified_lu(q):
"""Perform a modified LU decomposition of a matrix.
This takes a matrix q with orthonormal columns, returns l, u, s such that
q - s = l * u.
Args:
q: A two dimensional orthonormal matrix q.
Returns:
A tuple of a lower triangular matrix l, an upper triangular matrix u,
and a vector representing a diagonal matrix s such that
q - s = l * u.
"""
q = q.assemble()
m, b = q.shape[0], q.shape[1]
S = np.zeros(b)
q_work = np.copy(q)
for i in range(b):
S[i] = -1 * np.sign(q_work[i, i])
q_work[i, i] -= S[i]
# Scale ith column of L by diagonal element.
q_work[(i + 1):m, i] /= q_work[i, i]
# Perform Schur complement update.
q_work[(i + 1):m, (i + 1):b] -= np.outer(q_work[(i + 1):m, i],
q_work[i, (i + 1):b])
L = np.tril(q_work)
for i in range(b):
L[i, i] = 1
U = np.triu(q_work)[:b, :]
# TODO(rkn): Get rid of the put below.
return ray.get(core.numpy_to_dist.remote(ray.put(L))), U, S | [
"def",
"modified_lu",
"(",
"q",
")",
":",
"q",
"=",
"q",
".",
"assemble",
"(",
")",
"m",
",",
"b",
"=",
"q",
".",
"shape",
"[",
"0",
"]",
",",
"q",
".",
"shape",
"[",
"1",
"]",
"S",
"=",
"np",
".",
"zeros",
"(",
"b",
")",
"q_work",
"=",
"np",
".",
"copy",
"(",
"q",
")",
"for",
"i",
"in",
"range",
"(",
"b",
")",
":",
"S",
"[",
"i",
"]",
"=",
"-",
"1",
"*",
"np",
".",
"sign",
"(",
"q_work",
"[",
"i",
",",
"i",
"]",
")",
"q_work",
"[",
"i",
",",
"i",
"]",
"-=",
"S",
"[",
"i",
"]",
"# Scale ith column of L by diagonal element.",
"q_work",
"[",
"(",
"i",
"+",
"1",
")",
":",
"m",
",",
"i",
"]",
"/=",
"q_work",
"[",
"i",
",",
"i",
"]",
"# Perform Schur complement update.",
"q_work",
"[",
"(",
"i",
"+",
"1",
")",
":",
"m",
",",
"(",
"i",
"+",
"1",
")",
":",
"b",
"]",
"-=",
"np",
".",
"outer",
"(",
"q_work",
"[",
"(",
"i",
"+",
"1",
")",
":",
"m",
",",
"i",
"]",
",",
"q_work",
"[",
"i",
",",
"(",
"i",
"+",
"1",
")",
":",
"b",
"]",
")",
"L",
"=",
"np",
".",
"tril",
"(",
"q_work",
")",
"for",
"i",
"in",
"range",
"(",
"b",
")",
":",
"L",
"[",
"i",
",",
"i",
"]",
"=",
"1",
"U",
"=",
"np",
".",
"triu",
"(",
"q_work",
")",
"[",
":",
"b",
",",
":",
"]",
"# TODO(rkn): Get rid of the put below.",
"return",
"ray",
".",
"get",
"(",
"core",
".",
"numpy_to_dist",
".",
"remote",
"(",
"ray",
".",
"put",
"(",
"L",
")",
")",
")",
",",
"U",
",",
"S"
]
| Perform a modified LU decomposition of a matrix.
This takes a matrix q with orthonormal columns, returns l, u, s such that
q - s = l * u.
Args:
q: A two dimensional orthonormal matrix q.
Returns:
A tuple of a lower triangular matrix l, an upper triangular matrix u,
and a vector representing a diagonal matrix s such that
q - s = l * u. | [
"Perform",
"a",
"modified",
"LU",
"decomposition",
"of",
"a",
"matrix",
"."
]
| python | train |
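The invariant q - s = l * u can be checked with a plain-NumPy copy of the same loop; the Ray assemble/put plumbing is dropped, so this is a sketch rather than the distributed implementation:

import numpy as np

def modified_lu_local(q):
    m, b = q.shape
    S = np.zeros(b)
    q_work = np.copy(q)
    for i in range(b):
        S[i] = -1 * np.sign(q_work[i, i])
        q_work[i, i] -= S[i]                  # shift the diagonal by the sign
        q_work[(i + 1):m, i] /= q_work[i, i]  # scale the ith column of L
        q_work[(i + 1):m, (i + 1):b] -= np.outer(q_work[(i + 1):m, i],
                                                 q_work[i, (i + 1):b])
    L = np.tril(q_work)
    np.fill_diagonal(L, 1)
    return L, np.triu(q_work)[:b, :], S

q, _ = np.linalg.qr(np.random.randn(6, 4))
L, U, S = modified_lu_local(q)
S_full = np.vstack([np.diag(S), np.zeros((2, 4))])  # diag(S) padded to q's shape
assert np.allclose(q - S_full, L.dot(U))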
tornadoweb/tornado | tornado/ioloop.py | https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/tornado/ioloop.py#L879-L887 | def start(self) -> None:
"""Starts the timer."""
# Looking up the IOLoop here allows to first instantiate the
# PeriodicCallback in another thread, then start it using
# IOLoop.add_callback().
self.io_loop = IOLoop.current()
self._running = True
self._next_timeout = self.io_loop.time()
self._schedule_next() | [
"def",
"start",
"(",
"self",
")",
"->",
"None",
":",
"# Looking up the IOLoop here allows to first instantiate the",
"# PeriodicCallback in another thread, then start it using",
"# IOLoop.add_callback().",
"self",
".",
"io_loop",
"=",
"IOLoop",
".",
"current",
"(",
")",
"self",
".",
"_running",
"=",
"True",
"self",
".",
"_next_timeout",
"=",
"self",
".",
"io_loop",
".",
"time",
"(",
")",
"self",
".",
"_schedule_next",
"(",
")"
]
| Starts the timer. | [
"Starts",
"the",
"timer",
"."
]
| python | train |
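A usage sketch of the pattern the comment describes -- construct anywhere, then start on the loop's thread:

from tornado.ioloop import IOLoop, PeriodicCallback

def heartbeat():
    print('tick')

pc = PeriodicCallback(heartbeat, callback_time=1000)  # every 1000 ms
loop = IOLoop.current()
loop.add_callback(pc.start)      # start() resolves IOLoop.current() itself
loop.call_later(3.5, loop.stop)  # run a few ticks, then exit
loop.start()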
limpyd/redis-limpyd | limpyd/fields.py | https://github.com/limpyd/redis-limpyd/blob/3c745dde1390a0bd09690b77a089dcc08c6c7e43/limpyd/fields.py#L975-L983 | def _pushx(self, command, *args, **kwargs):
"""
Helper for lpushx and rpushx, that only indexes the new values if the list
existed when the command was called
"""
result = self._traverse_command(command, *args, **kwargs)
if self.indexable and result:
self.index(args)
return result | [
"def",
"_pushx",
"(",
"self",
",",
"command",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"result",
"=",
"self",
".",
"_traverse_command",
"(",
"command",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"self",
".",
"indexable",
"and",
"result",
":",
"self",
".",
"index",
"(",
"args",
")",
"return",
"result"
]
| Helper for lpushx and rpushx, that only indexes the new values if the list
existed when the command was called | [
"Helper",
"for",
"lpushx",
"and",
"rpushx",
"that",
"only",
"index",
"the",
"new",
"values",
"if",
"the",
"list",
"existed",
"when",
"the",
"command",
"was",
"called"
]
| python | train |
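The result guard mirrors Redis LPUSHX/RPUSHX semantics: the command returns 0 and pushes nothing when the key is absent, so there is nothing to index. A plain redis-py sketch of that behavior (assumes a running local server):

import redis  # assumes redis-py and a local Redis server

r = redis.Redis()
r.delete('mylist')
assert r.rpushx('mylist', 'a') == 0  # key absent: no-op, nothing to index
r.rpush('mylist', 'a')               # creates the list
assert r.rpushx('mylist', 'b') == 2  # key exists: push succeeds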
log2timeline/dfvfs | dfvfs/vfs/tsk_file_system.py | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/vfs/tsk_file_system.py#L147-L162 | def GetFsType(self):
"""Retrieves the file system type.
Returns:
pytsk3.TSK_FS_TYPE_ENUM: file system type.
"""
if self._tsk_fs_type is None:
self._tsk_fs_type = pytsk3.TSK_FS_TYPE_UNSUPP
if (not self._tsk_file_system or
not hasattr(self._tsk_file_system, 'info')):
return self._tsk_fs_type
self._tsk_fs_type = getattr(
self._tsk_file_system.info, 'ftype', pytsk3.TSK_FS_TYPE_UNSUPP)
return self._tsk_fs_type | [
"def",
"GetFsType",
"(",
"self",
")",
":",
"if",
"self",
".",
"_tsk_fs_type",
"is",
"None",
":",
"self",
".",
"_tsk_fs_type",
"=",
"pytsk3",
".",
"TSK_FS_TYPE_UNSUPP",
"if",
"(",
"not",
"self",
".",
"_tsk_file_system",
"or",
"not",
"hasattr",
"(",
"self",
".",
"_tsk_file_system",
",",
"'info'",
")",
")",
":",
"return",
"self",
".",
"_tsk_fs_type",
"self",
".",
"_tsk_fs_type",
"=",
"getattr",
"(",
"self",
".",
"_tsk_file_system",
".",
"info",
",",
"'ftype'",
",",
"pytsk3",
".",
"TSK_FS_TYPE_UNSUPP",
")",
"return",
"self",
".",
"_tsk_fs_type"
]
| Retrieves the file system type.
Returns:
pytsk3.TSK_FS_TYPE_ENUM: file system type. | [
"Retrieves",
"the",
"file",
"system",
"type",
"."
]
| python | train |
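A hedged pytsk3 sketch of the same ftype lookup against a raw disk image; the image path is hypothetical:

import pytsk3  # assumes pytsk3 is installed

img = pytsk3.Img_Info('/tmp/disk.raw')  # hypothetical image path
fs = pytsk3.FS_Info(img)
fs_type = getattr(fs.info, 'ftype', pytsk3.TSK_FS_TYPE_UNSUPP)
print(fs_type == pytsk3.TSK_FS_TYPE_NTFS)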
mistio/mist.client | src/mistclient/model.py | https://github.com/mistio/mist.client/blob/bc190af2cba358fa556a69b205c12a77a34eb2a8/src/mistclient/model.py#L617-L665 | def add_python_plugin(self, name, python_file, value_type="gauge", unit=None):
"""
Add a custom python plugin to the collectd instance of a monitored machine
:param python_file: Path of the python file to be added as custom python plugin
:param name: Name of the plugin
:param value_type: Optional. Can be either "gauge" or "derive"
:param unit: Optional. If given the new plugin will be measured according to this unit
"""
if not os.path.isfile(python_file):
raise Exception(python_file, "is not a file or could not be found in the given path")
with open(python_file) as f:
script = f.read()
payload = {
'plugin_type': 'python',
'name': name,
'unit': unit,
'value_type': value_type,
'read_function': script,
'host': self.info['public_ips'][0]
}
data = json.dumps(payload)
#Plugin id must be in lowercase
plugin_id = name.lower()
#Plugin id must contain only alphanumeric chars
pattern = re.compile('\W')
plugin_id = re.sub(pattern, "_", plugin_id)
#Plugin id should not have double underscores
while "__" in plugin_id:
pattern = "\r?__"
plugin_id = re.sub(pattern, "_", plugin_id)
#Plugin id should not have underscore as first or last char
if plugin_id[-1] == "_":
plugin_id = plugin_id[:-1]
if plugin_id[0] == "_":
plugin_id = plugin_id[1:]
req = self.request(self.mist_client.uri+"/clouds/"+self.cloud.id+"/machines/"+self.id+"/plugins/"+plugin_id,
data=data)
req.post() | [
"def",
"add_python_plugin",
"(",
"self",
",",
"name",
",",
"python_file",
",",
"value_type",
"=",
"\"gauge\"",
",",
"unit",
"=",
"None",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"python_file",
")",
":",
"raise",
"Exception",
"(",
"python_file",
",",
"\"is not a file or could not be found in tho given path\"",
")",
"with",
"open",
"(",
"python_file",
")",
"as",
"f",
":",
"script",
"=",
"f",
".",
"read",
"(",
")",
"payload",
"=",
"{",
"'plugin_type'",
":",
"'python'",
",",
"'name'",
":",
"name",
",",
"'unit'",
":",
"unit",
",",
"'value_type'",
":",
"value_type",
",",
"'read_function'",
":",
"script",
",",
"'host'",
":",
"self",
".",
"info",
"[",
"'public_ips'",
"]",
"[",
"0",
"]",
"}",
"data",
"=",
"json",
".",
"dumps",
"(",
"payload",
")",
"#PLugin id must be in lowercase",
"plugin_id",
"=",
"name",
".",
"lower",
"(",
")",
"#PLugin id must contain only alphanumeric chars",
"pattern",
"=",
"re",
".",
"compile",
"(",
"'\\W'",
")",
"plugin_id",
"=",
"re",
".",
"sub",
"(",
"pattern",
",",
"\"_\"",
",",
"plugin_id",
")",
"#Plugin id should not have double underscores",
"while",
"\"__\"",
"in",
"plugin_id",
":",
"pattern",
"=",
"\"\\r?__\"",
"plugin_id",
"=",
"re",
".",
"sub",
"(",
"pattern",
",",
"\"_\"",
",",
"plugin_id",
")",
"#Plugin id should not have underscore as first or last char",
"if",
"plugin_id",
"[",
"-",
"1",
"]",
"==",
"\"_\"",
":",
"plugin_id",
"=",
"plugin_id",
"[",
":",
"-",
"2",
"]",
"if",
"plugin_id",
"[",
"0",
"]",
"==",
"\"_\"",
":",
"plugin_id",
"=",
"plugin_id",
"[",
"1",
":",
"]",
"req",
"=",
"self",
".",
"request",
"(",
"self",
".",
"mist_client",
".",
"uri",
"+",
"\"/clouds/\"",
"+",
"self",
".",
"cloud",
".",
"id",
"+",
"\"/machines/\"",
"+",
"self",
".",
"id",
"+",
"\"/plugins/\"",
"+",
"plugin_id",
",",
"data",
"=",
"data",
")",
"req",
".",
"post",
"(",
")"
]
| Add a custom python plugin to the collectd instance of a monitored machine
:param python_file: Path of the python file to be added as custom python plugin
:param name: Name of the plugin
:param value_type: Optional. Can be either "gauge" or "derive"
:param unit: Optional. If given the new plugin will be measured according to this unit | [
"Add",
"a",
"custom",
"python",
"plugin",
"to",
"the",
"collectd",
"instance",
"of",
"a",
"monitored",
"plugin"
]
| python | train |
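The id sanitization is the subtle part of the method above; a standalone sketch of the same normalization steps:

import re

def plugin_id_from_name(name):
    # lowercase, swap non-alphanumerics for underscores, collapse doubled
    # underscores, strip edge underscores -- as in add_python_plugin above
    plugin_id = re.sub(r'\W', '_', name.lower())
    while '__' in plugin_id:
        plugin_id = plugin_id.replace('__', '_')
    return plugin_id.strip('_')

print(plugin_id_from_name('My Custom Plugin!'))  # my_custom_plugin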
gitpython-developers/GitPython | git/refs/remote.py | https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/refs/remote.py#L28-L46 | def delete(cls, repo, *refs, **kwargs):
"""Delete the given remote references
:note:
kwargs are given for compatibility with the base class method as we
should not narrow the signature."""
repo.git.branch("-d", "-r", *refs)
# the official deletion method will ignore remote symbolic refs - these
# are generally ignored in the refs/ folder. We don't though
# and delete remainders manually
for ref in refs:
try:
os.remove(osp.join(repo.common_dir, ref.path))
except OSError:
pass
try:
os.remove(osp.join(repo.git_dir, ref.path))
except OSError:
pass | [
"def",
"delete",
"(",
"cls",
",",
"repo",
",",
"*",
"refs",
",",
"*",
"*",
"kwargs",
")",
":",
"repo",
".",
"git",
".",
"branch",
"(",
"\"-d\"",
",",
"\"-r\"",
",",
"*",
"refs",
")",
"# the official deletion method will ignore remote symbolic refs - these",
"# are generally ignored in the refs/ folder. We don't though",
"# and delete remainders manually",
"for",
"ref",
"in",
"refs",
":",
"try",
":",
"os",
".",
"remove",
"(",
"osp",
".",
"join",
"(",
"repo",
".",
"common_dir",
",",
"ref",
".",
"path",
")",
")",
"except",
"OSError",
":",
"pass",
"try",
":",
"os",
".",
"remove",
"(",
"osp",
".",
"join",
"(",
"repo",
".",
"git_dir",
",",
"ref",
".",
"path",
")",
")",
"except",
"OSError",
":",
"pass"
]
| Delete the given remote references
:note:
kwargs are given for compatibility with the base class method as we
should not narrow the signature. | [
"Delete",
"the",
"given",
"remote",
"references"
]
| python | train |
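A usage sketch; the repository path and branch name are hypothetical:

import git  # GitPython, as in the quoted module

repo = git.Repo('/path/to/clone')          # hypothetical local clone
ref = repo.remotes.origin.refs['feature']  # a RemoteReference
git.RemoteReference.delete(repo, ref)      # runs `git branch -d -r`, then
                                           # removes any leftover ref files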
kensho-technologies/graphql-compiler | graphql_compiler/schema_generation/schema_graph.py | https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/schema_generation/schema_graph.py#L297-L311 | def get_default_property_values(self, classname):
"""Return a dict with default values for all properties declared on this class."""
schema_element = self.get_element_by_class_name(classname)
result = {
property_name: property_descriptor.default
for property_name, property_descriptor in six.iteritems(schema_element.properties)
}
if schema_element.is_edge:
# Remove the source/destination properties for edges, if they exist.
result.pop(EDGE_SOURCE_PROPERTY_NAME, None)
result.pop(EDGE_DESTINATION_PROPERTY_NAME, None)
return result | [
"def",
"get_default_property_values",
"(",
"self",
",",
"classname",
")",
":",
"schema_element",
"=",
"self",
".",
"get_element_by_class_name",
"(",
"classname",
")",
"result",
"=",
"{",
"property_name",
":",
"property_descriptor",
".",
"default",
"for",
"property_name",
",",
"property_descriptor",
"in",
"six",
".",
"iteritems",
"(",
"schema_element",
".",
"properties",
")",
"}",
"if",
"schema_element",
".",
"is_edge",
":",
"# Remove the source/destination properties for edges, if they exist.",
"result",
".",
"pop",
"(",
"EDGE_SOURCE_PROPERTY_NAME",
",",
"None",
")",
"result",
".",
"pop",
"(",
"EDGE_DESTINATION_PROPERTY_NAME",
",",
"None",
")",
"return",
"result"
]
| Return a dict with default values for all properties declared on this class. | [
"Return",
"a",
"dict",
"with",
"default",
"values",
"for",
"all",
"properties",
"declared",
"on",
"this",
"class",
"."
]
| python | train |
icemac/toll | src/toll/config.py | https://github.com/icemac/toll/blob/aa25480fcbc2017519516ec1e7fe60d78fb2f30b/src/toll/config.py#L7-L11 | def parsed_file(config_file):
"""Parse an ini-style config file."""
parser = ConfigParser(allow_no_value=True)
parser.readfp(config_file)
return parser | [
"def",
"parsed_file",
"(",
"config_file",
")",
":",
"parser",
"=",
"ConfigParser",
"(",
"allow_no_value",
"=",
"True",
")",
"parser",
".",
"readfp",
"(",
"config_file",
")",
"return",
"parser"
]
| Parse an ini-style config file. | [
"Parse",
"an",
"ini",
"-",
"style",
"config",
"file",
"."
]
| python | train |
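A usage sketch; the import path is assumed, and readfp implies a ConfigParser version that still provides it:

import io
from toll.config import parsed_file  # assumed import path

config = io.StringIO(u'[packages]\nfoo\nbar\n')
parser = parsed_file(config)
print(parser.options('packages'))  # ['foo', 'bar'] -- allow_no_value at work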
cltk/cltk | cltk/corpus/greek/beta_to_unicode.py | https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/corpus/greek/beta_to_unicode.py#L384-L396 | def beta_code(self, text):
"""Replace method. Note: regex.subn() returns a tuple (new_string,
number_of_subs_made).
"""
text = text.upper().replace('-', '')
for (pattern, repl) in self.pattern1:
text = pattern.subn(repl, text)[0]
for (pattern, repl) in self.pattern2:
text = pattern.subn(repl, text)[0]
# remove third run, if punct list not used
for (pattern, repl) in self.pattern3:
text = pattern.subn(repl, text)[0]
return text | [
"def",
"beta_code",
"(",
"self",
",",
"text",
")",
":",
"text",
"=",
"text",
".",
"upper",
"(",
")",
".",
"replace",
"(",
"'-'",
",",
"''",
")",
"for",
"(",
"pattern",
",",
"repl",
")",
"in",
"self",
".",
"pattern1",
":",
"text",
"=",
"pattern",
".",
"subn",
"(",
"repl",
",",
"text",
")",
"[",
"0",
"]",
"for",
"(",
"pattern",
",",
"repl",
")",
"in",
"self",
".",
"pattern2",
":",
"text",
"=",
"pattern",
".",
"subn",
"(",
"repl",
",",
"text",
")",
"[",
"0",
"]",
"# remove third run, if punct list not used",
"for",
"(",
"pattern",
",",
"repl",
")",
"in",
"self",
".",
"pattern3",
":",
"text",
"=",
"pattern",
".",
"subn",
"(",
"repl",
",",
"text",
")",
"[",
"0",
"]",
"return",
"text"
]
| Replace method. Note: regex.subn() returns a tuple (new_string,
number_of_subs_made). | [
"Replace",
"method",
".",
"Note",
":",
"regex",
".",
"subn",
"()",
"returns",
"a",
"tuple",
"(",
"new_string",
"number_of_subs_made",
")",
"."
]
| python | train |
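A usage sketch; the Replacer class name is assumed from the quoted module, and the exact Unicode output depends on the replacement tables:

from cltk.corpus.greek.beta_to_unicode import Replacer  # class name assumed

r = Replacer()
print(r.beta_code('PO/NOS'))  # expected: the Greek word with an acute accent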
ronaldguillen/wave | wave/views.py | https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/views.py#L55-L95 | def exception_handler(exc, context):
"""
Returns the response that should be used for any given exception.
By default we handle the REST framework `APIException`, and also
Django's built-in `Http404` and `PermissionDenied` exceptions.
Any unhandled exceptions may return `None`, which will cause a 500 error
to be raised.
"""
if isinstance(exc, exceptions.APIException):
headers = {}
if getattr(exc, 'auth_header', None):
headers['WWW-Authenticate'] = exc.auth_header
if getattr(exc, 'wait', None):
headers['Retry-After'] = '%d' % exc.wait
if isinstance(exc.detail, (list, dict)):
data = exc.detail
else:
data = {'message': exc.detail}
set_rollback()
return Response(data, status=exc.status_code, headers=headers)
elif isinstance(exc, Http404):
msg = _('Not found.')
data = {'message': six.text_type(msg)}
set_rollback()
return Response(data, status=status.HTTP_404_NOT_FOUND)
elif isinstance(exc, PermissionDenied):
msg = _('Permission denied.')
data = {'message': six.text_type(msg)}
set_rollback()
return Response(data, status=status.HTTP_403_FORBIDDEN)
# Note: Unhandled exceptions will raise a 500 error.
return None | [
"def",
"exception_handler",
"(",
"exc",
",",
"context",
")",
":",
"if",
"isinstance",
"(",
"exc",
",",
"exceptions",
".",
"APIException",
")",
":",
"headers",
"=",
"{",
"}",
"if",
"getattr",
"(",
"exc",
",",
"'auth_header'",
",",
"None",
")",
":",
"headers",
"[",
"'WWW-Authenticate'",
"]",
"=",
"exc",
".",
"auth_header",
"if",
"getattr",
"(",
"exc",
",",
"'wait'",
",",
"None",
")",
":",
"headers",
"[",
"'Retry-After'",
"]",
"=",
"'%d'",
"%",
"exc",
".",
"wait",
"if",
"isinstance",
"(",
"exc",
".",
"detail",
",",
"(",
"list",
",",
"dict",
")",
")",
":",
"data",
"=",
"exc",
".",
"detail",
"else",
":",
"data",
"=",
"{",
"'message'",
":",
"exc",
".",
"detail",
"}",
"set_rollback",
"(",
")",
"return",
"Response",
"(",
"data",
",",
"status",
"=",
"exc",
".",
"status_code",
",",
"headers",
"=",
"headers",
")",
"elif",
"isinstance",
"(",
"exc",
",",
"Http404",
")",
":",
"msg",
"=",
"_",
"(",
"'Not found.'",
")",
"data",
"=",
"{",
"'message'",
":",
"six",
".",
"text_type",
"(",
"msg",
")",
"}",
"set_rollback",
"(",
")",
"return",
"Response",
"(",
"data",
",",
"status",
"=",
"status",
".",
"HTTP_404_NOT_FOUND",
")",
"elif",
"isinstance",
"(",
"exc",
",",
"PermissionDenied",
")",
":",
"msg",
"=",
"_",
"(",
"'Permission denied.'",
")",
"data",
"=",
"{",
"'message'",
":",
"six",
".",
"text_type",
"(",
"msg",
")",
"}",
"set_rollback",
"(",
")",
"return",
"Response",
"(",
"data",
",",
"status",
"=",
"status",
".",
"HTTP_403_FORBIDDEN",
")",
"# Note: Unhandled exceptions will raise a 500 error.",
"return",
"None"
]
| Returns the response that should be used for any given exception.
By default we handle the REST framework `APIException`, and also
Django's built-in `Http404` and `PermissionDenied` exceptions.
Any unhandled exceptions may return `None`, which will cause a 500 error
to be raised. | [
"Returns",
"the",
"response",
"that",
"should",
"be",
"used",
"for",
"any",
"given",
"exception",
"."
]
| python | train |
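The handler follows the DRF custom-exception-handler convention, so wiring it up is a one-line settings entry; the module path is taken from the quoted file:

# settings.py
REST_FRAMEWORK = {
    'EXCEPTION_HANDLER': 'wave.views.exception_handler',
}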
KeithSSmith/switcheo-python | switcheo/ethereum/signatures.py | https://github.com/KeithSSmith/switcheo-python/blob/22f943dea1ad7d692b2bfcd9f0822ec80f4641a6/switcheo/ethereum/signatures.py#L19-L47 | def sign_create_cancellation(cancellation_params, private_key):
"""
Function to sign the parameters required to create a cancellation request from the Switcheo Exchange.
Execution of this function is as follows::
sign_create_cancellation(cancellation_params=signable_params, private_key=eth_private_key)
The expected return result for this function is as follows::
{
'order_id': '3125550a-04f9-4475-808b-42b5f89d6693',
'timestamp': 1542088842108,
'address': '0x32c46323b51c977814e05ef5e258ee4da0e4c3c3',
'signature': 'dac70ca711bcfbeefbdead2158ef8b15fab1a1....'
}
:param cancellation_params: Dictionary with Order ID and timestamp to sign for creating the cancellation.
:type cancellation_params: dict
:param private_key: The Ethereum private key to sign the deposit parameters.
:type private_key: str
:return: Dictionary of signed message to send to the Switcheo API.
"""
hash_message = defunct_hash_message(text=stringify_message(cancellation_params))
hex_message = binascii.hexlify(hash_message).decode()
signed_message = binascii.hexlify(Account.signHash(hex_message, private_key=private_key)['signature']).decode()
create_params = cancellation_params.copy()
create_params['address'] = to_normalized_address(Account.privateKeyToAccount(private_key=private_key).address)
create_params['signature'] = signed_message
return create_params | [
"def",
"sign_create_cancellation",
"(",
"cancellation_params",
",",
"private_key",
")",
":",
"hash_message",
"=",
"defunct_hash_message",
"(",
"text",
"=",
"stringify_message",
"(",
"cancellation_params",
")",
")",
"hex_message",
"=",
"binascii",
".",
"hexlify",
"(",
"hash_message",
")",
".",
"decode",
"(",
")",
"signed_message",
"=",
"binascii",
".",
"hexlify",
"(",
"Account",
".",
"signHash",
"(",
"hex_message",
",",
"private_key",
"=",
"private_key",
")",
"[",
"'signature'",
"]",
")",
".",
"decode",
"(",
")",
"create_params",
"=",
"cancellation_params",
".",
"copy",
"(",
")",
"create_params",
"[",
"'address'",
"]",
"=",
"to_normalized_address",
"(",
"Account",
".",
"privateKeyToAccount",
"(",
"private_key",
"=",
"private_key",
")",
".",
"address",
")",
"create_params",
"[",
"'signature'",
"]",
"=",
"signed_message",
"return",
"create_params"
]
| Function to sign the parameters required to create a cancellation request from the Switcheo Exchange.
Execution of this function is as follows::
sign_create_cancellation(cancellation_params=signable_params, private_key=eth_private_key)
The expected return result for this function is as follows::
{
'order_id': '3125550a-04f9-4475-808b-42b5f89d6693',
'timestamp': 1542088842108,
'address': '0x32c46323b51c977814e05ef5e258ee4da0e4c3c3',
'signature': 'dac70ca711bcfbeefbdead2158ef8b15fab1a1....'
}
:param cancellation_params: Dictionary with Order ID and timestamp to sign for creating the cancellation.
:type cancellation_params: dict
:param private_key: The Ethereum private key to sign the deposit parameters.
:type private_key: str
:return: Dictionary of signed message to send to the Switcheo API. | [
"Function",
"to",
"sign",
"the",
"parameters",
"required",
"to",
"create",
"a",
"cancellation",
"request",
"from",
"the",
"Switcheo",
"Exchange",
".",
"Execution",
"of",
"this",
"function",
"is",
"as",
"follows",
"::"
]
| python | train |
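A usage sketch matching the docstring example; the import path is taken from the quoted module and the private key is a throwaway illustrative value:

from switcheo.ethereum.signatures import sign_create_cancellation

eth_private_key = '0x' + '11' * 32  # throwaway example key, never reuse
signable_params = {
    'order_id': '3125550a-04f9-4475-808b-42b5f89d6693',
    'timestamp': 1542088842108,
}
signed = sign_create_cancellation(cancellation_params=signable_params,
                                  private_key=eth_private_key)
print(signed['address'], signed['signature'][:16] + '...')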
Kortemme-Lab/klab | klab/chainsequence.py | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/chainsequence.py#L57-L81 | def parse_atoms(self, pdb):
"""Parse the ATOM entries into the object"""
atomre = re.compile("ATOM")
atomlines = [line for line in pdb.lines if atomre.match(line)]
chainresnums = {}
for line in atomlines:
chain = line[21]
resname = line[17:20]
resnum = line[22:27]
#print resnum
chainresnums.setdefault(chain, [])
if resnum in chainresnums[chain]:
assert self[chain][chainresnums[chain].index(resnum)] == resname
else:
if resnum[-1] == ' ':
self.setdefault(chain, [])
self[chain] += [resname]
chainresnums[chain] += [resnum]
return chainresnums | [
"def",
"parse_atoms",
"(",
"self",
",",
"pdb",
")",
":",
"atomre",
"=",
"re",
".",
"compile",
"(",
"\"ATOM\"",
")",
"atomlines",
"=",
"[",
"line",
"for",
"line",
"in",
"pdb",
".",
"lines",
"if",
"atomre",
".",
"match",
"(",
"line",
")",
"]",
"chainresnums",
"=",
"{",
"}",
"for",
"line",
"in",
"atomlines",
":",
"chain",
"=",
"line",
"[",
"21",
"]",
"resname",
"=",
"line",
"[",
"17",
":",
"20",
"]",
"resnum",
"=",
"line",
"[",
"22",
":",
"27",
"]",
"#print resnum",
"chainresnums",
".",
"setdefault",
"(",
"chain",
",",
"[",
"]",
")",
"if",
"resnum",
"in",
"chainresnums",
"[",
"chain",
"]",
":",
"assert",
"self",
"[",
"chain",
"]",
"[",
"chainresnums",
"[",
"chain",
"]",
".",
"index",
"(",
"resnum",
")",
"]",
"==",
"resname",
"else",
":",
"if",
"resnum",
"[",
"-",
"1",
"]",
"==",
"' '",
":",
"self",
".",
"setdefault",
"(",
"chain",
",",
"[",
"]",
")",
"self",
"[",
"chain",
"]",
"+=",
"[",
"resname",
"]",
"chainresnums",
"[",
"chain",
"]",
"+=",
"[",
"resnum",
"]",
"return",
"chainresnums"
]
| Parse the ATOM entries into the object | [
"Parse",
"the",
"ATOM",
"entries",
"into",
"the",
"object"
]
| python | train |
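The fixed-column slices used above, shown against a single PDB ATOM record:

line = ('ATOM      1  N   MET A   1      '
        '27.340  24.430   2.614  1.00  9.67           N')
chain = line[21]       # 'A'
resname = line[17:20]  # 'MET'
resnum = line[22:27]   # '   1 ' (residue number plus insertion-code column)
print(repr(chain), repr(resname), repr(resnum))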
pyca/pyopenssl | src/OpenSSL/crypto.py | https://github.com/pyca/pyopenssl/blob/1fbe064c50fd030948141d7d630673761525b0d0/src/OpenSSL/crypto.py#L490-L505 | def get_elliptic_curve(name):
"""
Return a single curve object selected by name.
See :py:func:`get_elliptic_curves` for information about curve objects.
:param name: The OpenSSL short name identifying the curve object to
retrieve.
:type name: :py:class:`unicode`
If the named curve is not supported then :py:class:`ValueError` is raised.
"""
for curve in get_elliptic_curves():
if curve.name == name:
return curve
raise ValueError("unknown curve name", name) | [
"def",
"get_elliptic_curve",
"(",
"name",
")",
":",
"for",
"curve",
"in",
"get_elliptic_curves",
"(",
")",
":",
"if",
"curve",
".",
"name",
"==",
"name",
":",
"return",
"curve",
"raise",
"ValueError",
"(",
"\"unknown curve name\"",
",",
"name",
")"
]
| Return a single curve object selected by name.
See :py:func:`get_elliptic_curves` for information about curve objects.
:param name: The OpenSSL short name identifying the curve object to
retrieve.
:type name: :py:class:`unicode`
If the named curve is not supported then :py:class:`ValueError` is raised. | [
"Return",
"a",
"single",
"curve",
"object",
"selected",
"by",
"name",
"."
]
| python | test |
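A usage sketch; 'prime256v1' is the OpenSSL short name for NIST P-256:

from OpenSSL.crypto import get_elliptic_curve

curve = get_elliptic_curve(u'prime256v1')
print(curve.name)  # prime256v1
# an unsupported name raises ValueError('unknown curve name', name)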
Duke-GCB/lando-messaging | lando_messaging/workqueue.py | https://github.com/Duke-GCB/lando-messaging/blob/b90ccc79a874714e0776af8badf505bb2b56c0ec/lando_messaging/workqueue.py#L242-L249 | def shutdown(self, payload=None):
"""
Close the connection/shutdown the messaging loop.
:param payload: None: not used. Here to allow using this method with add_command.
"""
logging.info("Work queue shutdown.")
self.connection.close()
self.receiving_messages = False | [
"def",
"shutdown",
"(",
"self",
",",
"payload",
"=",
"None",
")",
":",
"logging",
".",
"info",
"(",
"\"Work queue shutdown.\"",
")",
"self",
".",
"connection",
".",
"close",
"(",
")",
"self",
".",
"receiving_messages",
"=",
"False"
]
| Close the connection/shutdown the messaging loop.
:param payload: None: not used. Here to allow using this method with add_command. | [
"Close",
"the",
"connection",
"/",
"shutdown",
"the",
"messaging",
"loop",
".",
":",
"param",
"payload",
":",
"None",
":",
"not",
"used",
".",
"Here",
"to",
"allow",
"using",
"this",
"method",
"with",
"add_command",
"."
]
| python | train |